-
Notifications
You must be signed in to change notification settings - Fork 17
Expand file tree
/
Copy pathcall_ai.py
More file actions
315 lines (270 loc) · 11.4 KB
/
call_ai.py
File metadata and controls
315 lines (270 loc) · 11.4 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
import concurrent.futures
import datetime
import json
import os
import time
from typing import Dict, List, Optional

import requests
from colorama import Fore, Style
def _safe_response_json(response) -> Optional[Dict]:
    """Best-effort decode of an HTTP response body as JSON.

    Returns None when *response* is None (the request never completed) or
    when the body is not valid JSON, instead of raising the way a bare
    ``response.json()`` call would.
    """
    if response is None:
        return None
    try:
        return response.json()
    except ValueError:
        return None


def _log_api_error(error_log: Dict) -> None:
    """Write one API-error record to a timestamped JSON file.

    Files land under ``api_error_logs/`` so failed requests can be inspected
    later; ``error_log`` must already contain a ``timestamp`` key.
    ``default=str`` keeps logging from crashing on non-serializable values.
    """
    os.makedirs('api_error_logs', exist_ok=True)
    log_file = f"api_error_logs/error_{error_log['timestamp']}.json"
    with open(log_file, 'w') as f:
        json.dump(error_log, f, indent=2, default=str)


def send_message_to_api(
    task: str,
    messages: List[Dict],
    api_key: str,
    tools: List[Dict],
    model: str = "gpt-4o-mini",
    temperature: float = 0.7,
    top_p: float = 1.0,
    max_tokens: int = 500,
    api_url: str = "https://openrouter.ai/api/v1/chat/completions",
    verbose: bool = False,
    is_first_step: bool = False,
    tool_choice: Optional[str] = None,
) -> Dict:
    """
    Send a chat-completion request to the OpenRouter API and return the
    assistant's message (``choices[0].message`` from the response).

    Retries up to 3 times with exponential backoff (1s, 2s, 4s) between
    attempts. Every failed attempt is logged to ``api_error_logs/``.

    Args:
        task: Unused here; kept for interface compatibility with callers.
        messages: Chat history in OpenAI message format.
        api_key: Bearer token for the API.
        tools: Tool/function definitions; sent as ``null`` when empty.
        model, temperature, top_p, max_tokens: Sampling parameters.
        api_url: Endpoint to POST to.
        verbose: Print extra diagnostics (banner on first step, status codes).
        is_first_step: Whether this is the first call of a run (banner only).
        tool_choice: Optional tool-choice directive; omitted when falsy.

    Returns:
        The assistant message dict from the first choice.

    Raises:
        Exception: After all retries are exhausted.
    """
    if verbose and is_first_step:
        print(
            f"\n{Fore.CYAN}╭──────────────────────────────────────────{Style.RESET_ALL}"
        )
        print(f"{Fore.CYAN}│ Sending Request to API{Style.RESET_ALL}")
        print(
            f"{Fore.CYAN}├──────────────────────────────────────────{Style.RESET_ALL}"
        )
        print(f"{Fore.CYAN}│ Model: {Style.RESET_ALL}{model}")
        print(f"{Fore.CYAN}│ URL: {Style.RESET_ALL}{api_url}")
        print(f"{Fore.CYAN}│ Temperature: {Style.RESET_ALL}{temperature}")
        print(
            f"{Fore.CYAN}╰──────────────────────────────────────────{Style.RESET_ALL}\n"
        )

    max_retries = 3
    delay = 1  # base delay in seconds; doubled on every retry

    request_data = {
        'model': model,
        'messages': messages,
        'tools': tools if tools else None,
        'max_tokens': max_tokens,
        'temperature': temperature,
        'top_p': top_p,
    }
    if tool_choice:
        request_data['tool_choice'] = tool_choice

    retries = 0
    while retries <= max_retries:
        # Reset per attempt so the except block can tell whether a
        # response ever arrived (requests.post may raise before assigning).
        response = None
        try:
            print(
                f"\n{Fore.BLUE}Making API Request (Attempt {retries + 1}/{max_retries + 1})...{Style.RESET_ALL}"
            )
            response = requests.post(
                api_url,
                headers={
                    "Authorization": f"Bearer {api_key}",
                    "Content-Type": "application/json",
                },
                json=request_data,
                timeout=60
            )
            # Decode once, safely: a non-JSON error page (e.g. an HTML 502)
            # must not crash before we get to proper error handling.
            response_body = _safe_response_json(response)
            print(f"{Fore.GREEN}Response received:{Style.RESET_ALL}")
            print(
                f"{Fore.YELLOW}{response_body if response_body is not None else response.text}{Style.RESET_ALL}"
            )
            if verbose:
                print(
                    f"{Fore.YELLOW}Response status: {response.status_code}{Style.RESET_ALL}"
                )
            if response.status_code != 200:
                timestamp = datetime.datetime.now().strftime('%Y%m%d_%H%M%S')
                _log_api_error({
                    'timestamp': timestamp,
                    'status_code': response.status_code,
                    'error_message': response.text,
                    'response_json': response_body,
                    'request_url': api_url,
                    'request_data': request_data,
                    'retry_attempt': retries + 1
                })
                raise Exception(
                    f"API request failed with status {response.status_code}: {response.text}"
                )
            print(f"{Fore.GREEN}Successfully parsed response data{Style.RESET_ALL}")
            return response_body["choices"][0]["message"]
        except Exception as error:
            print(
                f"{Fore.RED}Error occurred during API call (Attempt {retries + 1})!{Style.RESET_ALL}"
            )
            print(f"{Fore.RED}{str(error)}{Style.RESET_ALL}")
            timestamp = datetime.datetime.now().strftime('%Y%m%d_%H%M%S')
            _log_api_error({
                'timestamp': timestamp,
                'error_type': type(error).__name__,
                'error_message': str(error),
                'request_url': api_url,
                # NOTE: the previous version called response.json() here
                # unconditionally, raising NameError when the request itself
                # failed; the safe helper records None instead.
                'response_json': _safe_response_json(response),
                'request_data': request_data,
                'retry_attempt': retries + 1
            })
            if retries == max_retries:
                raise Exception(
                    f"Error sending message to API after {max_retries + 1} attempts: {str(error)}"
                ) from error
            wait_time = delay * (2 ** retries)  # exponential backoff: 1s, 2s, 4s
            print(
                f"{Fore.YELLOW}Waiting {wait_time} seconds before retrying...{Style.RESET_ALL}"
            )
            time.sleep(wait_time)
            retries += 1
def generate_multiple_candidates(
    task: str,
    messages: List[Dict],
    api_key: str,
    tools: List[Dict],
    num_candidates: int = 3,
    model: str = "gpt-4o-mini",
    temperature: float = 0.7,
    top_p: float = 1.0,
    max_tokens: int = 500,
    api_url: str = "https://openrouter.ai/api/v1/chat/completions",
    verbose: bool = False,
    is_first_step: bool = False,
) -> List[Dict]:
    """
    Fan out ``num_candidates`` identical API requests on a thread pool and
    collect the replies as they finish.

    Candidates that fail are logged and skipped, so the returned list may
    contain fewer than ``num_candidates`` entries. Completion order is not
    deterministic.
    """
    print(
        f"\n{Fore.MAGENTA}╭──────────────────────────────────────────{Style.RESET_ALL}"
    )
    print(f"{Fore.MAGENTA}│ Generating {num_candidates} Candidates{Style.RESET_ALL}")
    print(f"{Fore.MAGENTA}╰──────────────────────────────────────────{Style.RESET_ALL}")

    def _request_one_candidate():
        # Each candidate uses identical parameters; diversity comes purely
        # from sampling (temperature / top_p).
        return send_message_to_api(
            task=task,
            messages=messages,
            api_key=api_key,
            tools=tools,
            model=model,
            temperature=temperature,
            top_p=top_p,
            max_tokens=max_tokens,
            api_url=api_url,
            verbose=verbose,
            is_first_step=is_first_step,
        )

    collected: List[Dict] = []
    with concurrent.futures.ThreadPoolExecutor(max_workers=num_candidates) as pool:
        print(f"{Fore.CYAN}Starting parallel candidate generation...{Style.RESET_ALL}")
        pending = [pool.submit(_request_one_candidate) for _ in range(num_candidates)]
        for finished in concurrent.futures.as_completed(pending):
            try:
                collected.append(finished.result())
            except Exception as e:
                # One bad candidate should not sink the batch.
                print(
                    f"{Fore.RED}Error generating candidate: {str(e)}{Style.RESET_ALL}"
                )
            else:
                print(
                    f"{Fore.GREEN}Successfully generated candidate {len(collected)}/{num_candidates}{Style.RESET_ALL}"
                )
    print(
        f"{Fore.GREEN}Generated {len(collected)} candidates successfully{Style.RESET_ALL}"
    )
    return collected
def generate_best_candidate(
    task: str,
    messages: List[Dict],
    api_key: str,
    tools: List[Dict],
    num_candidates: int = 3,
    model: str = "gpt-4o-mini",
    temperature: float = 0.7,
    top_p: float = 1.0,
    max_tokens: int = 500,
    api_url: str = "https://openrouter.ai/api/v1/chat/completions",
    verbose: bool = False,
    is_first_step: bool = False,
) -> Dict:
    """
    Generate several candidate responses in parallel, then ask the model to
    judge them and return the winning candidate.

    Raises:
        ValueError: If no candidates were generated, or the judge's reply
            cannot be parsed into a valid candidate number.
    """
    print(f"\n{Fore.CYAN}╭──────────────────────────────────────────{Style.RESET_ALL}")
    print(f"{Fore.CYAN}│ Starting Best Candidate Selection{Style.RESET_ALL}")
    print(f"{Fore.CYAN}╰──────────────────────────────────────────{Style.RESET_ALL}")
    candidates = generate_multiple_candidates(
        task,
        messages,
        api_key,
        tools,
        num_candidates,
        model,
        temperature,
        top_p,
        max_tokens,
        api_url,
        verbose,
        is_first_step,
    )
    # Every request may have failed; fail loudly rather than IndexError later.
    if not candidates:
        raise ValueError("No candidates were generated; cannot select a best one.")
    print(f"\n{Fore.YELLOW}Generated Candidates:{Style.RESET_ALL}")
    print(f"{Fore.YELLOW}{candidates}{Style.RESET_ALL}")
    print(f"\n{Fore.MAGENTA}Preparing evaluation prompt...{Style.RESET_ALL}")
    # Build the numbered candidate listing once; previously the listing was
    # interpolated into itself, duplicating all candidates in the prompt.
    candidates_text = "".join(
        f"Candidate {i}:\n{candidate}\n\n"
        for i, candidate in enumerate(candidates, start=1)
    )
    SYSTEM_PROMPT = """You are a judge tasked with evaluating the viability of multiple candidate responses to a given task. Your goal is to identify the candidate that is most likely to lead to solving the task properly.
You will be given a <task> which describes the task at hand, a <previous_thoughts> section which contains the thoughts of the assistant before receiving the candidate responses, and a <next_thought_candidates> section which contains the candidate responses to be evaluated.
Evaluate the viability of each candidate response and output the number of the candidate that is most likely to lead to solving the task properly.
Do so in the following format:
<thinking>
Think through the viability of each candidate here.
</thinking>
<best_candidate_number>
Number of the best candidate
</best_candidate_number>
"""
    # Use len(candidates), not num_candidates: some requests may have failed,
    # and the judge must only pick from candidates that actually exist.
    evaluation_prompt = f"""<task>{task}</task>
<previous_thoughts>
{messages}
</previous_thoughts>
<next_thought_candidates>
{candidates_text}
</next_thought_candidates>
Think it through inside the <thinking> section, and then output the number of the candidate that is most likely to lead to solving the <task> properly in the <best_candidate_number> section. In the <best_candidate_number> section, only output the number, nothing else. Possible numbers are: {', '.join(str(i) for i in range(1, len(candidates) + 1))}"""
    print(f"\n{Fore.BLUE}Sending evaluation request to API...{Style.RESET_ALL}")
    best_candidate_response = send_message_to_api(
        task="",
        messages=[
            {"role": "system", "content": SYSTEM_PROMPT},
            {"role": "user", "content": evaluation_prompt},
        ],
        api_key=api_key,
        tools=tools,
    )
    # Parse the judge's pick out of the <best_candidate_number> tags.
    content = best_candidate_response["content"]
    try:
        best_candidate_number = int(
            content
            .split("<best_candidate_number>")[1]
            .split("</best_candidate_number>")[0]
            .strip()
        )
    except (IndexError, ValueError) as err:
        raise ValueError(
            f"Could not parse best candidate number from judge response: {content!r}"
        ) from err
    if not 1 <= best_candidate_number <= len(candidates):
        raise ValueError(
            f"Judge picked candidate {best_candidate_number}, "
            f"but only {len(candidates)} candidates exist."
        )
    print(f"\n{Fore.GREEN}Selected best candidate:{Style.RESET_ALL}")
    print(f"{Fore.GREEN}{best_candidate_number}{Style.RESET_ALL}")
    # Candidate numbering in the prompt is 1-based; the list is 0-based.
    return candidates[best_candidate_number - 1]