-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathmulti_provider.py
More file actions
38 lines (31 loc) · 1.11 KB
/
multi_provider.py
File metadata and controls
38 lines (31 loc) · 1.11 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
"""Using Veil with different LLM providers — same code, swap one header."""
import os
from openai import OpenAI
# Veil proxy endpoint: all provider traffic is routed through this base URL.
VEIL_BASE = "https://veil-api.com/v1"

# Fail fast with an actionable message rather than a bare KeyError traceback.
VEIL_KEY = os.environ.get("VEIL_API_KEY")
if not VEIL_KEY:
    raise SystemExit("VEIL_API_KEY environment variable is not set")

# Prompt deliberately contains PII (name, email, order id) to exercise Veil's redaction.
prompt = "Summarize: Customer John Smith (john@test.com) wants a refund for order #12345."

# (provider name, upstream API key, model id) — providers without a key are skipped below.
providers = [
    ("openai", os.environ.get("OPENAI_API_KEY", ""), "gpt-4o-mini"),
    ("together", os.environ.get("TOGETHER_API_KEY", ""), "meta-llama/Llama-3-8b-chat-hf"),
    ("groq", os.environ.get("GROQ_API_KEY", ""), "llama-3.1-8b-instant"),
]
# Fan out the same prompt to each configured provider via the Veil proxy.
# The OpenAI SDK is reused for all providers; only the headers change:
#   Authorization      -> Veil's own key (the proxy authenticates us)
#   x-upstream-key     -> the real provider API key, forwarded by Veil
#   x-upstream-provider-> which upstream Veil should route to
for provider, key, model in providers:
    if not key:
        print(f"Skipping {provider} (no API key)")
        continue
    client = OpenAI(
        api_key=key,
        base_url=VEIL_BASE,
        default_headers={
            "Authorization": f"Bearer {VEIL_KEY}",
            "x-upstream-key": key,
            "x-upstream-provider": provider,
        },
    )
    # Isolate failures per provider: a bad key, rate limit, or outage on one
    # upstream must not abort the remaining providers in the demo.
    try:
        response = client.chat.completions.create(
            model=model,
            messages=[{"role": "user", "content": prompt}],
        )
    except Exception as exc:  # noqa: BLE001 — top-level demo boundary; report and move on
        print(f"[{provider} / {model}] FAILED: {exc}")
        print()
        continue
    content = response.choices[0].message.content or ""
    print(f"[{provider} / {model}]")
    print(content[:150])  # truncate so multi-provider output stays scannable
    print()