-
Notifications
You must be signed in to change notification settings - Fork 1
Expand file tree
/
Copy pathconfig.example.yaml
More file actions
90 lines (84 loc) · 1.71 KB
/
config.example.yaml
File metadata and controls
90 lines (84 loc) · 1.71 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
---
# Example gateway configuration.
# NOTE(review): the original file's indentation was lost in extraction; the
# nesting below is reconstructed from the key semantics — confirm against
# the upstream config.example.yaml before relying on exact structure.

# HTTP server binding and optional TLS termination.
server:
  host: "0.0.0.0"  # quoted: keeps the address a string under all parsers
  port: 8090
  https:
    enabled: false
    cert_file: ""
    key_file: ""

# Admin UI credentials.
admin:
  username: admin
  # password_hash and session_secret are auto-generated on first run

# Upstream LLM providers. Only gemini is enabled in this example;
# the commented-out stanzas show the supported provider types.
providers:
  gemini:
    type: gemini
    api_key: ""
    default_model: gemini-2.5-flash
    allowed_models:
      - gemini-2.5-flash
      - gemini-2.5-pro
      - gemini-2.0-flash
    timeout_seconds: 120

  # Uncomment and configure providers as needed:
  #
  # openai:
  #   type: openai
  #   api_key: ""
  #   default_model: gpt-4o
  #   timeout_seconds: 120
  #
  # anthropic:
  #   type: anthropic
  #   api_key: ""
  #   default_model: claude-sonnet-4-20250514
  #   timeout_seconds: 120
  #
  # mistral:
  #   type: mistral
  #   api_key: ""
  #   default_model: mistral-large-latest
  #
  # perplexity:
  #   type: perplexity
  #   api_key: ""
  #   default_model: sonar-pro
  #
  # xai:
  #   type: xai
  #   api_key: ""
  #   default_model: grok-3
  #
  # cohere:
  #   type: cohere
  #   api_key: ""
  #   default_model: command-r-plus
  #
  # azure:
  #   type: azure-openai
  #   api_key: ""
  #   base_url: https://YOUR_RESOURCE.openai.azure.com
  #   default_model: gpt-4o
  #
  # ollama:
  #   type: ollama
  #   base_url: http://localhost:11434/v1
  #   default_model: llama3.2
  #
  # lmstudio:
  #   type: lmstudio
  #   base_url: http://localhost:1234/v1

# Per-key defaults applied when a key has no explicit limits.
defaults:
  rate_limit:
    requests_per_minute: 60
    requests_per_hour: 1000
    requests_per_day: 10000
  quota:
    max_input_tokens_per_day: 1000000
    max_output_tokens_per_day: 500000
    max_requests_per_day: 1000
  # Per-request token caps.
  # NOTE(review): these may belong under `quota` in the original — confirm.
  max_input_tokens: 1000000
  max_output_tokens: 8192

# SQLite storage location.
database:
  path: ./data/gateway.db

# Application logging.
logging:
  level: info
  file: ./logs/gateway.log