# Application Settings
DEBUG=True
APP_ENV=development
# docker build args
NEXT_PUBLIC_API_URL="http://localhost:8000/api"
NEXT_PUBLIC_DEBUG_STREAM=false
AGENT_RECURSION_LIMIT=30
# CORS settings
# Comma-separated list of allowed origins for CORS requests
# Example: ALLOWED_ORIGINS=http://localhost:3000,http://example.com
ALLOWED_ORIGINS=http://localhost:3000
# Enable or disable MCP server configuration. Default: false.
# Only enable this feature after securing your front-end and back-end in a managed environment.
# Otherwise, your system could be compromised.
ENABLE_MCP_SERVER_CONFIGURATION=false
# Enable or disable the Python REPL tool. Default: false.
# Only enable this feature after securing your deployment in a managed environment.
# Otherwise, your system could be compromised.
ENABLE_PYTHON_REPL=false
# Search Engine, Supported values: tavily (recommended), duckduckgo, brave_search, arxiv, searx
SEARCH_API=tavily
TAVILY_API_KEY=tvly-xxx
# SEARX_HOST=xxx # Required only if SEARCH_API is searx (compatible with both Searx and SearxNG)
# BRAVE_SEARCH_API_KEY=xxx # Required only if SEARCH_API is brave_search
# JINA_API_KEY=jina_xxx # Optional, default is None
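# Example: switching engines is a one-line change (assumption: duckduckgo needs
# no API key in this setup; verify against your search tool's documentation)
# SEARCH_API=duckduckgo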
# Optional, RAG provider
# RAG_PROVIDER=vikingdb_knowledge_base
# VIKINGDB_KNOWLEDGE_BASE_API_URL="api-knowledgebase.mlp.cn-beijing.volces.com"
# VIKINGDB_KNOWLEDGE_BASE_API_AK="AKxxx"
# VIKINGDB_KNOWLEDGE_BASE_API_SK=""
# VIKINGDB_KNOWLEDGE_BASE_RETRIEVAL_SIZE=15
# RAG_PROVIDER=ragflow
# RAGFLOW_API_URL="http://localhost:9388"
# RAGFLOW_API_KEY="ragflow-xxx"
# RAGFLOW_RETRIEVAL_SIZE=10
# RAGFLOW_CROSS_LANGUAGES=English,Chinese,Spanish,French,German,Japanese,Korean # Optional. For RAGFlow's cross-language search, separate languages with commas
# RAG_PROVIDER=dify
# DIFY_API_URL="https://api.dify.ai/v1"
# DIFY_API_KEY="dataset-xxx"
# MOI is a hybrid database that mainly serves enterprise users (https://www.matrixorigin.io/matrixone-intelligence)
# RAG_PROVIDER=moi
# MOI_API_URL="https://cluster.matrixonecloud.cn"
# MOI_API_KEY="xxx-xxx-xxx-xxx"
# MOI_RETRIEVAL_SIZE=10
# MOI_LIST_LIMIT=10
# RAG_PROVIDER: milvus (using a free Milvus instance on Zilliz Cloud: https://docs.zilliz.com/docs/quick-start)
# RAG_PROVIDER=milvus
# MILVUS_URI=<endpoint_of_self_hosted_milvus_or_zilliz_cloud>
# MILVUS_USER=<username_of_self_hosted_milvus_or_zilliz_cloud>
# MILVUS_PASSWORD=<password_of_self_hosted_milvus_or_zilliz_cloud>
# MILVUS_COLLECTION=documents
# MILVUS_EMBEDDING_PROVIDER=openai # Supported: openai, dashscope
# MILVUS_EMBEDDING_BASE_URL=
# MILVUS_EMBEDDING_MODEL=
# MILVUS_EMBEDDING_API_KEY=
# MILVUS_AUTO_LOAD_EXAMPLES=true
# RAG_PROVIDER: milvus (using Milvus Lite on macOS or Linux)
# RAG_PROVIDER=milvus
# MILVUS_URI=./milvus_demo.db
# MILVUS_COLLECTION=documents
# MILVUS_EMBEDDING_PROVIDER=openai # Supported: openai, dashscope
# MILVUS_EMBEDDING_BASE_URL=
# MILVUS_EMBEDDING_MODEL=
# MILVUS_EMBEDDING_API_KEY=
# MILVUS_AUTO_LOAD_EXAMPLES=true
# Optional, volcengine TTS for generating podcasts
VOLCENGINE_TTS_APPID=xxx
VOLCENGINE_TTS_ACCESS_TOKEN=xxx
# VOLCENGINE_TTS_CLUSTER=volcano_tts # Optional, default is volcano_tts
# VOLCENGINE_TTS_VOICE_TYPE=BV700_V2_streaming # Optional, default is BV700_V2_streaming
# Optional, for LangSmith tracing and monitoring
# LANGSMITH_TRACING=true
# LANGSMITH_ENDPOINT="https://api.smith.langchain.com"
# LANGSMITH_API_KEY="xxx"
# LANGSMITH_PROJECT="xxx"
# [!NOTE]
# For model settings and other configurations, please refer to `docs/configuration_guide.md`
# Optional, for the LangGraph checkpointer
# Enable the LangGraph checkpoint saver; supports MongoDB and Postgres
#LANGGRAPH_CHECKPOINT_SAVER=true
# Set the database URL for saving checkpoints
#LANGGRAPH_CHECKPOINT_DB_URL=mongodb://localhost:27017/
#LANGGRAPH_CHECKPOINT_DB_URL=postgresql://localhost:5432/postgres
# When using the local dockerized Postgres service (`checkpoint-db`)
#POSTGRES_DB=checkpointing_db
#POSTGRES_USER=postgres
#POSTGRES_PASSWORD=postgres
#POSTGRES_HOST_PORT=5433
# Backend running on host (uv run server.py)
#LANGGRAPH_CHECKPOINT_DB_URL=postgresql://postgres:postgres@localhost:${POSTGRES_HOST_PORT:-5433}/checkpointing_db
# Backend running inside docker compose backend service
#LANGGRAPH_CHECKPOINT_DB_URL=postgresql://postgres:postgres@checkpoint-db:5432/checkpointing_db
# Langfuse configuration
# LANGFUSE_PUBLIC_KEY=your_langfuse_public_key
# LANGFUSE_SECRET_KEY=your_langfuse_secret_key
# LANGFUSE_HOST=https://cloud.langfuse.com
# Working directory for Deep Agent
DEEP_AGENT_WORKDIR=
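# Example (the path below is hypothetical; any writable directory should work):
# DEEP_AGENT_WORKDIR=./deep_agent_workspace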
# -------------------------------------------------------------------------
## DEEPAGENT CONFIGURATION ##
# -------------------------------------------------------------------------
# OpenAI
OPENAI_API_KEY="sk-xxx"
DEEP_AGENT_PROVIDER="openai"
# or DEEP_AGENT_LLM_TYPE=deepagent_openai
DEEPAGENT_MODEL__model_name="gpt-4.1-nano"
DEEPAGENT_MODEL__temperature=0.2
# CLAUDE
# ANTHROPIC_API_KEY="sk-xxx"
# DEEP_AGENT_PROVIDER=anthropic # or DEEP_AGENT_LLM_TYPE=deepagent
# DEEPAGENT_MODEL__model_name="claude-xx"
# DEEPAGENT_MODEL__temperature=0.2
# DEEPSEEK
# DEEPSEEK_API_KEY="sk-xxx"
# DEEP_AGENT_PROVIDER=deepseek # or DEEP_AGENT_LLM_TYPE=deepagent_deepseek
# DEEPAGENT_MODEL__model_name="deepseek-chat"
# DEEPAGENT_MODEL__base_url=https://api.deepseek.com
# DEEPAGENT_MODEL__temperature=0.2
# -------------------------------------------------------------------------
# TOKEN LIMIT SETTINGS
# -------------------------------------------------------------------------
# DEEPAGENT_MODEL__token_limit=150000 # The total context window the orchestration stack should assume for the model.
# This value is passed into the ContextManager and summarisation middleware to decide how aggressively to compress history, how many recent messages to keep, etc.
# DEEPAGENT_MODEL__max_tokens=32000 # The completion length (tokens the model may generate per response).
# This is forwarded directly to the LLM client (ChatOpenAI/ChatDeepSeek/etc.) as max_tokens so the model doesn't over-generate on a single call.
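# Example (hypothetical values, assuming a model with a 128k context window):
# keep max_tokens well below token_limit so the compressed history plus the
# completion still fit in the window.
# DEEPAGENT_MODEL__token_limit=128000
# DEEPAGENT_MODEL__max_tokens=8000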