-
-
Notifications
You must be signed in to change notification settings - Fork 2
Expand file tree
/
Copy pathconfig.example.toml
More file actions
195 lines (175 loc) · 8.25 KB
/
config.example.toml
File metadata and controls
195 lines (175 loc) · 8.25 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
# RustFox Configuration
# Example config — copy to your real config file and replace the placeholders.

[telegram]
# Get your bot token from @BotFather on Telegram
bot_token = "YOUR_TELEGRAM_BOT_TOKEN"
# Only these Telegram user IDs can interact with the bot.
# Integer user IDs, not @usernames — find yours by messaging @userinfobot on Telegram.
allowed_user_ids = [123456789]
[openrouter]
# Get your API key from https://openrouter.ai/keys
api_key = "YOUR_OPENROUTER_API_KEY"
# Model to use (see https://openrouter.ai/models for the full catalogue)
model = "moonshotai/kimi-k2.5"
# OpenAI-compatible API base URL (usually no need to change)
base_url = "https://openrouter.ai/api/v1"
# Maximum tokens in each model response (completion budget, not context size)
max_tokens = 4096
# System prompt for the AI assistant.
# The trailing backslashes fold the lines into a single-line string (TOML
# multi-line basic string with line-ending backslash).
system_prompt = """You are a helpful AI assistant with access to tools. \
Use the available tools to help the user with their tasks. \
When using file or terminal tools, operate only within the allowed sandbox directory. \
Be concise and helpful."""
[sandbox]
# The directory where file operations and command execution are allowed.
# The bot cannot access files outside this directory.
# NOTE(review): presumably created automatically at startup if missing — verify.
allowed_directory = "/tmp/rustfox-sandbox"
[memory]
# Path to the SQLite database file for persistent memory.
# Stores conversations, knowledge base, and vector embeddings.
# A relative path is resolved against the bot's working directory.
database_path = "rustfox.db"

# Query rewriting for memory search (optional; default: false)
# When enabled, ambiguous follow-up questions are rewritten into self-contained
# search queries using an LLM call before the RAG vector search.
# This improves recall accuracy but adds one extra LLM round-trip per message.
# Can also be toggled per-user at runtime via the /query-rewrite Telegram command.
# query_rewriter_enabled = false
[skills]
# Directory containing skill markdown files.
# Skills are natural-language instructions loaded once at startup.
directory = "skills"
[general]
# Your location, injected into the system prompt so the AI knows your timezone/region.
# Uncomment and set to your city/region (e.g. "Tokyo, Japan").
# location = "Tokyo, Japan"
# Agent loop (optional; defaults apply if section omitted)
# [agent]
# max_iterations = 25 # Agent loop cap (default 25)
# LangSmith observability (optional)
# Traces every LLM call and tool execution for debugging in the LangSmith UI.
# Get your API key at https://smith.langchain.com → Settings → API Keys
# [langsmith]
# api_key = "ls__..."
# project = "rustfox" # LangSmith project name (default: "default")
# Embedding API for vector search (optional)
# When configured, enables hybrid vector + FTS5 search for memory.
# Without this, falls back to FTS5 keyword search only.
# Works with any OpenAI-compatible /v1/embeddings endpoint.
# [embedding]
# api_key = "YOUR_OPENROUTER_API_KEY"
# base_url = "https://openrouter.ai/api/v1"
# model = "qwen/qwen3-embedding-8b"
# dimensions = 1536
# MCP Server Configurations
# Each [[mcp_servers]] block defines an MCP server to connect to
# The bot will discover and register tools from each server
# Example: Git MCP server (requires uvx/npx)
# [[mcp_servers]]
# name = "git"
# command = "uvx"
# args = ["mcp-server-git"]
# Example: Filesystem MCP server
# [[mcp_servers]]
# name = "filesystem"
# command = "npx"
# args = ["-y", "@modelcontextprotocol/server-filesystem", "/tmp/rustfox-sandbox"]
# Example: Google Workspace MCP server (Gmail, Calendar, Drive, Docs, Sheets, Slides)
# Note: use --from because the package name differs from the executable name
#
# OAuth setup:
# 1. https://console.cloud.google.com/apis/credentials
# → Create/select project → enable Drive, Gmail, Calendar, Docs, Sheets, Slides APIs
# 2. OAuth consent screen → External → add your email as a test user
# 3. Credentials → Create OAuth Client ID → Web application
# → add Authorized redirect URI: https://developers.google.com/oauthplayground
# → save your Client ID and Client Secret
# 4. Open https://developers.google.com/oauthplayground
# → gear icon → check "Use your own OAuth credentials" → enter Client ID + Secret
# → add scopes: https://www.googleapis.com/auth/drive
# https://www.googleapis.com/auth/gmail.modify
# https://www.googleapis.com/auth/calendar
# https://www.googleapis.com/auth/documents
# https://www.googleapis.com/auth/spreadsheets
# https://www.googleapis.com/auth/presentations
# → "Authorize APIs" → sign in → "Exchange authorization code for tokens"
# → copy the Refresh token value
#
# [[mcp_servers]]
# name = "google-workspace"
# command = "uvx"
# args = ["--from", "google-workspace-mcp", "google-workspace-worker"]
# [mcp_servers.env]
# GOOGLE_WORKSPACE_CLIENT_ID = "your-client-id.apps.googleusercontent.com"
# GOOGLE_WORKSPACE_CLIENT_SECRET = "your-client-secret"
# GOOGLE_WORKSPACE_REFRESH_TOKEN = "your-refresh-token" # from step 4 above
# GOOGLE_WORKSPACE_ENABLED_CAPABILITIES = '["drive","docs","gmail","calendar","sheets","slides"]'
# Example: Meta Threads MCP server (publish posts, read replies, analytics)
#
# Token setup:
# 1. Go to https://developers.facebook.com/apps → Create App → Business type
# → add the "Threads API" product from the dashboard
# 2. Under Threads API → Permissions → request:
# threads_basic, threads_content_publish,
# threads_manage_replies, threads_read_replies
# Add your Threads account as a Test User under App Roles → Roles
# 3. Under Threads API → Access Tokens → Generate Token for your test user
# → copy the long-lived token (valid ~60 days; regenerate before expiry)
#
# [[mcp_servers]]
# name = "threads"
# command = "npx"
# args = ["-y", "threads-mcp-server"]
# [mcp_servers.env]
# THREADS_ACCESS_TOKEN = "your-long-lived-access-token"
# Example: Web search MCP server with environment variables
# [[mcp_servers]]
# name = "brave-search"
# command = "npx"
# args = ["-y", "@brave/brave-search-mcp-server"]
# [mcp_servers.env]
# BRAVE_API_KEY = "your-brave-api-key"
# ── HTTP-based MCP Servers (Streamable HTTP transport) ────────────────────────
# These servers are reached over HTTPS and do not require a local command.
# Use `url` instead of `command`; optionally set `auth_token` for Bearer auth.
# Example: Notion MCP server (official HTTP MCP — no Node.js required)
# Docs: https://developers.notion.com/guides/mcp/mcp
#
# Recommended: use the setup wizard (cargo run --bin setup) to obtain an OAuth
# access token via the built-in OAuth 2.0 flow. The wizard also stores the
# refresh token and expiry so the bot can automatically renew the connection.
#
# The bot refreshes the access token automatically when it is within 5 minutes
# of expiry and writes the new credentials back to this file. Manual setup:
# 1. Create a Notion integration at https://www.notion.so/my-integrations
# and note your client_id and client_secret.
# 2. Complete the OAuth flow to obtain an access_token and refresh_token.
# 3. Fill in the fields below.
#
# [[mcp_servers]]
# name = "notion"
# url = "https://mcp.notion.com/mcp"
# auth_token = "your-notion-oauth-access-token"
# refresh_token = "your-notion-oauth-refresh-token"
# token_expires_at = 1234567890 # Unix timestamp; set by the wizard
# token_endpoint = "https://api.notion.com/v1/oauth/token"
# oauth_client_id = "your-notion-client-id"
# oauth_client_secret = "your-notion-client-secret" # omit for public clients
# Example: Exa AI web search (https://mcp.exa.ai)
# Get your API key at https://dashboard.exa.ai/api-keys
#
# Option A — Bearer token (recommended; keeps the key out of URLs and logs)
# [[mcp_servers]]
# name = "exa"
# url = "https://mcp.exa.ai/mcp"
# auth_token = "your-exa-api-key"
#
# Option B — Inline API key in URL (simpler; key will appear in logs)
# [[mcp_servers]]
# name = "exa"
# url = "https://mcp.exa.ai/mcp?exaApiKey=your-exa-api-key"
# ── Self-Learning (optional; defaults apply if section omitted) ─────────────
# [learning]
# user_model_path = "memory/USER.md" # Honcho-style user model file
# skill_extraction_enabled = true # Auto-generate skills from tool-heavy tasks
# skill_extraction_threshold = 5 # Min tool calls to trigger extraction
# user_model_update_interval = 10 # Update user model every N user messages
# user_model_cron = "0 0 3 * * SUN" # Weekly user model refresh (6-field cron)