.env.example
# LLM Processing Configuration
LLM_PROVIDER=openrouter # Options: openrouter, local
LLM_PROCESSING_ENABLED=true

# OpenRouter Configuration (Recommended for cloud AI models)
OPENROUTER_API_KEY=your_openrouter_api_key_here
OPENROUTER_MODEL=anthropic/claude-3-haiku # See https://openrouter.ai/models for available options

# Local LLM Configuration (Alternative to OpenRouter)
LOCAL_LLM_URL=http://localhost:11434 # Ollama: http://localhost:11434, LM Studio: http://localhost:1234 (auto-adds /v1)
LOCAL_LLM_MODEL=gemma3n # Model name for Ollama (LM Studio uses whatever model is loaded in its UI)
LOCAL_LLM_TYPE=ollama # Options: ollama, lmstudio
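# Example (illustrative): to use LM Studio instead of Ollama, you would set
# LOCAL_LLM_URL=http://localhost:1234 and LOCAL_LLM_TYPE=lmstudio; LOCAL_LLM_MODEL is
# then effectively ignored, since LM Studio serves whatever model is loaded in its UI.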

# Local LLM Context Configuration (Important for performance!)
LOCAL_LLM_MAX_CONTEXT=4096 # Maximum context tokens for your local model (e.g., 4096, 8192, 32768)
LOCAL_LLM_OPTIMAL_CHUNK=1024 # Optimal chunk size for batching (typically 1/4 of max context)
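# Example: a model with an 8192-token context window would use
# LOCAL_LLM_MAX_CONTEXT=8192 and LOCAL_LLM_OPTIMAL_CHUNK=2048 (8192 / 4).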

# LLM Timeout Configuration (Important for local models!)
LLM_TIMEOUT_SECONDS=120.0 # Default timeout for all LLM requests in seconds
LOCAL_LLM_TIMEOUT_SECONDS=600.0 # Override timeout for local LLM requests (10 minutes for slower local models)

# Quality Filtering
LLM_QUALITY_THRESHOLD=0.6 # Minimum quality score for practices (0.0-1.0, higher = stricter)
# Set to 0.0 to disable quality filtering for faster processing
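# Example: LLM_QUALITY_THRESHOLD=0.8 keeps only high-scoring practices,
# while LLM_QUALITY_THRESHOLD=0.0 keeps everything.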

# Scraping Configuration
SCRAPING_INTERVAL_HOURS=6 # For scheduled/periodic scraping (not yet implemented in CLI)
MAX_POSTS_PER_SCRAPE=100
SCRAPING_DELAY_SECONDS=2 # Delay between scraping pages/posts (be respectful to servers)

# Logging
LOG_LEVEL=INFO
LOG_FORMAT=pretty
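
Below is a minimal, illustrative Python sketch (not code from this repository) of how an application might load and interpret these settings. It assumes the python-dotenv package plus the standard library, and the variable handling is inferred from the comments above.

import os

from dotenv import load_dotenv

load_dotenv()  # read KEY=VALUE pairs from .env into the process environment

provider = os.getenv("LLM_PROVIDER", "openrouter")
timeout = float(os.getenv("LLM_TIMEOUT_SECONDS", "120.0"))

if provider == "local":
    base_url = os.getenv("LOCAL_LLM_URL", "http://localhost:11434")
    if os.getenv("LOCAL_LLM_TYPE", "ollama") == "lmstudio":
        # LM Studio exposes an OpenAI-compatible API under /v1
        # (the "auto-adds /v1" behavior noted above).
        base_url = base_url.rstrip("/") + "/v1"
    model = os.getenv("LOCAL_LLM_MODEL", "gemma3n")
    # Local models are slower, so a longer timeout overrides the default.
    timeout = float(os.getenv("LOCAL_LLM_TIMEOUT_SECONDS", str(timeout)))
else:
    api_key = os.getenv("OPENROUTER_API_KEY")
    model = os.getenv("OPENROUTER_MODEL", "anthropic/claude-3-haiku")

quality_threshold = float(os.getenv("LLM_QUALITY_THRESHOLD", "0.6"))
print(f"provider={provider}, model={model}, timeout={timeout}s, quality>={quality_threshold}")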