make shell
# OR
cd python && ./scripts/run-txzshell.sh

Creates ~/.txzshell/config.yaml automatically.
# Use Groq (fast, free tier available)
export GROQ_API_KEY=gsk_your_key_here
make shell -- --provider groq
# Use OpenAI
export OPENAI_API_KEY=sk_your_key_here
make shell -- --provider openai
# Use Anthropic Claude
export ANTHROPIC_API_KEY=sk_ant_your_key_here
make shell -- --provider anthropic
# Default: Ollama (local)
make shell

Location: ~/.txzshell/config.yaml
llm:
  active_provider: ollama  # Change to: groq, openai, anthropic, etc.
  providers:
    ollama:
      model: qwen2.5-coder:3b
      api_base: http://localhost:11434
    groq:
      model: llama-3.1-70b-versatile
      api_key: null  # Set via GROQ_API_KEY env var
    openai:
      model: gpt-4
      api_key: null  # Set via OPENAI_API_KEY env var
    anthropic:
      model: claude-3-5-sonnet-20241022
      api_key: null  # Set via ANTHROPIC_API_KEY env var

# Show current configuration
python -m src.txzshell_standalone --show-config
# List supported providers
python -m src.txzshell_standalone --list-providers
# Use specific provider (one-time)
python -m src.txzshell_standalone --provider groq
# Initialize/reset config file
python -m src.txzshell_standalone --init-config

# Override provider
export LLM_PROVIDER=groq
# Provider-specific API keys
export GROQ_API_KEY=gsk_...
export OPENAI_API_KEY=sk_...
export ANTHROPIC_API_KEY=sk_ant_...
# Ollama settings
export OLLAMA_BASE_URL=http://localhost:11434
export OLLAMA_MODEL=qwen2.5-coder:3b
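
These overrides can also be combined for a one-off run without touching the config file. A minimal sketch using the entry point documented above (the key value is a placeholder, and it assumes the variables are read at startup):

# Run once against Groq; nothing is written to ~/.txzshell/config.yaml
export GROQ_API_KEY=gsk_your_key_here
LLM_PROVIDER=groq python -m src.txzshell_standalone
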
| Provider | Speed | Cost | Best For |
|---|---|---|---|
| Ollama | Medium | Free | Privacy, offline, local dev |
| Groq | Very Fast | Free tier | Fast responses, dev/testing |
| OpenAI | Fast | $$ | Production, complex tasks |
| Anthropic | Fast | $$ | Long context, analysis |
Groq:
- llama-3.1-70b-versatile - Fast, balanced (recommended)
- llama-3.1-8b-instant - Very fast, smaller
- mixtral-8x7b-32768 - Long context

OpenAI:
- gpt-4 - Most capable
- gpt-4-turbo - Faster GPT-4
- gpt-3.5-turbo - Fast, cheap

Anthropic:
- claude-3-5-sonnet-20241022 - Latest, best (recommended)
- claude-3-opus-20240229 - Highest intelligence
- claude-3-haiku-20240307 - Fast, efficient

Ollama:
- qwen2.5-coder:3b - Fast, code-focused (default)
- llama3.1:8b - General purpose
- codellama:7b - Code generation
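
To try one of the local models listed above without editing the config, the OLLAMA_MODEL override documented earlier can be set for a single run (the model name is just an example from the list):

# Pull an alternative local model and run TxzShell against it
ollama pull codellama:7b
OLLAMA_MODEL=codellama:7b make shell
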
# Get free API key from https://console.groq.com
export GROQ_API_KEY=gsk_your_key_here
# Edit config to use Groq by default
nano ~/.txzshell/config.yaml
# Change: active_provider: groq
# Run
make shell

export OPENAI_API_KEY=sk_your_key_here
python -m src.txzshell_standalone --provider openai

# Install Ollama
curl -fsSL https://ollama.ai/install.sh | sh
# Start Ollama
ollama serve
# Pull model
ollama pull qwen2.5-coder:3b
# Run TxzShell (uses Ollama by default)
make shell

# Groq with different model
nano ~/.txzshell/config.yaml
# Under groq: model: llama-3.1-8b-instant
# OpenAI with GPT-3.5
nano ~/.txzshell/config.yaml
# Under openai: model: gpt-3.5-turbo

# Check if key is set
echo $GROQ_API_KEY
# Set it
export GROQ_API_KEY=gsk_your_key_here
# Or add to config
nano ~/.txzshell/config.yaml
# Add: api_key: gsk_your_key_here

# Check if running
curl http://localhost:11434/api/tags
# Start Ollama
ollama serve
# Or use cloud provider instead
make shell -- --provider groq

# Create default config
python -m src.txzshell_standalone --init-config

- Never commit API keys to git
- Use environment variables for API keys:
  # Add to ~/.bashrc or ~/.zshrc
  export GROQ_API_KEY=gsk_...
  export OPENAI_API_KEY=sk_...
- Protect config file:
  chmod 600 ~/.txzshell/config.yaml
- Groq - Free tier with rate limits
- Ollama - Completely free (local)
- OpenAI GPT-3.5 - 10x cheaper than GPT-4
- Claude Haiku - Anthropic's cheapest model
- Groq Llama 8B - Free tier, fast
# Fast & cheap (development)
active_provider: groq
temperature: 0.5
max_tokens: 500
# Accurate & expensive (production)
active_provider: openai
temperature: 0.7
max_tokens: 2000
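
One way to switch between these profiles, assuming TxzShell only reads ~/.txzshell/config.yaml, is to keep each profile in its own file and copy the one you need into place (the file names below are just a suggested convention):

# Activate the development profile (hypothetical file names)
cp ~/.txzshell/config.dev.yaml ~/.txzshell/config.yaml
make shell

# Later, switch to the production profile
cp ~/.txzshell/config.prod.yaml ~/.txzshell/config.yaml
make shell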