TxzShell supports flexible configuration through YAML files, environment variables, and command-line options.
TxzShell looks for configuration in `~/.txzshell/config.yaml`. This file is automatically created on first run.
```
~/.txzshell/
├── config.yaml   # Main configuration file
├── session.json  # Session persistence (auto-generated)
└── history       # Command history (auto-generated)
```
Settings are loaded in this order (highest priority first):
- Environment Variables - Override everything
- Config File (`~/.txzshell/config.yaml`) - User preferences
- Defaults - Built-in defaults
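For example, an exported `LLM_PROVIDER` beats the `active_provider` set in the config file. A quick way to see the precedence in action, using the `--show-config` flag documented below:

```bash
# config file says active_provider: ollama, but the environment wins
export LLM_PROVIDER=groq
python -m src.txzshell_standalone --show-config   # should now report groq as the active provider

# unset it to fall back to the config file value
unset LLM_PROVIDER
```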
Default configuration file:

```yaml
# TxzShell Configuration
# Location: ~/.txzshell/config.yaml

# LLM Provider Configuration
llm:
  # Active provider: ollama, groq, openai, anthropic
  active_provider: ollama

  # Provider-specific settings
  providers:
    ollama:
      provider: ollama
      model: qwen2.5-coder:3b
      api_base: http://localhost:11434
      temperature: 0.7

    groq:
      provider: groq
      model: llama-3.1-70b-versatile
      api_key: null  # Set via GROQ_API_KEY env var or here
      temperature: 0.7
      max_tokens: 8000

    openai:
      provider: openai
      model: gpt-4
      api_key: null  # Set via OPENAI_API_KEY env var or here
      temperature: 0.7

    anthropic:
      provider: anthropic
      model: claude-3-5-sonnet-20241022
      api_key: null  # Set via ANTHROPIC_API_KEY env var or here
      temperature: 0.7

# Shell Settings
shell:
  session_file: ~/.txzshell/session.json
  history_file: ~/.txzshell/history
  max_history: 1000
  max_conversation_turns: 5
  show_welcome: true
  auto_save: true

# Agent Settings
agent:
  name: TxzShell Assistant
  verbose: false
  max_iterations: 10
```

Supported providers:

- Ollama - Local LLM server (default)
- Groq - Fast cloud inference
- OpenAI - GPT models
- Anthropic - Claude models
- Azure OpenAI - Microsoft Azure hosted models (see the sketch after this list)
- vLLM - High-performance inference server
- Together - Together.ai models
- Cohere - Cohere models
- HuggingFace - HuggingFace inference
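The first four providers are covered in detail below. The others follow the same per-provider schema seen in the default config. As an unverified sketch, an Azure OpenAI entry might look like the following; the `azure_openai` key name, the `AZURE_OPENAI_API_KEY` variable, and the endpoint field are assumptions, not documented settings:

```yaml
llm:
  active_provider: azure_openai     # assumed provider id
  providers:
    azure_openai:
      provider: azure_openai
      model: gpt-4                  # model behind your Azure deployment
      api_key: null                 # hypothetical: set via AZURE_OPENAI_API_KEY or here
      api_base: https://my-resource.openai.azure.com  # hypothetical endpoint URL
      temperature: 0.7
```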
Ollama:

```yaml
llm:
  active_provider: ollama
  providers:
    ollama:
      provider: ollama
      model: qwen2.5-coder:3b
      api_base: http://localhost:11434
      temperature: 0.7
```

Environment Variables:

```bash
export OLLAMA_BASE_URL=http://localhost:11434
export OLLAMA_MODEL=qwen2.5-coder:3b
export LLM_PROVIDER=ollama
```

Groq:

```yaml
llm:
  active_provider: groq
  providers:
    groq:
      provider: groq
      model: llama-3.1-70b-versatile
      api_key: gsk_...  # Your Groq API key
      temperature: 0.7
      max_tokens: 8000
```

Environment Variables:

```bash
export GROQ_API_KEY=gsk_your_api_key_here
export LLM_PROVIDER=groq
export GROQ_MODEL=llama-3.1-70b-versatile
```

Available Groq Models:

- `llama-3.1-70b-versatile` - Fast, balanced (recommended)
- `llama-3.1-8b-instant` - Very fast, smaller
- `mixtral-8x7b-32768` - Good for long context
- `gemma-7b-it` - Fast, efficient
OpenAI:

```yaml
llm:
  active_provider: openai
  providers:
    openai:
      provider: openai
      model: gpt-4
      api_key: sk-...  # Your OpenAI API key
      temperature: 0.7
```

Environment Variables:

```bash
export OPENAI_API_KEY=sk_your_api_key_here
export LLM_PROVIDER=openai
export OPENAI_MODEL=gpt-4
```

Available OpenAI Models:

- `gpt-4` - Most capable
- `gpt-4-turbo` - Faster GPT-4
- `gpt-3.5-turbo` - Fast, cost-effective
Anthropic:

```yaml
llm:
  active_provider: anthropic
  providers:
    anthropic:
      provider: anthropic
      model: claude-3-5-sonnet-20241022
      api_key: sk-ant-...  # Your Anthropic API key
      temperature: 0.7
```

Environment Variables:

```bash
export ANTHROPIC_API_KEY=sk_ant_your_api_key_here
export LLM_PROVIDER=anthropic
export ANTHROPIC_MODEL=claude-3-5-sonnet-20241022
```

Available Anthropic Models:

- `claude-3-5-sonnet-20241022` - Most capable (recommended)
- `claude-3-opus-20240229` - Highest intelligence
- `claude-3-sonnet-20240229` - Balanced
- `claude-3-haiku-20240307` - Fast, efficient
Environment Variables (all providers):

```bash
# Override active provider
export LLM_PROVIDER=groq

# Provider-specific
export OLLAMA_BASE_URL=http://localhost:11434
export OLLAMA_MODEL=qwen2.5-coder:3b
export GROQ_API_KEY=gsk_your_key
export GROQ_MODEL=llama-3.1-70b-versatile
export OPENAI_API_KEY=sk_your_key
export OPENAI_MODEL=gpt-4
export ANTHROPIC_API_KEY=sk_ant_your_key
export ANTHROPIC_MODEL=claude-3-5-sonnet-20241022

# Agent settings
export AGENT_NAME="My Custom Agent"
```
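To keep these settings across sessions, they can go in your shell profile; a minimal sketch, assuming a bash setup (`~/.bashrc`):

```bash
# ~/.bashrc (or ~/.zshrc) -- persist TxzShell provider settings
export LLM_PROVIDER=groq
export GROQ_API_KEY=gsk_your_key   # placeholder; substitute your real key
export GROQ_MODEL=llama-3.1-70b-versatile
```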
Command-line options:

```bash
# Show current configuration
python -m src.txzshell_standalone --show-config

# List all supported providers
python -m src.txzshell_standalone --list-providers

# Use specific provider (overrides config file)
python -m src.txzshell_standalone --provider groq

# Create/reset default config file
python -m src.txzshell_standalone --init-config
```
Quick start with Ollama (local, default):

```bash
# Install and run Ollama
curl -fsSL https://ollama.ai/install.sh | sh
ollama serve

# Pull a model
ollama pull qwen2.5-coder:3b

# Run TxzShell (uses default Ollama config)
make shell
```
Quick start with Groq:

```bash
# Get API key from https://console.groq.com
export GROQ_API_KEY=gsk_your_api_key_here

# Edit config file
nano ~/.txzshell/config.yaml
# Change: active_provider: groq
# Add your API key to groq.api_key

# Or use environment variable
export LLM_PROVIDER=groq

# Run with Groq
make shell
```
Quick start with OpenAI:

```bash
# Get API key from https://platform.openai.com
export OPENAI_API_KEY=sk_your_api_key_here

# Run with OpenAI
python -m src.txzshell_standalone --provider openai
```

Trying another provider for a single session:

```bash
# Normally use Ollama, but try Groq for one session
export GROQ_API_KEY=gsk_your_key
python -m src.txzshell_standalone --provider groq
```

Security:

- Never commit API keys to version control
- Use environment variables for API keys rather than the config file
- Set restrictive permissions on the config file:

  ```bash
  chmod 600 ~/.txzshell/config.yaml
  ```
Performance tuning:

```yaml
# For faster responses (lower quality)
temperature: 0.3
max_tokens: 500

# For better quality (slower)
temperature: 0.8
max_tokens: 2000

# Limit agent iterations for faster responses
agent:
  max_iterations: 5
```

Cost optimization:

```yaml
# Use cheaper models for development
llm:
  active_provider: groq  # Free tier available

  # Or use smaller models
  providers:
    openai:
      model: gpt-3.5-turbo  # Much cheaper than GPT-4
```
Troubleshooting:

```bash
# Initialize default config
python -m src.txzshell_standalone --init-config

# Check if API key is set
echo $GROQ_API_KEY

# Verify in config
python -m src.txzshell_standalone --show-config

# List supported providers
python -m src.txzshell_standalone --list-providers

# Test with different provider
python -m src.txzshell_standalone --provider ollama

# Check if Ollama is running
curl http://localhost:11434/api/tags

# Start Ollama if needed
ollama serve
```
Custom provider configuration:

```yaml
llm:
  providers:
    custom_vllm:
      provider: vllm
      model: my-custom-model
      api_base: http://custom-server:8000
      temperature: 0.7
      extra_params:
        top_p: 0.9
        presence_penalty: 0.5
```

Create different config files for different use cases:
```bash
# Work config (uses company OpenAI key)
cp ~/.txzshell/config.yaml ~/.txzshell/config-work.yaml

# Personal config (uses Ollama)
cp ~/.txzshell/config.yaml ~/.txzshell/config-personal.yaml

# Use specific config
export CONFIG_FILE=~/.txzshell/config-work.yaml
python -m src.txzshell_standalone
```
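To switch between them quickly, shell aliases work well; this sketch assumes `CONFIG_FILE` is honored exactly as shown above, and the alias names are purely illustrative:

```bash
# Hypothetical convenience aliases for switching configs
alias txz-work='CONFIG_FILE=~/.txzshell/config-work.yaml python -m src.txzshell_standalone'
alias txz-personal='CONFIG_FILE=~/.txzshell/config-personal.yaml python -m src.txzshell_standalone'
```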