Quick reference for common operations and commands
# Setup
./setup.sh # Automated setup
python verify_setup.py # Verify installation
python ava_prime_integration.py # Run demo
# Daily Operations
python codessa_sync_daemon.py # Sync Notion ↔ GitHub
python health_check.py # System health
python generate_briefing.py # Morning briefing
# Testing
pytest -v # Run all tests
pytest --cov # With coverage
pytest -m "not slow" # Skip slow tests
# Deployment
python deploy_v2.py --environment staging --enable-features all
# Essential
NOTION_API_KEY=secret_xxx
GITHUB_TOKEN=ghp_xxx
ANTHROPIC_API_KEY=sk-ant_xxx
# Configuration
ENVIRONMENT=development|staging|production
DEBUG=true|false
LOG_LEVEL=DEBUG|INFO|WARNING|ERROR
# Feature Flags
ENABLE_RBAC=true
ENABLE_AUDIT_LOGGING=true
ENABLE_RATE_LIMITING=true
| Role | Execute Commands | AI Prompts | Modify Data | View Only |
|---|---|---|---|---|
| Admin | ✅ | ✅ | ✅ | ✅ |
| Developer | ❌ | ✅ | ✅ | ✅ |
| Viewer | ❌ | ❌ | ❌ | ✅ |
# Check user permissions
from ava_prime_integration import get_user
user = get_user("user@example.com")
print(user.has_permission(Permission.EXECUTE_COMMANDS))
# Validate command before execution
from ava_prime_integration import CommandSandbox
sandbox = CommandSandbox(config)
sandbox.validate_command("python3", ["script.py"])
# Sanitize AI prompt
from ava_prime_integration import PromptSanitizer
safe_prompt = PromptSanitizer.build_safe_prompt(template, variables)
# Validate AI response
from ava_prime_integration import ResponseValidator
validated = ResponseValidator.validate_code_review(response)
# View recent events
tail -n 100 logs/audit.log
# Search for failed operations
grep '"success": false' logs/audit.log
# Find permission denials
grep "permission_denied" logs/audit.log
# User activity
grep '"user_email": "admin@example.com"' logs/audit.log
# Critical failures
grep '"action_type": "external_command_execution"' logs/audit.log | grep 'false'
template = """Review this code:
---CODE_START---
{code_content}
---CODE_END---
Provide JSON:
{
"ecl_score": 0.0-1.0,
"recommended_status": "string",
"strengths": ["string"],
"weaknesses": ["string"]
}
"""
template = """Generate briefing for {date}
Data: {intelligence_streams}
Format:
## Executive Summary
## Key Themes
## Recommended Actions
"""
# Execute with retry
from ava_prime_integration import WorkflowEngine
engine = WorkflowEngine(max_retries=3, retry_delay=60)
result = engine.execute_with_retry(
your_function,
"Workflow Name",
**kwargs
)
if result["status"] == "success":
    print(f"Completed in {result['duration_ms']}ms")
else:
    print(f"Failed: {result['error']}")
# Use cache decorator
from ava_prime_integration import cache
@cache.cached(ttl_seconds=300) # 5 minutes
def expensive_operation(param):
    return compute_result(param)
# Manual caching
cache.set("key", value, ttl_seconds=3600)
result = cache.get("key")
# Invalidate cache
cache.invalidate("key")
cache.clear() # Clear all
# Check rate limit
from ava_prime_integration import rate_limiter
if rate_limiter.check_rate_limit(user_id, "ai_prompts"):
    # Execute operation
    pass
else:
    raise RateLimitExceeded("Too many requests")
# Get remaining quota
remaining = rate_limiter.get_remaining(user_id, "ai_prompts")
# System health
python -c "from ava_prime_integration import HealthMonitor;
monitor = HealthMonitor(); print(monitor.run_all_checks())"
# Check API connectivity
curl -H "Authorization: Bearer $NOTION_API_KEY" \
https://api.notion.com/v1/users/me
# Database status
python -c "from ava_prime_integration import check_database; check_database()"
# Cache statistics
python -c "from ava_prime_integration import cache;
print(f'Size: {cache.get_size_mb()}MB, Hits: {cache.hit_count}')"
from ava_prime_integration import rate_limiter
rate_limiter.reset_user("user_id")
rm -rf cache/*
python -c "from ava_prime_integration import cache; cache.clear()"
python -c "from ava_prime_integration import AuditLogger;
logger = AuditLogger(); logger.generate_report('2025-11-01', '2025-11-11')"
from ava_prime_integration import retry_failed_workflows
retry_failed_workflows(max_age_hours=24)
import logging
logging.basicConfig(level=logging.DEBUG)
import http.client
http.client.HTTPConnection.debuglevel = 1
import cProfile
cProfile.run('your_function()')
import tracemalloc
tracemalloc.start()
# ... your code ...
snapshot = tracemalloc.take_snapshot()
top_stats = snapshot.statistics('lineno')
for stat in top_stats[:10]:
    print(stat)
# Increase cache size
CACHE_MAX_SIZE_MB=500
# Adjust batch size
BATCH_SIZE=100
# Tune rate limits
RATE_LIMIT_AI_PROMPTS_PER_HOUR=20
RATE_LIMIT_COMMANDS_PER_HOUR=10
# Worker pool size
MAX_WORKERS=10
pkill -f codessa_sync_daemon
pkill -f ava_prime
BACKUP_DIR="backups/production/$(ls -t backups/production | head -1)"
cp $BACKUP_DIR/*.json .
cp $BACKUP_DIR/.env .
# 1. Revoke in provider dashboard (Notion/GitHub)
# 2. Generate new keys
# 3. Update .env
nano .env
# 4. Restart services
./restart.sh
tail -f logs/audit.log | grep '"success": false'
- Emergency: security@codessa.ai
- Support: support@codessa.ai
- GitHub Issues: https://github.com/codessian/codessian-cortex/issues
- Always test in development first
ENVIRONMENT=development python your_script.py
- Use feature flags for gradual rollout
ENABLE_NEW_FEATURE=true # Test first
- Monitor audit logs regularly
watch -n 60 'tail -20 logs/audit.log'
- Keep backups recent
# Add to crontab:
0 */6 * * * cd /path/to/ava-prime && ./backup.sh
- Update dependencies monthly
pip list --outdated
pip install --upgrade -r requirements.txt
Quick Help: Run python -c "from ava_prime_integration import show_help; show_help()"
Last Updated: 2025-11-11