┌─────────────────┐ ┌─────────────────┐ ┌─────────────────┐
│ OpenWebUI │ │ n8n │ │ SearxNG │
│ (Main UI) │◄──►│ (Orchestrator) │◄──►│ (Web Search) │
└─────────────────┘ └─────────────────┘ └─────────────────┘
│ │ │
▼ ▼ ▼
┌─────────────────┐ ┌─────────────────┐ ┌─────────────────┐
│ Qdrant │ │ PostgreSQL │ │ Redis │
│ (Vector Store) │ │ (Database) │ │ (Cache) │
└─────────────────┘ └─────────────────┘ └─────────────────┘
│ │ │
▼ ▼ ▼
┌─────────────────┐ ┌─────────────────┐ ┌─────────────────┐
│ ClickHouse │ │ MinIO │ │ Langfuse │
│ (Analytics) │ │ (Storage) │ │ (Observability) │
└─────────────────┘ └─────────────────┘ └─────────────────┘
- Containerization: Docker + Docker Compose
- AI Framework: OpenWebUI + Ollama
- Orchestration: n8n
- Vector Database: Qdrant
- Relational DB: PostgreSQL
- Cache: Redis
- Analytics: ClickHouse
- Storage: MinIO (S3-compatible)
- Observability: Langfuse
# Required tools
docker --version # 24.0+
docker compose version
git --version
curl --version
# Optional development tools
code # VS Code
docker-compose           # Alternative to docker compose

# Clone repository
git clone https://github.com/FlowTech-Lab/FlowTech-AI.git
cd FlowTech-AI
# Create development environment
cp .env.example .env.dev
# Edit .env.dev with development settings
# Start development stack
docker compose -f docker-compose.yml -f docker-compose.dev.yml up -d

# docker-compose.dev.yml
version: '3.8'
services:
  openwebui:
    volumes:
      - ./src:/app/src          # Mount source code for live reload
    environment:
      - DEBUG=true
      - LOG_LEVEL=debug

# Custom pipeline example
def custom_pipeline(input_data):
# Process input
processed = process_input(input_data)
# Call n8n webhook
response = call_n8n_webhook(processed)
# Return result
return format_response(response)// Custom function example
function processDocument(document) {
// Extract text
const text = extractText(document);
// Process with AI
const analysis = analyzeText(text);
// Store in Qdrant
storeInQdrant(analysis);
return analysis;
}from qdrant_client import QdrantClient
# Initialize client (host "qdrant" is the service name on the Docker network)
client = QdrantClient(host="qdrant", port=6333)

# Create a 384-dimensional collection using cosine distance
client.create_collection(
    collection_name="documents",
    vectors_config={"size": 384, "distance": "Cosine"}
)

# Insert (upsert) a sample point with its payload
client.upsert(
    collection_name="documents",
    points=[
        {
            "id": 1,
            "vector": [0.1, 0.2, 0.3, ...],
            "payload": {"text": "document content"}
        }
    ]
)

# Authentication
curl -H "Authorization: Bearer $API_KEY" \
http://localhost:8081/api/v1/chat
# Chat completion
curl -X POST http://localhost:8081/api/v1/chat \
-H "Content-Type: application/json" \
-d '{
"model": "qwen2.5:7b",
"messages": [{"role": "user", "content": "Hello"}]
}'

# Trigger workflow
curl -X POST http://localhost:5678/webhook/trigger-name \
-H "Content-Type: application/json" \
-d '{"input": "data"}'
# Get workflow status
curl http://localhost:5678/api/v1/executions/{execution_id}

from langfuse import Langfuse
# Initialize client
langfuse = Langfuse(
public_key="lf_pk_...",
secret_key="lf_sk_...",
host="http://localhost:3300"
)
# Create trace
trace = langfuse.trace(
name="chat-completion",
input={"user_input": "Hello world"}
)
# Create generation
generation = trace.generation(
name="openai-chat-completion",
model="qwen2.5:7b",
input={"messages": [...]},
output={"content": "AI response"}
)-- Main tables created by n8n
CREATE TABLE workflow_entity (
    id SERIAL PRIMARY KEY,
    name VARCHAR(255),
    active BOOLEAN DEFAULT false,
    created_at TIMESTAMP DEFAULT NOW()
);

-- One row per run, linked back to its workflow
CREATE TABLE execution_entity (
    id SERIAL PRIMARY KEY,
    workflow_id INTEGER REFERENCES workflow_entity(id),
    mode VARCHAR(50),
    started_at TIMESTAMP DEFAULT NOW()
);

-- Langfuse analytics tables
-- ClickHouse table: MergeTree ordered by (timestamp, id) for time-range scans
CREATE TABLE traces (
    id String,
    timestamp DateTime64(3),
    name String,
    input String,
    output String,
    user_id String
) ENGINE = MergeTree()
ORDER BY (timestamp, id);

- Define service in
docker-compose.yml
- Configure networking in flow-ai-network
- Add environment variables to .env
- Update init.sh for service initialization
- Add health checks and monitoring
# Pipeline structure
class CustomPipeline:
def __init__(self, config):
self.config = config
self.qdrant_client = QdrantClient()
def process(self, input_data):
# 1. Preprocess input
processed = self.preprocess(input_data)
# 2. Generate embeddings
embeddings = self.generate_embeddings(processed)
# 3. Search vector database
results = self.search_vectors(embeddings)
# 4. Generate response
response = self.generate_response(results)
return response// n8n custom node
export class CustomNode implements INodeType {
  // Static metadata n8n uses to render and wire the node
  description: INodeTypeDescription = {
    displayName: 'Custom Node',
    name: 'customNode',
    icon: 'file:custom.svg',
    group: ['transform'],
    version: 1,
    description: 'Custom processing node',
    defaults: {
      name: 'Custom Node',
    },
    inputs: ['main'],
    outputs: ['main'],
    properties: [
      {
        displayName: 'Input Field',
        name: 'inputField',
        type: 'string',
        default: '',
        description: 'Input field description',
      },
    ],
  };

  async execute(this: IExecuteFunctions): Promise<INodeExecutionData[][]> {
    // Node execution logic
    return [items];
  }
}

# Run service tests
docker compose exec openwebui python -m pytest tests/
docker compose exec n8n npm test
# Integration tests
./scripts/test-integration.sh

# Test API endpoints
./scripts/load-test.sh
# Monitor performance
docker stats

# Service health checks
./scripts/health-check.sh
# Performance metrics
curl http://localhost:3300/api/public/health # Langfuse
curl http://localhost:6333/health            # Qdrant

# Build production images
docker compose -f docker-compose.prod.yml build
# Deploy to production
docker compose -f docker-compose.prod.yml up -d

# Development
docker compose -f docker-compose.yml -f docker-compose.dev.yml up -d
# Staging
docker compose -f docker-compose.yml -f docker-compose.staging.yml up -d
# Production
docker compose -f docker-compose.yml -f docker-compose.prod.yml up -d

# Service logs
docker compose logs -f [service-name]
# Application logs
tail -f logs/init-*.log
# System logs
journalctl -u docker.service -f

# Resource usage
docker stats
# Network monitoring
docker compose exec [service] netstat -tuln
# Database performance
docker compose exec postgres psql -c "SELECT * FROM pg_stat_activity;"

# Container shell access
docker compose exec [service] /bin/bash
# Database access
docker compose exec postgres psql -U n8n -d n8n
# Redis CLI
docker compose exec redis redis-cli
# Qdrant admin
curl http://localhost:6333/dashboard

- Python: Follow PEP 8
- JavaScript: Use ESLint configuration
- Docker: Multi-stage builds, minimal images
- Documentation: Markdown with clear examples
# Feature development
git checkout -b feature/new-feature
git commit -m "Add: New feature implementation"
git push origin feature/new-feature
# Create pull request
# After review and approval, merge to main

- Version bump in docker-compose.yml
- Update changelog in CHANGELOG.md
- Create release tag
- Deploy to production
Next Steps: See ARCHITECTURE.md for detailed technical specifications.