# Directories containing Python sources (used by lint/format/test targets).
PYTHON_DIRS = tests examples scripts eval_protocol
# Python runner; override with `make PY=python` to bypass uv.
PY ?= uv run python

.PHONY: clean build dist upload test lint typecheck format release sync-docs version tag-version show-version bump-major bump-minor bump-patch full-release quick-release
## -----------------------------
## Local Langfuse + LiteLLM E2E
## -----------------------------

.PHONY: local-install local-langfuse-up local-langfuse-up-local local-langfuse-wait local-litellm-up local-litellm-smoke local-adapter-smoke local-generate-traces local-generate-chinook local-eval local-eval-fireworks-only local-quick-run

# Install the package in editable mode with the langfuse extra enabled.
local-install:
	uv pip install -e ".[langfuse]"
| 13 | + |
# 1) Start Langfuse per official docs (run from Langfuse repo). Here we just export env.
# NOTE: each recipe line runs in its own shell, so these assignments cannot
# persist into the caller's environment — this target only echoes the
# effective values (with "local"/localhost defaults) for copy-paste.
local-langfuse-up:
	@echo "Ensure you started Langfuse via docker compose as per docs."
	@echo "Docs: https://langfuse.com/self-hosting/deployment/docker-compose"
	@echo "Exporting LANGFUSE env vars for SDK..."
	LANGFUSE_PUBLIC_KEY=$${LANGFUSE_PUBLIC_KEY:-local}; \
	LANGFUSE_SECRET_KEY=$${LANGFUSE_SECRET_KEY:-local}; \
	LANGFUSE_HOST=$${LANGFUSE_HOST:-http://localhost:3000}; \
	printf "LANGFUSE_PUBLIC_KEY=%s\nLANGFUSE_SECRET_KEY=%s\nLANGFUSE_HOST=%s\n" "$$LANGFUSE_PUBLIC_KEY" "$$LANGFUSE_SECRET_KEY" "$$LANGFUSE_HOST"
| 23 | + |
# Start Langfuse using the local compose file checked into the examples dir.
# Runs detached (-d); stop with `docker compose -f <same file> down`.
local-langfuse-up-local:
	docker compose -f examples/local_langfuse_litellm_ollama/langfuse-docker-compose.yml up -d
| 27 | + |
# Wait until Langfuse UI responds with HTTP 200 (or a 302 login redirect).
# Polls every 2s for up to 60 attempts (~2 minutes); exits non-zero on timeout.
local-langfuse-wait:
	LANGFUSE_HOST=$${LANGFUSE_HOST:-http://localhost:3000}; \
	echo "Waiting for $$LANGFUSE_HOST ..."; \
	for i in $$(seq 1 60); do \
		code=$$(curl -s -o /dev/null -w "%{http_code}" "$$LANGFUSE_HOST"); \
		if [ "$$code" = "200" ] || [ "$$code" = "302" ]; then echo "Langfuse is up (HTTP $$code)"; exit 0; fi; \
		sleep 2; \
	done; \
	echo "Langfuse did not become ready in time."; exit 1
| 38 | + |
# 2) Start LiteLLM router (requires litellm installed). Runs in the foreground;
# press Ctrl-C to stop. Defaults LITELLM_API_KEY to a demo key and prints it.
local-litellm-up:
	LITELLM_API_KEY=$${LITELLM_API_KEY:-local-demo-key}; \
	printf "LITELLM_API_KEY=%s\n" "$$LITELLM_API_KEY"; \
	LITELLM_API_KEY=$$LITELLM_API_KEY uv run litellm --config examples/local_langfuse_litellm_ollama/litellm-config.yaml --port 4000
| 44 | + |
# 2b) Smoke test LiteLLM endpoints: list models, then one chat completion.
# Requires LITELLM_API_KEY to be exported in the calling shell.
local-litellm-smoke:
	@test -n "$$LITELLM_API_KEY" || (echo "LITELLM_API_KEY not set" && exit 1)
	curl -s -H "Authorization: Bearer $$LITELLM_API_KEY" http://127.0.0.1:4000/v1/models | head -n 5
	curl -s \
		-H "Authorization: Bearer $$LITELLM_API_KEY" \
		-H "Content-Type: application/json" \
		http://127.0.0.1:4000/v1/chat/completions \
		-d '{"model":"ollama/llama3.1","messages":[{"role":"user","content":"Say hi"}]}' \
		| head -n 40
| 55 | + |
# 3) Seed one trace into Langfuse
# NOTE(review): no seeding target follows this header, yet `local-quick-run`
# lists `local-seed-langfuse` as a prerequisite — the target appears to have
# been removed; confirm and either restore it or drop the stale references.
# 4) Adapter smoke test: verify Langfuse is reachable (200 or 302), then fetch
# a single evaluation row through the eval_protocol Langfuse adapter.
local-adapter-smoke:
	LANGFUSE_HOST=$${LANGFUSE_HOST:-http://localhost:3000}; \
	code=$$(curl -s -o /dev/null -w "%{http_code}" "$$LANGFUSE_HOST"); \
	if [ "$$code" != "200" ] && [ "$$code" != "302" ]; then \
		echo "Langfuse not reachable at $$LANGFUSE_HOST (HTTP $$code). Start it per docs."; \
		exit 1; \
	fi; \
	LANGFUSE_PUBLIC_KEY=$${LANGFUSE_PUBLIC_KEY:-local}; \
	LANGFUSE_SECRET_KEY=$${LANGFUSE_SECRET_KEY:-local}; \
	LANGFUSE_PUBLIC_KEY=$$LANGFUSE_PUBLIC_KEY LANGFUSE_SECRET_KEY=$$LANGFUSE_SECRET_KEY LANGFUSE_HOST=$$LANGFUSE_HOST \
	$(PY) -c "from eval_protocol.adapters.langfuse import create_langfuse_adapter; a=create_langfuse_adapter(); rows=a.get_evaluation_rows(limit=1, sample_size=1); print('Fetched rows:', len(rows))"
| 70 | + |
# Generate realistic traces into Langfuse (Chinook) using Fireworks models.
# The recipe was byte-identical to local-generate-chinook, so this target is
# kept as a simple alias to avoid the two copies drifting apart.
local-generate-traces: local-generate-chinook
| 76 | + |
# Force-run Chinook generator with stub DB and Langfuse observe.
# Requires FIREWORKS_API_KEY; extras are installed best-effort ("|| true"
# keeps the target going when they are already present or install fails).
local-generate-chinook:
	@test -n "$$FIREWORKS_API_KEY" || (echo "FIREWORKS_API_KEY not set" && exit 1)
	uv pip install -e ".[pydantic,fireworks,chinook]" >/dev/null || true
	CHINOOK_USE_STUB_DB=1 uv run pytest tests/chinook/langfuse/generate_traces.py -q
| 82 | + |
# Fallback generator that does not need external DBs
# NOTE(review): no target follows this header — it looks like the fallback
# generator was removed; confirm, then delete this comment or restore the rule.
# 5) Run the local evaluation test (uses Fireworks as judge; requires FIREWORKS_API_KEY).
local-eval:
	@test -n "$$FIREWORKS_API_KEY" || (echo "FIREWORKS_API_KEY not set" && exit 1)
	uv run pytest eval_protocol/quickstart/llm_judge_langfuse_local.py -k test_llm_judge_local -q
| 89 | + |
# Run evaluation by calling Fireworks directly (skip LiteLLM router).
# Requires FIREWORKS_API_KEY; mirrors local-eval but uses the fireworks-only test module.
local-eval-fireworks-only:
	@test -n "$$FIREWORKS_API_KEY" || (echo "FIREWORKS_API_KEY not set" && exit 1)
	uv run pytest eval_protocol/quickstart/llm_judge_langfuse_fireworks_only.py -k test_llm_judge_fireworks_only -q
| 94 | + |
# One-shot: assumes Langfuse is already up externally and LiteLLM already running in another shell.
# FIX: dropped the `local-seed-langfuse` prerequisite — no rule with that name
# exists in this Makefile (it is also absent from the local .PHONY list), so
# `make local-quick-run` would fail with "No rule to make target". Seed traces
# first via `make local-generate-traces` if the Langfuse project is empty.
local-quick-run: local-adapter-smoke local-eval
	@echo "Done. Check Langfuse UI for scores."
| 98 | + |
4 | 99 |
|
# Remove build artifacts and packaging metadata.
clean:
	rm -rf build/ dist/ *.egg-info/
|
0 commit comments