Skip to content

Commit 845d9b5

Browse files
committed
feat(litellm): migrate litellm wrapper to integrations API
Move the LiteLLM tracing implementation from wrappers/litellm.py into the integrations API under integrations/litellm/. This replaces the manual LiteLLMWrapper class approach with FunctionWrapperPatcher subclasses that use wrapt's wrap_function_wrapper for idempotent monkey-patching.

Key changes:
- Add integrations/litellm/ with tracing.py, patchers.py, and integration.py following the standard integrations API pattern
- Add wrap_litellm() helper that instruments a specific litellm module object using the patchers, complementing the existing patch_litellm() which patches the globally-imported module
- Consolidate the patcher list into _ALL_PATCHERS, shared by both LiteLLMIntegration.patchers and wrap_litellm()
- Reduce wrappers/litellm.py to compatibility re-exports
- Move tests and cassettes to integrations/litellm/
- Add cassettes_dir parameter to autoinstrument_test_context
1 parent 0b717a9 commit 845d9b5

34 files changed

Lines changed: 770 additions & 836 deletions

py/noxfile.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -243,7 +243,7 @@ def test_litellm(session, version):
243243
# Install fastapi and orjson as they're required by litellm for proxy/responses operations
244244
session.install("openai<=1.99.9", "--force-reinstall", "fastapi", "orjson")
245245
_install(session, "litellm", version)
246-
_run_tests(session, f"{WRAPPER_DIR}/test_litellm.py")
246+
_run_tests(session, f"{INTEGRATION_DIR}/litellm/test_litellm.py")
247247
_run_core_tests(session)
248248

249249

py/src/braintrust/__init__.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -73,6 +73,9 @@ def is_equal(expected, output):
7373
from .integrations.anthropic import (
7474
wrap_anthropic, # noqa: F401 # type: ignore[reportUnusedImport]
7575
)
76+
from .integrations.litellm import (
77+
wrap_litellm, # noqa: F401 # type: ignore[reportUnusedImport]
78+
)
7679
from .logger import *
7780
from .logger import (
7881
_internal_get_global_state, # noqa: F401 # type: ignore[reportUnusedImport]
@@ -92,9 +95,6 @@ def is_equal(expected, output):
9295
BT_IS_ASYNC_ATTRIBUTE, # noqa: F401 # type: ignore[reportUnusedImport]
9396
MarkAsyncWrapper, # noqa: F401 # type: ignore[reportUnusedImport]
9497
)
95-
from .wrappers.litellm import (
96-
wrap_litellm, # noqa: F401 # type: ignore[reportUnusedImport]
97-
)
9898
from .wrappers.pydantic_ai import (
9999
setup_pydantic_ai, # noqa: F401 # type: ignore[reportUnusedImport]
100100
)

py/src/braintrust/auto.py

Lines changed: 2 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -14,6 +14,7 @@
1414
ClaudeAgentSDKIntegration,
1515
DSPyIntegration,
1616
GoogleGenAIIntegration,
17+
LiteLLMIntegration,
1718
)
1819

1920

@@ -116,7 +117,7 @@ def auto_instrument(
116117
if anthropic:
117118
results["anthropic"] = _instrument_integration(AnthropicIntegration)
118119
if litellm:
119-
results["litellm"] = _instrument_litellm()
120+
results["litellm"] = _instrument_integration(LiteLLMIntegration)
120121
if pydantic_ai:
121122
results["pydantic_ai"] = _instrument_pydantic_ai()
122123
if google_genai:
@@ -147,14 +148,6 @@ def _instrument_integration(integration) -> bool:
147148
return False
148149

149150

150-
def _instrument_litellm() -> bool:
151-
with _try_patch():
152-
from braintrust.wrappers.litellm import patch_litellm
153-
154-
return patch_litellm()
155-
return False
156-
157-
158151
def _instrument_pydantic_ai() -> bool:
159152
with _try_patch():
160153
from braintrust.wrappers.pydantic_ai import setup_pydantic_ai

py/src/braintrust/integrations/__init__.py

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -4,6 +4,7 @@
44
from .claude_agent_sdk import ClaudeAgentSDKIntegration
55
from .dspy import DSPyIntegration
66
from .google_genai import GoogleGenAIIntegration
7+
from .litellm import LiteLLMIntegration
78

89

910
__all__ = [
@@ -13,4 +14,5 @@
1314
"ClaudeAgentSDKIntegration",
1415
"DSPyIntegration",
1516
"GoogleGenAIIntegration",
17+
"LiteLLMIntegration",
1618
]

py/src/braintrust/integrations/auto_test_scripts/test_auto_litellm.py

Lines changed: 8 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -1,24 +1,29 @@
11
"""Test auto_instrument for LiteLLM."""
22

3+
from pathlib import Path
4+
35
import litellm
46
from braintrust.auto import auto_instrument
7+
from braintrust.integrations.litellm import LiteLLMIntegration
58
from braintrust.wrappers.test_utils import autoinstrument_test_context
69

710

11+
_CASSETTES_DIR = Path(__file__).resolve().parent.parent / "litellm" / "cassettes"
12+
813
# 1. Verify not patched initially
9-
assert not hasattr(litellm, "_braintrust_wrapped")
14+
assert not LiteLLMIntegration.patchers[0].is_patched(litellm, None)
1015

1116
# 2. Instrument
1217
results = auto_instrument()
1318
assert results.get("litellm") == True
14-
assert hasattr(litellm, "_braintrust_wrapped")
19+
assert LiteLLMIntegration.patchers[0].is_patched(litellm, None)
1520

1621
# 3. Idempotent
1722
results2 = auto_instrument()
1823
assert results2.get("litellm") == True
1924

2025
# 4. Make API call and verify span
21-
with autoinstrument_test_context("test_auto_litellm") as memory_logger:
26+
with autoinstrument_test_context("test_auto_litellm", cassettes_dir=_CASSETTES_DIR) as memory_logger:
2227
response = litellm.completion(
2328
model="gpt-4o-mini",
2429
messages=[{"role": "user", "content": "Say hi"}],

py/src/braintrust/integrations/auto_test_scripts/test_patch_litellm_aresponses.py

Lines changed: 5 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1,17 +1,20 @@
11
"""Test that patch_litellm() patches aresponses."""
22

33
import asyncio
4+
from pathlib import Path
45

56
import litellm
6-
from braintrust.wrappers.litellm import patch_litellm
7+
from braintrust.integrations.litellm import patch_litellm
78
from braintrust.wrappers.test_utils import autoinstrument_test_context
89

910

11+
_CASSETTES_DIR = Path(__file__).resolve().parent.parent / "litellm" / "cassettes"
12+
1013
patch_litellm()
1114

1215

1316
async def main():
14-
with autoinstrument_test_context("test_patch_litellm_aresponses") as memory_logger:
17+
with autoinstrument_test_context("test_patch_litellm_aresponses", cassettes_dir=_CASSETTES_DIR) as memory_logger:
1518
response = await litellm.aresponses(
1619
model="gpt-4o-mini",
1720
input="What's 12 + 12?",

py/src/braintrust/integrations/auto_test_scripts/test_patch_litellm_responses.py

Lines changed: 6 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1,13 +1,17 @@
11
"""Test that patch_litellm() patches responses."""
22

3+
from pathlib import Path
4+
35
import litellm
4-
from braintrust.wrappers.litellm import patch_litellm
6+
from braintrust.integrations.litellm import patch_litellm
57
from braintrust.wrappers.test_utils import autoinstrument_test_context
68

79

10+
_CASSETTES_DIR = Path(__file__).resolve().parent.parent / "litellm" / "cassettes"
11+
812
patch_litellm()
913

10-
with autoinstrument_test_context("test_patch_litellm_responses") as memory_logger:
14+
with autoinstrument_test_context("test_patch_litellm_responses", cassettes_dir=_CASSETTES_DIR) as memory_logger:
1115
response = litellm.responses(
1216
model="gpt-4o-mini",
1317
input="What's 12 + 12?",

py/src/braintrust/integrations/dspy/tracing.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -50,7 +50,7 @@ class BraintrustDSpyCallback(BaseCallback):
5050
and disable DSPy's disk cache:
5151
5252
```python
53-
from braintrust.wrappers.litellm import patch_litellm
53+
from braintrust.integrations.litellm import patch_litellm
5454
patch_litellm()
5555
5656
import dspy
Lines changed: 40 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,40 @@
1+
"""Braintrust LiteLLM integration."""
2+
3+
from .integration import LiteLLMIntegration
4+
from .patchers import wrap_litellm
5+
6+
7+
def patch_litellm() -> bool:
8+
"""Patch LiteLLM to add Braintrust tracing.
9+
10+
This wraps litellm.completion, litellm.acompletion, litellm.responses,
11+
litellm.aresponses, litellm.embedding, and litellm.moderation to
12+
automatically create Braintrust spans with detailed token metrics,
13+
timing, and costs.
14+
15+
Returns:
16+
True if LiteLLM was patched (or already patched), False if LiteLLM is not installed.
17+
18+
Example:
19+
```python
20+
import braintrust
21+
braintrust.patch_litellm()
22+
23+
import litellm
24+
from braintrust import init_logger
25+
26+
logger = init_logger(project="my-project")
27+
response = litellm.completion(
28+
model="gpt-4o-mini",
29+
messages=[{"role": "user", "content": "Hello"}]
30+
)
31+
```
32+
"""
33+
return LiteLLMIntegration.setup()
34+
35+
36+
__all__ = [
37+
"LiteLLMIntegration",
38+
"patch_litellm",
39+
"wrap_litellm",
40+
]

py/src/braintrust/wrappers/cassettes/test_auto_litellm.yaml renamed to py/src/braintrust/integrations/litellm/cassettes/test_auto_litellm.yaml

File renamed without changes.

0 commit comments

Comments
 (0)