diff --git a/.changeset/langgraph-auto-instrumentation.md b/.changeset/langgraph-auto-instrumentation.md new file mode 100644 index 000000000..b3e99bb86 --- /dev/null +++ b/.changeset/langgraph-auto-instrumentation.md @@ -0,0 +1,5 @@ +--- +"braintrust": patch +--- + +feat: Add LangChain and LangGraph auto-instrumentation diff --git a/e2e/config/pr-comment-scenarios.json b/e2e/config/pr-comment-scenarios.json index 57cd3b616..3bee9763d 100644 --- a/e2e/config/pr-comment-scenarios.json +++ b/e2e/config/pr-comment-scenarios.json @@ -61,6 +61,11 @@ { "variantKey": "huggingface-v41315", "label": "v4.13.15" } ] }, + { + "scenarioDirName": "langgraph-auto-instrumentation", + "label": "LangGraph Auto-Instrumentation", + "metadataScenario": "langgraph-auto-instrumentation" + }, { "scenarioDirName": "mistral-instrumentation", "label": "Mistral Instrumentation", diff --git a/e2e/scenarios/langgraph-auto-instrumentation/__snapshots__/log-payloads.json b/e2e/scenarios/langgraph-auto-instrumentation/__snapshots__/log-payloads.json new file mode 100644 index 000000000..e7dc08d26 --- /dev/null +++ b/e2e/scenarios/langgraph-auto-instrumentation/__snapshots__/log-payloads.json @@ -0,0 +1,664 @@ +[ + { + "_is_merge": false, + "context": { + "caller_filename": "/e2e/helpers/provider-runtime.mjs", + "caller_functionname": "runTracedScenario", + "caller_lineno": 0 + }, + "created": "", + "id": "", + "log_id": "g", + "metadata": { + "scenario": "langgraph-auto-instrumentation", + "testRunId": "" + }, + "metrics": { + "start": 0 + }, + "project_id": "", + "root_span_id": "", + "span_attributes": { + "exec_counter": 0, + "name": "langgraph-auto-instrumentation-root", + "type": "task" + }, + "span_id": "" + }, + { + "_is_merge": true, + "id": "", + "log_id": "g", + "metrics": { + "end": 0 + }, + "project_id": "", + "root_span_id": "", + "span_id": "" + }, + { + "_is_merge": false, + "context": { + "caller_filename": "", + "caller_functionname": "", + "caller_lineno": 0 + }, + "created": "", 
+ "id": "", + "input": {}, + "log_id": "g", + "metadata": { + "braintrust": { + "integration_name": "langchain-js", + "integration_version": "0.2.0", + "sdk_language": "javascript" + }, + "metadata": {}, + "name": "LangGraph", + "run_id": "", + "serialized": { + "id": [ + "langgraph", + "pregel", + "CompiledStateGraph" + ], + "lc": 1, + "type": "not_implemented" + }, + "tags": [] + }, + "metrics": { + "start": 0 + }, + "project_id": "", + "root_span_id": "", + "span_attributes": { + "exec_counter": 1, + "name": "LangGraph", + "type": "task" + }, + "span_id": "", + "span_parents": [ + "" + ] + }, + { + "_is_merge": true, + "id": "", + "log_id": "g", + "metadata": { + "tags": [] + }, + "output": { + "message": "hello from langgraph" + }, + "project_id": "", + "root_span_id": "", + "span_id": "", + "span_parents": [ + "" + ] + }, + { + "_is_merge": true, + "id": "", + "log_id": "g", + "metrics": { + "end": 0 + }, + "project_id": "", + "root_span_id": "", + "span_id": "", + "span_parents": [ + "" + ] + }, + { + "_is_merge": false, + "context": { + "caller_filename": "", + "caller_functionname": "", + "caller_lineno": 0 + }, + "created": "", + "id": "", + "input": { + "message": "" + }, + "log_id": "g", + "metadata": { + "braintrust": { + "integration_name": "langchain-js", + "integration_version": "0.2.0", + "sdk_language": "javascript" + }, + "metadata": { + "checkpoint_ns": "sayHello:", + "langgraph_checkpoint_ns": "sayHello:", + "langgraph_node": "sayHello", + "langgraph_path": [ + "__pregel_pull", + "sayHello" + ], + "langgraph_step": 1, + "langgraph_triggers": [ + "branch:to:sayHello" + ] + }, + "name": "sayHello", + "parent_run_id": "", + "run_id": "", + "serialized": { + "id": [ + "langchain_core", + "runnables", + "RunnableSequence" + ], + "kwargs": { + "first": { + "id": [ + "langgraph", + "RunnableCallable" + ], + "lc": 1, + "type": "not_implemented" + }, + "last": { + "id": [ + "langgraph", + "ChannelWrite" + ], + "lc": 1, + "type": "not_implemented" + }, + 
"middle": [ + { + "id": [ + "langgraph", + "ChannelWrite" + ], + "lc": 1, + "type": "not_implemented" + }, + { + "id": [ + "langgraph", + "RunnableCallable" + ], + "lc": 1, + "type": "not_implemented" + } + ], + "omit_sequence_tags": true + }, + "lc": 1, + "type": "constructor" + }, + "tags": [ + "graph:step:1" + ] + }, + "metrics": { + "start": 0 + }, + "project_id": "", + "root_span_id": "", + "span_attributes": { + "exec_counter": 2, + "name": "sayHello", + "type": "task" + }, + "span_id": "", + "span_parents": [ + "" + ] + }, + { + "_is_merge": true, + "id": "", + "log_id": "g", + "metadata": { + "tags": [ + "graph:step:1" + ] + }, + "output": { + "message": "hello from langgraph" + }, + "project_id": "", + "root_span_id": "", + "span_id": "", + "span_parents": [ + "" + ] + }, + { + "_is_merge": true, + "id": "", + "log_id": "g", + "metrics": { + "end": 0 + }, + "project_id": "", + "root_span_id": "", + "span_id": "", + "span_parents": [ + "" + ] + }, + { + "_is_merge": false, + "context": { + "caller_filename": "", + "caller_functionname": "", + "caller_lineno": 0 + }, + "created": "", + "id": "", + "input": [ + [ + { + "id": [ + "langchain_core", + "messages", + "HumanMessage" + ], + "kwargs": { + "additional_kwargs": {}, + "content": "Reply with exactly: hello from langgraph", + "response_metadata": {} + }, + "lc": 1, + "type": "constructor" + } + ] + ], + "log_id": "g", + "metadata": { + "batch_size": 1, + "braintrust": { + "integration_name": "langchain-js", + "integration_version": "0.2.0", + "sdk_language": "javascript" + }, + "invocation_params": { + "max_tokens": 24, + "model": "gpt-4o-mini-2024-07-18", + "stream": false, + "temperature": 0 + }, + "metadata": { + "checkpoint_ns": "sayHello:", + "langgraph_checkpoint_ns": "sayHello:", + "langgraph_node": "sayHello", + "langgraph_path": [ + "__pregel_pull", + "sayHello" + ], + "langgraph_step": 1, + "langgraph_triggers": [ + "branch:to:sayHello" + ], + "ls_max_tokens": 24, + "ls_model_name": 
"gpt-4o-mini-2024-07-18", + "ls_model_type": "chat", + "ls_provider": "openai", + "ls_temperature": 0, + "versions": { + "@langchain/core": "", + "@langchain/openai": "" + } + }, + "options": { + "signal": {} + }, + "parent_run_id": "", + "run_id": "", + "serialized": { + "id": [ + "langchain", + "chat_models", + "openai", + "ChatOpenAI" + ], + "kwargs": { + "max_tokens": 24, + "model": "gpt-4o-mini-2024-07-18", + "openai_api_key": { + "id": [ + "OPENAI_API_KEY" + ], + "lc": 1, + "type": "secret" + }, + "temperature": 0 + }, + "lc": 1, + "type": "constructor" + }, + "tags": [] + }, + "metrics": { + "start": 0 + }, + "project_id": "", + "root_span_id": "", + "span_attributes": { + "exec_counter": 3, + "name": "ChatOpenAI", + "type": "llm" + }, + "span_id": "", + "span_parents": [ + "" + ] + }, + { + "_is_merge": true, + "id": "", + "log_id": "g", + "metrics": { + "completion_tokens": "", + "prompt_cached_tokens": "", + "prompt_tokens": "", + "total_tokens": "" + }, + "project_id": "", + "root_span_id": "", + "span_id": "", + "span_parents": [ + "" + ] + }, + { + "_is_merge": true, + "id": "", + "log_id": "g", + "metadata": { + "model": "gpt-4o-mini-2024-07-18", + "tags": [] + }, + "output": { + "generations": [ + [ + { + "generationInfo": { + "finish_reason": "stop" + }, + "message": { + "id": [ + "langchain_core", + "messages", + "AIMessage" + ], + "kwargs": { + "additional_kwargs": {}, + "content": "", + "id": "", + "invalid_tool_calls": [], + "response_metadata": { + "finish_reason": "stop", + "model_name": "gpt-4o-mini-2024-07-18", + "model_provider": "openai", + "system_fingerprint": "", + "tokenUsage": { + "completionTokens": 4, + "promptTokens": 15, + "totalTokens": 19 + }, + "usage": { + "completion_tokens": 4, + "completion_tokens_details": { + "accepted_prediction_tokens": 0, + "audio_tokens": 0, + "reasoning_tokens": 0, + "rejected_prediction_tokens": 0 + }, + "prompt_tokens": 15, + "prompt_tokens_details": { + "audio_tokens": 0, + "cached_tokens": 0 + }, 
+ "total_tokens": 19 + } + }, + "tool_calls": [], + "type": "ai", + "usage_metadata": { + "input_token_details": { + "audio": 0, + "cache_read": 0 + }, + "input_tokens": "", + "output_token_details": { + "audio": 0, + "reasoning": 0 + }, + "output_tokens": "", + "total_tokens": "" + } + }, + "lc": 1, + "type": "constructor" + }, + "text": "" + } + ] + ], + "llmOutput": { + "tokenUsage": { + "completionTokens": 4, + "promptTokens": 15, + "totalTokens": 19 + } + } + }, + "project_id": "", + "root_span_id": "", + "span_id": "", + "span_parents": [ + "" + ] + }, + { + "_is_merge": true, + "id": "", + "log_id": "g", + "metrics": { + "end": 0 + }, + "project_id": "", + "root_span_id": "", + "span_id": "", + "span_parents": [ + "" + ] + }, + { + "context": {}, + "created": "", + "id": "", + "input": [ + { + "content": "Reply with exactly: hello from langgraph", + "role": "user" + } + ], + "log_id": "g", + "metadata": { + "max_tokens": 24, + "model": "gpt-4o-mini-2024-07-18", + "provider": "openai", + "stream": false, + "temperature": 0 + }, + "metrics": { + "start": 0 + }, + "project_id": "", + "root_span_id": "", + "span_attributes": { + "exec_counter": 4, + "name": "Chat Completion", + "type": "llm" + }, + "span_id": "", + "span_parents": [ + "" + ] + }, + { + "_is_merge": true, + "id": "", + "log_id": "g", + "metrics": { + "completion_accepted_prediction_tokens": "", + "completion_audio_tokens": "", + "completion_reasoning_tokens": "", + "completion_rejected_prediction_tokens": "", + "completion_tokens": "", + "prompt_audio_tokens": "", + "prompt_cached_tokens": "", + "prompt_tokens": "", + "time_to_first_token": "", + "tokens": "" + }, + "project_id": "", + "root_span_id": "", + "span_id": "", + "span_parents": [ + "" + ] + }, + { + "_is_merge": true, + "id": "", + "log_id": "g", + "output": [ + { + "finish_reason": "stop", + "index": 0, + "logprobs": null, + "message": { + "annotations": [], + "content": "hello from langgraph", + "refusal": null, + "role": 
"assistant" + } + } + ], + "project_id": "", + "root_span_id": "", + "span_id": "", + "span_parents": [ + "" + ] + }, + { + "_is_merge": true, + "id": "", + "log_id": "g", + "metrics": { + "end": 0 + }, + "project_id": "", + "root_span_id": "", + "span_id": "", + "span_parents": [ + "" + ] + }, + { + "context": { + "caller_filename": "", + "caller_functionname": "", + "caller_lineno": 0 + }, + "created": "", + "id": "", + "input": { + "message": "hello from langgraph" + }, + "log_id": "g", + "metadata": { + "braintrust": { + "integration_name": "langchain-js", + "integration_version": "0.2.0", + "sdk_language": "javascript" + }, + "metadata": { + "checkpoint_ns": "sayBye:", + "langgraph_checkpoint_ns": "sayBye:", + "langgraph_node": "sayBye", + "langgraph_path": [ + "__pregel_pull", + "sayBye" + ], + "langgraph_step": 2, + "langgraph_triggers": [ + "branch:to:sayBye" + ] + }, + "name": "sayBye", + "parent_run_id": "", + "run_id": "", + "serialized": { + "id": [ + "langchain_core", + "runnables", + "RunnableSequence" + ], + "kwargs": { + "first": { + "id": [ + "langgraph", + "RunnableCallable" + ], + "lc": 1, + "type": "not_implemented" + }, + "last": { + "id": [ + "langgraph", + "RunnableCallable" + ], + "lc": 1, + "type": "not_implemented" + }, + "middle": [ + { + "id": [ + "langgraph", + "ChannelWrite" + ], + "lc": 1, + "type": "not_implemented" + } + ], + "omit_sequence_tags": true + }, + "lc": 1, + "type": "constructor" + }, + "tags": [ + "graph:step:2" + ] + }, + "metrics": { + "end": 0, + "start": 0 + }, + "output": {}, + "project_id": "", + "root_span_id": "", + "span_attributes": { + "exec_counter": 5, + "name": "sayBye", + "type": "task" + }, + "span_id": "", + "span_parents": [ + "" + ] + } +] diff --git a/e2e/scenarios/langgraph-auto-instrumentation/__snapshots__/span-events.json b/e2e/scenarios/langgraph-auto-instrumentation/__snapshots__/span-events.json new file mode 100644 index 000000000..8d21c2de7 --- /dev/null +++ 
b/e2e/scenarios/langgraph-auto-instrumentation/__snapshots__/span-events.json @@ -0,0 +1,74 @@ +[ + { + "has_input": false, + "has_output": false, + "metadata": { + "scenario": "langgraph-auto-instrumentation" + }, + "metric_keys": [], + "name": "langgraph-auto-instrumentation-root", + "root_span_id": "", + "span_id": "", + "span_parents": [], + "type": "task" + }, + { + "has_input": true, + "has_output": true, + "metadata": null, + "metric_keys": [], + "name": "LangGraph", + "root_span_id": "", + "span_id": "", + "span_parents": [ + "" + ], + "type": "task" + }, + { + "has_input": true, + "has_output": false, + "metadata": null, + "metric_keys": [], + "name": "sayHello", + "root_span_id": "", + "span_id": "", + "span_parents": [ + "" + ], + "type": "task" + }, + { + "has_input": true, + "has_output": true, + "metadata": { + "model": "gpt-4o-mini-2024-07-18" + }, + "metric_keys": [ + "completion_tokens", + "prompt_cached_tokens", + "prompt_tokens", + "total_tokens" + ], + "name": "ChatOpenAI", + "root_span_id": "", + "span_id": "", + "span_parents": [ + "" + ], + "type": "llm" + }, + { + "has_input": true, + "has_output": true, + "metadata": null, + "metric_keys": [], + "name": "sayBye", + "root_span_id": "", + "span_id": "", + "span_parents": [ + "" + ], + "type": "task" + } +] diff --git a/e2e/scenarios/langgraph-auto-instrumentation/assertions.ts b/e2e/scenarios/langgraph-auto-instrumentation/assertions.ts new file mode 100644 index 000000000..0346abb61 --- /dev/null +++ b/e2e/scenarios/langgraph-auto-instrumentation/assertions.ts @@ -0,0 +1,220 @@ +import { expect } from "vitest"; +import { normalizeForSnapshot, type Json } from "../../helpers/normalize"; +import type { + CapturedLogEvent, + CapturedLogPayload, +} from "../../helpers/mock-braintrust-server"; +import { findChildSpans, findLatestSpan } from "../../helpers/trace-selectors"; +import { + payloadRowsForRootSpan, + summarizeWrapperContract, +} from "../../helpers/wrapper-contract"; +import { 
ROOT_NAME, SCENARIO_NAME } from "./constants.mjs"; + +function findDescendantSpan( + events: CapturedLogEvent[], + name: string, + ancestorId: string | undefined, + predicate: (event: CapturedLogEvent) => boolean = () => true, +): CapturedLogEvent | undefined { + if (!ancestorId) { + return undefined; + } + + const queue = [ancestorId]; + const visited = new Set(); + + while (queue.length > 0) { + const current = queue.shift(); + if (!current || visited.has(current)) { + continue; + } + visited.add(current); + + for (const event of events) { + if (!event.span.parentIds.includes(current)) { + continue; + } + if (event.span.name === name && predicate(event)) { + return event; + } + if (event.span.id) { + queue.push(event.span.id); + } + } + } + + return undefined; +} + +function normalizeLangGraphPayloadRows(rows: unknown[]): unknown[] { + return rows.map((row) => { + if (!row || typeof row !== "object") { + return row; + } + + const normalized = structuredClone(row) as Record; + normalizeTokenMetrics(normalized.metrics); + normalizeLLMOutput(normalized.output); + normalizeLangchainMetadata(normalized); + return normalized; + }); +} + +const LANGCHAIN_LS_VOLATILE_KEYS = new Set([ + "max_tokens", + "model", + "stream", + "stream_options", + "temperature", +]); + +function normalizeLangchainMetadata(value: unknown): void { + if (!value || typeof value !== "object") { + return; + } + + if (Array.isArray(value)) { + for (const item of value) { + normalizeLangchainMetadata(item); + } + return; + } + + const record = value as Record; + delete record.__pregel_task_id; + delete record.ls_integration; + + if ( + record.versions && + typeof record.versions === "object" && + !Array.isArray(record.versions) + ) { + const versions = record.versions as Record; + for (const [key, version] of Object.entries(versions)) { + if (key.startsWith("@langchain/") && typeof version === "string") { + versions[key] = ""; + } + } + } + + const hasLsKey = Object.keys(record).some((key) => 
key.startsWith("ls_")); + if (hasLsKey) { + for (const key of LANGCHAIN_LS_VOLATILE_KEYS) { + delete record[key]; + } + } + + for (const nested of Object.values(record)) { + normalizeLangchainMetadata(nested); + } +} + +function normalizeTokenMetrics(value: unknown): void { + if (!value || typeof value !== "object" || Array.isArray(value)) { + return; + } + + const metrics = value as Record; + for (const [key, metricValue] of Object.entries(metrics)) { + if (key.includes("token") && typeof metricValue === "number") { + metrics[key] = ""; + } + } +} + +function normalizeLLMOutput(value: unknown): void { + if (!value || typeof value !== "object") { + return; + } + + if (Array.isArray(value)) { + for (const item of value) { + normalizeLLMOutput(item); + } + return; + } + + const record = value as Record; + if (typeof record.text === "string") { + record.text = ""; + } + + const kwargs = record.kwargs; + if (kwargs && typeof kwargs === "object" && !Array.isArray(kwargs)) { + const kwargsRecord = kwargs as Record; + if (typeof kwargsRecord.content === "string") { + kwargsRecord.content = ""; + } + normalizeTokenMetrics(kwargsRecord.usage_metadata); + } + + for (const nested of Object.values(record)) { + normalizeLLMOutput(nested); + } +} + +export function assertLangGraphAutoInstrumentation(options: { + capturedEvents: CapturedLogEvent[]; + payloads: CapturedLogPayload[]; +}): { payloadSummary: Json; spanSummary: Json } { + const root = findLatestSpan(options.capturedEvents, ROOT_NAME); + expect(root).toBeDefined(); + expect(root?.row.metadata).toMatchObject({ + scenario: SCENARIO_NAME, + }); + + const graphSpan = findChildSpans( + options.capturedEvents, + "LangGraph", + root?.span.id, + )[0]; + expect(graphSpan).toBeDefined(); + expect(graphSpan?.span.type).toBe("task"); + + const sayHelloSpan = findDescendantSpan( + options.capturedEvents, + "sayHello", + graphSpan?.span.id, + ); + expect(sayHelloSpan).toBeDefined(); + expect(sayHelloSpan?.span.type).toBe("task"); + 
+ const sayByeSpan = findDescendantSpan( + options.capturedEvents, + "sayBye", + graphSpan?.span.id, + ); + expect(sayByeSpan).toBeDefined(); + expect(sayByeSpan?.span.type).toBe("task"); + + const llmSpan = findDescendantSpan( + options.capturedEvents, + "ChatOpenAI", + sayHelloSpan?.span.id, + (event) => + typeof event.metrics?.completion_tokens === "number" && + typeof event.metrics?.prompt_tokens === "number" && + typeof event.metrics?.total_tokens === "number", + ); + expect(llmSpan).toBeDefined(); + expect(llmSpan?.span.type).toBe("llm"); + expect(llmSpan?.metrics).toMatchObject({ + completion_tokens: expect.any(Number), + prompt_tokens: expect.any(Number), + total_tokens: expect.any(Number), + }); + + return { + spanSummary: normalizeForSnapshot( + [root, graphSpan, sayHelloSpan, llmSpan, sayByeSpan].map((event) => + summarizeWrapperContract(event!, ["model", "scenario"]), + ) as Json, + ), + payloadSummary: normalizeForSnapshot( + normalizeLangGraphPayloadRows( + payloadRowsForRootSpan(options.payloads, root?.span.id), + ) as Json, + ), + }; +} diff --git a/e2e/scenarios/langgraph-auto-instrumentation/constants.mjs b/e2e/scenarios/langgraph-auto-instrumentation/constants.mjs new file mode 100644 index 000000000..0dded58b9 --- /dev/null +++ b/e2e/scenarios/langgraph-auto-instrumentation/constants.mjs @@ -0,0 +1,2 @@ +export const ROOT_NAME = "langgraph-auto-instrumentation-root"; +export const SCENARIO_NAME = "langgraph-auto-instrumentation"; diff --git a/e2e/scenarios/langgraph-auto-instrumentation/package.json b/e2e/scenarios/langgraph-auto-instrumentation/package.json new file mode 100644 index 000000000..a1c948718 --- /dev/null +++ b/e2e/scenarios/langgraph-auto-instrumentation/package.json @@ -0,0 +1,18 @@ +{ + "name": "@braintrust/e2e-langgraph-auto-instrumentation", + "private": true, + "braintrustScenario": { + "canary": { + "dependencies": { + "@langchain/core": "latest", + "@langchain/langgraph": "latest", + "@langchain/openai": "latest" + } + } + 
}, + "dependencies": { + "@langchain/core": "1.1.35", + "@langchain/langgraph": "1.0.7", + "@langchain/openai": "1.3.0" + } +} diff --git a/e2e/scenarios/langgraph-auto-instrumentation/pnpm-lock.yaml b/e2e/scenarios/langgraph-auto-instrumentation/pnpm-lock.yaml new file mode 100644 index 000000000..4c91a9297 --- /dev/null +++ b/e2e/scenarios/langgraph-auto-instrumentation/pnpm-lock.yaml @@ -0,0 +1,277 @@ +lockfileVersion: '9.0' + +settings: + autoInstallPeers: true + excludeLinksFromLockfile: false + +importers: + + .: + dependencies: + '@langchain/core': + specifier: 1.1.35 + version: 1.1.35(openai@6.34.0(zod@4.3.6)) + '@langchain/langgraph': + specifier: 1.0.7 + version: 1.0.7(@langchain/core@1.1.35(openai@6.34.0(zod@4.3.6)))(zod@4.3.6) + '@langchain/openai': + specifier: 1.3.0 + version: 1.3.0(@langchain/core@1.1.35(openai@6.34.0(zod@4.3.6))) + +packages: + + '@cfworker/json-schema@4.1.1': + resolution: {integrity: sha512-gAmrUZSGtKc3AiBL71iNWxDsyUC5uMaKKGdvzYsBoTW/xi42JQHl7eKV2OYzCUqvc+D2RCcf7EXY2iCyFIk6og==} + + '@langchain/core@1.1.35': + resolution: {integrity: sha512-TM0idLSAx17IEMqEIN7F8MDdXpN53p4A4vhhjZxf7LedwShB/8Zx1F5D5Nq1Bbn0zh9QDzSHpWyfCrTtggKf7g==} + engines: {node: '>=20'} + + '@langchain/langgraph-checkpoint@1.0.1': + resolution: {integrity: sha512-HM0cJLRpIsSlWBQ/xuDC67l52SqZ62Bh2Y61DX+Xorqwoh5e1KxYvfCD7GnSTbWWhjBOutvnR0vPhu4orFkZfw==} + engines: {node: '>=18'} + peerDependencies: + '@langchain/core': ^1.0.1 + + '@langchain/langgraph-sdk@1.3.1': + resolution: {integrity: sha512-zTi7DZHwqtMEzapvm3I1FL4Q7OZsxtq9tTXy6s2gcCxyIU3sphqRboqytqVN7dNHLdTCLb8nXy49QKurs2MIBg==} + peerDependencies: + '@langchain/core': ^1.0.1 + react: ^18 || ^19 + react-dom: ^18 || ^19 + peerDependenciesMeta: + '@langchain/core': + optional: true + react: + optional: true + react-dom: + optional: true + + '@langchain/langgraph@1.0.7': + resolution: {integrity: sha512-EBGqNOWoRiEoLUaeuiXRpUM8/DE6QcwiirNyd97XhezStebBoTTilWH8CUt6S94JRGl5zwfBBRHfzotDnZS/eA==} + engines: {node: 
'>=18'} + peerDependencies: + '@langchain/core': ^1.0.1 + zod: ^3.25.32 || ^4.1.0 + zod-to-json-schema: ^3.x + peerDependenciesMeta: + zod-to-json-schema: + optional: true + + '@langchain/openai@1.3.0': + resolution: {integrity: sha512-FDsF6xKCvFduiZcX57fL2Md+DZ+fJubcUN1iwUaEwJOQnq7zFFYj3a/KuQ7EiOFR3hEsnhPilSfxO1VW85wMZw==} + engines: {node: '>=20'} + peerDependencies: + '@langchain/core': ^1.1.33 + + '@standard-schema/spec@1.1.0': + resolution: {integrity: sha512-l2aFy5jALhniG5HgqrD6jXLi/rUWrKvqN/qJx6yoJsgKhblVd+iqqU4RCXavm/jPityDo5TCvKMnpjKnOriy0w==} + + '@types/retry@0.12.0': + resolution: {integrity: sha512-wWKOClTTiizcZhXnPY4wikVAwmdYHp8q6DmC+EJUzAMsycb7HB32Kh9RN4+0gExjmPmZSAQjgURXIGATPegAvA==} + + ansi-styles@5.2.0: + resolution: {integrity: sha512-Cxwpt2SfTzTtXcfOlzGEee8O+c+MmUgGrNiBcXnuWxuFJHe6a5Hz7qwhwe5OgaSYI0IJvkLqWX1ASG+cJOkEiA==} + engines: {node: '>=10'} + + base64-js@1.5.1: + resolution: {integrity: sha512-AKpaYlHn8t4SVbOHCy+b5+KKgvR4vrsD8vbvrbiQJps7fKDTkjkDry6ji0rUJjC0kzbNePLwzxq8iypo41qeWA==} + + camelcase@6.3.0: + resolution: {integrity: sha512-Gmy6FhYlCY7uOElZUSbxo2UCDH8owEk996gkbrpsgGtrJLM3J7jGxl9Ic7Qwwj4ivOE5AWZWRMecDdF7hqGjFA==} + engines: {node: '>=10'} + + decamelize@1.2.0: + resolution: {integrity: sha512-z2S+W9X73hAUUki+N+9Za2lBlun89zigOyGrsax+KUQ6wKW4ZoWpEYBkGhQjwAjjDCkWxhY0VKEhk8wzY7F5cA==} + engines: {node: '>=0.10.0'} + + eventemitter3@4.0.7: + resolution: {integrity: sha512-8guHBZCwKnFhYdHr2ysuRWErTwhoN2X8XELRlrRwpmfeY2jjuUN4taQMsULKUVo1K4DvZl+0pgfyoysHxvmvEw==} + + js-tiktoken@1.0.21: + resolution: {integrity: sha512-biOj/6M5qdgx5TKjDnFT1ymSpM5tbd3ylwDtrQvFQSu0Z7bBYko2dF+W/aUkXUPuk6IVpRxk/3Q2sHOzGlS36g==} + + langsmith@0.5.22: + resolution: {integrity: sha512-ed/Qi65m/yB+D13u+Y49IutbODmzOZfZQX+RT+vRIYb6FoI3Z3E4uQK2UIXuPbQpnqPcvG/MqUP2Mq55wVzE7g==} + peerDependencies: + '@opentelemetry/api': '*' + '@opentelemetry/exporter-trace-otlp-proto': '*' + '@opentelemetry/sdk-trace-base': '*' + openai: '*' + ws: '>=7' + peerDependenciesMeta: + 
'@opentelemetry/api': + optional: true + '@opentelemetry/exporter-trace-otlp-proto': + optional: true + '@opentelemetry/sdk-trace-base': + optional: true + openai: + optional: true + ws: + optional: true + + mustache@4.2.0: + resolution: {integrity: sha512-71ippSywq5Yb7/tVYyGbkBggbU8H3u5Rz56fH60jGFgr8uHwxs+aSKeqmluIVzM0m0kB7xQjKS6qPfd0b2ZoqQ==} + hasBin: true + + openai@6.34.0: + resolution: {integrity: sha512-yEr2jdGf4tVFYG6ohmr3pF6VJuveP0EA/sS8TBx+4Eq5NT10alu5zg2dmxMXMgqpihRDQlFGpRt2XwsGj+Fyxw==} + hasBin: true + peerDependencies: + ws: ^8.18.0 + zod: ^3.25 || ^4.0 + peerDependenciesMeta: + ws: + optional: true + zod: + optional: true + + p-finally@1.0.0: + resolution: {integrity: sha512-LICb2p9CB7FS+0eR1oqWnHhp0FljGLZCWBE9aix0Uye9W8LTQPwMTYVGWQWIw9RdQiDg4+epXQODwIYJtSJaow==} + engines: {node: '>=4'} + + p-queue@6.6.2: + resolution: {integrity: sha512-RwFpb72c/BhQLEXIZ5K2e+AhgNVmIejGlTgiB9MzZ0e93GRvqZ7uSi0dvRF7/XIXDeNkra2fNHBxTyPDGySpjQ==} + engines: {node: '>=8'} + + p-retry@4.6.2: + resolution: {integrity: sha512-312Id396EbJdvRONlngUx0NydfrIQ5lsYu0znKVUzVvArzEIt08V1qhtyESbGVd1FGX7UKtiFp5uwKZdM8wIuQ==} + engines: {node: '>=8'} + + p-timeout@3.2.0: + resolution: {integrity: sha512-rhIwUycgwwKcP9yTOOFK/AKsAopjjCakVqLHePO3CC6Mir1Z99xT+R63jZxAT5lFZLa2inS5h+ZS2GvR99/FBg==} + engines: {node: '>=8'} + + retry@0.13.1: + resolution: {integrity: sha512-XQBQ3I8W1Cge0Seh+6gjj03LbmRFWuoszgK9ooCpwYIrhhoO80pfq4cUkU5DkknwfOfFteRwlZ56PYOGYyFWdg==} + engines: {node: '>= 4'} + + uuid@10.0.0: + resolution: {integrity: sha512-8XkAphELsDnEGrDxUOHB3RGvXz6TeuYSGEZBOjtTtPm2lwhGBjLgOzLHB63IUWfBpNucQjND6d3AOudO+H3RWQ==} + hasBin: true + + uuid@11.1.0: + resolution: {integrity: sha512-0/A9rDy9P7cJ+8w1c9WD9V//9Wj15Ce2MPz8Ri6032usz+NfePxx5AcN3bN+r6ZL6jEo066/yNYB3tn4pQEx+A==} + hasBin: true + + uuid@9.0.1: + resolution: {integrity: sha512-b+1eJOlsR9K8HJpow9Ok3fiWOWSIcIzXodvv0rQjVoOVNpWMpxf1wZNpt4y9h10odCNrqnYp1OBzRktckBe3sA==} + hasBin: true + + zod@4.3.6: + resolution: {integrity: 
sha512-rftlrkhHZOcjDwkGlnUtZZkvaPHCsDATp4pGpuOOMDaTdDDXF91wuVDJoWoPsKX/3YPQ5fHuF3STjcYyKr+Qhg==} + +snapshots: + + '@cfworker/json-schema@4.1.1': {} + + '@langchain/core@1.1.35(openai@6.34.0(zod@4.3.6))': + dependencies: + '@cfworker/json-schema': 4.1.1 + '@standard-schema/spec': 1.1.0 + ansi-styles: 5.2.0 + camelcase: 6.3.0 + decamelize: 1.2.0 + js-tiktoken: 1.0.21 + langsmith: 0.5.22(openai@6.34.0(zod@4.3.6)) + mustache: 4.2.0 + p-queue: 6.6.2 + uuid: 11.1.0 + zod: 4.3.6 + transitivePeerDependencies: + - '@opentelemetry/api' + - '@opentelemetry/exporter-trace-otlp-proto' + - '@opentelemetry/sdk-trace-base' + - openai + - ws + + '@langchain/langgraph-checkpoint@1.0.1(@langchain/core@1.1.35(openai@6.34.0(zod@4.3.6)))': + dependencies: + '@langchain/core': 1.1.35(openai@6.34.0(zod@4.3.6)) + uuid: 10.0.0 + + '@langchain/langgraph-sdk@1.3.1(@langchain/core@1.1.35(openai@6.34.0(zod@4.3.6)))': + dependencies: + p-queue: 6.6.2 + p-retry: 4.6.2 + uuid: 9.0.1 + optionalDependencies: + '@langchain/core': 1.1.35(openai@6.34.0(zod@4.3.6)) + + '@langchain/langgraph@1.0.7(@langchain/core@1.1.35(openai@6.34.0(zod@4.3.6)))(zod@4.3.6)': + dependencies: + '@langchain/core': 1.1.35(openai@6.34.0(zod@4.3.6)) + '@langchain/langgraph-checkpoint': 1.0.1(@langchain/core@1.1.35(openai@6.34.0(zod@4.3.6))) + '@langchain/langgraph-sdk': 1.3.1(@langchain/core@1.1.35(openai@6.34.0(zod@4.3.6))) + uuid: 10.0.0 + zod: 4.3.6 + transitivePeerDependencies: + - react + - react-dom + + '@langchain/openai@1.3.0(@langchain/core@1.1.35(openai@6.34.0(zod@4.3.6)))': + dependencies: + '@langchain/core': 1.1.35(openai@6.34.0(zod@4.3.6)) + js-tiktoken: 1.0.21 + openai: 6.34.0(zod@4.3.6) + zod: 4.3.6 + transitivePeerDependencies: + - ws + + '@standard-schema/spec@1.1.0': {} + + '@types/retry@0.12.0': {} + + ansi-styles@5.2.0: {} + + base64-js@1.5.1: {} + + camelcase@6.3.0: {} + + decamelize@1.2.0: {} + + eventemitter3@4.0.7: {} + + js-tiktoken@1.0.21: + dependencies: + base64-js: 1.5.1 + + 
langsmith@0.5.22(openai@6.34.0(zod@4.3.6)): + dependencies: + p-queue: 6.6.2 + uuid: 10.0.0 + optionalDependencies: + openai: 6.34.0(zod@4.3.6) + + mustache@4.2.0: {} + + openai@6.34.0(zod@4.3.6): + optionalDependencies: + zod: 4.3.6 + + p-finally@1.0.0: {} + + p-queue@6.6.2: + dependencies: + eventemitter3: 4.0.7 + p-timeout: 3.2.0 + + p-retry@4.6.2: + dependencies: + '@types/retry': 0.12.0 + retry: 0.13.1 + + p-timeout@3.2.0: + dependencies: + p-finally: 1.0.0 + + retry@0.13.1: {} + + uuid@10.0.0: {} + + uuid@11.1.0: {} + + uuid@9.0.1: {} + + zod@4.3.6: {} diff --git a/e2e/scenarios/langgraph-auto-instrumentation/scenario.mjs b/e2e/scenarios/langgraph-auto-instrumentation/scenario.mjs new file mode 100644 index 000000000..88b8cb94c --- /dev/null +++ b/e2e/scenarios/langgraph-auto-instrumentation/scenario.mjs @@ -0,0 +1,61 @@ +import { HumanMessage } from "@langchain/core/messages"; +import { Annotation, END, START, StateGraph } from "@langchain/langgraph"; +import { ChatOpenAI } from "@langchain/openai"; +import { runMain, runTracedScenario } from "../../helpers/provider-runtime.mjs"; +import { ROOT_NAME, SCENARIO_NAME } from "./constants.mjs"; + +const OPENAI_MODEL = "gpt-4o-mini-2024-07-18"; + +runMain(async () => { + if (!process.env.OPENAI_API_KEY) { + throw new Error("OPENAI_API_KEY is required for this e2e scenario"); + } + + await runTracedScenario({ + callback: async () => { + const GraphState = Annotation.Root({ + message: Annotation({ + reducer: (_, value) => value, + default: () => "", + }), + }); + + const model = new ChatOpenAI({ + model: OPENAI_MODEL, + maxTokens: 24, + temperature: 0, + }); + + async function sayHello() { + const response = await model.invoke([ + new HumanMessage("Reply with exactly: hello from langgraph"), + ]); + + return { + message: typeof response.content === "string" ? 
response.content : "", + }; + } + + function sayBye() { + return {}; + } + + const graph = new StateGraph(GraphState) + .addNode("sayHello", sayHello) + .addNode("sayBye", sayBye) + .addEdge(START, "sayHello") + .addEdge("sayHello", "sayBye") + .addEdge("sayBye", END) + .compile(); + + await graph.invoke({}); + }, + flushCount: 2, + flushDelayMs: 100, + metadata: { + scenario: SCENARIO_NAME, + }, + projectNameBase: "e2e-langgraph-auto-instrumentation", + rootName: ROOT_NAME, + }); +}); diff --git a/e2e/scenarios/langgraph-auto-instrumentation/scenario.test.ts b/e2e/scenarios/langgraph-auto-instrumentation/scenario.test.ts new file mode 100644 index 000000000..4b0b9f3e7 --- /dev/null +++ b/e2e/scenarios/langgraph-auto-instrumentation/scenario.test.ts @@ -0,0 +1,55 @@ +import { expect, test } from "vitest"; +import { + formatJsonFileSnapshot, + resolveFileSnapshotPath, +} from "../../helpers/file-snapshot"; +import { + prepareScenarioDir, + resolveScenarioDir, + withScenarioHarness, +} from "../../helpers/scenario-harness"; +import { assertLangGraphAutoInstrumentation } from "./assertions"; + +const scenarioDir = await prepareScenarioDir({ + scenarioDir: resolveScenarioDir(import.meta.url), +}); +const TIMEOUT_MS = 120_000; + +test( + "langgraph auto-instrumentation captures spans via the braintrust hook", + { + timeout: TIMEOUT_MS, + }, + async () => { + if (!process.env.OPENAI_API_KEY) { + throw new Error("OPENAI_API_KEY is required for this e2e scenario"); + } + + await withScenarioHarness( + async ({ events, payloads, runNodeScenarioDir }) => { + await runNodeScenarioDir({ + entry: "scenario.mjs", + nodeArgs: ["--import", "braintrust/hook.mjs"], + scenarioDir, + timeoutMs: TIMEOUT_MS, + }); + + const summaries = assertLangGraphAutoInstrumentation({ + capturedEvents: events(), + payloads: payloads(), + }); + + await expect( + formatJsonFileSnapshot(summaries.spanSummary), + ).toMatchFileSnapshot( + resolveFileSnapshotPath(import.meta.url, "span-events.json"), + ); 
+ await expect( + formatJsonFileSnapshot(summaries.payloadSummary), + ).toMatchFileSnapshot( + resolveFileSnapshotPath(import.meta.url, "log-payloads.json"), + ); + }, + ); + }, +); diff --git a/js/src/auto-instrumentations/configs/langchain.test.ts b/js/src/auto-instrumentations/configs/langchain.test.ts new file mode 100644 index 000000000..51e2ea528 --- /dev/null +++ b/js/src/auto-instrumentations/configs/langchain.test.ts @@ -0,0 +1,36 @@ +import { describe, expect, it } from "vitest"; +import { langChainChannels } from "../../instrumentation/plugins/langchain-channels"; +import { langchainConfigs } from "./langchain"; + +describe("langchain auto-instrumentation configs", () => { + it("targets CallbackManager configure methods in @langchain/core", () => { + expect(langchainConfigs).toEqual([ + { + channelName: langChainChannels.configure.channelName, + module: { + name: "@langchain/core", + versionRange: ">=0.3.42", + filePath: "dist/callbacks/manager.js", + }, + functionQuery: { + className: "CallbackManager", + methodName: "configure", + kind: "Sync", + }, + }, + { + channelName: langChainChannels.configureSync.channelName, + module: { + name: "@langchain/core", + versionRange: ">=0.3.42", + filePath: "dist/callbacks/manager.js", + }, + functionQuery: { + className: "CallbackManager", + methodName: "_configureSync", + kind: "Sync", + }, + }, + ]); + }); +}); diff --git a/js/src/auto-instrumentations/configs/langchain.ts b/js/src/auto-instrumentations/configs/langchain.ts new file mode 100644 index 000000000..eb1a2f776 --- /dev/null +++ b/js/src/auto-instrumentations/configs/langchain.ts @@ -0,0 +1,34 @@ +import type { InstrumentationConfig } from "@apm-js-collab/code-transformer"; +import { langChainChannels } from "../../instrumentation/plugins/langchain-channels"; + +const langChainCoreVersionRange = ">=0.3.42"; +const langChainCallbackManagerFilePath = "dist/callbacks/manager.js"; + +export const langchainConfigs: InstrumentationConfig[] = [ + { + 
channelName: langChainChannels.configure.channelName, + module: { + name: "@langchain/core", + versionRange: langChainCoreVersionRange, + filePath: langChainCallbackManagerFilePath, + }, + functionQuery: { + className: "CallbackManager", + methodName: "configure", + kind: "Sync", + }, + }, + { + channelName: langChainChannels.configureSync.channelName, + module: { + name: "@langchain/core", + versionRange: langChainCoreVersionRange, + filePath: langChainCallbackManagerFilePath, + }, + functionQuery: { + className: "CallbackManager", + methodName: "_configureSync", + kind: "Sync", + }, + }, +]; diff --git a/js/src/auto-instrumentations/hook.mts b/js/src/auto-instrumentations/hook.mts index 7c6035cf7..c03f99fa9 100644 --- a/js/src/auto-instrumentations/hook.mts +++ b/js/src/auto-instrumentations/hook.mts @@ -28,6 +28,7 @@ import { googleADKConfigs } from "./configs/google-adk.js"; import { cohereConfigs } from "./configs/cohere.js"; import { groqConfigs } from "./configs/groq.js"; import { gitHubCopilotConfigs } from "./configs/github-copilot.js"; +import { langchainConfigs } from "./configs/langchain.js"; import { ModulePatch } from "./loader/cjs-patch.js"; import { patchTracingChannel } from "./patch-tracing-channel.js"; @@ -97,6 +98,9 @@ const allConfigs = [ ) ? [] : gitHubCopilotConfigs), + ...(isDisabled(disabledIntegrations, "langchain", "langchain-js", "langgraph") + ? [] + : langchainConfigs), ]; // 1. 
Register ESM loader for ESM modules diff --git a/js/src/auto-instrumentations/index.ts b/js/src/auto-instrumentations/index.ts index 3fa9bbd67..f1cf5f52c 100644 --- a/js/src/auto-instrumentations/index.ts +++ b/js/src/auto-instrumentations/index.ts @@ -42,6 +42,7 @@ export { googleADKConfigs } from "./configs/google-adk"; export { cohereConfigs } from "./configs/cohere"; export { groqConfigs } from "./configs/groq"; export { gitHubCopilotConfigs } from "./configs/github-copilot"; +export { langchainConfigs } from "./configs/langchain"; // Re-export orchestrion configuration types // Note: ModuleMetadata and FunctionQuery are properties of InstrumentationConfig, diff --git a/js/src/instrumentation/braintrust-plugin.test.ts b/js/src/instrumentation/braintrust-plugin.test.ts index f35e36aea..9260dfb69 100644 --- a/js/src/instrumentation/braintrust-plugin.test.ts +++ b/js/src/instrumentation/braintrust-plugin.test.ts @@ -12,6 +12,7 @@ import { MistralPlugin } from "./plugins/mistral-plugin"; import { CoherePlugin } from "./plugins/cohere-plugin"; import { GroqPlugin } from "./plugins/groq-plugin"; import { GitHubCopilotPlugin } from "./plugins/github-copilot-plugin"; +import { LangChainPlugin } from "./plugins/langchain-plugin"; function createPluginClassMock() { return vi.fn(function MockPlugin(this: { @@ -78,6 +79,10 @@ vi.mock("./plugins/github-copilot-plugin", () => ({ GitHubCopilotPlugin: createPluginClassMock(), })); +vi.mock("./plugins/langchain-plugin", () => ({ + LangChainPlugin: createPluginClassMock(), +})); + describe("BraintrustPlugin", () => { beforeEach(() => { vi.clearAllMocks(); @@ -195,6 +200,15 @@ describe("BraintrustPlugin", () => { expect(mockInstance.enable).toHaveBeenCalledTimes(1); }); + it("should create and enable LangChain plugin by default", () => { + const plugin = new BraintrustPlugin(); + plugin.enable(); + + expect(LangChainPlugin).toHaveBeenCalledTimes(1); + const mockInstance = vi.mocked(LangChainPlugin).mock.results[0].value; + 
expect(mockInstance.enable).toHaveBeenCalledTimes(1); + }); + it("should create all plugins when enabled with no config", () => { const plugin = new BraintrustPlugin(); plugin.enable(); @@ -211,6 +225,7 @@ describe("BraintrustPlugin", () => { expect(CoherePlugin).toHaveBeenCalledTimes(1); expect(GroqPlugin).toHaveBeenCalledTimes(1); expect(GitHubCopilotPlugin).toHaveBeenCalledTimes(1); + expect(LangChainPlugin).toHaveBeenCalledTimes(1); }); it("should create all plugins when enabled with empty config", () => { @@ -229,6 +244,7 @@ describe("BraintrustPlugin", () => { expect(CoherePlugin).toHaveBeenCalledTimes(1); expect(GroqPlugin).toHaveBeenCalledTimes(1); expect(GitHubCopilotPlugin).toHaveBeenCalledTimes(1); + expect(LangChainPlugin).toHaveBeenCalledTimes(1); }); it("should create all plugins when enabled with empty integrations config", () => { @@ -246,6 +262,7 @@ describe("BraintrustPlugin", () => { expect(MistralPlugin).toHaveBeenCalledTimes(1); expect(CoherePlugin).toHaveBeenCalledTimes(1); expect(GroqPlugin).toHaveBeenCalledTimes(1); + expect(LangChainPlugin).toHaveBeenCalledTimes(1); }); }); @@ -430,6 +447,17 @@ describe("BraintrustPlugin", () => { expect(GroqPlugin).toHaveBeenCalledTimes(1); }); + it("should not create LangChain plugin when langchain: false", () => { + const plugin = new BraintrustPlugin({ + integrations: { langchain: false }, + }); + plugin.enable(); + + expect(LangChainPlugin).not.toHaveBeenCalled(); + expect(OpenAIPlugin).toHaveBeenCalledTimes(1); + expect(AnthropicPlugin).toHaveBeenCalledTimes(1); + }); + it("should not create OpenRouter Agent plugin when openrouterAgent: false", () => { const plugin = new BraintrustPlugin({ integrations: { openrouterAgent: false }, @@ -455,6 +483,7 @@ describe("BraintrustPlugin", () => { cohere: false, groq: false, gitHubCopilot: false, + langchain: false, }, }); plugin.enable(); @@ -471,6 +500,7 @@ describe("BraintrustPlugin", () => { expect(CoherePlugin).not.toHaveBeenCalled(); 
expect(GroqPlugin).not.toHaveBeenCalled(); expect(GitHubCopilotPlugin).not.toHaveBeenCalled(); + expect(LangChainPlugin).not.toHaveBeenCalled(); }); it("should allow selective enabling of plugins", () => { @@ -535,6 +565,24 @@ describe("BraintrustPlugin", () => { expect(MistralPlugin).toHaveBeenCalledTimes(1); }); + it("should not create LangChain plugin when langchainJS: false (legacy)", () => { + const plugin = new BraintrustPlugin({ + integrations: { langchainJS: false }, + }); + plugin.enable(); + + expect(LangChainPlugin).not.toHaveBeenCalled(); + }); + + it("should not create LangChain plugin when langgraph: false (alias)", () => { + const plugin = new BraintrustPlugin({ + integrations: { langgraph: false }, + }); + plugin.enable(); + + expect(LangChainPlugin).not.toHaveBeenCalled(); + }); + it("should not create AI SDK plugin when both aisdk and vercel are false", () => { const plugin = new BraintrustPlugin({ integrations: { aisdk: false, vercel: false }, @@ -610,6 +658,7 @@ describe("BraintrustPlugin", () => { const mistralMock = vi.mocked(MistralPlugin).mock.results[0].value; const cohereMock = vi.mocked(CoherePlugin).mock.results[0].value; const groqMock = vi.mocked(GroqPlugin).mock.results[0].value; + const langChainMock = vi.mocked(LangChainPlugin).mock.results[0].value; expect(openaiMock.enable).toHaveBeenCalledTimes(1); expect(anthropicMock.enable).toHaveBeenCalledTimes(1); @@ -622,6 +671,7 @@ describe("BraintrustPlugin", () => { expect(mistralMock.enable).toHaveBeenCalledTimes(1); expect(cohereMock.enable).toHaveBeenCalledTimes(1); expect(groqMock.enable).toHaveBeenCalledTimes(1); + expect(langChainMock.enable).toHaveBeenCalledTimes(1); }); it("should disable and nullify all sub-plugins when disabled", () => { @@ -643,6 +693,7 @@ describe("BraintrustPlugin", () => { const mistralMock = vi.mocked(MistralPlugin).mock.results[0].value; const cohereMock = vi.mocked(CoherePlugin).mock.results[0].value; const groqMock = 
vi.mocked(GroqPlugin).mock.results[0].value; + const langChainMock = vi.mocked(LangChainPlugin).mock.results[0].value; plugin.disable(); @@ -657,6 +708,7 @@ describe("BraintrustPlugin", () => { expect(mistralMock.disable).toHaveBeenCalledTimes(1); expect(cohereMock.disable).toHaveBeenCalledTimes(1); expect(groqMock.disable).toHaveBeenCalledTimes(1); + expect(langChainMock.disable).toHaveBeenCalledTimes(1); }); it("should be idempotent on multiple enable calls", () => { @@ -723,6 +775,7 @@ describe("BraintrustPlugin", () => { expect(MistralPlugin).toHaveBeenCalledTimes(1); expect(CoherePlugin).toHaveBeenCalledTimes(1); expect(GroqPlugin).toHaveBeenCalledTimes(1); + expect(LangChainPlugin).toHaveBeenCalledTimes(1); }); it("should only disable plugins that were enabled", () => { @@ -739,6 +792,7 @@ describe("BraintrustPlugin", () => { mistral: false, cohere: false, groq: true, + langchain: true, }, }); plugin.enable(); @@ -753,6 +807,7 @@ describe("BraintrustPlugin", () => { const openRouterAgentMock = vi.mocked(OpenRouterAgentPlugin).mock .results[0].value; const groqMock = vi.mocked(GroqPlugin).mock.results[0].value; + const langChainMock = vi.mocked(LangChainPlugin).mock.results[0].value; plugin.disable(); @@ -763,6 +818,7 @@ describe("BraintrustPlugin", () => { expect(openRouterMock.disable).toHaveBeenCalledTimes(1); expect(openRouterAgentMock.disable).toHaveBeenCalledTimes(1); expect(groqMock.disable).toHaveBeenCalledTimes(1); + expect(langChainMock.disable).toHaveBeenCalledTimes(1); expect(MistralPlugin).not.toHaveBeenCalled(); expect(CoherePlugin).not.toHaveBeenCalled(); }); diff --git a/js/src/instrumentation/braintrust-plugin.ts b/js/src/instrumentation/braintrust-plugin.ts index 0ce0390d1..27bb67d6e 100644 --- a/js/src/instrumentation/braintrust-plugin.ts +++ b/js/src/instrumentation/braintrust-plugin.ts @@ -13,6 +13,7 @@ import { GoogleADKPlugin } from "./plugins/google-adk-plugin"; import { CoherePlugin } from "./plugins/cohere-plugin"; import { GroqPlugin 
} from "./plugins/groq-plugin"; import { GitHubCopilotPlugin } from "./plugins/github-copilot-plugin"; +import { LangChainPlugin } from "./plugins/langchain-plugin"; export interface BraintrustPluginConfig { integrations?: { @@ -32,6 +33,10 @@ export interface BraintrustPluginConfig { googleADK?: boolean; cohere?: boolean; groq?: boolean; + gitHubCopilot?: boolean; + langchain?: boolean; + langchainJS?: boolean; + langgraph?: boolean; }; } @@ -52,6 +57,7 @@ function getIntegrationConfig( * - Vercel AI SDK (generateText, streamText, etc.) * - Google GenAI SDK * - HuggingFace Inference SDK + * - LangChain.js and LangGraph * - Mistral SDK * - Cohere SDK * @@ -74,6 +80,7 @@ export class BraintrustPlugin extends BasePlugin { private coherePlugin: CoherePlugin | null = null; private groqPlugin: GroqPlugin | null = null; private gitHubCopilotPlugin: GitHubCopilotPlugin | null = null; + private langChainPlugin: LangChainPlugin | null = null; constructor(config: BraintrustPluginConfig = {}) { super(); @@ -160,6 +167,15 @@ export class BraintrustPlugin extends BasePlugin { this.gitHubCopilotPlugin = new GitHubCopilotPlugin(); this.gitHubCopilotPlugin.enable(); } + + if ( + integrations.langchain !== false && + integrations.langchainJS !== false && + integrations.langgraph !== false + ) { + this.langChainPlugin = new LangChainPlugin(); + this.langChainPlugin.enable(); + } } protected onDisable(): void { @@ -232,6 +248,11 @@ export class BraintrustPlugin extends BasePlugin { this.gitHubCopilotPlugin.disable(); this.gitHubCopilotPlugin = null; } + + if (this.langChainPlugin) { + this.langChainPlugin.disable(); + this.langChainPlugin = null; + } } } diff --git a/js/src/instrumentation/plugins/langchain-callback-handler.ts b/js/src/instrumentation/plugins/langchain-callback-handler.ts new file mode 100644 index 000000000..ed27ed2e6 --- /dev/null +++ b/js/src/instrumentation/plugins/langchain-callback-handler.ts @@ -0,0 +1,538 @@ +import { currentSpan, initLogger, NOOP_SPAN, 
startSpan } from "../../logger"; +import type { Span } from "../../logger"; +import type { + LangChainCallbackHandlerOptions, + LangChainEndSpanArgs, + LangChainLLMResult, + LangChainSerialized, + LangChainStartSpanArgs, +} from "../../vendor-sdk-types/langchain"; + +export const BRAINTRUST_LANGCHAIN_CALLBACK_HANDLER_NAME = + "BraintrustCallbackHandler"; + +export class BraintrustLangChainCallbackHandler< + IsAsyncFlush extends boolean = true, +> { + name = BRAINTRUST_LANGCHAIN_CALLBACK_HANDLER_NAME; + private spans = new Map(); + private skippedRuns = new Set(); + private parent?: Span | (() => Span); + private rootRunId?: string; + private options: LangChainCallbackHandlerOptions; + private startTimes = new Map(); + private firstTokenTimes = new Map(); + private ttftMs = new Map(); + + constructor( + options?: Partial>, + ) { + this.parent = options?.parent; + this.options = { + debug: options?.debug ?? false, + excludeMetadataProps: + options?.excludeMetadataProps ?? + /^(l[sc]_|langgraph_|__pregel_|checkpoint_ns)/, + logger: options?.logger, + }; + } + + protected startSpan({ + runId, + parentRunId, + ...args + }: LangChainStartSpanArgs): void { + if (this.spans.has(runId)) { + return; + } + + if (!parentRunId) { + this.rootRunId = runId; + } + + const tags = args.event?.tags; + const spanAttributes = args.spanAttributes || {}; + spanAttributes.type = args.type || spanAttributes.type || "task"; + args.type = spanAttributes.type; + + const currentParent = + (typeof this.parent === "function" ? this.parent() : this.parent) ?? 
+ currentSpan(); + let parentSpan: Span; + if (parentRunId && this.spans.has(parentRunId)) { + parentSpan = this.spans.get(parentRunId)!; + } else if (!Object.is(currentParent, NOOP_SPAN)) { + parentSpan = currentParent; + } else if (this.options.logger) { + parentSpan = this.options.logger as unknown as Span; + } else { + parentSpan = { startSpan } as unknown as Span; + } + + args.event = { + ...args.event, + tags: undefined, + metadata: { + ...(tags ? { tags } : {}), + ...args.event?.metadata, + braintrust: { + integration_name: "langchain-js", + integration_version: "0.2.0", + sdk_language: "javascript", + }, + run_id: runId, + parent_run_id: parentRunId, + ...(this.options.debug ? { runId, parentRunId } : {}), + }, + }; + + let span = parentSpan.startSpan(args); + + if ( + !Object.is(this.options.logger, NOOP_SPAN) && + Object.is(span, NOOP_SPAN) + ) { + span = initLogger().startSpan(args); + } + + this.spans.set(runId, span); + } + + protected endSpan({ + runId, + parentRunId, + tags, + metadata, + ...args + }: LangChainEndSpanArgs): void { + if (!this.spans.has(runId)) { + return; + } + + if (this.skippedRuns.has(runId)) { + this.skippedRuns.delete(runId); + return; + } + + const span = this.spans.get(runId)!; + this.spans.delete(runId); + if (runId === this.rootRunId) { + this.rootRunId = undefined; + } + + span.log({ ...args, metadata: { tags, ...metadata } }); + span.end(); + } + + async handleLLMStart( + llm: LangChainSerialized, + prompts: string[], + runId: string, + parentRunId?: string, + extraParams?: Record, + tags?: string[], + metadata?: Record, + runName?: string, + ): Promise { + this.startSpan({ + runId, + parentRunId, + name: runName ?? getSerializedName(llm) ?? 
"LLM", + type: "llm", + event: { + input: prompts, + tags, + metadata: { + serialized: llm, + name: runName, + metadata, + ...extraParams, + }, + }, + }); + } + + async handleLLMError( + err: Error, + runId: string, + parentRunId?: string, + tags?: string[], + ): Promise { + this.endSpan({ runId, parentRunId, error: err, tags }); + } + + async handleLLMEnd( + output: LangChainLLMResult, + runId: string, + parentRunId?: string, + tags?: string[], + ): Promise { + const metrics = getMetricsFromResponse(output); + const modelName = getModelNameFromResponse(output); + const ttft = this.ttftMs.get(runId); + if (ttft !== undefined) { + metrics.time_to_first_token = ttft; + } + + this.startTimes.delete(runId); + this.firstTokenTimes.delete(runId); + this.ttftMs.delete(runId); + + this.endSpan({ + runId, + parentRunId, + output, + metrics, + tags, + metadata: { + model: modelName, + }, + }); + } + + async handleChatModelStart( + llm: LangChainSerialized, + messages: unknown[][], + runId: string, + parentRunId?: string, + extraParams?: Record, + tags?: string[], + metadata?: Record, + runName?: string, + ): Promise { + this.startTimes.set(runId, Date.now()); + this.firstTokenTimes.delete(runId); + this.ttftMs.delete(runId); + + this.startSpan({ + runId, + parentRunId, + name: runName ?? getSerializedName(llm) ?? "Chat Model", + type: "llm", + event: { + input: messages, + tags, + metadata: { + serialized: llm, + name: runName, + metadata, + ...extraParams, + }, + }, + }); + } + + async handleChainStart( + chain: LangChainSerialized, + inputs: unknown, + runId: string, + parentRunId?: string, + tags?: string[], + metadata?: Record, + runType?: string, + runName?: string, + ): Promise { + if (tags?.includes("langsmith:hidden")) { + this.skippedRuns.add(runId); + return; + } + + this.startSpan({ + runId, + parentRunId, + name: runName ?? getSerializedName(chain) ?? 
"Chain", + event: { + input: inputs, + tags, + metadata: { + serialized: chain, + name: runName, + metadata, + run_type: runType, + }, + }, + }); + } + + async handleChainError( + err: Error, + runId: string, + parentRunId?: string, + tags?: string[], + kwargs?: { + inputs?: Record; + }, + ): Promise { + this.endSpan({ runId, parentRunId, error: err, tags, metadata: kwargs }); + } + + async handleChainEnd( + outputs: unknown, + runId: string, + parentRunId?: string, + tags?: string[], + kwargs?: { inputs?: Record }, + ): Promise { + this.endSpan({ + runId, + parentRunId, + tags, + output: outputs, + metadata: { ...kwargs }, + }); + } + + async handleToolStart( + tool: LangChainSerialized, + input: string, + runId: string, + parentRunId?: string, + tags?: string[], + metadata?: Record, + runName?: string, + ): Promise { + this.startSpan({ + runId, + parentRunId, + name: runName ?? getSerializedName(tool) ?? "Tool", + type: "llm", + event: { + input: safeJsonParse(input), + tags, + metadata: { + metadata, + serialized: tool, + input_str: input, + input: safeJsonParse(input), + name: runName, + }, + }, + }); + } + + async handleToolError( + err: Error, + runId: string, + parentRunId?: string, + tags?: string[], + ): Promise { + this.endSpan({ runId, parentRunId, error: err, tags }); + } + + async handleToolEnd( + output: unknown, + runId: string, + parentRunId?: string, + tags?: string[], + ): Promise { + this.endSpan({ runId, parentRunId, output, tags }); + } + + async handleAgentAction( + action: Record, + runId: string, + parentRunId?: string, + tags?: string[], + ): Promise { + this.startSpan({ + runId, + parentRunId, + type: "llm", + name: typeof action.tool === "string" ? 
action.tool : "Agent", + event: { + input: action, + tags, + }, + }); + } + + async handleAgentEnd( + action: unknown, + runId: string, + parentRunId?: string, + tags?: string[], + ): Promise { + this.endSpan({ runId, parentRunId, output: action, tags }); + } + + async handleRetrieverStart( + retriever: LangChainSerialized, + query: string, + runId: string, + parentRunId?: string, + tags?: string[], + metadata?: Record, + name?: string, + ): Promise { + this.startSpan({ + runId, + parentRunId, + name: name ?? getSerializedName(retriever) ?? "Retriever", + type: "function", + event: { + input: query, + tags, + metadata: { + serialized: retriever, + metadata, + name, + }, + }, + }); + } + + async handleRetrieverEnd( + documents: unknown[], + runId: string, + parentRunId?: string, + tags?: string[], + ): Promise { + this.endSpan({ runId, parentRunId, output: documents, tags }); + } + + async handleRetrieverError( + err: Error, + runId: string, + parentRunId?: string, + tags?: string[], + ): Promise { + this.endSpan({ runId, parentRunId, error: err, tags }); + } + + async handleLLMNewToken( + _token: string, + _idx: { prompt: number; completion: number }, + runId: string, + _parentRunId?: string, + _tags?: string[], + ): Promise { + if (!this.firstTokenTimes.has(runId)) { + const now = Date.now(); + this.firstTokenTimes.set(runId, now); + const start = this.startTimes.get(runId); + if (start !== undefined) { + this.ttftMs.set(runId, (now - start) / 1000); + } + } + } +} + +function getSerializedName( + serialized: LangChainSerialized, +): string | undefined { + if (typeof serialized.name === "string") { + return serialized.name; + } + + const lastIdPart = serialized.id?.at(-1); + return typeof lastIdPart === "string" ? 
lastIdPart : undefined; +} + +function cleanObject(obj: Record): Record { + return Object.fromEntries( + Object.entries(obj).filter(([, value]) => { + if (typeof value !== "number") { + return false; + } + return Number.isFinite(value); + }), + ) as Record; +} + +function walkGenerations( + response: LangChainLLMResult, +): Record[] { + const result: Record[] = []; + const generations = response.generations || []; + for (const batch of generations) { + if (Array.isArray(batch)) { + for (const generation of batch) { + if (isRecord(generation)) { + result.push(generation); + } + } + } else if (isRecord(batch)) { + result.push(batch); + } + } + return result; +} + +function getModelNameFromResponse( + response: LangChainLLMResult, +): string | undefined { + for (const generation of walkGenerations(response)) { + const message = generation.message; + if (!isRecord(message)) { + continue; + } + + const responseMetadata = message.response_metadata; + if (!isRecord(responseMetadata)) { + continue; + } + + const modelName = responseMetadata.model_name ?? responseMetadata.model; + if (typeof modelName === "string") { + return modelName; + } + } + + const llmOutput = response.llmOutput || {}; + const modelName = llmOutput.model_name ?? llmOutput.model; + return typeof modelName === "string" ? modelName : undefined; +} + +function getMetricsFromResponse( + response: LangChainLLMResult, +): Record { + for (const generation of walkGenerations(response)) { + const message = generation.message; + if (!isRecord(message)) { + continue; + } + + const usageMetadata = message.usage_metadata; + if (!isRecord(usageMetadata)) { + continue; + } + + const inputTokenDetails = usageMetadata.input_token_details; + return cleanObject({ + total_tokens: usageMetadata.total_tokens, + prompt_tokens: usageMetadata.input_tokens, + completion_tokens: usageMetadata.output_tokens, + prompt_cache_creation_tokens: isRecord(inputTokenDetails) + ? 
inputTokenDetails.cache_creation + : undefined, + prompt_cached_tokens: isRecord(inputTokenDetails) + ? inputTokenDetails.cache_read + : undefined, + }); + } + + const llmOutput = response.llmOutput || {}; + const tokenUsage = isRecord(llmOutput.tokenUsage) + ? llmOutput.tokenUsage + : isRecord(llmOutput.estimatedTokens) + ? llmOutput.estimatedTokens + : {}; + + return cleanObject({ + total_tokens: tokenUsage.totalTokens, + prompt_tokens: tokenUsage.promptTokens, + completion_tokens: tokenUsage.completionTokens, + }); +} + +function safeJsonParse(input: string): unknown { + try { + return JSON.parse(input); + } catch { + return input; + } +} + +function isRecord(value: unknown): value is Record { + return typeof value === "object" && value !== null && !Array.isArray(value); +} diff --git a/js/src/instrumentation/plugins/langchain-channels.ts b/js/src/instrumentation/plugins/langchain-channels.ts new file mode 100644 index 000000000..ed6e23aff --- /dev/null +++ b/js/src/instrumentation/plugins/langchain-channels.ts @@ -0,0 +1,13 @@ +import { channel, defineChannels } from "../core/channel-definitions"; +import type { LangChainCallbackManagerConfigureResult } from "../../vendor-sdk-types/langchain"; + +export const langChainChannels = defineChannels("@langchain/core", { + configure: channel({ + channelName: "CallbackManager.configure", + kind: "sync-stream", + }), + configureSync: channel({ + channelName: "CallbackManager._configureSync", + kind: "sync-stream", + }), +}); diff --git a/js/src/instrumentation/plugins/langchain-plugin.test.ts b/js/src/instrumentation/plugins/langchain-plugin.test.ts new file mode 100644 index 000000000..df9e663a4 --- /dev/null +++ b/js/src/instrumentation/plugins/langchain-plugin.test.ts @@ -0,0 +1,124 @@ +import * as diagnosticsChannel from "node:diagnostics_channel"; +import { describe, expect, it } from "vitest"; +import iso from "../../isomorph"; +import { LangChainPlugin } from "./langchain-plugin"; +import { langChainChannels } 
import * as diagnosticsChannel from "node:diagnostics_channel";
import { describe, expect, it } from "vitest";
import iso from "../../isomorph";
import { LangChainPlugin } from "./langchain-plugin";
import { langChainChannels } from "./langchain-channels";

// Route iso's tracing-channel factory to Node's real diagnostics_channel so
// the plugin's subscribe/publish cycle runs against actual channels in tests.
// NOTE(review): mutates the shared `iso` singleton for the whole test file.
// NOTE(review): the ReturnType type argument was lost in source extraction —
// confirm it is `ReturnType<typeof iso.newTracingChannel>`.
iso.newTracingChannel = (nameOrChannels: string | object) =>
  diagnosticsChannel.tracingChannel(
    nameOrChannels as string,
  ) as never as ReturnType<typeof iso.newTracingChannel>;

// Minimal stand-in for LangChain's CallbackManager: just a handlers array and
// an addHandler method, which is all the plugin's duck-typing checks require.
function createManager(handlers: unknown[] = []) {
  return {
    handlers,
    addHandler(handler: unknown) {
      this.handlers.push(handler);
    },
  };
}

// Publishes a traced `configure` call whose return value is `result`
// (exercises the plugin's end-event / result-injection path).
function traceConfigureResult(result: unknown) {
  return langChainChannels.configure.traceSync(() => result, {
    arguments: [],
  });
}

// Publishes a traced `configure` call carrying `args` as the call arguments
// (exercises the plugin's start-event / argument-injection path).
function traceConfigureArguments(args: unknown[]) {
  return langChainChannels.configure.traceSync(() => args, {
    arguments: args,
  });
}

// Same as above but with a genuine `arguments` object, which is what the
// instrumented CallbackManager method actually passes at runtime.
function traceConfigureArgumentsObject(args: IArguments) {
  return langChainChannels.configure.traceSync(() => args, {
    arguments: args,
  });
}

// Builds a real `arguments` object from the given values.
function createArgumentsObject(...args: unknown[]): IArguments {
  return (function getArgumentsObject() {
    return arguments;
  })(...args);
}

describe("LangChainPlugin", () => {
  it("injects a Braintrust callback handler into empty CallbackManager.configure() arguments", () => {
    const plugin = new LangChainPlugin();
    const args: unknown[] = [];

    plugin.enable();
    traceConfigureArguments(args);
    plugin.disable();

    // The plugin writes a one-element handler array into argument slot 0.
    expect(args[0]).toEqual([
      expect.objectContaining({
        name: "BraintrustCallbackHandler",
      }),
    ]);
  });

  it("injects a Braintrust callback handler into real arguments objects", () => {
    const plugin = new LangChainPlugin();
    const args = createArgumentsObject();

    plugin.enable();
    traceConfigureArgumentsObject(args);
    plugin.disable();

    expect(args[0]).toEqual([
      expect.objectContaining({
        name: "BraintrustCallbackHandler",
      }),
    ]);
  });

  it("injects a Braintrust callback handler into CallbackManager.configure() results", () => {
    const plugin = new LangChainPlugin();
    const manager = createManager();

    plugin.enable();
    traceConfigureResult(manager);
    plugin.disable();

    expect(manager.handlers).toHaveLength(1);
    expect(manager.handlers[0]).toMatchObject({
      name: "BraintrustCallbackHandler",
    });
  });

  it("does not inject duplicate handlers into the same manager", () => {
    const plugin = new LangChainPlugin();
    const manager = createManager();

    plugin.enable();
    // Second trace must be a no-op thanks to the plugin's WeakSet bookkeeping.
    traceConfigureResult(manager);
    traceConfigureResult(manager);
    plugin.disable();

    expect(manager.handlers).toHaveLength(1);
  });

  it("does not inject when a Braintrust callback handler is already present", () => {
    const plugin = new LangChainPlugin();
    const existingHandler = { name: "BraintrustCallbackHandler" };
    const manager = createManager([existingHandler]);

    plugin.enable();
    traceConfigureResult(manager);
    plugin.disable();

    // Detection is by handler name, so a pre-existing handler blocks injection.
    expect(manager.handlers).toEqual([existingHandler]);
  });

  it("gracefully ignores undefined and non-manager results", () => {
    const plugin = new LangChainPlugin();

    plugin.enable();

    // No addHandler function => not a CallbackManager => silently ignored.
    expect(() => traceConfigureResult(undefined)).not.toThrow();
    expect(() => traceConfigureResult({ handlers: [] })).not.toThrow();

    plugin.disable();
  });
});
import { BasePlugin } from "../core";
import type { ChannelMessage } from "../core/channel-definitions";
import type { IsoChannelHandlers, IsoTracingChannel } from "../../isomorph";
import type { LangChainCallbackManager } from "../../vendor-sdk-types/langchain";
import {
  BRAINTRUST_LANGCHAIN_CALLBACK_HANDLER_NAME,
  BraintrustLangChainCallbackHandler,
} from "./langchain-callback-handler";
import { langChainChannels } from "./langchain-channels";

// Either of the two configure channels the plugin subscribes to.
type LangChainConfigureChannel =
  | typeof langChainChannels.configure
  | typeof langChainChannels.configureSync;

/**
 * Plugin that injects a BraintrustLangChainCallbackHandler into LangChain's
 * CallbackManager, via two hooks on the `configure`/`_configureSync` tracing
 * channels:
 *  - start: prepend a handler to the inheritable-handlers argument, and
 *  - end:   add a handler to the returned CallbackManager if none is present.
 */
export class LangChainPlugin extends BasePlugin {
  // Managers we have already injected into, to keep injection idempotent
  // without retaining the managers (WeakSet => no leak).
  private injectedManagers = new WeakSet();

  protected onEnable(): void {
    this.subscribeToConfigure(langChainChannels.configure);
    this.subscribeToConfigure(langChainChannels.configureSync);
  }

  protected onDisable(): void {
    // NOTE(review): `unsubscribers` is not declared in this file — presumably
    // provided by BasePlugin; confirm.
    for (const unsubscribe of this.unsubscribers) {
      unsubscribe();
    }
    this.unsubscribers = [];
    this.injectedManagers = new WeakSet();
  }

  /** Subscribes start/end handlers on one configure channel and records the
   * matching unsubscribe callback for onDisable. */
  private subscribeToConfigure(channel: LangChainConfigureChannel): void {
    const tracingChannel = channel.tracingChannel() as IsoTracingChannel<
      ChannelMessage
    >;

    const handlers: IsoChannelHandlers<
      ChannelMessage
    > = {
      start: (event) => {
        // Before configure runs: inject into the inheritable-handlers arg.
        injectHandlerIntoArguments(event.arguments);
      },
      end: (event) => {
        // After configure returns: inject into the resulting manager.
        this.injectHandler(event.result);
      },
    };

    tracingChannel.subscribe(handlers);
    this.unsubscribers.push(() => {
      tracingChannel.unsubscribe(handlers);
    });
  }

  /** Adds a Braintrust handler to `result` when it looks like a
   * CallbackManager and has not been injected into already. */
  private injectHandler(result: unknown): void {
    if (!isCallbackManager(result)) {
      return;
    }

    if (this.injectedManagers.has(result) || hasBraintrustHandler(result)) {
      return;
    }

    try {
      // `true` => handler is inheritable by child runs.
      result.addHandler(new BraintrustLangChainCallbackHandler(), true);
      this.injectedManagers.add(result);
    } catch {
      // Instrumentation must never break LangChain user code.
    }
  }
}

// Duck-type check: any object with a callable addHandler counts as a manager.
function isCallbackManager(value: unknown): value is LangChainCallbackManager &
  object & {
    addHandler: (handler: unknown, inherit?: boolean) => void;
  } {
  if (typeof value !== "object" || value === null) {
    return false;
  }

  const maybeManager = value as LangChainCallbackManager;
  return typeof maybeManager.addHandler === "function";
}

// True when the manager already carries a handler named like ours.
function hasBraintrustHandler(manager: LangChainCallbackManager): boolean {
  return (
    manager.handlers?.some((handler) => {
      if (typeof handler !== "object" || handler === null) {
        return false;
      }
      const name = Reflect.get(handler, "name");
      return name === BRAINTRUST_LANGCHAIN_CALLBACK_HANDLER_NAME;
    }) ?? false
  );
}

/**
 * Mutates the instrumented call's arguments so slot 0 (the inheritable
 * handlers) contains a Braintrust handler: creates a fresh array when the
 * slot is empty, or appends to an existing array that lacks one.
 * NOTE(review): when slot 0 is a CallbackManager instance (also a legal
 * LangChain `Callbacks` value) nothing happens here; the end-event result
 * injection covers that case — confirm.
 */
function injectHandlerIntoArguments(args: ArrayLike<unknown>): void {
  if (!isWritableArgumentsObject(args)) {
    return;
  }

  const inheritedHandlers = Reflect.get(args, "0");
  const handler = new BraintrustLangChainCallbackHandler();

  if (inheritedHandlers === undefined || inheritedHandlers === null) {
    Reflect.set(args, "0", [handler]);
    return;
  }

  if (Array.isArray(inheritedHandlers)) {
    if (!inheritedHandlers.some(isBraintrustHandler)) {
      inheritedHandlers.push(handler);
    }
  }
}

// Guards against primitives/null; does not (and cannot cheaply) verify the
// object is actually writable — the name describes intent, not a guarantee.
function isWritableArgumentsObject(
  args: ArrayLike<unknown>,
): args is ArrayLike<unknown> & object {
  return typeof args === "object" && args !== null;
}

// Identifies our handler by its well-known name.
function isBraintrustHandler(handler: unknown): boolean {
  if (typeof handler !== "object" || handler === null) {
    return false;
  }
  return (
    Reflect.get(handler, "name") === BRAINTRUST_LANGCHAIN_CALLBACK_HANDLER_NAME
  );
}
import type { Logger, Span, StartSpanArgs } from "../logger";
import type { ExperimentLogPartialArgs } from "../util";

/** Minimal shape of LangChain's serialized component descriptor (the object
 * passed as the first argument to handle*Start callbacks). */
export type LangChainSerialized = {
  // Import path segments; the last segment usually names the component.
  id?: unknown[];
  name?: string;
};

/** Opaque runnable config; only treated as a bag of unknown keys here. */
export type LangChainRunnableConfig = Record<string, unknown>;

/** Duck-typed view of LangChain's CallbackManager — only the members the
 * plugin inspects/calls. Both are optional because values are probed at
 * runtime before use. */
export type LangChainCallbackManager = {
  handlers?: unknown[];
  addHandler?: (handler: unknown, inherit?: boolean) => void;
};

/** CallbackManager.configure may return undefined when no callbacks apply. */
export type LangChainCallbackManagerConfigureResult =
  | LangChainCallbackManager
  | undefined;

/** Options accepted by BraintrustLangChainCallbackHandler. */
export type LangChainCallbackHandlerOptions = {
  // Emit runId/parentRunId into span metadata when true.
  debug: boolean;
  // Metadata keys matching this pattern are excluded from spans.
  excludeMetadataProps: RegExp;
  // Explicit logger/span to parent spans under when no active span exists.
  // NOTE(review): a Logger generic argument may have been stripped from the
  // extracted source — confirm against logger.ts.
  logger?: Logger | Span;
  // Explicit parent span, or a lazy getter for one.
  parent?: Span | (() => Span);
};

/** startSpan arguments augmented with LangChain run identifiers. */
export type LangChainStartSpanArgs = StartSpanArgs & {
  parentRunId?: string;
  runId: string;
};

/** endSpan arguments: partial log payload plus run identifiers and tags. */
export type LangChainEndSpanArgs = ExperimentLogPartialArgs & {
  parentRunId?: string;
  runId: string;
  tags?: string[];
};

/** Loose view of LangChain's LLMResult — generations may be nested arrays. */
export type LangChainLLMResult = {
  generations?: unknown[];
  llmOutput?: Record<string, unknown>;
};
+ if ( + oldField.type.trim().startsWith("{") && + newField.type.trim().startsWith("{") && + areObjectTypeDefinitionsCompatible(oldField.type, newField.type) + ) { + continue; + } + // Check if it's a union type widening (backwards compatible) if (!isUnionTypeWidening(oldTypeNorm, newTypeNorm)) { // Field type changed in an incompatible way - breaking change @@ -2155,6 +2165,14 @@ describe("areInterfaceSignaturesCompatible", () => { expect(result).toBe(true); }); + test("should allow adding optional fields to nested object interface fields", () => { + const oldInterface = `export interface InstrumentationConfig { integrations?: { openai?: boolean; cohere?: boolean; }; }`; + const newInterface = `export interface InstrumentationConfig { integrations?: { openai?: boolean; cohere?: boolean; langchain?: boolean; }; }`; + + const result = areInterfaceSignaturesCompatible(oldInterface, newInterface); + expect(result).toBe(true); + }); + test("should reject removing fields from interface", () => { const oldInterface = `export interface LogOptions { asyncFlush?: IsAsyncFlush; computeMetadataArgs?: Record; }`; const newInterface = `export interface LogOptions { asyncFlush?: IsAsyncFlush; }`;