Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
3 changes: 3 additions & 0 deletions package.json
Original file line number Diff line number Diff line change
Expand Up @@ -60,5 +60,8 @@
"micromatch": "^4.0.8",
"@babel/helpers": "^7.26.10",
"semantic-release": "^25.0.0"
},
"dependencies": {
"@langchain/anthropic": "^0.3.17"
}
}
1 change: 1 addition & 0 deletions packages/ai-proxy/package.json
Original file line number Diff line number Diff line change
Expand Up @@ -13,6 +13,7 @@
},
"dependencies": {
"@forestadmin/datasource-toolkit": "1.50.1",
"@langchain/anthropic": "^0.3.17",
"@langchain/community": "1.1.4",
"@langchain/core": "1.1.15",
"@langchain/langgraph": "^1.1.0",
Expand Down
211 changes: 202 additions & 9 deletions packages/ai-proxy/src/provider-dispatcher.ts
Original file line number Diff line number Diff line change
@@ -1,12 +1,34 @@
import type { RemoteTools } from './remote-tools';
import type { BaseMessageLike } from '@langchain/core/messages';
import type { AnthropicInput } from '@langchain/anthropic';
import type { BaseMessage, BaseMessageLike } from '@langchain/core/messages';
import type { ChatOpenAIFields, OpenAIChatModelId } from '@langchain/openai';
import type OpenAI from 'openai';

import { ChatAnthropic } from '@langchain/anthropic';
import { AIMessage, HumanMessage, SystemMessage, ToolMessage } from '@langchain/core/messages';
import { convertToOpenAIFunction } from '@langchain/core/utils/function_calling';
import { ChatOpenAI } from '@langchain/openai';

import { AINotConfiguredError, OpenAIUnprocessableError } from './types/errors';
import {
AINotConfiguredError,
AnthropicUnprocessableError,
OpenAIUnprocessableError,
} from './types/errors';

/**
 * Model identifiers accepted for the 'anthropic' provider.
 * NOTE(review): this is a hard-coded snapshot of Anthropic's catalog — verify
 * the dated ids (e.g. 'claude-sonnet-4-5-20250514') against Anthropic's
 * current model list before release; stale ids fail at request time.
 */
export const ANTHROPIC_MODELS = [
'claude-sonnet-4-5-20250514',
'claude-opus-4-20250514',
'claude-3-5-sonnet-latest',
'claude-3-5-sonnet-20241022',
'claude-3-5-haiku-latest',
'claude-3-5-haiku-20241022',
'claude-3-opus-latest',
'claude-3-opus-20240229',
'claude-3-sonnet-20240229',
'claude-3-haiku-20240307',
] as const;

// Union of the literal model ids listed above.
export type AnthropicModel = (typeof ANTHROPIC_MODELS)[number];

/**
* OpenAI model prefixes that do NOT support function calling (tools).
Expand Down Expand Up @@ -65,8 +87,19 @@ export type OpenAiConfiguration = BaseAiConfiguration &
model: OpenAIChatModelId | (string & NonNullable<unknown>);
};

export type AiProvider = 'openai';
export type AiConfiguration = OpenAiConfiguration;
/**
 * Anthropic-specific configuration.
 * Extends the base configuration with all ChatAnthropic options
 * (temperature, maxTokens, etc.). `model` and `apiKey` are omitted from
 * AnthropicInput: `model` is re-declared below, restricted to the known
 * Anthropic ids, and the key is expected to come from the base configuration
 * (`apiKey`) or AnthropicInput's native `anthropicApiKey`.
 * NOTE(review): BaseAiConfiguration is not visible here — confirm it actually
 * declares `apiKey` before relying on the "unified key" claim.
 */
export type AnthropicConfiguration = BaseAiConfiguration &
Omit<AnthropicInput, 'model' | 'apiKey'> & {
provider: 'anthropic';
model: AnthropicModel;
};

// Providers the dispatcher can route to.
export type AiProvider = 'openai' | 'anthropic';
// Discriminated union keyed on the `provider` literal field.
export type AiConfiguration = OpenAiConfiguration | AnthropicConfiguration;

export type ChatCompletionResponse = OpenAI.Chat.Completions.ChatCompletion;
export type ChatCompletionMessage = OpenAI.Chat.Completions.ChatCompletionMessageParam;
Expand All @@ -79,8 +112,25 @@ export type DispatchBody = {
tool_choice?: ChatCompletionToolChoice;
};

/**
 * Minimal shape of an OpenAI Chat Completions message as carried in
 * DispatchBody, used when converting a history into LangChain messages.
 */
interface OpenAIMessage {
role: 'system' | 'user' | 'assistant' | 'tool';
// Text content; null for assistant messages that only carry tool calls.
content: string | null;
// Present on assistant messages that requested tool invocations.
tool_calls?: Array<{
id: string;
function: {
name: string;
// JSON-encoded arguments string, exactly as the OpenAI API emits it.
arguments: string;
};
}>;
// Present on role 'tool' messages: links the result to its originating call.
tool_call_id?: string;
}

export class ProviderDispatcher {
private readonly chatModel: ChatOpenAI | null = null;
private readonly openaiModel: ChatOpenAI | null = null;

private readonly anthropicModel: ChatAnthropic | null = null;

private readonly modelName: string | null = null;

private readonly remoteTools: RemoteTools;

Expand All @@ -89,22 +139,37 @@ export class ProviderDispatcher {

if (configuration?.provider === 'openai') {
const { provider, name, ...chatOpenAIOptions } = configuration;
this.chatModel = new ChatOpenAI({
this.openaiModel = new ChatOpenAI({
...chatOpenAIOptions,
__includeRawResponse: true,
});
} else if (configuration?.provider === 'anthropic') {
const { provider, name, model, ...clientOptions } = configuration;
this.anthropicModel = new ChatAnthropic({
...clientOptions,
model,
});
this.modelName = model;
}
}

async dispatch(body: DispatchBody): Promise<ChatCompletionResponse> {
if (!this.chatModel) {
throw new AINotConfiguredError();
if (this.openaiModel) {
return this.dispatchOpenAI(body);
}

if (this.anthropicModel) {
return this.dispatchAnthropic(body);
}

throw new AINotConfiguredError();
}

private async dispatchOpenAI(body: DispatchBody): Promise<ChatCompletionResponse> {
const { tools, messages, tool_choice: toolChoice } = body;

const enrichedTools = this.enrichToolDefinitions(tools);
const model = this.bindToolsIfNeeded(this.chatModel, enrichedTools, toolChoice);
const model = this.bindToolsIfNeeded(this.openaiModel!, enrichedTools, toolChoice);

try {
const response = await model.invoke(messages as BaseMessageLike[]);
Expand Down Expand Up @@ -136,6 +201,32 @@ export class ProviderDispatcher {
}
}

private async dispatchAnthropic(body: DispatchBody): Promise<ChatCompletionResponse> {
const { tools, messages, tool_choice: toolChoice } = body;

try {
const langChainMessages = this.convertMessagesToLangChain(messages as OpenAIMessage[]);
const enhancedTools = tools ? this.enrichToolDefinitions(tools) : undefined;
let response: AIMessage;

if (enhancedTools?.length) {
const langChainTools = this.convertToolsToLangChain(enhancedTools);
const clientWithTools = this.anthropicModel!.bindTools(langChainTools, {
tool_choice: this.convertToolChoiceToLangChain(toolChoice),
});
response = await clientWithTools.invoke(langChainMessages);
} else {
response = await this.anthropicModel!.invoke(langChainMessages);
}

return this.convertLangChainResponseToOpenAI(response);
} catch (error) {
throw new AnthropicUnprocessableError(
`Error while calling Anthropic: ${(error as Error).message}`,
);
}
}

private bindToolsIfNeeded(
chatModel: ChatOpenAI,
tools: ChatCompletionTool[] | undefined,
Expand All @@ -150,6 +241,108 @@ export class ProviderDispatcher {
});
}

/**
 * Converts OpenAI-shaped chat messages into LangChain message instances:
 * system -> SystemMessage, user -> HumanMessage, assistant -> AIMessage
 * (with parsed tool calls when present), tool -> ToolMessage. Unknown roles
 * fall back to HumanMessage.
 *
 * @throws AnthropicUnprocessableError when a tool call carries malformed
 *   JSON arguments (the OpenAI API can emit these in edge cases); a bare
 *   JSON.parse would otherwise surface a context-free SyntaxError.
 */
private convertMessagesToLangChain(messages: OpenAIMessage[]): BaseMessage[] {
  return messages.map(msg => {
    switch (msg.role) {
      case 'system':
        return new SystemMessage(msg.content || '');
      case 'user':
        return new HumanMessage(msg.content || '');
      case 'assistant':
        if (msg.tool_calls) {
          return new AIMessage({
            content: msg.content || '',
            tool_calls: msg.tool_calls.map(tc => ({
              id: tc.id,
              name: tc.function.name,
              args: this.parseToolArguments(tc.function.name, tc.function.arguments),
            })),
          });
        }

        return new AIMessage(msg.content || '');
      case 'tool':
        return new ToolMessage({
          content: msg.content || '',
          // NOTE(review): assumed present on role 'tool' messages (the OpenAI
          // API requires it) — this input is not validated here.
          tool_call_id: msg.tool_call_id!,
        });
      default:
        return new HumanMessage(msg.content || '');
    }
  });
}

/** Parses a tool call's JSON arguments, wrapping malformed JSON in a provider error. */
private parseToolArguments(toolName: string, rawArguments: string): Record<string, unknown> {
  try {
    return JSON.parse(rawArguments);
  } catch {
    throw new AnthropicUnprocessableError(
      `Malformed JSON arguments for tool call "${toolName}"`,
    );
  }
}

private convertToolsToLangChain(tools: ChatCompletionTool[]): Array<{
type: 'function';
function: { name: string; description?: string; parameters?: Record<string, unknown> };
}> {
return tools
.filter((tool): tool is ChatCompletionTool & { type: 'function' } => tool.type === 'function')
.map(tool => ({
type: 'function' as const,
function: {
name: tool.function.name,
description: tool.function.description,
parameters: tool.function.parameters as Record<string, unknown> | undefined,
},
}));
}

/**
 * Translates an OpenAI tool_choice into the LangChain/Anthropic equivalent:
 * 'required' becomes 'any', a named function becomes a { type: 'tool' }
 * selector, and anything unrecognized maps to undefined.
 */
private convertToolChoiceToLangChain(
  toolChoice: ChatCompletionToolChoice | undefined,
): 'auto' | 'any' | 'none' | { type: 'tool'; name: string } | undefined {
  if (!toolChoice) return undefined;

  if (typeof toolChoice === 'object') {
    return toolChoice.type === 'function'
      ? { type: 'tool', name: toolChoice.function.name }
      : undefined;
  }

  switch (toolChoice) {
    case 'auto':
      return 'auto';
    case 'none':
      return 'none';
    case 'required':
      return 'any';
    default:
      return undefined;
  }
}

/**
 * Converts a LangChain AIMessage into the OpenAI chat.completion response
 * shape so Anthropic responses are indistinguishable from OpenAI ones to
 * callers.
 *
 * - Tool calls are re-serialized to OpenAI's { id, type, function } form.
 * - finish_reason is 'tool_calls' when any tool call is present, else 'stop'.
 * - Token usage is mapped from LangChain's usage_metadata (0 when absent).
 */
private convertLangChainResponseToOpenAI(response: AIMessage): ChatCompletionResponse {
  // Single timestamp for all fallback identifiers in this response.
  const now = Date.now();

  const toolCalls = response.tool_calls?.map((tc, index) => ({
    // Include the index so several id-less calls in one response cannot
    // collide on the same Date.now() value.
    id: tc.id || `call_${now}_${index}`,
    type: 'function' as const,
    function: {
      name: tc.name,
      arguments: JSON.stringify(tc.args),
    },
  }));

  const usageMetadata = response.usage_metadata as
    | { input_tokens?: number; output_tokens?: number; total_tokens?: number }
    | undefined;

  return {
    id: response.id || `msg_${now}`,
    object: 'chat.completion',
    created: Math.floor(now / 1000),
    model: this.modelName!,
    choices: [
      {
        index: 0,
        message: {
          role: 'assistant',
          // NOTE(review): non-string content (e.g. Anthropic content blocks)
          // is dropped to null — confirm callers never need multi-part text.
          content: typeof response.content === 'string' ? response.content : null,
          refusal: null,
          tool_calls: toolCalls?.length ? toolCalls : undefined,
        },
        finish_reason: toolCalls?.length ? 'tool_calls' : 'stop',
        logprobs: null,
      },
    ],
    usage: {
      prompt_tokens: usageMetadata?.input_tokens || 0,
      completion_tokens: usageMetadata?.output_tokens || 0,
      total_tokens: usageMetadata?.total_tokens || 0,
    },
  };
}

private enrichToolDefinitions(tools?: ChatCompletionTool[]) {
if (!tools || !Array.isArray(tools)) return tools;

Expand Down
7 changes: 7 additions & 0 deletions packages/ai-proxy/src/types/errors.ts
Original file line number Diff line number Diff line change
Expand Up @@ -60,6 +60,13 @@ export class OpenAIUnprocessableError extends AIUnprocessableError {
}
}

/**
 * Raised when a request to Anthropic cannot be processed (message/tool
 * conversion failure or API error), mirroring OpenAIUnprocessableError
 * on the OpenAI path.
 */
export class AnthropicUnprocessableError extends AIUnprocessableError {
constructor(message: string) {
super(message);
// NOTE(review): name is 'AnthropicError' while the class is
// 'AnthropicUnprocessableError' — confirm whether consumers match on this
// string before aligning it with the class name.
this.name = 'AnthropicError';
}
}

export class AIToolUnprocessableError extends AIUnprocessableError {
constructor(message: string) {
super(message);
Expand Down
Loading
Loading