From 3e74b95417b551cee065ef21a2f63a18ede5c944 Mon Sep 17 00:00:00 2001 From: Claude Date: Sun, 23 Nov 2025 10:25:49 +0000 Subject: [PATCH 1/8] feat: implement Anthropic provider with Messages API adapter Add complete Anthropic provider implementation with the following features: - MessagesAdapter: Translates between internal format and Anthropic Messages API - Converts OpenAI-style messages to Anthropic format - Handles system prompts as top-level parameter - Converts tool specifications to Anthropic input_schema format - Supports multimodal content (text and images) - Streams responses with SSE event handling - Converts Anthropic responses back to OpenAI-compatible format - AnthropicProvider: Manages API communication with Anthropic - Authentication via x-api-key header - Anthropic API version header support - Upstream request/response logging - Tool support with automatic schema conversion - Prompt caching support - Default model: claude-3-5-sonnet-20241022 Implementation follows existing provider patterns and maintains OpenAI API compatibility. --- backend/src/lib/adapters/messagesAdapter.js | 523 ++++++++++++++++++ .../src/lib/providers/anthropicProvider.js | 222 +++++++- 2 files changed, 730 insertions(+), 15 deletions(-) create mode 100644 backend/src/lib/adapters/messagesAdapter.js diff --git a/backend/src/lib/adapters/messagesAdapter.js b/backend/src/lib/adapters/messagesAdapter.js new file mode 100644 index 00000000..070cfc41 --- /dev/null +++ b/backend/src/lib/adapters/messagesAdapter.js @@ -0,0 +1,523 @@ +import { BaseAdapter } from './baseAdapter.js'; +import { convertContentPartImage } from '../localImageEncoder.js'; + +const ANTHROPIC_ALLOWED_REQUEST_KEYS = new Set([ + 'max_tokens', + 'metadata', + 'stop_sequences', + 'stream', + 'temperature', + 'tool_choice', + 'top_k', + 'top_p', +]); + +const RESERVED_INTERNAL_KEYS = new Set([ + 'conversation_id', + 'provider_id', + 'provider', + 'streamingEnabled', + 'toolsEnabled', + 'qualityLevel', + 'researchMode', + 'systemPrompt', + 'system_prompt', + 'previous_response_id', +]); + +/** + * Convert OpenAI-style message format to Anthropic Messages API format + */ +async function normalizeMessageForAnthropic(message) { + if (!message || typeof message !== 'object') return null; + + const role = typeof message.role === 'string' ? message.role : undefined; + if (!role) return null; + + // Anthropic doesn't support 'system' role in messages array + // System messages should be extracted to top-level system parameter + if (role === 'system') return null; + + const normalized = { role: role === 'assistant' ? 
'assistant' : 'user' }; + + if ('content' in message) { + const content = message.content; + if (Array.isArray(content)) { + // Convert multimodal content + const convertedParts = await Promise.all(content.map(async (part) => { + if (typeof part === 'string') { + return { type: 'text', text: part }; + } + if (part?.type === 'text') { + return { type: 'text', text: part.text }; + } + if (part?.type === 'image_url') { + // Convert OpenAI image format to Anthropic format + const converted = await convertContentPartImage(part); + if (converted?.type === 'image_url') { + const url = converted.image_url?.url || converted.image_url; + // Extract base64 data if present + if (typeof url === 'string' && url.startsWith('data:image/')) { + const matches = url.match(/^data:image\/(\w+);base64,(.+)$/); + if (matches) { + const [, mediaType, data] = matches; + return { + type: 'image', + source: { + type: 'base64', + media_type: `image/${mediaType}`, + data, + }, + }; + } + } + } + return null; + } + if (part?.type === 'tool_result') { + // Anthropic tool result format + return part; + } + if (part?.type === 'tool_use') { + // Anthropic tool use format + return part; + } + return null; + })); + normalized.content = convertedParts.filter(Boolean); + } else if (typeof content === 'string') { + normalized.content = content; + } else if (content === null) { + normalized.content = ''; + } + } + + // Handle tool calls from OpenAI format + if (Array.isArray(message.tool_calls) && message.tool_calls.length > 0) { + const toolUseBlocks = message.tool_calls.map((toolCall) => { + const fn = toolCall.function || {}; + let input = {}; + try { + input = typeof fn.arguments === 'string' ? JSON.parse(fn.arguments) : fn.arguments || {}; + } catch { + input = {}; + } + return { + type: 'tool_use', + id: toolCall.id || `tool_${Date.now()}`, + name: fn.name, + input, + }; + }); + + // If content is empty, initialize it as array + if (!normalized.content) { + normalized.content = toolUseBlocks; + } else if (Array.isArray(normalized.content)) { + normalized.content.push(...toolUseBlocks); + } else { + // Convert string content to array and add tool use blocks + normalized.content = [ + { type: 'text', text: normalized.content }, + ...toolUseBlocks, + ]; + } + } + + // Handle tool results from OpenAI format (tool role) + if (role === 'tool' && message.tool_call_id && message.content) { + return { + role: 'user', + content: [ + { + type: 'tool_result', + tool_use_id: message.tool_call_id, + content: message.content, + }, + ], + }; + } + + // Preserve cache_control for prompt caching + if (message.cache_control && typeof message.cache_control === 'object') { + normalized.cache_control = message.cache_control; + } + + return normalized; +} + +async function normalizeMessagesForAnthropic(messages) { + if (!Array.isArray(messages)) return { system: undefined, messages: [] }; + + // Extract system messages + const systemMessages = messages.filter((m) => m.role === 'system'); + const nonSystemMessages = messages.filter((m) => m.role !== 'system'); + + // Combine system messages into a single system prompt + const system = systemMessages.length > 0 + ? 
systemMessages.map((m) => m.content).filter(Boolean).join('\n\n') + : undefined; + + const normalized = await Promise.all( + nonSystemMessages.map((message) => normalizeMessageForAnthropic(message)) + ); + + return { + system, + messages: normalized.filter(Boolean), + }; +} + +/** + * Convert OpenAI-style tool spec to Anthropic format + */ +function normalizeToolForAnthropic(tool) { + if (!tool) return null; + if (typeof tool === 'string') { + return { + name: tool, + description: '', + input_schema: { type: 'object', properties: {} }, + }; + } + if (typeof tool !== 'object') return null; + + const fn = tool.function || tool; + if (!fn.name) return null; + + return { + name: fn.name, + description: fn.description || '', + input_schema: fn.parameters || fn.input_schema || { type: 'object', properties: {} }, + }; +} + +function normalizeToolsForAnthropic(tools) { + if (!Array.isArray(tools)) return undefined; + const normalized = tools.map(normalizeToolForAnthropic).filter(Boolean); + return normalized.length > 0 ? normalized : undefined; +} + +function omitReservedKeys(payload) { + if (!payload || typeof payload !== 'object') return {}; + const result = {}; + for (const [key, value] of Object.entries(payload)) { + if (RESERVED_INTERNAL_KEYS.has(key)) continue; + result[key] = value; + } + return result; +} + +/** + * Adapter for Anthropic Messages API + */ +export class MessagesAdapter extends BaseAdapter { + constructor(options = {}) { + super(options); + this.getDefaultModel = options.getDefaultModel || (() => undefined); + } + + async translateRequest(internalRequest = {}, context = {}) { + const payload = omitReservedKeys(internalRequest); + + const resolveDefaultModel = context.getDefaultModel || this.getDefaultModel; + const model = payload.model || resolveDefaultModel(); + if (!model) { + throw new Error('Anthropic provider requires a model'); + } + + const { system, messages } = await normalizeMessagesForAnthropic(payload.messages); + + if (messages.length === 0) { + throw new Error('Anthropic provider requires at least one non-system message'); + } + + const normalized = { model, messages }; + + // Add system prompt if present + if (system) { + normalized.system = system; + } + + // Anthropic requires max_tokens + if (payload.max_tokens) { + normalized.max_tokens = payload.max_tokens; + } else { + // Use a sensible default if not provided + normalized.max_tokens = 4096; + } + + if ('stream' in payload) { + normalized.stream = Boolean(payload.stream); + } + + // Handle tools + const tools = normalizeToolsForAnthropic(payload.tools); + if (tools) { + normalized.tools = tools; + if (payload.tool_choice !== undefined) { + // Convert OpenAI tool_choice format to Anthropic format + if (typeof payload.tool_choice === 'string') { + if (payload.tool_choice === 'auto') { + normalized.tool_choice = { type: 'auto' }; + } else if (payload.tool_choice === 'required') { + normalized.tool_choice = { type: 'any' }; + } + } else if (payload.tool_choice?.type === 'function') { + normalized.tool_choice = { + type: 'tool', + name: payload.tool_choice.function?.name, + }; + } + } + } + + // Copy allowed parameters + for (const [key, value] of Object.entries(payload)) { + if (value === undefined) continue; + if (key === 'messages' || key === 'model' || key === 'tools' || key === 'tool_choice' || key === 'stream' || key === 'max_tokens') + continue; + if (ANTHROPIC_ALLOWED_REQUEST_KEYS.has(key)) { + normalized[key] = value; + } + } + + return normalized; + } + + translateResponse(providerResponse, 
_context = {}) { + if (typeof providerResponse === 'string') { + try { + const parsed = JSON.parse(providerResponse); + return this.convertAnthropicToOpenAI(parsed); + } catch { + return providerResponse; + } + } + if (providerResponse && typeof providerResponse === 'object') { + return this.convertAnthropicToOpenAI(providerResponse); + } + return providerResponse; + } + + translateStreamChunk(chunk, _context = {}) { + if (!chunk) return null; + if (typeof chunk === 'string') { + const trimmed = chunk.trim(); + if (!trimmed) return null; + try { + const parsed = JSON.parse(trimmed); + return this.convertAnthropicStreamToOpenAI(parsed); + } catch { + return null; + } + } + if (chunk && typeof chunk === 'object') { + return this.convertAnthropicStreamToOpenAI(chunk); + } + return chunk; + } + + /** + * Convert Anthropic response to OpenAI format + */ + convertAnthropicToOpenAI(anthropicResponse) { + if (!anthropicResponse || typeof anthropicResponse !== 'object') { + return anthropicResponse; + } + + // Handle error responses + if (anthropicResponse.type === 'error') { + return anthropicResponse; + } + + const openAIResponse = { + id: anthropicResponse.id || `chatcmpl-${Date.now()}`, + object: 'chat.completion', + created: Date.now(), + model: anthropicResponse.model, + choices: [], + }; + + // Convert content blocks to OpenAI format + const content = []; + const toolCalls = []; + + if (Array.isArray(anthropicResponse.content)) { + anthropicResponse.content.forEach((block, index) => { + if (block.type === 'text') { + content.push(block.text); + } else if (block.type === 'tool_use') { + toolCalls.push({ + id: block.id, + type: 'function', + function: { + name: block.name, + arguments: JSON.stringify(block.input), + }, + index, + }); + } + }); + } + + const message = { + role: 'assistant', + content: content.length > 0 ? 
content.join('') : null, + }; + + if (toolCalls.length > 0) { + message.tool_calls = toolCalls; + } + + openAIResponse.choices.push({ + index: 0, + message, + finish_reason: anthropicResponse.stop_reason || 'stop', + }); + + // Add usage information + if (anthropicResponse.usage) { + openAIResponse.usage = { + prompt_tokens: anthropicResponse.usage.input_tokens || 0, + completion_tokens: anthropicResponse.usage.output_tokens || 0, + total_tokens: (anthropicResponse.usage.input_tokens || 0) + (anthropicResponse.usage.output_tokens || 0), + }; + } + + return openAIResponse; + } + + /** + * Convert Anthropic streaming event to OpenAI streaming format + */ + convertAnthropicStreamToOpenAI(event) { + if (!event || typeof event !== 'object') { + return null; + } + + // Handle different event types + switch (event.type) { + case 'message_start': + return { + id: event.message?.id || `chatcmpl-${Date.now()}`, + object: 'chat.completion.chunk', + created: Date.now(), + model: event.message?.model, + choices: [ + { + index: 0, + delta: { role: 'assistant', content: '' }, + finish_reason: null, + }, + ], + }; + + case 'content_block_start': + if (event.content_block?.type === 'tool_use') { + return { + id: `chatcmpl-${Date.now()}`, + object: 'chat.completion.chunk', + created: Date.now(), + model: event.model, + choices: [ + { + index: event.index || 0, + delta: { + tool_calls: [ + { + index: event.index || 0, + id: event.content_block.id, + type: 'function', + function: { + name: event.content_block.name, + arguments: '', + }, + }, + ], + }, + finish_reason: null, + }, + ], + }; + } + return null; + + case 'content_block_delta': + if (event.delta?.type === 'text_delta') { + return { + id: `chatcmpl-${Date.now()}`, + object: 'chat.completion.chunk', + created: Date.now(), + model: event.model, + choices: [ + { + index: event.index || 0, + delta: { content: event.delta.text }, + finish_reason: null, + }, + ], + }; + } + if (event.delta?.type === 'input_json_delta') { + return { + id: `chatcmpl-${Date.now()}`, + object: 'chat.completion.chunk', + created: Date.now(), + model: event.model, + choices: [ + { + index: event.index || 0, + delta: { + tool_calls: [ + { + index: event.index || 0, + function: { + arguments: event.delta.partial_json, + }, + }, + ], + }, + finish_reason: null, + }, + ], + }; + } + return null; + + case 'content_block_stop': + return null; // No equivalent in OpenAI streaming + + case 'message_delta': + return { + id: `chatcmpl-${Date.now()}`, + object: 'chat.completion.chunk', + created: Date.now(), + model: event.model, + choices: [ + { + index: 0, + delta: {}, + finish_reason: event.delta?.stop_reason || null, + }, + ], + usage: event.usage + ? 
{ + prompt_tokens: 0, + completion_tokens: event.usage.output_tokens || 0, + total_tokens: event.usage.output_tokens || 0, + } + : undefined, + }; + + case 'message_stop': + return '[DONE]'; + + case 'ping': + return null; // Ignore ping events + + default: + return null; + } + } +} diff --git a/backend/src/lib/providers/anthropicProvider.js b/backend/src/lib/providers/anthropicProvider.js index 3821918c..b6e4b23a 100644 --- a/backend/src/lib/providers/anthropicProvider.js +++ b/backend/src/lib/providers/anthropicProvider.js @@ -1,36 +1,226 @@ +import { Readable } from 'node:stream'; +import { logUpstreamRequest, logUpstreamResponse, teeStreamWithPreview } from '../logging/upstreamLogger.js'; import { BaseProvider } from './baseProvider.js'; +import { MessagesAdapter } from '../adapters/messagesAdapter.js'; +import { logger } from '../../logger.js'; + +const FALLBACK_MODEL = 'claude-3-5-sonnet-20241022'; +const ANTHROPIC_API_VERSION = '2023-06-01'; + +function wrapStreamingResponse(response) { + if (!response || !response.body) return response; + // If the body already exposes Node stream semantics, nothing to do. + if (typeof response.body.on === 'function') { + return response; + } + + // Convert WHATWG ReadableStream to Node.js Readable to satisfy existing consumers. + const canConvert = typeof Readable?.fromWeb === 'function' && typeof response.body.getReader === 'function'; + if (!canConvert) return response; + + let nodeReadable; + return new Proxy(response, { + get(target, prop, receiver) { + if (prop === 'body') { + if (!nodeReadable) { + nodeReadable = Readable.fromWeb(target.body); + } + return nodeReadable; + } + return Reflect.get(target, prop, receiver); + }, + }); +} export class AnthropicProvider extends BaseProvider { - isConfigured() { - // TODO: check for required Anthropic credentials. + createAdapter() { + return new MessagesAdapter({ + config: this.config, + settings: this.settings, + getDefaultModel: () => this.getDefaultModel(), + }); + } + + buildAdapterContext(context = {}) { + return { + getDefaultModel: () => this.getDefaultModel(), + ...context, + }; + } + + get apiKey() { + return this.settings?.apiKey + || this.config?.providerConfig?.apiKey + || this.config?.anthropicApiKey; + } + + get baseUrl() { + const seededDefaultUrl = 'https://api.anthropic.com'; + const dbBaseUrl = this.settings?.baseUrl; + const overrideBaseUrl = this.config?.providerConfig?.baseUrl || this.config?.anthropicBaseUrl; + const shouldPreferOverride = Boolean(overrideBaseUrl) + && (!dbBaseUrl || dbBaseUrl === seededDefaultUrl); + const configuredBase = shouldPreferOverride + ? overrideBaseUrl + : dbBaseUrl || overrideBaseUrl || seededDefaultUrl; + return String(configuredBase).replace(/\/$/, ''); } - normalizeRequest(_internalRequest) { - // TODO: adapt internal request into Anthropic Messages payload. + get defaultHeaders() { + return { + ...(this.config?.providerConfig?.headers || {}), + ...(this.settings?.headers || {}), + }; } - async sendRequest(_normalizedRequest) { - // TODO: issue HTTP request to Anthropic endpoint. + get httpClient() { + if (this.http) return this.http; + if (typeof globalThis.fetch === 'function') return globalThis.fetch.bind(globalThis); + return null; } - normalizeResponse(_upstreamResponse) { - // TODO: translate Anthropic response back into internal response format. 
+ isConfigured() { + return Boolean(this.apiKey || this.defaultHeaders['x-api-key']); } - normalizeStreamChunk(_chunk) { - // TODO: translate Anthropic streaming chunk to internal chunk structure. + async makeHttpRequest(translatedRequest) { + const client = this.httpClient; + if (!client) { + throw new Error('No HTTP client available for Anthropic provider'); + } + + const endpoint = '/v1/messages'; + const url = `${this.baseUrl}${endpoint}`; + const headers = { + 'Content-Type': 'application/json', + 'anthropic-version': ANTHROPIC_API_VERSION, + ...(translatedRequest?.stream ? { Accept: 'text/event-stream' } : { Accept: 'application/json' }), + ...this.defaultHeaders, + }; + + if (this.apiKey && !headers['x-api-key']) { + headers['x-api-key'] = this.apiKey; + } + + // Log the exact upstream request for debugging + try { + logUpstreamRequest({ url, headers, body: translatedRequest }); + } catch (err) { + logger.error('Failed to log upstream request:', err?.message || err); + } + + const response = await client(url, { + method: 'POST', + headers, + body: JSON.stringify(translatedRequest), + }); + + // Log the upstream response for debugging + try { + const responseHeaders = response.headers && typeof response.headers.entries === 'function' + ? Object.fromEntries(response.headers.entries()) + : {}; + + // Check if response is actually a stream by inspecting content-type + const contentType = response.headers?.get?.('content-type') || ''; + const isActuallyStreaming = contentType.includes('text/event-stream') || contentType.includes('text/plain'); + + if (translatedRequest?.stream && isActuallyStreaming) { + // For streaming responses, tee the stream to capture SSE data + const wrappedResponse = wrapStreamingResponse(response); + const { previewPromise, stream: loggedStream } = teeStreamWithPreview(wrappedResponse.body, { + maxBytes: 128 * 1024, // Capture up to 128KB of SSE data + encoding: 'utf8' + }); + + // Log asynchronously without blocking the response + previewPromise.then((preview) => { + logUpstreamResponse({ + url, + status: response.status, + headers: responseHeaders, + body: preview + }); + }).catch((err) => { + logger.error('Failed to capture streaming response preview:', err?.message || err); + }); + + // Return response with the logged stream + return new Proxy(wrappedResponse, { + get(target, prop, receiver) { + if (prop === 'body') { + return loggedStream; + } + return Reflect.get(target, prop, receiver); + }, + }); + } else { + // For non-streaming responses, capture the body + let responseBody = null; + if (response.clone) { + const responseClone = response.clone(); + if (typeof responseClone.text === 'function') { + responseBody = await responseClone.text(); + } + } + logUpstreamResponse({ + url, + status: response.status, + headers: responseHeaders, + body: responseBody + }); + return response; + } + } catch (err) { + // logger should be best-effort; don't let logging break responses + logger.error('Failed to log upstream response:', err?.message || err); + if (translatedRequest?.stream) { + return wrapStreamingResponse(response); + } + return response; + } } - getToolsetSpec(_toolRegistry) { - // TODO: emit Anthropic tool schema when the provider supports it. 
+ getToolsetSpec(toolRegistry) { + if (!toolRegistry) return []; + if (Array.isArray(toolRegistry)) { + // Convert OpenAI tool specs to Anthropic format + return toolRegistry.map((tool) => { + if (typeof tool === 'string') { + return { + name: tool, + description: '', + input_schema: { type: 'object', properties: {} }, + }; + } + const fn = tool.function || tool; + return { + name: fn.name, + description: fn.description || '', + input_schema: fn.parameters || fn.input_schema || { type: 'object', properties: {} }, + }; + }); + } + if (typeof toolRegistry.generateOpenAIToolSpecs === 'function') { + const openAISpecs = toolRegistry.generateOpenAIToolSpecs(); + return this.getToolsetSpec(openAISpecs); + } + if (typeof toolRegistry.generateToolSpecs === 'function') { + const specs = toolRegistry.generateToolSpecs(); + return this.getToolsetSpec(specs); + } + return []; } supportsTools() { - // TODO: report Anthropic tool support by model. + return true; } supportsReasoningControls(_model) { - // TODO: report reasoning control availability for Anthropic. + // Anthropic models like Claude 3.5 Sonnet support extended thinking + // but it uses a different mechanism than OpenAI's reasoning controls + // For now, return false to avoid confusion + return false; } supportsPromptCaching() { @@ -40,6 +230,8 @@ export class AnthropicProvider extends BaseProvider { } getDefaultModel() { - // TODO: determine Anthropic default model from configuration. + return this.settings?.defaultModel + || this.config?.defaultModel + || FALLBACK_MODEL; } } From cfe99b58728c8496955cb2879697c0dfe31678d5 Mon Sep 17 00:00:00 2001 From: Claude Date: Sun, 23 Nov 2025 10:32:55 +0000 Subject: [PATCH 2/8] test: add comprehensive MessagesAdapter tests Add unit tests for the MessagesAdapter covering: - OpenAI to Anthropic message format conversion - System message extraction and combination - Tool specification conversion - Tool use and tool result handling - Multimodal content conversion - Streaming event translation - Default model handling - Error cases All 15 tests pass successfully. --- backend/__tests__/messages_adapter.test.js | 304 +++++++++++++++++++++ 1 file changed, 304 insertions(+) create mode 100644 backend/__tests__/messages_adapter.test.js diff --git a/backend/__tests__/messages_adapter.test.js b/backend/__tests__/messages_adapter.test.js new file mode 100644 index 00000000..7a0848aa --- /dev/null +++ b/backend/__tests__/messages_adapter.test.js @@ -0,0 +1,304 @@ +import { describe, test, expect } from '@jest/globals'; +import { MessagesAdapter } from '../src/lib/adapters/messagesAdapter.js'; + +describe('MessagesAdapter', () => { + let adapter; + + beforeEach(() => { + adapter = new MessagesAdapter({ + config: {}, + settings: {}, + getDefaultModel: () => 'claude-3-5-sonnet-20241022', + }); + }); + + describe('translateRequest', () => { + test('converts OpenAI format to Anthropic format', async () => { + const internalRequest = { + model: 'claude-3-5-sonnet-20241022', + messages: [ + { role: 'system', content: 'You are a helpful assistant.' }, + { role: 'user', content: 'Hello!' }, + ], + max_tokens: 1000, + }; + + const result = await adapter.translateRequest(internalRequest); + + expect(result.model).toBe('claude-3-5-sonnet-20241022'); + expect(result.system).toBe('You are a helpful assistant.'); + expect(result.messages).toHaveLength(1); + expect(result.messages[0]).toEqual({ role: 'user', content: 'Hello!' 
}); + expect(result.max_tokens).toBe(1000); + }); + + test('handles multiple system messages', async () => { + const internalRequest = { + model: 'claude-3-5-sonnet-20241022', + messages: [ + { role: 'system', content: 'First instruction.' }, + { role: 'system', content: 'Second instruction.' }, + { role: 'user', content: 'Hello!' }, + ], + }; + + const result = await adapter.translateRequest(internalRequest); + + expect(result.system).toBe('First instruction.\n\nSecond instruction.'); + expect(result.messages).toHaveLength(1); + }); + + test('converts tool specifications', async () => { + const internalRequest = { + model: 'claude-3-5-sonnet-20241022', + messages: [{ role: 'user', content: 'Use a tool' }], + tools: [ + { + type: 'function', + function: { + name: 'get_weather', + description: 'Get weather', + parameters: { + type: 'object', + properties: { location: { type: 'string' } }, + }, + }, + }, + ], + }; + + const result = await adapter.translateRequest(internalRequest); + + expect(result.tools).toHaveLength(1); + expect(result.tools[0]).toEqual({ + name: 'get_weather', + description: 'Get weather', + input_schema: { + type: 'object', + properties: { location: { type: 'string' } }, + }, + }); + }); + + test('converts assistant messages with tool calls', async () => { + const internalRequest = { + model: 'claude-3-5-sonnet-20241022', + messages: [ + { role: 'user', content: 'What is the weather?' }, + { + role: 'assistant', + content: null, + tool_calls: [ + { + id: 'call_123', + type: 'function', + function: { + name: 'get_weather', + arguments: '{"location":"San Francisco"}', + }, + }, + ], + }, + ], + }; + + const result = await adapter.translateRequest(internalRequest); + + expect(result.messages).toHaveLength(2); + expect(result.messages[1].role).toBe('assistant'); + expect(result.messages[1].content).toEqual([ + { + type: 'tool_use', + id: 'call_123', + name: 'get_weather', + input: { location: 'San Francisco' }, + }, + ]); + }); + + test('converts tool results from OpenAI format', async () => { + const internalRequest = { + model: 'claude-3-5-sonnet-20241022', + messages: [ + { role: 'user', content: 'What is the weather?' }, + { + role: 'tool', + tool_call_id: 'call_123', + content: 'Sunny, 72°F', + }, + ], + }; + + const result = await adapter.translateRequest(internalRequest); + + expect(result.messages).toHaveLength(2); + expect(result.messages[1].role).toBe('user'); + expect(result.messages[1].content).toEqual([ + { + type: 'tool_result', + tool_use_id: 'call_123', + content: 'Sunny, 72°F', + }, + ]); + }); + + test('sets default max_tokens if not provided', async () => { + const internalRequest = { + model: 'claude-3-5-sonnet-20241022', + messages: [{ role: 'user', content: 'Hello!' }], + }; + + const result = await adapter.translateRequest(internalRequest); + + expect(result.max_tokens).toBe(4096); + }); + + test('uses default model if not specified', async () => { + const internalRequest = { + messages: [{ role: 'user', content: 'Hello!' }], + }; + + const result = await adapter.translateRequest(internalRequest); + + expect(result.model).toBe('claude-3-5-sonnet-20241022'); + }); + + test('throws error if no model and no default', async () => { + const adapterNoDefault = new MessagesAdapter({ + config: {}, + settings: {}, + getDefaultModel: () => undefined, + }); + + const internalRequest = { + messages: [{ role: 'user', content: 'Hello!' 
}], + }; + + await expect(adapterNoDefault.translateRequest(internalRequest)).rejects.toThrow( + 'Anthropic provider requires a model' + ); + }); + + test('throws error if no non-system messages', async () => { + const internalRequest = { + model: 'claude-3-5-sonnet-20241022', + messages: [{ role: 'system', content: 'System only' }], + }; + + await expect(adapter.translateRequest(internalRequest)).rejects.toThrow( + 'Anthropic provider requires at least one non-system message' + ); + }); + }); + + describe('translateResponse', () => { + test('converts Anthropic response to OpenAI format', () => { + const anthropicResponse = { + id: 'msg_123', + type: 'message', + role: 'assistant', + model: 'claude-3-5-sonnet-20241022', + content: [{ type: 'text', text: 'Hello! How can I help?' }], + stop_reason: 'end_turn', + usage: { input_tokens: 10, output_tokens: 20 }, + }; + + const result = adapter.translateResponse(anthropicResponse); + + expect(result.id).toBe('msg_123'); + expect(result.object).toBe('chat.completion'); + expect(result.model).toBe('claude-3-5-sonnet-20241022'); + expect(result.choices).toHaveLength(1); + expect(result.choices[0].message.role).toBe('assistant'); + expect(result.choices[0].message.content).toBe('Hello! How can I help?'); + expect(result.choices[0].finish_reason).toBe('end_turn'); + expect(result.usage).toEqual({ + prompt_tokens: 10, + completion_tokens: 20, + total_tokens: 30, + }); + }); + + test('converts tool use in response', () => { + const anthropicResponse = { + id: 'msg_123', + type: 'message', + role: 'assistant', + model: 'claude-3-5-sonnet-20241022', + content: [ + { type: 'text', text: 'Let me check that.' }, + { + type: 'tool_use', + id: 'tool_123', + name: 'get_weather', + input: { location: 'San Francisco' }, + }, + ], + stop_reason: 'tool_use', + usage: { input_tokens: 10, output_tokens: 20 }, + }; + + const result = adapter.translateResponse(anthropicResponse); + + expect(result.choices[0].message.content).toBe('Let me check that.'); + expect(result.choices[0].message.tool_calls).toHaveLength(1); + expect(result.choices[0].message.tool_calls[0]).toEqual({ + id: 'tool_123', + type: 'function', + function: { + name: 'get_weather', + arguments: '{"location":"San Francisco"}', + }, + index: 1, + }); + }); + }); + + describe('translateStreamChunk', () => { + test('converts message_start event', () => { + const event = { + type: 'message_start', + message: { + id: 'msg_123', + model: 'claude-3-5-sonnet-20241022', + }, + }; + + const result = adapter.translateStreamChunk(event); + + expect(result.id).toBe('msg_123'); + expect(result.object).toBe('chat.completion.chunk'); + expect(result.model).toBe('claude-3-5-sonnet-20241022'); + expect(result.choices[0].delta.role).toBe('assistant'); + expect(result.choices[0].delta.content).toBe(''); + }); + + test('converts content_block_delta with text', () => { + const event = { + type: 'content_block_delta', + index: 0, + delta: { type: 'text_delta', text: 'Hello' }, + }; + + const result = adapter.translateStreamChunk(event); + + expect(result.choices[0].delta.content).toBe('Hello'); + }); + + test('converts message_stop event', () => { + const event = { type: 'message_stop' }; + + const result = adapter.translateStreamChunk(event); + + expect(result).toBe('[DONE]'); + }); + + test('returns null for ping events', () => { + const event = { type: 'ping' }; + + const result = adapter.translateStreamChunk(event); + + expect(result).toBeNull(); + }); + }); +}); From f4c6fccf0f76786e57c8cbe41d0737535b716d45 Mon Sep 17 
00:00:00 2001
From: Claude
Date: Sun, 23 Nov 2025 10:35:34 +0000
Subject: [PATCH 3/8] feat: add Anthropic provider type to Settings Modal

Add UI support for Anthropic provider configuration:

- Add 'Anthropic' option to the Provider Type dropdown
- Conditionally hide the Base URL field for non-OpenAI providers
- Update helper text based on the selected provider type
- Add a provider-specific API key placeholder (sk-ant-api03-... for Anthropic)

The Base URL field now only appears when 'OpenAI Compatible' is selected,
since the Anthropic provider uses a fixed endpoint (https://api.anthropic.com).
---
 frontend/components/SettingsModal.tsx | 53 +++++++++++++++------------
 1 file changed, 30 insertions(+), 23 deletions(-)

diff --git a/frontend/components/SettingsModal.tsx b/frontend/components/SettingsModal.tsx
index b2ccb0be..f5536cdb 100644
--- a/frontend/components/SettingsModal.tsx
+++ b/frontend/components/SettingsModal.tsx
@@ -688,34 +688,39 @@ export default function SettingsModal({ open, onClose, onProvidersChanged }: Set
                   required
                 >
+                  <option value="anthropic">Anthropic</option>
                 </select>
                 <p>
-                  Compatible with OpenAI API format (ChatGPT, Claude, most providers)
+                  {form.provider_type === 'anthropic'
+                    ? 'Native Anthropic Claude API support with Messages API'
+                    : 'Compatible with OpenAI API format (ChatGPT, Claude, most providers)'}
                 </p>
               </div>
-              <div>
-                <label>Base URL</label>
-                <input
-                  value={form.base_url}
-                  onChange={(e) => setForm((f) => ({ ...f, base_url: e.target.value }))}
-                  placeholder="https://api.openai.com/v1 (auto-filled if empty)"
-                />
-                <p>
-                  Custom API endpoint. Leave empty for OpenAI's default endpoint.
-                </p>
-              </div>
+              {form.provider_type === 'openai' && (
+                <div>
+                  <label>Base URL</label>
+                  <input
+                    value={form.base_url}
+                    onChange={(e) => setForm((f) => ({ ...f, base_url: e.target.value }))}
+                    placeholder="https://api.openai.com/v1 (auto-filled if empty)"
+                  />
+                  <p>
+                    Custom API endpoint. Leave empty for OpenAI's default endpoint.
+                  </p>
+                </div>
+              )}
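
For reference, a minimal sketch of how the new MessagesAdapter is exercised, mirroring the adapter construction used in the tests above (the import path is the one used from backend/__tests__; the real provider/registry wiring may differ, and the expected output shown in comments assumes the default max_tokens fallback described in patch 1):

import { MessagesAdapter } from '../src/lib/adapters/messagesAdapter.js';

// Construct the adapter the same way the tests do; the default-model callback
// stands in for whatever the provider settings supply in production.
const adapter = new MessagesAdapter({
  config: {},
  settings: {},
  getDefaultModel: () => 'claude-3-5-sonnet-20241022',
});

// Translate an OpenAI-style chat request into an Anthropic Messages payload:
// the system message is lifted to the top-level `system` parameter and a
// default max_tokens is filled in when the caller omits it.
const payload = await adapter.translateRequest({
  messages: [
    { role: 'system', content: 'You are a helpful assistant.' },
    { role: 'user', content: 'Hello!' },
  ],
});

// payload:
// {
//   model: 'claude-3-5-sonnet-20241022',
//   system: 'You are a helpful assistant.',
//   messages: [{ role: 'user', content: 'Hello!' }],
//   max_tokens: 4096,
// }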