diff --git a/README.md b/README.md index 91750832..169a6eab 100644 --- a/README.md +++ b/README.md @@ -227,6 +227,8 @@ PROVIDER_BASE_URL= # Custom provider base URL PROVIDER_API_KEY= # Generic provider API key PROVIDER_CUSTOM_HEADERS={} # Custom headers as JSON OPENAI_BASE_URL=https://api.openai.com/v1 # OpenAI base URL +ANTHROPIC_BASE_URL=https://api.anthropic.com # Anthropic base URL +ANTHROPIC_API_KEY= # Anthropic API key (if different from PROVIDER_API_KEY) ``` **Optional - Tool Configuration:** diff --git a/backend/__tests__/messages_adapter.test.js b/backend/__tests__/messages_adapter.test.js new file mode 100644 index 00000000..e5612f14 --- /dev/null +++ b/backend/__tests__/messages_adapter.test.js @@ -0,0 +1,304 @@ +import { describe, test, expect } from '@jest/globals'; +import { MessagesAdapter } from '../src/lib/adapters/messagesAdapter.js'; + +describe('MessagesAdapter', () => { + let adapter; + + beforeEach(() => { + adapter = new MessagesAdapter({ + config: {}, + settings: {}, + getDefaultModel: () => 'claude-3-5-sonnet-20241022', + }); + }); + + describe('translateRequest', () => { + test('converts OpenAI format to Anthropic format', async () => { + const internalRequest = { + model: 'claude-3-5-sonnet-20241022', + messages: [ + { role: 'system', content: 'You are a helpful assistant.' }, + { role: 'user', content: 'Hello!' }, + ], + max_tokens: 1000, + }; + + const result = await adapter.translateRequest(internalRequest); + + expect(result.model).toBe('claude-3-5-sonnet-20241022'); + expect(result.system).toBe('You are a helpful assistant.'); + expect(result.messages).toHaveLength(1); + expect(result.messages[0]).toEqual({ role: 'user', content: 'Hello!' }); + expect(result.max_tokens).toBe(1000); + }); + + test('handles multiple system messages', async () => { + const internalRequest = { + model: 'claude-3-5-sonnet-20241022', + messages: [ + { role: 'system', content: 'First instruction.' }, + { role: 'system', content: 'Second instruction.' 
}, + { role: 'user', content: 'Hello!' }, + ], + }; + + const result = await adapter.translateRequest(internalRequest); + + expect(result.system).toBe('First instruction.\n\nSecond instruction.'); + expect(result.messages).toHaveLength(1); + }); + + test('converts tool specifications', async () => { + const internalRequest = { + model: 'claude-3-5-sonnet-20241022', + messages: [{ role: 'user', content: 'Use a tool' }], + tools: [ + { + type: 'function', + function: { + name: 'get_weather', + description: 'Get weather', + parameters: { + type: 'object', + properties: { location: { type: 'string' } }, + }, + }, + }, + ], + }; + + const result = await adapter.translateRequest(internalRequest); + + expect(result.tools).toHaveLength(1); + expect(result.tools[0]).toEqual({ + name: 'get_weather', + description: 'Get weather', + input_schema: { + type: 'object', + properties: { location: { type: 'string' } }, + }, + }); + }); + + test('converts assistant messages with tool calls', async () => { + const internalRequest = { + model: 'claude-3-5-sonnet-20241022', + messages: [ + { role: 'user', content: 'What is the weather?' }, + { + role: 'assistant', + content: null, + tool_calls: [ + { + id: 'call_123', + type: 'function', + function: { + name: 'get_weather', + arguments: '{"location":"San Francisco"}', + }, + }, + ], + }, + ], + }; + + const result = await adapter.translateRequest(internalRequest); + + expect(result.messages).toHaveLength(2); + expect(result.messages[1].role).toBe('assistant'); + expect(result.messages[1].content).toEqual([ + { + type: 'tool_use', + id: 'call_123', + name: 'get_weather', + input: { location: 'San Francisco' }, + }, + ]); + }); + + test('converts tool results from OpenAI format', async () => { + const internalRequest = { + model: 'claude-3-5-sonnet-20241022', + messages: [ + { role: 'user', content: 'What is the weather?' 
}, + { + role: 'tool', + tool_call_id: 'call_123', + content: 'Sunny, 72°F', + }, + ], + }; + + const result = await adapter.translateRequest(internalRequest); + + expect(result.messages).toHaveLength(2); + expect(result.messages[1].role).toBe('user'); + expect(result.messages[1].content).toEqual([ + { + type: 'tool_result', + tool_use_id: 'call_123', + content: 'Sunny, 72°F', + }, + ]); + }); + + test('sets default max_tokens if not provided', async () => { + const internalRequest = { + model: 'claude-3-5-sonnet-20241022', + messages: [{ role: 'user', content: 'Hello!' }], + }; + + const result = await adapter.translateRequest(internalRequest); + + expect(result.max_tokens).toBe(4096); + }); + + test('uses default model if not specified', async () => { + const internalRequest = { + messages: [{ role: 'user', content: 'Hello!' }], + }; + + const result = await adapter.translateRequest(internalRequest); + + expect(result.model).toBe('claude-3-5-sonnet-20241022'); + }); + + test('throws error if no model and no default', async () => { + const adapterNoDefault = new MessagesAdapter({ + config: {}, + settings: {}, + getDefaultModel: () => undefined, + }); + + const internalRequest = { + messages: [{ role: 'user', content: 'Hello!' 
}], + }; + + await expect(adapterNoDefault.translateRequest(internalRequest)).rejects.toThrow( + 'Anthropic provider requires a model' + ); + }); + + test('throws error if no non-system messages', async () => { + const internalRequest = { + model: 'claude-3-5-sonnet-20241022', + messages: [{ role: 'system', content: 'System only' }], + }; + + await expect(adapter.translateRequest(internalRequest)).rejects.toThrow( + 'Anthropic provider requires at least one non-system message' + ); + }); + }); + + describe('translateResponse', () => { + test('converts Anthropic response to OpenAI format', async () => { + const anthropicResponse = { + id: 'msg_123', + type: 'message', + role: 'assistant', + model: 'claude-3-5-sonnet-20241022', + content: [{ type: 'text', text: 'Hello! How can I help?' }], + stop_reason: 'end_turn', + usage: { input_tokens: 10, output_tokens: 20 }, + }; + + const result = await adapter.translateResponse(anthropicResponse); + + expect(result.id).toBe('msg_123'); + expect(result.object).toBe('chat.completion'); + expect(result.model).toBe('claude-3-5-sonnet-20241022'); + expect(result.choices).toHaveLength(1); + expect(result.choices[0].message.role).toBe('assistant'); + expect(result.choices[0].message.content).toBe('Hello! How can I help?'); + expect(result.choices[0].finish_reason).toBe('stop'); + expect(result.usage).toEqual({ + prompt_tokens: 10, + completion_tokens: 20, + total_tokens: 30, + }); + }); + + test('converts tool use in response', async () => { + const anthropicResponse = { + id: 'msg_123', + type: 'message', + role: 'assistant', + model: 'claude-3-5-sonnet-20241022', + content: [ + { type: 'text', text: 'Let me check that.' 
}, + { + type: 'tool_use', + id: 'tool_123', + name: 'get_weather', + input: { location: 'San Francisco' }, + }, + ], + stop_reason: 'tool_use', + usage: { input_tokens: 10, output_tokens: 20 }, + }; + + const result = await adapter.translateResponse(anthropicResponse); + + expect(result.choices[0].message.content).toBe('Let me check that.'); + expect(result.choices[0].message.tool_calls).toHaveLength(1); + expect(result.choices[0].message.tool_calls[0]).toEqual({ + id: 'tool_123', + type: 'function', + function: { + name: 'get_weather', + arguments: '{"location":"San Francisco"}', + }, + index: 1, + }); + }); + }); + + describe('translateStreamChunk', () => { + test('converts message_start event', () => { + const event = { + type: 'message_start', + message: { + id: 'msg_123', + model: 'claude-3-5-sonnet-20241022', + }, + }; + + const result = adapter.translateStreamChunk(event); + + expect(result.id).toBe('msg_123'); + expect(result.object).toBe('chat.completion.chunk'); + expect(result.model).toBe('claude-3-5-sonnet-20241022'); + expect(result.choices[0].delta.role).toBe('assistant'); + expect(result.choices[0].delta.content).toBe(''); + }); + + test('converts content_block_delta with text', () => { + const event = { + type: 'content_block_delta', + index: 0, + delta: { type: 'text_delta', text: 'Hello' }, + }; + + const result = adapter.translateStreamChunk(event); + + expect(result.choices[0].delta.content).toBe('Hello'); + }); + + test('converts message_stop event', () => { + const event = { type: 'message_stop' }; + + const result = adapter.translateStreamChunk(event); + + expect(result).toBe('[DONE]'); + }); + + test('returns null for ping events', () => { + const event = { type: 'ping' }; + + const result = adapter.translateStreamChunk(event); + + expect(result).toBeNull(); + }); + }); +}); diff --git a/backend/__tests__/providers.test.js b/backend/__tests__/providers.test.js index 5410653d..a8c84831 100644 --- a/backend/__tests__/providers.test.js +++ 
b/backend/__tests__/providers.test.js @@ -6,6 +6,7 @@ import { jest } from '@jest/globals'; import { randomUUID } from 'crypto'; import { config } from '../src/env.js'; import { getDb, resetDbCache } from '../src/db/index.js'; +import { createProvider } from '../src/lib/providers/index.js'; import { generateAccessToken } from '../src/middleware/auth.js'; import { safeTestSetup } from '../test_support/databaseSafety.js'; @@ -15,10 +16,12 @@ const insertTestUser = () => { const now = new Date().toISOString(); const email = `provider-test-${now}@example.com`; - db.prepare(` + db.prepare( + ` INSERT INTO users (id, email, password_hash, display_name, created_at, updated_at, email_verified, last_login_at, deleted_at) VALUES (@id, @email, 'test-hash', 'Provider Tester', @now, @now, 1, NULL, NULL) - `).run({ id, email, now }); + ` + ).run({ id, email, now }); return { id, email, displayName: 'Provider Tester' }; }; @@ -87,16 +90,14 @@ describe('Providers CRUD', () => { assert.ok(Array.isArray(body.providers)); // Create provider - res = await agent - .post('/v1/providers') - .send({ - id: 'p1', - name: 'local', - provider_type: 'openai', - base_url: 'http://example.com', - api_key: 'test', - enabled: true, - }); + res = await agent.post('/v1/providers').send({ + id: 'p1', + name: 'local', + provider_type: 'openai', + base_url: 'http://example.com', + api_key: 'test', + enabled: true, + }); assert.equal(res.status, 201); body = res.body; assert.equal(body.id, 'p1'); @@ -151,8 +152,10 @@ describe('Providers connectivity', () => { // Seed provider const db = getDb(); db.exec('DELETE FROM providers;'); - db.prepare(`INSERT INTO providers (id, user_id, name, provider_type, api_key, base_url, enabled, is_default, extra_headers, metadata, created_at, updated_at) - VALUES ('p2', @user_id, 'p2','openai','k','http://mock',1,1,'{}','{}',datetime('now'),datetime('now'))`).run({ user_id: testUser.id }); + db.prepare( + `INSERT INTO providers (id, user_id, name, provider_type, api_key, 
base_url, enabled, is_default, extra_headers, metadata, created_at, updated_at) + VALUES ('p2', @user_id, 'p2','openai','k','http://mock',1,1,'{}','{}',datetime('now'),datetime('now'))` + ).run({ user_id: testUser.id }); const res = await agent.get('/v1/providers/p2/models'); assert.equal(res.status, 200); @@ -195,8 +198,10 @@ describe('Providers connectivity', () => { // Seed provider with key const db = getDb(); db.exec('DELETE FROM providers;'); - db.prepare(`INSERT INTO providers (id, user_id, name, provider_type, api_key, base_url, enabled, is_default, extra_headers, metadata, created_at, updated_at) - VALUES ('p4', @user_id, 'p4','openai','key123','http://mock',1,1,'{}','{}',datetime('now'),datetime('now'))`).run({ user_id: testUser.id }); + db.prepare( + `INSERT INTO providers (id, user_id, name, provider_type, api_key, base_url, enabled, is_default, extra_headers, metadata, created_at, updated_at) + VALUES ('p4', @user_id, 'p4','openai','key123','http://mock',1,1,'{}','{}',datetime('now'),datetime('now'))` + ).run({ user_id: testUser.id }); const res = await agent.post('/v1/providers/p4/test').send({ base_url: 'http://mock' }); if (res.status !== 200) { @@ -223,8 +228,10 @@ describe('Providers connectivity', () => { // Seed provider const db = getDb(); db.exec('DELETE FROM providers;'); - db.prepare(`INSERT INTO providers (id, user_id, name, provider_type, api_key, base_url, enabled, is_default, extra_headers, metadata, created_at, updated_at) - VALUES ('p5', @user_id, 'p5','openai','bad-key','http://mock',1,1,'{}','{}',datetime('now'),datetime('now'))`).run({ user_id: testUser.id }); + db.prepare( + `INSERT INTO providers (id, user_id, name, provider_type, api_key, base_url, enabled, is_default, extra_headers, metadata, created_at, updated_at) + VALUES ('p5', @user_id, 'p5','openai','bad-key','http://mock',1,1,'{}','{}',datetime('now'),datetime('now'))` + ).run({ user_id: testUser.id }); const res = await agent.get('/v1/providers/p5/models'); 
assert.equal(res.status, 502); @@ -246,8 +253,10 @@ describe('Providers connectivity', () => { // Seed provider const db = getDb(); db.exec('DELETE FROM providers;'); - db.prepare(`INSERT INTO providers (id, user_id, name, provider_type, api_key, base_url, enabled, is_default, extra_headers, metadata, created_at, updated_at) - VALUES ('p6', @user_id, 'p6','openai','key','http://unreachable',1,1,'{}','{}',datetime('now'),datetime('now'))`).run({ user_id: testUser.id }); + db.prepare( + `INSERT INTO providers (id, user_id, name, provider_type, api_key, base_url, enabled, is_default, extra_headers, metadata, created_at, updated_at) + VALUES ('p6', @user_id, 'p6','openai','key','http://unreachable',1,1,'{}','{}',datetime('now'),datetime('now'))` + ).run({ user_id: testUser.id }); const res = await agent.get('/v1/providers/p6/models'); assert.equal(res.status, 502); @@ -259,18 +268,18 @@ describe('Providers connectivity', () => { test('GET /v1/providers/:id/models filters OpenRouter models by creation date', async () => { // Create mock models with different creation dates const now = Math.floor(Date.now() / 1000); - const oneYearAgo = now - (365 * 24 * 60 * 60); - const twoYearsAgo = now - (2 * 365 * 24 * 60 * 60); + const oneYearAgo = now - 365 * 24 * 60 * 60; + const twoYearsAgo = now - 2 * 365 * 24 * 60 * 60; const mockHttp = jest.fn().mockResolvedValueOnce({ ok: true, json: async () => ({ data: [ - { id: 'model-recent', created: now - (30 * 24 * 60 * 60) }, // 30 days old + { id: 'model-recent', created: now - 30 * 24 * 60 * 60 }, // 30 days old { id: 'model-old', created: twoYearsAgo }, // 2 years old (should be filtered) { id: 'model-edge', created: oneYearAgo + 100 }, // Just under 1 year (should be included) { id: 'model-no-date' }, // No created field (should be included) - ] + ], }), }); @@ -281,8 +290,10 @@ describe('Providers connectivity', () => { // Seed OpenRouter provider const db = getDb(); db.exec('DELETE FROM providers;'); - db.prepare(`INSERT INTO 
providers (id, user_id, name, provider_type, api_key, base_url, enabled, is_default, extra_headers, metadata, created_at, updated_at) - VALUES ('openrouter', @user_id, 'OpenRouter','openai','key123','https://openrouter.ai/api',1,1,'{}','{}',datetime('now'),datetime('now'))`).run({ user_id: testUser.id }); + db.prepare( + `INSERT INTO providers (id, user_id, name, provider_type, api_key, base_url, enabled, is_default, extra_headers, metadata, created_at, updated_at) + VALUES ('openrouter', @user_id, 'OpenRouter','openai','key123','https://openrouter.ai/api',1,1,'{}','{}',datetime('now'),datetime('now'))` + ).run({ user_id: testUser.id }); const res = await agent.get('/v1/providers/openrouter/models'); assert.equal(res.status, 200); @@ -299,7 +310,7 @@ describe('Providers connectivity', () => { test('GET /v1/providers/:id/models does not filter non-OpenRouter providers', async () => { const now = Math.floor(Date.now() / 1000); - const twoYearsAgo = now - (2 * 365 * 24 * 60 * 60); + const twoYearsAgo = now - 2 * 365 * 24 * 60 * 60; const mockHttp = jest.fn().mockResolvedValueOnce({ ok: true, @@ -307,7 +318,7 @@ describe('Providers connectivity', () => { data: [ { id: 'model-recent', created: now }, { id: 'model-old', created: twoYearsAgo }, // Should NOT be filtered for non-OpenRouter - ] + ], }), }); @@ -318,8 +329,10 @@ describe('Providers connectivity', () => { // Seed regular OpenAI provider const db = getDb(); db.exec('DELETE FROM providers;'); - db.prepare(`INSERT INTO providers (id, user_id, name, provider_type, api_key, base_url, enabled, is_default, extra_headers, metadata, created_at, updated_at) - VALUES ('openai', @user_id, 'OpenAI','openai','key123','https://api.openai.com',1,1,'{}','{}',datetime('now'),datetime('now'))`).run({ user_id: testUser.id }); + db.prepare( + `INSERT INTO providers (id, user_id, name, provider_type, api_key, base_url, enabled, is_default, extra_headers, metadata, created_at, updated_at) + VALUES ('openai', @user_id, 
'OpenAI','openai','key123','https://api.openai.com',1,1,'{}','{}',datetime('now'),datetime('now'))` + ).run({ user_id: testUser.id }); const res = await agent.get('/v1/providers/openai/models'); assert.equal(res.status, 200); @@ -331,4 +344,59 @@ describe('Providers connectivity', () => { assert.ok(body.models.some((m) => m.id === 'model-recent')); assert.ok(body.models.some((m) => m.id === 'model-old')); }); + + test('GET /v1/providers/:id/models uses default Anthropic base URL when not provided', async () => { + const mockHttp = jest.fn().mockResolvedValueOnce({ + ok: true, + json: async () => ({ data: [{ id: 'claude-3-5-sonnet' }] }), + }); + + const { createProvidersRouter } = await import('../src/routes/providers.js'); + const app = makeApp(createProvidersRouter({ http: mockHttp })); + const agent = request(app); + + // Seed Anthropic provider without base_url + const db = getDb(); + db.exec('DELETE FROM providers;'); + db.prepare( + `INSERT INTO providers (id, user_id, name, provider_type, api_key, base_url, enabled, is_default, extra_headers, metadata, created_at, updated_at) + VALUES ('anthropic-provider', @user_id, 'Anthropic','anthropic','sk-ant-123',NULL,1,1,'{}','{}',datetime('now'),datetime('now'))` + ).run({ user_id: testUser.id }); + + const res = await agent.get('/v1/providers/anthropic-provider/models'); + assert.equal(res.status, 200); + + // Verify the mock was called with the correct Anthropic base URL and auth header + assert.equal(mockHttp.mock.calls.length, 1); + const [url, options] = mockHttp.mock.calls[0]; + assert.ok(url.includes('https://api.anthropic.com'), `Expected Anthropic base URL, got: ${url}`); + assert.equal(options.headers['x-api-key'], 'sk-ant-123', 'Expected x-api-key header for Anthropic'); + assert.ok(!options.headers.Authorization, 'Should not use Authorization header for Anthropic'); + }); +}); + +describe('Anthropic provider base URL resolution', () => { + test('falls back to Anthropic default when base_url is null', 
async () => { + const db = getDb(); + db.exec('DELETE FROM providers;'); + db.prepare( + `INSERT INTO providers (id, user_id, name, provider_type, api_key, base_url, enabled, is_default, extra_headers, metadata, created_at, updated_at) + VALUES ('anth-null', @user_id, 'Anth Default','anthropic','sk-ant-123',NULL,1,1,'{}','{}',datetime('now'),datetime('now'))` + ).run({ user_id: testUser.id }); + + const provider = await createProvider(config, { providerId: 'anth-null' }); + assert.equal(provider.baseUrl, 'https://api.anthropic.com'); + }); + + test('ignores persisted OpenAI base URLs for Anthropic providers', async () => { + const db = getDb(); + db.exec('DELETE FROM providers;'); + db.prepare( + `INSERT INTO providers (id, user_id, name, provider_type, api_key, base_url, enabled, is_default, extra_headers, metadata, created_at, updated_at) + VALUES ('anth-openai', @user_id, 'Anth With OpenAI','anthropic','sk-ant-123','https://api.openai.com/v1',1,1,'{}','{}',datetime('now'),datetime('now'))` + ).run({ user_id: testUser.id }); + + const provider = await createProvider(config, { providerId: 'anth-openai' }); + assert.equal(provider.baseUrl, 'https://api.anthropic.com'); + }); }); diff --git a/backend/src/db/providers.js b/backend/src/db/providers.js index 8efe52b2..20abb7d7 100644 --- a/backend/src/db/providers.js +++ b/backend/src/db/providers.js @@ -306,7 +306,17 @@ export function createDefaultProviders(userId) { const provider = createProvider(providerConfig); createdProviders.push(provider); } catch (error) { - logger.warn(`Failed to create default provider ${providerConfig.name} for user ${userId}:`, error.message); + logger.warn( + { + err: error, + userId, + providerId: providerConfig.id, + providerName: providerConfig.name, + providerType: providerConfig.provider_type, + baseUrl: providerConfig.base_url, + }, + '[providers#createDefaultProviders] Failed to create default provider' + ); // Continue with other providers even if one fails } } diff --git 
a/backend/src/db/seeders/000-env-provider.js b/backend/src/db/seeders/000-env-provider.js index a76588cd..80dd543a 100644 --- a/backend/src/db/seeders/000-env-provider.js +++ b/backend/src/db/seeders/000-env-provider.js @@ -16,8 +16,14 @@ export default function seedProviderFromEnv(db, options = {}) { if (existing > 0) return; const providerType = (config.provider || 'openai').toLowerCase(); - const baseUrl = config?.providerConfig?.baseUrl || config?.openaiBaseUrl || null; - const apiKey = config?.providerConfig?.apiKey || config?.openaiApiKey || null; + const defaultBaseUrl = + providerType === 'anthropic' ? config?.anthropicBaseUrl || 'https://api.anthropic.com' : config?.openaiBaseUrl; + const resolvedBaseUrl = config?.providerConfig?.baseUrl || defaultBaseUrl || null; + const baseUrl = resolvedBaseUrl ? String(resolvedBaseUrl).replace(/\/$/, '').replace(/\/v1$/, '') : null; + const apiKey = + providerType === 'anthropic' + ? config?.anthropicApiKey || config?.providerConfig?.apiKey || config?.openaiApiKey || null + : config?.providerConfig?.apiKey || config?.openaiApiKey || null; const headersObj = config?.providerConfig?.headers || {}; if (!apiKey && !baseUrl) return; diff --git a/backend/src/db/users.js b/backend/src/db/users.js index 15cd8810..070c1bcd 100644 --- a/backend/src/db/users.js +++ b/backend/src/db/users.js @@ -38,7 +38,14 @@ export function createUser({ email, passwordHash, displayName }) { try { createDefaultProviders(id); } catch (error) { - logger.warn(`Failed to create default providers for user ${id}:`, error.message); + logger.warn( + { + err: error, + userId: id, + email, + }, + '[users#createUser] Failed to create default providers' + ); // Don't fail user creation if provider creation fails } diff --git a/backend/src/env.js b/backend/src/env.js index aedca7cc..c34be5b4 100644 --- a/backend/src/env.js +++ b/backend/src/env.js @@ -34,6 +34,9 @@ export const config = { // Backward-compat: legacy OpenAI fields still present openaiBaseUrl: 
process.env.OPENAI_BASE_URL || 'https://api.openai.com/v1', openaiApiKey: process.env.OPENAI_API_KEY, + // Anthropic provider overrides + anthropicBaseUrl: process.env.ANTHROPIC_BASE_URL || 'https://api.anthropic.com', + anthropicApiKey: process.env.ANTHROPIC_API_KEY, // Generic provider config; falls back to OpenAI values providerConfig: { baseUrl: process.env.PROVIDER_BASE_URL || process.env.OPENAI_BASE_URL || 'https://api.openai.com/v1', @@ -62,10 +65,8 @@ export const config = { persistence: { enabled: bool(process.env.PERSIST_TRANSCRIPTS, true), dbUrl: process.env.DB_URL || '', - maxConversationsPerSession: - Number(process.env.MAX_CONVERSATIONS_PER_SESSION) || 100, - maxMessagesPerConversation: - Number(process.env.MAX_MESSAGES_PER_CONVERSATION) || 1000, + maxConversationsPerSession: Number(process.env.MAX_CONVERSATIONS_PER_SESSION) || 100, + maxMessagesPerConversation: Number(process.env.MAX_MESSAGES_PER_CONVERSATION) || 1000, historyBatchFlushMs: Number(process.env.HISTORY_BATCH_FLUSH_MS) || 250, retentionDays: Number(process.env.RETENTION_DAYS) || 30, }, diff --git a/backend/src/lib/adapters/messagesAdapter.js b/backend/src/lib/adapters/messagesAdapter.js new file mode 100644 index 00000000..3343444f --- /dev/null +++ b/backend/src/lib/adapters/messagesAdapter.js @@ -0,0 +1,561 @@ +import { BaseAdapter } from './baseAdapter.js'; +import { convertContentPartImage } from '../localImageEncoder.js'; + +const ANTHROPIC_ALLOWED_REQUEST_KEYS = new Set([ + 'max_tokens', + 'metadata', + 'stop_sequences', + 'stream', + 'temperature', + 'tool_choice', + 'top_k', + 'top_p', +]); + +const RESERVED_INTERNAL_KEYS = new Set([ + 'conversation_id', + 'provider_id', + 'provider', + 'streamingEnabled', + 'toolsEnabled', + 'qualityLevel', + 'researchMode', + 'systemPrompt', + 'system_prompt', + 'previous_response_id', +]); + +/** + * Convert OpenAI-style message format to Anthropic Messages API format + */ +async function normalizeMessageForAnthropic(message) { + if (!message 
|| typeof message !== 'object') return null; + + const role = typeof message.role === 'string' ? message.role : undefined; + if (!role) return null; + + // Anthropic doesn't support 'system' role in messages array + // System messages should be extracted to top-level system parameter + if (role === 'system') return null; + + const normalized = { role: role === 'assistant' ? 'assistant' : 'user' }; + + if ('content' in message) { + const content = message.content; + if (Array.isArray(content)) { + // Convert multimodal content + const convertedParts = await Promise.all(content.map(async (part) => { + if (typeof part === 'string') { + return { type: 'text', text: part }; + } + if (part?.type === 'text') { + return { type: 'text', text: part.text }; + } + if (part?.type === 'image_url') { + // Convert OpenAI image format to Anthropic format + const converted = await convertContentPartImage(part); + if (converted?.type === 'image_url') { + const url = converted.image_url?.url || converted.image_url; + // Extract base64 data if present + if (typeof url === 'string' && url.startsWith('data:image/')) { + const matches = url.match(/^data:image\/(\w+);base64,(.+)$/); + if (matches) { + const [, mediaType, data] = matches; + return { + type: 'image', + source: { + type: 'base64', + media_type: `image/${mediaType}`, + data, + }, + }; + } + } + } + return null; + } + if (part?.type === 'tool_result') { + // Anthropic tool result format + return part; + } + if (part?.type === 'tool_use') { + // Anthropic tool use format + return part; + } + return null; + })); + normalized.content = convertedParts.filter(Boolean); + } else if (typeof content === 'string') { + normalized.content = content; + } else if (content === null) { + normalized.content = ''; + } + } + + // Handle tool calls from OpenAI format + if (Array.isArray(message.tool_calls) && message.tool_calls.length > 0) { + const toolUseBlocks = message.tool_calls.map((toolCall) => { + const fn = toolCall.function || {}; + let 
input = {}; + try { + input = typeof fn.arguments === 'string' ? JSON.parse(fn.arguments) : fn.arguments || {}; + } catch { + input = {}; + } + return { + type: 'tool_use', + id: toolCall.id || `tool_${Date.now()}`, + name: fn.name, + input, + }; + }); + + // If content is empty, initialize it as array + if (!normalized.content) { + normalized.content = toolUseBlocks; + } else if (Array.isArray(normalized.content)) { + normalized.content.push(...toolUseBlocks); + } else { + // Convert string content to array and add tool use blocks + normalized.content = [ + { type: 'text', text: normalized.content }, + ...toolUseBlocks, + ]; + } + } + + // Handle tool results from OpenAI format (tool role) + if (role === 'tool' && message.tool_call_id && message.content) { + return { + role: 'user', + content: [ + { + type: 'tool_result', + tool_use_id: message.tool_call_id, + content: message.content, + }, + ], + }; + } + + // Preserve cache_control for prompt caching + if (message.cache_control && typeof message.cache_control === 'object') { + normalized.cache_control = message.cache_control; + } + + return normalized; +} + +async function normalizeMessagesForAnthropic(messages) { + if (!Array.isArray(messages)) return { system: undefined, messages: [] }; + + // Extract system messages + const systemMessages = messages.filter((m) => m.role === 'system'); + const nonSystemMessages = messages.filter((m) => m.role !== 'system'); + + // Combine system messages into a single system prompt + const system = systemMessages.length > 0 + ? 
systemMessages.map((m) => m.content).filter(Boolean).join('\n\n') + : undefined; + + const normalized = await Promise.all( + nonSystemMessages.map((message) => normalizeMessageForAnthropic(message)) + ); + + return { + system, + messages: normalized.filter(Boolean), + }; +} + +/** + * Convert OpenAI-style tool spec to Anthropic format + */ +function normalizeToolForAnthropic(tool) { + if (!tool) return null; + if (typeof tool === 'string') { + return { + name: tool, + description: '', + input_schema: { type: 'object', properties: {} }, + }; + } + if (typeof tool !== 'object') return null; + + const fn = tool.function || tool; + if (!fn.name) return null; + + return { + name: fn.name, + description: fn.description || '', + input_schema: fn.parameters || fn.input_schema || { type: 'object', properties: {} }, + }; +} + +function normalizeToolsForAnthropic(tools) { + if (!Array.isArray(tools)) return undefined; + const normalized = tools.map(normalizeToolForAnthropic).filter(Boolean); + return normalized.length > 0 ? 
// [patch residue — tail of a definition cut off at the chunk boundary] normalized : undefined; +}

/**
 * Strip application-internal keys from a request payload before it is
 * forwarded upstream. Returns a shallow copy; the input is never mutated.
 */
function omitReservedKeys(payload) {
  if (!payload || typeof payload !== 'object') return {};
  const result = {};
  for (const [key, value] of Object.entries(payload)) {
    if (RESERVED_INTERNAL_KEYS.has(key)) continue;
    result[key] = value;
  }
  return result;
}

/** Current time as a Unix timestamp in seconds (OpenAI `created` convention). */
function unixSeconds() {
  return Math.floor(Date.now() / 1000);
}

/**
 * Build a minimal OpenAI-style streaming chunk with a single choice.
 * Streamed responses here always carry exactly one choice, so the choice
 * index is always 0; Anthropic's `event.index` is a content-block index,
 * not a choice index, and must not be used here (consumers read choices[0]).
 */
function streamChunk(model, delta, finishReason = null) {
  return {
    id: `chatcmpl-${Date.now()}`,
    object: 'chat.completion.chunk',
    created: unixSeconds(), // FIX: OpenAI `created` is Unix seconds, not ms
    model,
    choices: [{ index: 0, delta, finish_reason: finishReason }],
  };
}

/**
 * Adapter for the Anthropic Messages API: translates OpenAI-style chat
 * completion requests, responses, and SSE stream events to and from
 * Anthropic's wire format.
 */
export class MessagesAdapter extends BaseAdapter {
  constructor(options = {}) {
    super(options);
    // Fallback used when neither the request nor the context names a model.
    this.getDefaultModel = options.getDefaultModel || (() => undefined);
  }

  /**
   * Convert an internal (OpenAI-shaped) request into an Anthropic Messages
   * payload.
   * @throws {Error} when no model can be resolved, or when no non-system
   *   message remains after normalization.
   */
  async translateRequest(internalRequest = {}, context = {}) {
    const payload = omitReservedKeys(internalRequest);

    const resolveDefaultModel = context.getDefaultModel || this.getDefaultModel;
    const model = payload.model || resolveDefaultModel();
    if (!model) {
      throw new Error('Anthropic provider requires a model');
    }

    // System messages are hoisted into Anthropic's dedicated `system` field.
    const { system, messages } = await normalizeMessagesForAnthropic(payload.messages);
    if (messages.length === 0) {
      throw new Error('Anthropic provider requires at least one non-system message');
    }

    const normalized = { model, messages };
    if (system) {
      normalized.system = system;
    }

    // Anthropic requires max_tokens; fall back to a sensible default.
    normalized.max_tokens = payload.max_tokens || 4096;

    if ('stream' in payload) {
      normalized.stream = Boolean(payload.stream);
    }

    const tools = normalizeToolsForAnthropic(payload.tools);
    if (tools) {
      normalized.tools = tools;
      if (payload.tool_choice !== undefined) {
        // Map OpenAI tool_choice values onto Anthropic's equivalents.
        if (typeof payload.tool_choice === 'string') {
          if (payload.tool_choice === 'auto') {
            normalized.tool_choice = { type: 'auto' };
          } else if (payload.tool_choice === 'required') {
            normalized.tool_choice = { type: 'any' };
          } else if (payload.tool_choice === 'none') {
            // FIX: 'none' was previously dropped silently; Anthropic accepts
            // {type:'none'} — confirm against the Messages API reference.
            normalized.tool_choice = { type: 'none' };
          }
        } else if (payload.tool_choice?.type === 'function') {
          normalized.tool_choice = {
            type: 'tool',
            name: payload.tool_choice.function?.name,
          };
        }
      }
    }

    // Copy the remaining allow-listed parameters verbatim.
    for (const [key, value] of Object.entries(payload)) {
      if (value === undefined) continue;
      if (
        key === 'messages' || key === 'model' || key === 'tools'
        || key === 'tool_choice' || key === 'stream' || key === 'max_tokens'
      ) {
        continue;
      }
      if (ANTHROPIC_ALLOWED_REQUEST_KEYS.has(key)) {
        normalized[key] = value;
      }
    }

    return normalized;
  }

  /**
   * Convert an Anthropic response (parsed object, JSON string, or a fetch
   * Response) into an OpenAI chat.completion object. Unparseable or
   * already-consumed input is returned untouched.
   */
  async translateResponse(providerResponse, _context = {}) {
    if (!providerResponse) return providerResponse;

    if (typeof providerResponse === 'string') {
      try {
        return this.convertAnthropicToOpenAI(JSON.parse(providerResponse));
      } catch {
        return providerResponse;
      }
    }

    // fetch-style Response: read its JSON body once, then convert.
    if (typeof providerResponse === 'object' && typeof providerResponse.json === 'function') {
      try {
        if (providerResponse.bodyUsed) {
          // The body was consumed elsewhere and cannot be re-read here.
          return providerResponse;
        }
        const parsed = await providerResponse.json();
        return this.convertAnthropicToOpenAI(parsed);
      } catch {
        return providerResponse;
      }
    }

    if (typeof providerResponse === 'object') {
      return this.convertAnthropicToOpenAI(providerResponse);
    }
    return providerResponse;
  }

  /**
   * Translate one raw SSE chunk (string or parsed object) into an OpenAI
   * streaming chunk; returns null for blanks and unparseable strings.
   */
  translateStreamChunk(chunk, _context = {}) {
    if (!chunk) return null;
    if (typeof chunk === 'string') {
      const trimmed = chunk.trim();
      if (!trimmed) return null;
      try {
        return this.convertAnthropicStreamToOpenAI(JSON.parse(trimmed));
      } catch {
        return null;
      }
    }
    if (typeof chunk === 'object') {
      return this.convertAnthropicStreamToOpenAI(chunk);
    }
    return chunk;
  }

  /**
   * Map an Anthropic stop_reason to the OpenAI finish_reason vocabulary.
   * Unknown values pass through unchanged (null when absent).
   */
  mapStopReason(stopReason) {
    switch (stopReason) {
      case 'end_turn':
      case 'stop_sequence':
        return 'stop';
      case 'max_tokens':
        return 'length';
      case 'tool_use':
        return 'tool_calls';
      default:
        return stopReason || null;
    }
  }

  /**
   * Convert a complete (non-streaming) Anthropic response into an OpenAI
   * chat.completion object. Error payloads pass through unchanged.
   */
  convertAnthropicToOpenAI(anthropicResponse) {
    if (!anthropicResponse || typeof anthropicResponse !== 'object') {
      return anthropicResponse;
    }
    if (anthropicResponse.type === 'error') {
      return anthropicResponse;
    }

    const openAIResponse = {
      id: anthropicResponse.id || `chatcmpl-${Date.now()}`,
      object: 'chat.completion',
      created: unixSeconds(), // FIX: was Date.now() (milliseconds)
      model: anthropicResponse.model,
      choices: [],
    };

    // Fold text blocks into one string; collect tool_use blocks separately.
    const textParts = [];
    const toolCalls = [];
    if (Array.isArray(anthropicResponse.content)) {
      for (const block of anthropicResponse.content) {
        if (block.type === 'text') {
          textParts.push(block.text);
        } else if (block.type === 'tool_use') {
          toolCalls.push({
            id: block.id,
            type: 'function',
            function: {
              name: block.name,
              arguments: JSON.stringify(block.input),
            },
            // FIX: index must count tool calls, not content blocks — a text
            // block before the first tool_use would otherwise shift indexes.
            index: toolCalls.length,
          });
        }
      }
    }

    const message = {
      role: 'assistant',
      content: textParts.length > 0 ? textParts.join('') : null,
    };
    if (toolCalls.length > 0) {
      message.tool_calls = toolCalls;
    }

    openAIResponse.choices.push({
      index: 0,
      message,
      finish_reason: this.mapStopReason(anthropicResponse.stop_reason),
    });

    if (anthropicResponse.usage) {
      const inputTokens = anthropicResponse.usage.input_tokens || 0;
      const outputTokens = anthropicResponse.usage.output_tokens || 0;
      openAIResponse.usage = {
        prompt_tokens: inputTokens,
        completion_tokens: outputTokens,
        total_tokens: inputTokens + outputTokens,
      };
    }

    return openAIResponse;
  }

  /**
   * Convert one Anthropic SSE event into an OpenAI chat.completion.chunk,
   * the literal '[DONE]' for message_stop, or null for events with no
   * OpenAI equivalent (ping, content_block_stop, unknown types).
   */
  convertAnthropicStreamToOpenAI(event) {
    if (!event || typeof event !== 'object') {
      return null;
    }

    switch (event.type) {
      case 'message_start':
        return {
          id: event.message?.id || `chatcmpl-${Date.now()}`,
          object: 'chat.completion.chunk',
          created: unixSeconds(), // FIX: seconds, not ms
          model: event.message?.model,
          choices: [
            { index: 0, delta: { role: 'assistant', content: '' }, finish_reason: null },
          ],
        };

      case 'content_block_start':
        if (event.content_block?.type !== 'tool_use') return null;
        return streamChunk(event.model, {
          tool_calls: [
            {
              // Anthropic's content-block index identifies the tool call…
              index: event.index || 0,
              id: event.content_block.id,
              type: 'function',
              function: { name: event.content_block.name, arguments: '' },
            },
          ],
        });

      case 'content_block_delta':
        if (event.delta?.type === 'text_delta') {
          // FIX: choice index is now always 0 (event.index is a block index).
          return streamChunk(event.model, { content: event.delta.text });
        }
        if (event.delta?.type === 'input_json_delta') {
          return streamChunk(event.model, {
            tool_calls: [
              {
                index: event.index || 0,
                function: { arguments: event.delta.partial_json },
              },
            ],
          });
        }
        return null;

      case 'message_delta': {
        const chunk = streamChunk(event.model, {}, this.mapStopReason(event.delta?.stop_reason));
        if (event.usage) {
          // FIX: was hard-coded prompt_tokens: 0; message_delta usage can
          // carry input_tokens as well as output_tokens.
          const inputTokens = event.usage.input_tokens || 0;
          const outputTokens = event.usage.output_tokens || 0;
          chunk.usage = {
            prompt_tokens: inputTokens,
            completion_tokens: outputTokens,
            total_tokens: inputTokens + outputTokens,
          };
        }
        return chunk;
      }

      case 'message_stop':
        return '[DONE]';

      case 'content_block_stop': // no equivalent in OpenAI streaming
      case 'ping': // keep-alive only
      default:
        return null;
    }
  }
}

// [patch residue — next file's diff begins here] diff --git a/backend/src/lib/openaiProxy.js b/backend/src/lib/openaiProxy.js index 8bac98a7..51d469d4 100644 --- a/backend/src/lib/openaiProxy.js +++ b/backend/src/lib/openaiProxy.js @@ -396,7 +396,7 @@ async function handleRequest(context, req, res) { // Persist the response if (persistence.persist && upstreamJson.choices?.[0]?.message) { - const message = upstreamJson.choices[0].message; + const message = upstreamJson.choices?.[0]?.message; if (message.content !== undefined) { const safeContent = sanitizeContent(message.content); persistence.setAssistantContent(safeContent); @@ -416,7 +416,7 @@ async function handleRequest(context, req, res) { persistence.addToolCalls(toolCallsWithOffset); } - const finishReason = upstreamJson.choices[0].finish_reason || null; + const finishReason = upstreamJson.choices?.[0]?.finish_reason || null;
const responseId = upstreamJson.id || null; const reasoningTokens = upstreamJson?.usage?.reasoning_tokens @@ -441,7 +441,7 @@ async function handleRequest(context, req, res) { } // Convert to streaming chunks - const message = upstreamJson.choices[0]?.message; + const message = upstreamJson.choices?.[0]?.message; if (message) { const { createChatCompletionChunk } = await import('./streamUtils.js'); @@ -475,7 +475,7 @@ async function handleRequest(context, req, res) { upstreamJson.id || 'fallback', upstreamJson.model || body.model, {}, - upstreamJson.choices[0].finish_reason || 'stop' + upstreamJson.choices?.[0]?.finish_reason || 'stop' ); writeAndFlush(res, `data: ${JSON.stringify(finalChunk)}\n\n`); } @@ -514,12 +514,11 @@ async function handleRequest(context, req, res) { // Normal streaming response setupStreamingHeaders(res); - return handleRegularStreaming({ config, upstream, res, req, persistence }); + return handleRegularStreaming({ config, upstream, res, req, persistence, provider }); } else { // JSON response (for backward compatibility and when explicitly requested) try { - const upstreamJson = await upstream.json(); - const responseBody = { ...upstreamJson }; + const responseBody = await upstream.json(); addConversationMetadata(responseBody, persistence); return res.status(200).json(responseBody); } catch (err) { diff --git a/backend/src/lib/providers/anthropicProvider.js b/backend/src/lib/providers/anthropicProvider.js index 3821918c..1bbc654b 100644 --- a/backend/src/lib/providers/anthropicProvider.js +++ b/backend/src/lib/providers/anthropicProvider.js @@ -1,36 +1,238 @@ +import { Readable } from 'node:stream'; +import { logUpstreamRequest, logUpstreamResponse, teeStreamWithPreview } from '../logging/upstreamLogger.js'; import { BaseProvider } from './baseProvider.js'; +import { MessagesAdapter } from '../adapters/messagesAdapter.js'; +import { logger } from '../../logger.js'; + +const FALLBACK_MODEL = 'claude-3-5-sonnet-20241022'; +export const 
ANTHROPIC_API_VERSION = '2023-06-01'; + +function wrapStreamingResponse(response) { + if (!response || !response.body) return response; + // If the body already exposes Node stream semantics, nothing to do. + if (typeof response.body.on === 'function') { + return response; + } + + // Convert WHATWG ReadableStream to Node.js Readable to satisfy existing consumers. + const canConvert = typeof Readable?.fromWeb === 'function' && typeof response.body.getReader === 'function'; + if (!canConvert) return response; + + let nodeReadable; + return new Proxy(response, { + get(target, prop, receiver) { + if (prop === 'body') { + if (!nodeReadable) { + nodeReadable = Readable.fromWeb(target.body); + } + return nodeReadable; + } + return Reflect.get(target, prop, receiver); + }, + }); +} export class AnthropicProvider extends BaseProvider { - isConfigured() { - // TODO: check for required Anthropic credentials. + createAdapter() { + return new MessagesAdapter({ + config: this.config, + settings: this.settings, + getDefaultModel: () => this.getDefaultModel(), + }); } - normalizeRequest(_internalRequest) { - // TODO: adapt internal request into Anthropic Messages payload. + buildAdapterContext(context = {}) { + return { + getDefaultModel: () => this.getDefaultModel(), + ...context, + }; } - async sendRequest(_normalizedRequest) { - // TODO: issue HTTP request to Anthropic endpoint. + get apiKey() { + return this.settings?.apiKey + || this.config?.providerConfig?.apiKey + || this.config?.anthropicApiKey; } - normalizeResponse(_upstreamResponse) { - // TODO: translate Anthropic response back into internal response format. + get baseUrl() { + const seededDefaultUrl = 'https://api.anthropic.com'; + const dbBaseUrl = this.settings?.baseUrl; + const normalizedDbBase = dbBaseUrl ? String(dbBaseUrl).replace(/\/$/, '').replace(/\/v1$/, '') : ''; + const isMismatchedDbBase = normalizedDbBase && /api\.openai\.com/i.test(normalizedDbBase); + const effectiveDbBase = isMismatchedDbBase ? 
null : normalizedDbBase || null; + + const normalizedAnthropicOverride = this.config?.anthropicBaseUrl + ? String(this.config.anthropicBaseUrl).replace(/\/$/, '').replace(/\/v1$/, '') + : null; + const normalizedProviderOverride = this.config?.providerConfig?.baseUrl + ? String(this.config.providerConfig.baseUrl).replace(/\/$/, '').replace(/\/v1$/, '') + : null; + const providerOverride = + normalizedProviderOverride && /anthropic/i.test(normalizedProviderOverride) ? normalizedProviderOverride : null; + const overrideBaseUrl = normalizedAnthropicOverride || providerOverride; + + const shouldPreferOverride = Boolean(overrideBaseUrl) && (!effectiveDbBase || effectiveDbBase === seededDefaultUrl); + const configuredBase = shouldPreferOverride + ? overrideBaseUrl + : effectiveDbBase || overrideBaseUrl || seededDefaultUrl; + return String(configuredBase).replace(/\/$/, '').replace(/\/v1$/, ''); + } + + get defaultHeaders() { + return { + ...(this.config?.providerConfig?.headers || {}), + ...(this.settings?.headers || {}), + }; } - normalizeStreamChunk(_chunk) { - // TODO: translate Anthropic streaming chunk to internal chunk structure. + get httpClient() { + if (this.http) return this.http; + if (typeof globalThis.fetch === 'function') return globalThis.fetch.bind(globalThis); + return null; } - getToolsetSpec(_toolRegistry) { - // TODO: emit Anthropic tool schema when the provider supports it. + isConfigured() { + return Boolean(this.apiKey || this.defaultHeaders['x-api-key']); + } + + async makeHttpRequest(translatedRequest) { + const client = this.httpClient; + if (!client) { + throw new Error('No HTTP client available for Anthropic provider'); + } + + const endpoint = '/v1/messages'; + const url = `${this.baseUrl}${endpoint}`; + const headers = { + 'Content-Type': 'application/json', + 'anthropic-version': ANTHROPIC_API_VERSION, + ...(translatedRequest?.stream ? 
{ Accept: 'text/event-stream' } : { Accept: 'application/json' }), + ...this.defaultHeaders, + }; + + if (this.apiKey && !headers['x-api-key']) { + headers['x-api-key'] = this.apiKey; + } + + // Log the exact upstream request for debugging + try { + logUpstreamRequest({ url, headers, body: translatedRequest }); + } catch (err) { + logger.error('Failed to log upstream request:', err?.message || err); + } + + const response = await client(url, { + method: 'POST', + headers, + body: JSON.stringify(translatedRequest), + }); + + // Log the upstream response for debugging + try { + const responseHeaders = response.headers && typeof response.headers.entries === 'function' + ? Object.fromEntries(response.headers.entries()) + : {}; + + // Check if response is actually a stream by inspecting content-type + const contentType = response.headers?.get?.('content-type') || ''; + const isActuallyStreaming = contentType.includes('text/event-stream') || contentType.includes('text/plain'); + + if (translatedRequest?.stream && isActuallyStreaming) { + // For streaming responses, tee the stream to capture SSE data + const wrappedResponse = wrapStreamingResponse(response); + const { previewPromise, stream: loggedStream } = teeStreamWithPreview(wrappedResponse.body, { + maxBytes: 128 * 1024, // Capture up to 128KB of SSE data + encoding: 'utf8' + }); + + // Log asynchronously without blocking the response + previewPromise.then((preview) => { + logUpstreamResponse({ + url, + status: response.status, + headers: responseHeaders, + body: preview + }); + }).catch((err) => { + logger.error('Failed to capture streaming response preview:', err?.message || err); + }); + + // Return response with the logged stream + return new Proxy(wrappedResponse, { + get(target, prop, receiver) { + if (prop === 'body') { + return loggedStream; + } + return Reflect.get(target, prop, receiver); + }, + }); + } else { + // For non-streaming responses, capture the body + let responseBody = null; + if 
(response.clone) { + const responseClone = response.clone(); + if (typeof responseClone.text === 'function') { + responseBody = await responseClone.text(); + } + } + logUpstreamResponse({ + url, + status: response.status, + headers: responseHeaders, + body: responseBody + }); + return response; + } + } catch (err) { + // logger should be best-effort; don't let logging break responses + logger.error('Failed to log upstream response:', err?.message || err); + if (translatedRequest?.stream) { + return wrapStreamingResponse(response); + } + return response; + } + } + + getToolsetSpec(toolRegistry) { + if (!toolRegistry) return []; + if (Array.isArray(toolRegistry)) { + // Convert OpenAI tool specs to Anthropic format + return toolRegistry.map((tool) => { + if (typeof tool === 'string') { + return { + name: tool, + description: '', + input_schema: { type: 'object', properties: {} }, + }; + } + const fn = tool.function || tool; + return { + name: fn.name, + description: fn.description || '', + input_schema: fn.parameters || fn.input_schema || { type: 'object', properties: {} }, + }; + }); + } + if (typeof toolRegistry.generateOpenAIToolSpecs === 'function') { + const openAISpecs = toolRegistry.generateOpenAIToolSpecs(); + return this.getToolsetSpec(openAISpecs); + } + if (typeof toolRegistry.generateToolSpecs === 'function') { + const specs = toolRegistry.generateToolSpecs(); + return this.getToolsetSpec(specs); + } + return []; } supportsTools() { - // TODO: report Anthropic tool support by model. + return true; } supportsReasoningControls(_model) { - // TODO: report reasoning control availability for Anthropic. 
+ // Anthropic models like Claude 3.5 Sonnet support extended thinking + // but it uses a different mechanism than OpenAI's reasoning controls + // For now, return false to avoid confusion + return false; } supportsPromptCaching() { @@ -39,7 +241,13 @@ export class AnthropicProvider extends BaseProvider { return true; } + needsStreamingTranslation() { + return true; + } + getDefaultModel() { - // TODO: determine Anthropic default model from configuration. + return this.settings?.defaultModel + || this.config?.defaultModel + || FALLBACK_MODEL; } } diff --git a/backend/src/lib/providers/baseProvider.js b/backend/src/lib/providers/baseProvider.js index 5f70a3a3..9d86e91f 100644 --- a/backend/src/lib/providers/baseProvider.js +++ b/backend/src/lib/providers/baseProvider.js @@ -34,9 +34,9 @@ export class BaseProvider { return await this.getAdapter().translateRequest(internalRequest, adapterContext); } - translateResponse(providerResponse, context = {}) { + async translateResponse(providerResponse, context = {}) { const adapterContext = this.buildAdapterContext(context); - return this.getAdapter().translateResponse(providerResponse, adapterContext); + return await this.getAdapter().translateResponse(providerResponse, adapterContext); } translateStreamChunk(chunk, context = {}) { @@ -48,14 +48,24 @@ export class BaseProvider { const adapterContext = this.buildAdapterContext(context); const translatedRequest = await this.translateRequest(internalRequest, adapterContext); const providerResponse = await this.makeHttpRequest(translatedRequest, adapterContext); - return this.getAdapter().translateResponse(providerResponse, adapterContext); + return await this.getAdapter().translateResponse(providerResponse, adapterContext); + } + + /** + * Execute a request but return the raw upstream response for callers that + * need full Response semantics (status, headers, body stream). 
+ */ + async sendRawRequest(internalRequest = {}, context = {}) { + const adapterContext = this.buildAdapterContext(context); + const translatedRequest = await this.translateRequest(internalRequest, adapterContext); + return await this.makeHttpRequest(translatedRequest, adapterContext); } async streamRequest(internalRequest = {}, context = {}) { const adapterContext = this.buildAdapterContext(context); const translatedRequest = await this.translateRequest(internalRequest, adapterContext); const providerResponse = await this.makeStreamRequest(translatedRequest, adapterContext); - return this.getAdapter().translateResponse(providerResponse, adapterContext); + return await this.getAdapter().translateResponse(providerResponse, adapterContext); } // Subclasses must provide the HTTP invocation used by sendRequest/streamRequest. @@ -72,8 +82,8 @@ export class BaseProvider { return await this.translateRequest(internalRequest, context); } - normalizeResponse(providerResponse, context = {}) { - return this.translateResponse(providerResponse, context); + async normalizeResponse(providerResponse, context = {}) { + return await this.translateResponse(providerResponse, context); } normalizeStreamChunk(chunk, context = {}) { @@ -100,6 +110,10 @@ export class BaseProvider { return false; } + needsStreamingTranslation() { + return false; + } + getDefaultModel() { // TODO: provide the default model identifier for this provider. 
return this.config?.defaultModel; diff --git a/backend/src/lib/providers/index.js b/backend/src/lib/providers/index.js index 05364711..a62f0d1b 100644 --- a/backend/src/lib/providers/index.js +++ b/backend/src/lib/providers/index.js @@ -12,6 +12,28 @@ function parseJSONSafe(value, fallback) { } } +function normalizeBaseUrl(url) { + if (!url) return undefined; + return String(url).trim().replace(/\/$/, '').replace(/\/v1$/, ''); +} + +function getProviderDefaults(providerType, config) { + const type = (providerType || config?.provider || 'openai').toLowerCase(); + switch (type) { + case 'anthropic': + return { + baseUrl: config?.anthropicBaseUrl || 'https://api.anthropic.com', + apiKey: config?.anthropicApiKey, + }; + case 'openai': + default: + return { + baseUrl: config?.openaiBaseUrl || config?.providerConfig?.baseUrl || 'https://api.openai.com/v1', + apiKey: config?.openaiApiKey || config?.providerConfig?.apiKey, + }; + } +} + export async function resolveProviderSettings(config, options = {}) { try { const db = getDb(); @@ -42,14 +64,18 @@ export async function resolveProviderSettings(config, options = {}) { const headers = parseJSONSafe(row.extra_headers, {}); const metadata = parseJSONSafe(row.metadata, {}); const responsesApiEnabled = - typeof metadata?.responses_api_enabled === 'boolean' - ? metadata.responses_api_enabled - : undefined; + typeof metadata?.responses_api_enabled === 'boolean' ? 
metadata.responses_api_enabled : undefined; + const providerType = (row.provider_type || config?.provider || 'openai').toLowerCase(); + const defaults = getProviderDefaults(providerType, config); + const baseUrl = normalizeBaseUrl( + row.base_url || defaults.baseUrl || config?.providerConfig?.baseUrl || config?.openaiBaseUrl + ); + return { source: 'db', - providerType: row.provider_type || (config?.provider || 'openai'), - baseUrl: row.base_url || config?.providerConfig?.baseUrl || config?.openaiBaseUrl, - apiKey: row.api_key || config?.providerConfig?.apiKey || config?.openaiApiKey, + providerType, + baseUrl, + apiKey: row.api_key || defaults.apiKey || config?.providerConfig?.apiKey || config?.openaiApiKey, headers, defaultModel: config?.defaultModel, // Only use config defaultModel, not from metadata responsesApiEnabled, @@ -61,11 +87,15 @@ export async function resolveProviderSettings(config, options = {}) { // TODO: surface diagnostics when provider resolution fails. } + const providerType = config?.provider || 'openai'; + const defaults = getProviderDefaults(providerType, config); + const baseUrl = normalizeBaseUrl(config?.providerConfig?.baseUrl || defaults.baseUrl || config?.openaiBaseUrl); + return { source: 'env', - providerType: (config?.provider || 'openai'), - baseUrl: config?.providerConfig?.baseUrl || config?.openaiBaseUrl, - apiKey: config?.providerConfig?.apiKey || config?.openaiApiKey, + providerType, + baseUrl, + apiKey: config?.providerConfig?.apiKey || defaults.apiKey || config?.openaiApiKey, headers: { ...(config?.providerConfig?.headers || {}) }, defaultModel: config?.defaultModel, responsesApiEnabled: config?.featureFlags?.responsesApiEnabled, @@ -116,5 +146,5 @@ export async function providerChatCompletions(config, requestBody, options = {}) providerId: options.providerId || provider.providerId, ...options.context, }; - return provider.sendRequest(requestBody, context); + return provider.sendRawRequest(requestBody, context); } diff --git 
// [patch residue] a/backend/src/lib/streamUtils.js b/backend/src/lib/streamUtils.js index 0c7f43ca..30526e82 100644 --- a/backend/src/lib/streamUtils.js +++ b/backend/src/lib/streamUtils.js @@ -27,10 +27,51 @@ export function createChatCompletionChunk(id, model, delta, finishReason = null) * @param {Object} requestBody - Request body to send * @returns {Promise} Fetch response promise */
// Marks a Response whose json() has already been wrapped for translation.
const TRANSLATED_JSON = Symbol('chatforgeTranslatedJson');

/**
 * Lazily swap a fetch Response's json() for a memoized version that runs the
 * provider's response translation. Only successful responses of providers
 * that require translation are wrapped; error payloads pass through as-is,
 * and a translation failure falls back to the raw parsed body.
 */
function wrapResponseJson(response, provider, context = {}) {
  if (!response || typeof response.json !== 'function') return response;
  if (response[TRANSLATED_JSON]) return response;

  const needsTranslation =
    typeof provider?.needsStreamingTranslation === 'function'
    && provider.needsStreamingTranslation();

  // Only translate successful JSON responses; error payloads stay unchanged.
  if (!needsTranslation || response.ok === false) {
    return response;
  }

  const readJson = response.json.bind(response);
  let pending = null;

  response.json = () => {
    // Memoize so repeated json() calls share one read + one translation.
    pending ??= readJson().then(async (raw) => {
      try {
        return await provider.translateResponse(raw, context);
      } catch {
        return raw;
      }
    });
    return pending;
  };

  response[TRANSLATED_JSON] = true;
  return response;
}

/**
 * Resolve the configured provider, issue the chat-completion request, and
 * hand back the raw upstream Response with a translation-aware json().
 */
export async function createOpenAIRequest(config, requestBody, options = {}) {
  const { createProvider } = await import('./providers/index.js');
  const provider = await createProvider(config, options);
  const requestContext = {
    providerId: options.providerId || provider.providerId,
    ...(options.context || {}),
  };
  const upstream = await provider.sendRawRequest(requestBody, requestContext);
  return wrapResponseJson(upstream, provider, requestContext);
}

//
// [patch residue] Optional alias with a more generic name for future call sites diff --git a/backend/src/lib/streamingHandler.js b/backend/src/lib/streamingHandler.js index f1d7487a..3debfb09 100644 --- a/backend/src/lib/streamingHandler.js +++ b/backend/src/lib/streamingHandler.js @@ -76,6 +76,95 @@ function setupStreamEventHandlers({ }); }
/**
 * Fold one parsed streaming chunk (OpenAI chat.completion.chunk shape) into
 * the persistence accumulator.
 *
 * @param {object} obj - Parsed chunk; only choices[0] is inspected.
 * @param {object} persistence - Accumulator (setResponseId, appendContent,
 *   appendReasoningText, setReasoningDetails, setReasoningTokens,
 *   getContentLength). Must be non-null.
 * @param {Map<number, object>} toolCallMap - Accumulates streamed tool calls
 *   keyed by tool-call index; argument fragments are concatenated.
 * @param {{value: (string|null)}} lastFinishReason - Out-param recording the
 *   most recent non-null finish_reason.
 */
function processPersistenceChunk(obj, persistence, toolCallMap, lastFinishReason) {
  // Capture response_id from any chunk that carries one.
  if (obj?.id) {
    persistence.setResponseId(obj.id);
  }

  const choice = obj?.choices?.[0];
  const delta = choice?.delta;

  if (delta) {
    // FIX: was `!== undefined`, which appended null from role-only chunks.
    if (delta.content != null) {
      persistence.appendContent(delta.content);
    }

    const reasoningText = delta.reasoning_content ?? delta.reasoning;
    if (reasoningText) {
      persistence.appendReasoningText(reasoningText);
    }

    if (Array.isArray(delta.reasoning_details) && delta.reasoning_details.length > 0) {
      persistence.setReasoningDetails(delta.reasoning_details);
    }

    // Accumulate streamed tool-call fragments by index.
    if (Array.isArray(delta.tool_calls)) {
      for (const tcDelta of delta.tool_calls) {
        const idx = tcDelta.index ?? 0;
        const existing = toolCallMap.get(idx) ?? {
          id: tcDelta.id,
          type: 'function',
          index: idx,
          function: { name: '', arguments: '' },
          // Record where in the text this call first appeared so the UI can
          // interleave it with content.
          textOffset: persistence.getContentLength(),
        };

        if (tcDelta.id) existing.id = tcDelta.id;
        if (tcDelta.type) existing.type = tcDelta.type;
        if (tcDelta.function?.name) {
          existing.function.name = tcDelta.function.name;
        }
        if (tcDelta.function?.arguments) {
          existing.function.arguments += tcDelta.function.arguments;
        }

        toolCallMap.set(idx, existing);
      }
    }
  }

  // Capture reasoning_tokens from usage (check both known locations).
  const reasoningTokens = obj?.usage?.reasoning_tokens
    ?? obj?.usage?.completion_tokens_details?.reasoning_tokens
    ?? null;
  if (reasoningTokens != null) {
    persistence.setReasoningTokens(reasoningTokens);
  }

  const message = choice?.message;
  if (message?.reasoning_details) {
    persistence.setReasoningDetails(message.reasoning_details);
  }

  // Capture complete tool_calls from a full message (non-streaming or final).
  if (Array.isArray(message?.tool_calls) && message.tool_calls.length > 0) {
    for (const toolCall of message.tool_calls) {
      toolCallMap.set(toolCall.index ?? toolCallMap.size, toolCall);
    }
  }

  // FIX: capture finish_reason even when the chunk carries no delta
  // (previously this was only read inside the `if (delta)` branch).
  const finishReason = choice?.finish_reason ?? null;
  if (finishReason) {
    lastFinishReason.value = finishReason;
  }
}

// [patch residue] /** * Handle regular streaming * @param {Object} params - Streaming parameters @@ -85,10 +174,11 @@ export async function handleRegularStreaming({ res, req, persistence, + provider, }) { let leftover = ''; + let translationLeftover = ''; let lastFinishReason = { value: null }; - let responseId = null; // Track response_id from chunks let toolCallMap = new Map(); // Accumulate streamed tool calls // Emit conversation metadata upfront if available so clients receive @@ -105,104 +195,48 @@ upstream.body.on('data', (chunk) => { try { - // Direct passthrough for Chat Completions API - writeAndFlush(res, chunk); - - // Update persistence buffer if enabled - if (!persistence || !persistence.persist) return; - - leftover = parseSSEStream( - chunk, - leftover, - (obj) => { - let finishReason = null; - - // Capture response_id from any chunk - if (obj?.id && !responseId) { - responseId = obj.id; - if (persistence) persistence.setResponseId(responseId); - } - - const choice = obj?.choices?.[0]; - const delta = choice?.delta; - - if (delta) { - const deltaContent = delta.content; - if (deltaContent !== undefined) { - persistence.appendContent(deltaContent); - } - - const reasoningText = delta.reasoning_content ?? delta.reasoning; - if (reasoningText) { - persistence.appendReasoningText(reasoningText); - } - - if (Array.isArray(delta.reasoning_details) && delta.reasoning_details.length > 0) { - persistence.setReasoningDetails(delta.reasoning_details); - } - - // Capture tool_calls from delta (streaming tool calls) - if (Array.isArray(delta.tool_calls) && delta.tool_calls.length > 0) { - for (const tcDelta of delta.tool_calls) { - const idx = tcDelta.index ??
0; - const isNewToolCall = !toolCallMap.has(idx); - - const existing = toolCallMap.get(idx) || { - id: tcDelta.id, - type: 'function', - index: idx, - function: { name: '', arguments: '' } - }; - - // Capture textOffset when tool call first appears - if (isNewToolCall && persistence) { - existing.textOffset = persistence.getContentLength(); - } - - if (tcDelta.id) existing.id = tcDelta.id; - if (tcDelta.type) existing.type = tcDelta.type; - if (tcDelta.function?.name) { - existing.function.name = tcDelta.function.name; - } - if (tcDelta.function?.arguments) { - existing.function.arguments += tcDelta.function.arguments; - } - - toolCallMap.set(idx, existing); + if (provider?.needsStreamingTranslation()) { + // Translate chunks before writing and persistence + translationLeftover = parseSSEStream( + chunk, + translationLeftover, + (obj) => { + const translated = provider.translateStreamChunk(obj); + if (translated === '[DONE]') { + writeAndFlush(res, 'data: [DONE]\n\n'); + } else if (translated) { + writeAndFlush(res, `data: ${JSON.stringify(translated)}\n\n`); + + // Update persistence with translated chunk + if (persistence && persistence.persist) { + processPersistenceChunk(translated, persistence, toolCallMap, lastFinishReason); } } - - finishReason = choice?.finish_reason ?? finishReason; - } - - // Capture reasoning_tokens from usage (check both locations) - const reasoningTokens = obj?.usage?.reasoning_tokens - ?? obj?.usage?.completion_tokens_details?.reasoning_tokens - ?? 
null; - if (reasoningTokens != null) { - persistence.setReasoningTokens(reasoningTokens); + }, + () => { + writeAndFlush(res, 'data: [DONE]\n\n'); + }, + (err) => { + logger.warn('Error parsing upstream SSE JSON for translation', err); } - - const message = choice?.message; - if (message?.reasoning_details) { - persistence.setReasoningDetails(message.reasoning_details); - } - - // Capture complete tool_calls from message (non-streaming or final) - if (Array.isArray(message?.tool_calls) && message.tool_calls.length > 0) { - for (const toolCall of message.tool_calls) { - const idx = toolCall.index ?? toolCallMap.size; - toolCallMap.set(idx, toolCall); - } - } - - if (finishReason) { - lastFinishReason.value = finishReason; - } - }, - () => { }, - () => { } - ); + ); + } else { + // Direct passthrough for Chat Completions API + writeAndFlush(res, chunk); + + // Update persistence buffer if enabled + if (persistence && persistence.persist) { + leftover = parseSSEStream( + chunk, + leftover, + (obj) => { + processPersistenceChunk(obj, persistence, toolCallMap, lastFinishReason); + }, + () => { }, + () => { } + ); + } + } } catch (e) { logger.error('[stream data] error', e); } diff --git a/backend/src/routes/providers.js b/backend/src/routes/providers.js index 7895ab4d..d24ac622 100644 --- a/backend/src/routes/providers.js +++ b/backend/src/routes/providers.js @@ -12,9 +12,43 @@ import { deleteProvider, getDefaultProvider, } from '../db/providers.js'; +import { ANTHROPIC_API_VERSION as _ANTHROPIC_API_VERSION } from '../lib/providers/anthropicProvider.js'; import { filterModels } from '../lib/modelFilter.js'; import { authenticateToken } from '../middleware/auth.js'; +/** + * Get default base URL for a provider type + * @param {string} providerType - The provider type (openai, anthropic, etc.) 
+ * @returns {string} The default base URL + */ +function getDefaultBaseUrl(providerType) { + const defaults = { + anthropic: 'https://api.anthropic.com', + openai: 'https://api.openai.com', + }; + return defaults[providerType] || 'https://api.openai.com'; +} + +// Fallback (in case the provider module export is not available in some test/old envs) +const ANTHROPIC_API_VERSION = typeof _ANTHROPIC_API_VERSION === 'string' ? _ANTHROPIC_API_VERSION : '2023-06-01'; + +/** + * Set authentication header based on provider type + * @param {object} headers - Headers object to modify + * @param {string} providerType - The provider type (openai, anthropic, etc.) + * @param {string} apiKey - The API key + */ +function setAuthHeader(headers, providerType, apiKey) { + if (!apiKey) return; + + if (providerType === 'anthropic') { + headers['x-api-key'] = apiKey; + } else { + // OpenAI and other providers use Bearer token + headers.Authorization = `Bearer ${apiKey}`; + } +} + export function createProvidersRouter({ http = globalThis.fetch ?? fetchLib } = {}) { const providersRouter = Router(); @@ -141,109 +175,116 @@ export function createProvidersRouter({ http = globalThis.fetch ?? fetchLib } = if (!row) return res.status(404).json({ error: 'not_found' }); if (row.enabled === 0) return res.status(400).json({ error: 'disabled', message: 'Provider is disabled' }); - const baseUrl = String(row.base_url || '').replace(/\/v1\/?$/, ''); + const baseUrl = String(row.base_url || getDefaultBaseUrl(row.provider_type)).replace(/\/v1\/?$/, ''); if (!baseUrl) return res.status(400).json({ error: 'invalid_provider', message: 'Missing base_url' }); - let extra = {}; - try { - extra = row.extra_headers ? 
JSON.parse(row.extra_headers) : {}; - } catch { - extra = {}; - } - - const url = `${baseUrl}/v1/models`; - const headers = { - Accept: 'application/json', - Authorization: `Bearer ${row.api_key}`, - ...extra, - }; - - const upstream = await http(url, { - method: 'GET', - headers, - timeout: config.providerConfig.modelFetchTimeoutMs - }); - - if (!upstream.ok) { - const text = await upstream.text().catch(() => ''); - let errorMessage = 'Failed to fetch models'; + let extra = {}; + try { + extra = row.extra_headers ? JSON.parse(row.extra_headers) : {}; + } catch { + extra = {}; + } - if (upstream.status === 401) { - errorMessage = 'Invalid API key. Please check your credentials.'; - } else if (upstream.status === 403) { - errorMessage = 'API key does not have permission to access models.'; - } else if (upstream.status === 404) { - errorMessage = 'Invalid base URL. The /v1/models endpoint was not found.'; - } else if (upstream.status >= 500) { - errorMessage = 'Server error from the provider. Please try again later.'; - } else { - errorMessage = `Provider returned error: ${upstream.status}`; + const url = `${baseUrl}/v1/models`; + const headers = { + Accept: 'application/json', + ...extra, + }; + setAuthHeader(headers, row.provider_type, row.api_key); + // Anthropic APIs require the anthropic-version header on every request + if (row.provider_type === 'anthropic' && !headers['anthropic-version']) { + headers['anthropic-version'] = ANTHROPIC_API_VERSION; } - return res.status(502).json({ - error: 'bad_gateway', - message: errorMessage, - detail: text.slice(0, 200) + const upstream = await http(url, { + method: 'GET', + headers, + timeout: config.providerConfig.modelFetchTimeoutMs, }); - } - const json = await upstream.json().catch(() => ({})); - let models = []; - if (Array.isArray(json?.data)) models = json.data; - else if (Array.isArray(json?.models)) models = json.models; - else if (Array.isArray(json)) models = json; - - // Normalize to { id, ... 
} - models = models - .map((m) => (typeof m === 'string' ? { id: m } : m)) - .filter((m) => m && m.id); + if (!upstream.ok) { + const text = await upstream.text().catch(() => ''); + let errorMessage = 'Failed to fetch models'; + + if (upstream.status === 401) { + errorMessage = 'Invalid API key. Please check your credentials.'; + } else if (upstream.status === 403) { + errorMessage = 'API key does not have permission to access models.'; + } else if (upstream.status === 404) { + errorMessage = 'Invalid base URL. The /v1/models endpoint was not found.'; + } else if (upstream.status >= 500) { + errorMessage = 'Server error from the provider. Please try again later.'; + } else { + errorMessage = `Provider returned error: ${upstream.status}`; + } + + return res.status(502).json({ + error: 'bad_gateway', + message: errorMessage, + detail: text.slice(0, 200), + }); + } - // Filter OpenRouter models to only show those released in the last 1 year - if (baseUrl.includes('openrouter.ai')) { - const oneYearAgo = Math.floor(Date.now() / 1000) - (365 * 24 * 60 * 60); - models = models.filter((m) => { - // If no 'created' field, include the model (backwards compatibility) - if (!m.created) return true; - // Filter out models older than 1 year - return m.created >= oneYearAgo; - }); - } + const json = await upstream.json().catch(() => ({})); + let models = []; + if (Array.isArray(json?.data)) models = json.data; + else if (Array.isArray(json?.models)) models = json.models; + else if (Array.isArray(json)) models = json; + + // Normalize to { id, ... } + models = models.map((m) => (typeof m === 'string' ? 
{ id: m } : m)).filter((m) => m && m.id); + + // Filter OpenRouter models to only show those released in the last 1 year + if (baseUrl.includes('openrouter.ai')) { + const oneYearAgo = Math.floor(Date.now() / 1000) - 365 * 24 * 60 * 60; + models = models.filter((m) => { + // If no 'created' field, include the model (backwards compatibility) + if (!m.created) return true; + // Filter out models older than 1 year + return m.created >= oneYearAgo; + }); + } - // Apply model filter from provider metadata if configured - // Note: row.metadata is already parsed by getProviderByIdWithApiKey - if (row.metadata && row.metadata.model_filter) { - models = filterModels(models, row.metadata.model_filter); - } + // Apply model filter from provider metadata if configured + // Note: row.metadata is already parsed by getProviderByIdWithApiKey + if (row.metadata && row.metadata.model_filter) { + models = filterModels(models, row.metadata.model_filter); + } - res.json({ provider: { id: row.id, name: row.name, provider_type: row.provider_type }, models }); - } catch (err) { - let errorMessage = 'Failed to retrieve models. Please check your provider configuration.'; - let statusCode = 502; // Default to bad gateway for provider connectivity issues + res.json({ provider: { id: row.id, name: row.name, provider_type: row.provider_type }, models }); + } catch (err) { + let errorMessage = 'Failed to retrieve models. Please check your provider configuration.'; + let statusCode = 502; // Default to bad gateway for provider connectivity issues + + // Check various error conditions for network-related issues + if (err.name === 'AbortError' || err.code === 'ETIMEDOUT' || err.message?.includes('timeout')) { + errorMessage = 'Connection timeout. 
Please check your base URL and network connection.'; + } else if ( + err.code === 'ENOTFOUND' || + err.code === 'ECONNREFUSED' || + err.message?.includes('ENOTFOUND') || + err.message?.includes('fetch failed') + ) { + errorMessage = 'Cannot connect to the provider. Please check your base URL.'; + } else if (err.code === 'ECONNRESET' || err.code === 'ECONNABORTED') { + errorMessage = 'Connection to provider was interrupted. Please try again.'; + } else if (err.name === 'TypeError' && err.message?.includes('fetch')) { + errorMessage = 'Network error occurred while connecting to provider.'; + } else { + // For truly internal errors (database issues, etc.), use 500 + statusCode = 500; + errorMessage = 'Internal server error while fetching models.'; + } - // Check various error conditions for network-related issues - if (err.name === 'AbortError' || err.code === 'ETIMEDOUT' || err.message?.includes('timeout')) { - errorMessage = 'Connection timeout. Please check your base URL and network connection.'; - } else if (err.code === 'ENOTFOUND' || err.code === 'ECONNREFUSED' || err.message?.includes('ENOTFOUND') || err.message?.includes('fetch failed')) { - errorMessage = 'Cannot connect to the provider. Please check your base URL.'; - } else if (err.code === 'ECONNRESET' || err.code === 'ECONNABORTED') { - errorMessage = 'Connection to provider was interrupted. Please try again.'; - } else if (err.name === 'TypeError' && err.message?.includes('fetch')) { - errorMessage = 'Network error occurred while connecting to provider.'; - } else { - // For truly internal errors (database issues, etc.), use 500 - statusCode = 500; - errorMessage = 'Internal server error while fetching models.'; + res.status(statusCode).json({ + error: statusCode === 500 ? 'internal_server_error' : 'provider_error', + message: errorMessage, + detail: err?.message || 'Unknown error', + }); } - - res.status(statusCode).json({ - error: statusCode === 500 ? 
'internal_server_error' : 'provider_error', - message: errorMessage, - detail: err?.message || 'Unknown error' - }); - } }); -// Test provider connection without saving + // Test provider connection without saving providersRouter.post('/v1/providers/test', async (req, res) => { try { const body = req.body || {}; @@ -254,104 +295,107 @@ export function createProvidersRouter({ http = globalThis.fetch ?? fetchLib } = return res.status(400).json({ error: 'invalid_request', message: 'name and provider_type are required' }); } - const api_key = body.api_key || null; - const base_url = String(body.base_url || '').replace(/\/v1\/?$/, '') || 'https://api.openai.com'; - - let extra = {}; - try { - extra = body.extra_headers ? JSON.parse(body.extra_headers) : {}; - } catch { - extra = {}; - } - - // Test connection by attempting to list models - const url = `${base_url}/v1/models`; - const headers = { - Accept: 'application/json', - ...extra, - }; - if (api_key) { - headers.Authorization = `Bearer ${api_key}`; - } - - const upstream = await http(url, { - method: 'GET', - headers, - timeout: config.providerConfig.timeoutMs - }); + const api_key = body.api_key || null; + const base_url = String(body.base_url || '').replace(/\/v1\/?$/, '') || getDefaultBaseUrl(provider_type); - if (!upstream.ok) { - const text = await upstream.text().catch(() => ''); - let errorMessage = 'Connection failed'; + let extra = {}; + try { + extra = body.extra_headers ? JSON.parse(body.extra_headers) : {}; + } catch { + extra = {}; + } - if (upstream.status === 401) { - errorMessage = 'Invalid API key. Please check your credentials.'; - } else if (upstream.status === 403) { - errorMessage = 'API key does not have permission to access this endpoint.'; - } else if (upstream.status === 404) { - errorMessage = 'Invalid base URL. The /v1/models endpoint was not found.'; - } else if (upstream.status >= 500) { - errorMessage = 'Server error from the provider. 
Please try again later.'; - } else { - errorMessage = `Provider returned error: ${upstream.status}`; + // Test connection by attempting to list models + const url = `${base_url}/v1/models`; + const headers = { + Accept: 'application/json', + ...extra, + }; + setAuthHeader(headers, provider_type, api_key); + // Anthropic APIs require the anthropic-version header on every request + if (provider_type === 'anthropic' && !headers['anthropic-version']) { + headers['anthropic-version'] = ANTHROPIC_API_VERSION; } - return res.status(400).json({ - error: 'test_failed', - message: errorMessage, - detail: text.slice(0, 200) + const upstream = await http(url, { + method: 'GET', + headers, + timeout: config.providerConfig.timeoutMs, }); - } - const json = await upstream.json().catch(() => ({})); - let models = []; - if (Array.isArray(json?.data)) models = json.data; - else if (Array.isArray(json?.models)) models = json.models; - else if (Array.isArray(json)) models = json; + if (!upstream.ok) { + const text = await upstream.text().catch(() => ''); + let errorMessage = 'Connection failed'; + + if (upstream.status === 401) { + errorMessage = 'Invalid API key. Please check your credentials.'; + } else if (upstream.status === 403) { + errorMessage = 'API key does not have permission to access this endpoint.'; + } else if (upstream.status === 404) { + errorMessage = 'Invalid base URL. The /v1/models endpoint was not found.'; + } else if (upstream.status >= 500) { + errorMessage = 'Server error from the provider. Please try again later.'; + } else { + errorMessage = `Provider returned error: ${upstream.status}`; + } + + return res.status(400).json({ + error: 'test_failed', + message: errorMessage, + detail: text.slice(0, 200), + }); + } - models = models - .map((m) => (typeof m === 'string' ? 
{ id: m } : m)) - .filter((m) => m && m.id); + const json = await upstream.json().catch(() => ({})); + let models = []; + if (Array.isArray(json?.data)) models = json.data; + else if (Array.isArray(json?.models)) models = json.models; + else if (Array.isArray(json)) models = json; + + models = models.map((m) => (typeof m === 'string' ? { id: m } : m)).filter((m) => m && m.id); + + // Filter OpenRouter models to only show those released in the last 1 year + if (base_url.includes('openrouter.ai')) { + const oneYearAgo = Math.floor(Date.now() / 1000) - 365 * 24 * 60 * 60; + models = models.filter((m) => { + // If no 'created' field, include the model (backwards compatibility) + if (!m.created) return true; + // Filter out models older than 1 year + return m.created >= oneYearAgo; + }); + } - // Filter OpenRouter models to only show those released in the last 1 year - if (base_url.includes('openrouter.ai')) { - const oneYearAgo = Math.floor(Date.now() / 1000) - (365 * 24 * 60 * 60); - models = models.filter((m) => { - // If no 'created' field, include the model (backwards compatibility) - if (!m.created) return true; - // Filter out models older than 1 year - return m.created >= oneYearAgo; - }); - } + // Apply model filter from metadata if configured + if (body.metadata && body.metadata.model_filter) { + models = filterModels(models, body.metadata.model_filter); + } - // Apply model filter from metadata if configured - if (body.metadata && body.metadata.model_filter) { - models = filterModels(models, body.metadata.model_filter); - } + const modelCount = models.length; + const sampleModels = models + .slice(0, 3) + .map((m) => m.id) + .join(', '); - const modelCount = models.length; - const sampleModels = models.slice(0, 3).map(m => m.id).join(', '); + res.json({ + success: true, + message: `Connection successful! Found ${modelCount} models${sampleModels ? ` (${sampleModels}${modelCount > 3 ? ', ...' 
: ''})` : ''}.`, + models: modelCount, + }); + } catch (err) { + let errorMessage = 'Connection test failed. Please check your configuration.'; - res.json({ - success: true, - message: `Connection successful! Found ${modelCount} models${sampleModels ? ` (${sampleModels}${modelCount > 3 ? ', ...' : ''})` : ''}.`, - models: modelCount - }); - } catch (err) { - let errorMessage = 'Connection test failed. Please check your configuration.'; + if (err.name === 'AbortError' || err.code === 'ETIMEDOUT') { + errorMessage = 'Connection timeout. Please check your base URL and network connection.'; + } else if (err.code === 'ENOTFOUND' || err.code === 'ECONNREFUSED') { + errorMessage = 'Cannot connect to the provider. Please check your base URL.'; + } - if (err.name === 'AbortError' || err.code === 'ETIMEDOUT') { - errorMessage = 'Connection timeout. Please check your base URL and network connection.'; - } else if (err.code === 'ENOTFOUND' || err.code === 'ECONNREFUSED') { - errorMessage = 'Cannot connect to the provider. Please check your base URL.'; + res.status(400).json({ + error: 'test_failed', + message: errorMessage, + detail: err?.message || 'Unknown error', + }); } - - res.status(400).json({ - error: 'test_failed', - message: errorMessage, - detail: err?.message || 'Unknown error' - }); - } }); // Test existing provider connection using stored credentials but with updated config @@ -372,7 +416,9 @@ export function createProvidersRouter({ http = globalThis.fetch ?? fetchLib } = } // Use existing API key but allow override of other settings for testing - const base_url = (body.base_url !== undefined ? body.base_url : existingProvider.base_url) || 'https://api.openai.com'; + const base_url = + (body.base_url !== undefined ? body.base_url : existingProvider.base_url) || + getDefaultBaseUrl(existingProvider.provider_type); const testBaseUrl = String(base_url).replace(/\/v1\/?$/, ''); @@ -390,9 +436,13 @@ export function createProvidersRouter({ http = globalThis.fetch ?? 
fetchLib } = const url = `${testBaseUrl}/v1/models`; const headers = { Accept: 'application/json', - Authorization: `Bearer ${existingProvider.api_key}`, ...extra, }; + setAuthHeader(headers, existingProvider.provider_type, existingProvider.api_key); + // Anthropic APIs require the anthropic-version header on every request + if (existingProvider.provider_type === 'anthropic' && !headers['anthropic-version']) { + headers['anthropic-version'] = ANTHROPIC_API_VERSION; + } const upstream = await http(url, { method: 'GET', diff --git a/docs/backend_api_spec.md b/docs/backend_api_spec.md index a04b25e5..3785c1f4 100644 --- a/docs/backend_api_spec.md +++ b/docs/backend_api_spec.md @@ -119,12 +119,14 @@ Deletes provider. 204 empty or 404 not_found. ### GET /v1/providers/{id}/models Fetch upstream models via stored credentials. Applies optional model filtering. +Note: If provider's `base_url` is not set, defaults are used based on `provider_type`. Authentication headers are automatically set based on provider type (Anthropic uses `x-api-key`, others use `Authorization: Bearer`). Success: `{ provider: { id, name, provider_type }, models: [ { id, ...upstream } ] }` Errors: 400 invalid_provider | disabled | bad request reasons; 404 not_found; 502 bad_gateway/provider_error; 500 internal_server_error. ### POST /v1/providers/test Test a provider configuration without saving. Body requires: `name`, `provider_type`, `api_key`; optional `base_url`, `extra_headers`, `metadata.model_filter`. +Note: If `base_url` is omitted, defaults are used based on `provider_type` (e.g., Anthropic → `https://api.anthropic.com`, OpenAI → `https://api.openai.com`). Success 200: `{ success: true, message: "Connection successful! Found X models (sample1, sample2, ...).", models: }` Errors 400 test_failed (with detail), 400 invalid_request. 
diff --git a/frontend/__tests__/sidebar.collapse.test.tsx b/frontend/__tests__/sidebar.collapse.test.tsx index 2f58b443..47f0cb9f 100644 --- a/frontend/__tests__/sidebar.collapse.test.tsx +++ b/frontend/__tests__/sidebar.collapse.test.tsx @@ -130,6 +130,15 @@ function renderWithProviders(ui: React.ReactElement) { return render({ui}); } +async function waitForLeftSidebar() { + const conversationButton = await screen.findByRole('button', { name: 'Test Conversation' }); + const sidebar = conversationButton.closest('aside'); + if (!sidebar) { + throw new Error('Unable to locate left sidebar element'); + } + return sidebar as HTMLElement; +} + function setupHttpClient() { mockHttpClient.get.mockImplementation((url: string) => { if (url === '/v1/providers') { @@ -220,40 +229,31 @@ describe('Sidebar Collapse Functionality', () => { test('sidebar is expanded by default', async () => { renderWithProviders(); - await waitFor(() => { - expect(screen.getByText('Chat History')).toBeInTheDocument(); - expect(screen.getByText('Test Conversation')).toBeInTheDocument(); - }); + const leftSidebar = await waitForLeftSidebar(); + expect( + within(leftSidebar).getByRole('button', { name: 'Test Conversation' }) + ).toBeInTheDocument(); + expect(within(leftSidebar).getByTitle('Collapse sidebar')).toBeInTheDocument(); }); test('sidebar can be collapsed and expanded', async () => { const user = userEvent.setup(); renderWithProviders(); - await waitFor(() => { - expect(screen.getByText('Chat History')).toBeInTheDocument(); - }); - - // Find and click the collapse button - const collapseButton = screen.getAllByTitle('Collapse sidebar')[0]; - expect(collapseButton).toBeInTheDocument(); - + const leftSidebar = await waitForLeftSidebar(); + const collapseButton = within(leftSidebar).getByTitle('Collapse sidebar'); await user.click(collapseButton); - // After collapsing, the "Chat History" text should not be visible await waitFor(() => { - expect(screen.queryByText('Chat 
History')).not.toBeInTheDocument(); + expect(within(leftSidebar).queryByTitle('Collapse sidebar')).not.toBeInTheDocument(); + expect(within(leftSidebar).getByTitle('Expand sidebar')).toBeInTheDocument(); }); - // Find and click the expand button - const expandButton = screen.getByTitle('Expand sidebar'); - expect(expandButton).toBeInTheDocument(); - + const expandButton = within(leftSidebar).getByTitle('Expand sidebar'); await user.click(expandButton); - // After expanding, the "Chat History" text should be visible again await waitFor(() => { - expect(screen.getByText('Chat History')).toBeInTheDocument(); + expect(within(leftSidebar).getByTitle('Collapse sidebar')).toBeInTheDocument(); }); }); @@ -261,21 +261,15 @@ describe('Sidebar Collapse Functionality', () => { const user = userEvent.setup(); renderWithProviders(); - await waitFor(() => { - expect(screen.getByText('Chat History')).toBeInTheDocument(); - }); - - // Click collapse button - const leftSidebar = screen.getByText('Chat History').closest('aside'); - expect(leftSidebar).not.toBeNull(); - const collapseButton = within(leftSidebar as HTMLElement).getByTitle('Collapse sidebar'); + const leftSidebar = await waitForLeftSidebar(); + const collapseButton = within(leftSidebar).getByTitle('Collapse sidebar'); await user.click(collapseButton); // Verify localStorage.setItem was called with 'true' expect(mockLocalStorage.setItem).toHaveBeenCalledWith('sidebarCollapsed', 'true'); // Click expand button - const expandButton = screen.getByTitle('Expand sidebar'); + const expandButton = within(leftSidebar).getByTitle('Expand sidebar'); await user.click(expandButton); // Verify localStorage.setItem was called with 'false' @@ -291,9 +285,7 @@ describe('Sidebar Collapse Functionality', () => { renderWithProviders(); await waitFor(() => { - // Should not show "Chat History" text when collapsed - expect(screen.queryByText('Chat History')).not.toBeInTheDocument(); - // Should show expand button(s) - there may be left and 
right sidebars + expect(screen.getByLabelText('Start new chat')).toBeInTheDocument(); expect(screen.getAllByTitle('Expand sidebar').length).toBeGreaterThanOrEqual(1); }); }); @@ -302,22 +294,20 @@ describe('Sidebar Collapse Functionality', () => { const user = userEvent.setup(); renderWithProviders(); - await waitFor(() => { - expect(screen.getByText('Chat History')).toBeInTheDocument(); - }); + const leftSidebar = await waitForLeftSidebar(); // Press Ctrl+\ to collapse await user.keyboard('{Control>}\\{/Control}'); await waitFor(() => { - expect(screen.queryByText('Chat History')).not.toBeInTheDocument(); + expect(within(leftSidebar).queryByTitle('Collapse sidebar')).not.toBeInTheDocument(); }); // Press Ctrl+\ again to expand await user.keyboard('{Control>}\\{/Control}'); await waitFor(() => { - expect(screen.getByText('Chat History')).toBeInTheDocument(); + expect(within(leftSidebar).getByTitle('Collapse sidebar')).toBeInTheDocument(); }); }); @@ -325,22 +315,14 @@ describe('Sidebar Collapse Functionality', () => { const user = userEvent.setup(); renderWithProviders(); - await waitFor(() => { - expect(screen.getByText('Chat History')).toBeInTheDocument(); - }); - - // Collapse the sidebar - const leftSidebar = screen.getByText('Chat History').closest('aside'); - expect(leftSidebar).not.toBeNull(); - const collapseButton = within(leftSidebar as HTMLElement).getByTitle('Collapse sidebar'); + const leftSidebar = await waitForLeftSidebar(); + const collapseButton = within(leftSidebar).getByTitle('Collapse sidebar'); await user.click(collapseButton); await waitFor(() => { - // Should show minimal buttons - expect(screen.getByTitle('New Chat')).toBeInTheDocument(); - expect(screen.getByTitle('Refresh conversations')).toBeInTheDocument(); - // Should not show full "Chat History" header - expect(screen.queryByText('Chat History')).not.toBeInTheDocument(); + // Should show minimal buttons specific to collapsed state + expect(screen.getByLabelText('Start new 
chat')).toBeInTheDocument(); + expect(screen.getByLabelText('Refresh conversation list')).toBeInTheDocument(); }); }); @@ -348,14 +330,8 @@ describe('Sidebar Collapse Functionality', () => { const user = userEvent.setup(); renderWithProviders(); - await waitFor(() => { - expect(screen.getByText('Chat History')).toBeInTheDocument(); - }); - - // Collapse the sidebar - const leftSidebar = screen.getByText('Chat History').closest('aside'); - expect(leftSidebar).not.toBeNull(); - const collapseButton = within(leftSidebar as HTMLElement).getByTitle('Collapse sidebar'); + const leftSidebar = await waitForLeftSidebar(); + const collapseButton = within(leftSidebar).getByTitle('Collapse sidebar'); await user.click(collapseButton); await waitFor(() => { diff --git a/frontend/components/SettingsModal.tsx b/frontend/components/SettingsModal.tsx index b2ccb0be..6b94f7a9 100644 --- a/frontend/components/SettingsModal.tsx +++ b/frontend/components/SettingsModal.tsx @@ -688,34 +688,39 @@ export default function SettingsModal({ open, onClose, onProvidersChanged }: Set required > +

- Compatible with OpenAI API format (ChatGPT, Claude, most providers) + {form.provider_type === 'anthropic' + ? 'Native Anthropic Claude API support with Messages API' + : 'Compatible with OpenAI API format (ChatGPT, Claude, most providers)'}

-
- - setForm((f) => ({ ...f, base_url: e.target.value }))} - placeholder="https://api.openai.com/v1 (auto-filled if empty)" - /> -

- Custom API endpoint. Leave empty for OpenAI's default endpoint. -

-
+ {form.provider_type === 'openai' && ( +
+ + setForm((f) => ({ ...f, base_url: e.target.value }))} + placeholder="https://api.openai.com/v1 (auto-filled if empty)" + /> +

+ Custom API endpoint. Leave empty for OpenAI's default endpoint. +

+
+ )}