From a9545f1800b1952478cf8740f191acd0fbcd714c Mon Sep 17 00:00:00 2001 From: tangshixiang Date: Thu, 26 Mar 2026 17:21:16 +0800 Subject: [PATCH 1/2] setup files --- .github/workflows/ci.yml | 2 +- .github/workflows/daily-summary.yml | 2 +- .github/workflows/readme-whats-new.yml | 2 +- .nvmrc | 1 + 4 files changed, 4 insertions(+), 3 deletions(-) create mode 100644 .nvmrc diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 7f8ab6a3..2a062198 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -19,7 +19,7 @@ jobs: strategy: matrix: - node-version: [22] + node-version: [24, 25] steps: - name: Checkout diff --git a/.github/workflows/daily-summary.yml b/.github/workflows/daily-summary.yml index 5735eac4..5d8079f6 100644 --- a/.github/workflows/daily-summary.yml +++ b/.github/workflows/daily-summary.yml @@ -41,7 +41,7 @@ jobs: if: steps.check.outputs.commit_count != '0' uses: actions/setup-node@v4 with: - node-version: '22' + node-version: '25' - name: Install Copilot CLI if: steps.check.outputs.commit_count != '0' diff --git a/.github/workflows/readme-whats-new.yml b/.github/workflows/readme-whats-new.yml index 01d4e58f..0a9c5f1d 100644 --- a/.github/workflows/readme-whats-new.yml +++ b/.github/workflows/readme-whats-new.yml @@ -46,7 +46,7 @@ jobs: if: steps.check.outputs.commit_count != '0' uses: actions/setup-node@v4 with: - node-version: '22' + node-version: '25' - name: Install Copilot CLI if: steps.check.outputs.commit_count != '0' diff --git a/.nvmrc b/.nvmrc new file mode 100644 index 00000000..7273c0fa --- /dev/null +++ b/.nvmrc @@ -0,0 +1 @@ +25 From c6b7415fbc808ff7cc5d63053597d96bea923310 Mon Sep 17 00:00:00 2001 From: tangshixiang Date: Thu, 26 Mar 2026 17:22:11 +0800 Subject: [PATCH 2/2] new version --- README.md | 18 +- docs/README_CN.md | 18 +- docs/README_DE.md | 2 +- docs/README_FR.md | 2 +- docs/README_JA.md | 2 +- docs/getting-started/installation.md | 2 +- .../getting-started/installation.po | 4 +- .../zh_CN/LC_MESSAGES/troubleshooting/faq.po | 4 +- docs/troubleshooting/faq.md | 2 +- package-lock.json | 2 +- package.json | 2 +- public/innoclaw.html | 14 +- site/index.html | 14 +- src/app/api/agent/route.ts | 18 +- src/app/api/models/route.test.ts | 61 +++++ src/app/api/models/route.ts | 152 ++++++++++-- src/app/api/paper-study/chat/route.ts | 5 +- src/app/api/settings/route.ts | 33 +-- src/components/agent/agent-panel.tsx | 234 +++++++++++++----- src/lib/ai/provider-env.ts | 80 ++++++ src/lib/ai/provider.test.ts | 23 ++ src/lib/ai/provider.ts | 31 +-- src/lib/ai/runtime-capabilities.test.ts | 39 +++ src/lib/ai/runtime-capabilities.ts | 18 ++ src/lib/env-file.test.ts | 17 ++ src/lib/env-file.ts | 33 +++ 26 files changed, 665 insertions(+), 165 deletions(-) create mode 100644 src/lib/ai/provider-env.ts create mode 100644 src/lib/ai/runtime-capabilities.test.ts create mode 100644 src/lib/ai/runtime-capabilities.ts diff --git a/README.md b/README.md index 1c610629..f6258848 100644 --- a/README.md +++ b/README.md @@ -14,7 +14,7 @@

Apache 2.0 License
- Node.js 20+
+ Node.js 24+ LTS or 25 Current
CI Status
Online Docs
GitHub Stars
@@ -55,6 +55,10 @@ It is built for researchers, developers, labs, and self-hosters who want more th
+#### 2026-03-26
+- **Node.js Runtime Update**: InnoClaw now targets Node.js 24+ and is verified against both Node.js 24 LTS and the latest Node.js 25 current release. CI and local version hints have been updated accordingly.
+
+
 #### 2026-03-24
 - **Multimodal LLM Support**: Paper Study and agent workflows now support both standard LLMs and multimodal LLMs (mLLM), selectable per-context in settings and the model selector
@@ -186,6 +190,18 @@ Instead of juggling separate tools for files, notes, literature review, and auto
 ## 🚀 Quick Start
+Runtime requirements:
+- Node.js `24+` required
+- Node.js `24 LTS` recommended for stable deployments
+- Node.js `25 Current` is also supported
+
+If you use `nvm`, it will pick up the version pinned in the repo's `.nvmrc`:
+
+```bash
+nvm install
+nvm use
+```
+
 ```bash
 git clone https://github.com/SpectrAI-Initiative/InnoClaw.git
 cd InnoClaw
diff --git a/docs/README_CN.md b/docs/README_CN.md
index bb3343a7..39908b52 100644
--- a/docs/README_CN.md
+++ b/docs/README_CN.md
@@ -14,7 +14,7 @@

Apache 2.0 License - Node.js 20+ + Node.js 24+ LTS or 25 Current CI Status Online Docs GitHub Stars @@ -45,6 +45,10 @@ InnoClaw 将服务器文件夹变成 AI 原生工作空间,用于基于文档 +#### 2026-03-26 +- **Node.js 运行时更新**: InnoClaw 现以 Node.js 24+ 为目标运行时,并已验证兼容 Node.js 24 LTS 与最新的 Node.js 25 Current 版本。CI 与本地版本提示也已同步更新。 + + #### 2026-03-24 - **多模态大模型支持**: 论文研究与智能体工作流现支持标准 LLM 与多模态 LLM(mLLM),可在设置页面和模型选择器中按上下文切换 @@ -176,6 +180,18 @@ InnoClaw 是一个面向研究工作的可自托管 Web 应用,把工作空间 ## 🚀 快速开始 +运行时要求: +- 需要 Node.js `24+` +- 稳定部署推荐使用 Node.js `24 LTS` +- 也支持 Node.js `25 Current` + +如果你使用 `nvm`,可以直接跟随仓库默认版本: + +```bash +nvm install +nvm use +``` + ```bash git clone https://github.com/SpectrAI-Initiative/InnoClaw.git cd InnoClaw diff --git a/docs/README_DE.md b/docs/README_DE.md index aa6d19c5..ecb231f0 100644 --- a/docs/README_DE.md +++ b/docs/README_DE.md @@ -14,7 +14,7 @@

Apache 2.0 License - Node.js 20+ + Node.js 24+ LTS or 25 Current CI Status Online Docs GitHub Stars diff --git a/docs/README_FR.md b/docs/README_FR.md index 32273d5c..f43e2cfa 100644 --- a/docs/README_FR.md +++ b/docs/README_FR.md @@ -14,7 +14,7 @@

Apache 2.0 License - Node.js 20+ + Node.js 24+ LTS or 25 Current CI Status Online Docs GitHub Stars diff --git a/docs/README_JA.md b/docs/README_JA.md index 6ad1a354..50b8bd68 100644 --- a/docs/README_JA.md +++ b/docs/README_JA.md @@ -14,7 +14,7 @@

Apache 2.0 License
- Node.js 20+
+ Node.js 24+ LTS or 25 Current
CI Status
Online Docs
GitHub Stars
diff --git a/docs/getting-started/installation.md b/docs/getting-started/installation.md
index 1fcf18bb..5ffac9a7 100644
--- a/docs/getting-started/installation.md
+++ b/docs/getting-started/installation.md
@@ -8,7 +8,7 @@ Before you begin, ensure you have the following installed:
 | Requirement | Version | Notes |
 |-------------|---------|-------|
-| **Node.js** | 18+ (20+ recommended) | Runtime environment |
+| **Node.js** | 24+ (24 LTS recommended, 25 Current supported) | Runtime environment |
 | **npm** | Included with Node.js | Package manager |
 | **Git** | Any recent version | Required for GitHub clone/pull features |
diff --git a/docs/locales/zh_CN/LC_MESSAGES/getting-started/installation.po b/docs/locales/zh_CN/LC_MESSAGES/getting-started/installation.po
index 72324b62..53a7dc0b 100644
--- a/docs/locales/zh_CN/LC_MESSAGES/getting-started/installation.po
+++ b/docs/locales/zh_CN/LC_MESSAGES/getting-started/installation.po
@@ -56,8 +56,8 @@ msgid "**Node.js**"
 msgstr "**Node.js**"
 #: ../../getting-started/installation.md 6c2a136eaba9488b9028d72a372682ed
-msgid "18+ (20+ recommended)"
-msgstr "18+(推荐 20+)"
+msgid "24+ (24 LTS recommended, 25 Current supported)"
+msgstr "24+(推荐 24 LTS,也支持 25 Current)"
 #: ../../getting-started/installation.md 2cd03196b1a3452b8cb37aa519ff54e7
 msgid "Runtime environment"
 msgstr "运行环境"
diff --git a/docs/locales/zh_CN/LC_MESSAGES/troubleshooting/faq.po b/docs/locales/zh_CN/LC_MESSAGES/troubleshooting/faq.po
index 4f5bb667..1fb9bc30 100644
--- a/docs/locales/zh_CN/LC_MESSAGES/troubleshooting/faq.po
+++ b/docs/locales/zh_CN/LC_MESSAGES/troubleshooting/faq.po
@@ -196,8 +196,8 @@ msgid "The development server won't start"
 msgstr "开发服务器无法启动"
 #: ../../troubleshooting/faq.md:77 f10279b7075e415ca232c7f880c59e7b
-msgid "Ensure Node.js 18+ is installed: `node --version`"
-msgstr "确保已安装 Node.js 18+:`node --version`"
+msgid "Ensure Node.js 24+ is installed (Node.js 25 is also supported): `node --version`"
+msgstr "确保已安装 Node.js 24+(也支持 Node.js 25):`node --version`"
 #: ../../troubleshooting/faq.md:78 272be70528fb41aba0a0a3a5fd0362ea
 msgid "Ensure dependencies are installed: `npm install`"
diff --git a/docs/troubleshooting/faq.md b/docs/troubleshooting/faq.md
index 7a6b6dce..9c23f073 100644
--- a/docs/troubleshooting/faq.md
+++ b/docs/troubleshooting/faq.md
@@ -166,7 +166,7 @@ Back up the following:
 ### The development server won't start
-1. Ensure Node.js 18+ is installed: `node --version`
+1. Ensure Node.js 24+ is installed (Node.js 25 is also supported): `node --version`
 2. Ensure dependencies are installed: `npm install`
 3. Ensure the database is initialized: `npx drizzle-kit migrate`
 4. Ensure `WORKSPACE_ROOTS` directories exist on disk
diff --git a/package-lock.json b/package-lock.json
index 0f2dd587..d4c01240 100644
--- a/package-lock.json
+++ b/package-lock.json
@@ -77,7 +77,7 @@
 "vitest": "^4.0.18"
 },
 "engines": {
-  "node": ">=20.0.0"
+  "node": ">=24.0.0"
 }
 },
 "node_modules/@ai-sdk/anthropic": {
diff --git a/package.json b/package.json
index d6690d87..e84fa011 100644
--- a/package.json
+++ b/package.json
@@ -23,7 +23,7 @@
 "url": "https://github.com/SpectrAI-Initiative/InnoClaw/issues"
 },
 "engines": {
-  "node": ">=20.0.0"
+  "node": ">=24.0.0"
 },
 "scripts": {
 "predev": "node scripts/check-deps.js",
diff --git a/public/innoclaw.html b/public/innoclaw.html
index c3cd07f7..c0b3d3f7 100644
--- a/public/innoclaw.html
+++ b/public/innoclaw.html
@@ -939,10 +939,10 @@

Get Started in Minutes

-

Clone the repo, install dependencies, configure your API keys, and start the development server. InnoClaw runs on any machine with Node.js 20+.

+

Clone the repo, install dependencies, configure your API keys, and start the development server. InnoClaw runs on Node.js 24 LTS and also supports the latest Node.js 25 current release.

Requirements

-
Node.js 20+ required
+
Node.js 24+ required (25 supported)
SQLite — zero-config database
Supports 9 LLM providers (OpenAI, Anthropic, Gemini, DeepSeek, Qwen, etc.)
Ready in under 3 minutes
@@ -1208,9 +1208,9 @@

wf4: '\u836f\u7269\u9776\u70b9\u5206\u6790', wf5: '\u86cb\u767d\u8d28\u7ed3\u6784\u9884\u6d4b', qs_title: '\u51e0\u5206\u949f\u5373\u53ef\u4e0a\u624b', - qs_desc: '\u514b\u9686\u4ed3\u5e93\u3001\u5b89\u88c5\u4f9d\u8d56\u3001\u914d\u7f6e API \u5bc6\u94a5\u5e76\u542f\u52a8\u5f00\u53d1\u670d\u52a1\u5668\u3002InnoClaw \u53ef\u8fd0\u884c\u5728\u4efb\u4f55\u5b89\u88c5\u4e86 Node.js 20+ \u7684\u673a\u5668\u4e0a\u3002', + qs_desc: '\u514b\u9686\u4ed3\u5e93\u3001\u5b89\u88c5\u4f9d\u8d56\u3001\u914d\u7f6e API \u5bc6\u94a5\u5e76\u542f\u52a8\u5f00\u53d1\u670d\u52a1\u5668\u3002InnoClaw \u63a8\u8350\u4f7f\u7528 Node.js 24 LTS\uff0c\u540c\u65f6\u4e5f\u652f\u6301\u6700\u65b0\u7684 Node.js 25 \u5f53\u524d\u7248\u672c\u3002', req_title: '\u7cfb\u7edf\u8981\u6c42', - req1: '\u9700\u8981 Node.js 20+', + req1: '\u9700\u8981 Node.js 24+\uff0825 \u4e5f\u53ef\u7528\uff09', req2: 'SQLite \u2014 \u96f6\u914d\u7f6e\u6570\u636e\u5e93', req3: '\u652f\u6301 9 \u5927 LLM \u63d0\u4f9b\u5546 (OpenAI, Anthropic, Gemini, DeepSeek, Qwen \u7b49)', req4: '3 \u5206\u949f\u5185\u5373\u53ef\u5c31\u7eea', @@ -1324,9 +1324,9 @@

wf4: 'Drug Target Analysis', wf5: 'Protein Structure Prediction', qs_title: 'Get Started in Minutes', - qs_desc: 'Clone the repo, install dependencies, configure your API keys, and start the development server. InnoClaw runs on any machine with Node.js 20+.', + qs_desc: 'Clone the repo, install dependencies, configure your API keys, and start the development server. InnoClaw runs on Node.js 24 LTS and also supports the latest Node.js 25 current release.', req_title: 'Requirements', - req1: 'Node.js 20+ required', + req1: 'Node.js 24+ required (25 supported)', req2: 'SQLite \u2014 zero-config database', req3: 'Supports 9 LLM providers (OpenAI, Anthropic, Gemini, DeepSeek, Qwen, etc.)', req4: 'Ready in under 3 minutes', @@ -1508,4 +1508,4 @@

}); - \ No newline at end of file + diff --git a/site/index.html b/site/index.html index f7e9c0f3..a0d3ac06 100644 --- a/site/index.html +++ b/site/index.html @@ -939,10 +939,10 @@

Get Started in Minutes

-

Clone the repo, install dependencies, configure your API keys, and start the development server. InnoClaw runs on any machine with Node.js 20+.

+

Clone the repo, install dependencies, configure your API keys, and start the development server. InnoClaw runs on Node.js 24 LTS and also supports the latest Node.js 25 current release.

Requirements

-
Node.js 20+ required
+
Node.js 24+ required (25 supported)
SQLite — zero-config database
Supports 9 LLM providers (OpenAI, Anthropic, Gemini, DeepSeek, Qwen, etc.)
Ready in under 3 minutes
@@ -1208,9 +1208,9 @@

wf4: '\u836f\u7269\u9776\u70b9\u5206\u6790', wf5: '\u86cb\u767d\u8d28\u7ed3\u6784\u9884\u6d4b', qs_title: '\u51e0\u5206\u949f\u5373\u53ef\u4e0a\u624b', - qs_desc: '\u514b\u9686\u4ed3\u5e93\u3001\u5b89\u88c5\u4f9d\u8d56\u3001\u914d\u7f6e API \u5bc6\u94a5\u5e76\u542f\u52a8\u5f00\u53d1\u670d\u52a1\u5668\u3002InnoClaw \u53ef\u8fd0\u884c\u5728\u4efb\u4f55\u5b89\u88c5\u4e86 Node.js 20+ \u7684\u673a\u5668\u4e0a\u3002', + qs_desc: '\u514b\u9686\u4ed3\u5e93\u3001\u5b89\u88c5\u4f9d\u8d56\u3001\u914d\u7f6e API \u5bc6\u94a5\u5e76\u542f\u52a8\u5f00\u53d1\u670d\u52a1\u5668\u3002InnoClaw \u63a8\u8350\u4f7f\u7528 Node.js 24 LTS\uff0c\u540c\u65f6\u4e5f\u652f\u6301\u6700\u65b0\u7684 Node.js 25 \u5f53\u524d\u7248\u672c\u3002', req_title: '\u7cfb\u7edf\u8981\u6c42', - req1: '\u9700\u8981 Node.js 20+', + req1: '\u9700\u8981 Node.js 24+\uff0825 \u4e5f\u53ef\u7528\uff09', req2: 'SQLite \u2014 \u96f6\u914d\u7f6e\u6570\u636e\u5e93', req3: '\u652f\u6301 9 \u5927 LLM \u63d0\u4f9b\u5546 (OpenAI, Anthropic, Gemini, DeepSeek, Qwen \u7b49)', req4: '3 \u5206\u949f\u5185\u5373\u53ef\u5c31\u7eea', @@ -1324,9 +1324,9 @@

wf4: 'Drug Target Analysis', wf5: 'Protein Structure Prediction', qs_title: 'Get Started in Minutes', - qs_desc: 'Clone the repo, install dependencies, configure your API keys, and start the development server. InnoClaw runs on any machine with Node.js 20+.', + qs_desc: 'Clone the repo, install dependencies, configure your API keys, and start the development server. InnoClaw runs on Node.js 24 LTS and also supports the latest Node.js 25 current release.', req_title: 'Requirements', - req1: 'Node.js 20+ required', + req1: 'Node.js 24+ required (25 supported)', req2: 'SQLite \u2014 zero-config database', req3: 'Supports 9 LLM providers (OpenAI, Anthropic, Gemini, DeepSeek, Qwen, etc.)', req4: 'Ready in under 3 minutes', @@ -1508,4 +1508,4 @@

}); - \ No newline at end of file + diff --git a/src/app/api/agent/route.ts b/src/app/api/agent/route.ts index 505cd366..e10da0b8 100644 --- a/src/app/api/agent/route.ts +++ b/src/app/api/agent/route.ts @@ -4,8 +4,7 @@ import { getConfiguredModelWithProvider, getModelFromOverride, isAIAvailable } f import { createAgentTools } from "@/lib/ai/agent-tools"; import { buildAgentSystemPrompt, buildAgentLongSystemPrompt, buildPlanSystemPrompt, buildAskSystemPrompt } from "@/lib/ai/prompts"; import { buildSkillSystemPrompt } from "@/lib/ai/skill-prompt"; -import { providerSupportsTools, PROVIDERS } from "@/lib/ai/models"; -import type { ProviderId } from "@/lib/ai/models"; +import { runtimeProviderSupportsTools } from "@/lib/ai/runtime-capabilities"; import { db } from "@/lib/db"; import { skills } from "@/lib/db/schema"; import { and, eq, or, isNull } from "drizzle-orm"; @@ -34,19 +33,6 @@ export async function POST(req: NextRequest) { { status: 400 } ); } - // If the provider is a known built-in provider, validate the model is allowed - const knownProviderIds = Object.keys(PROVIDERS) as ProviderId[]; - const matchedProvider = knownProviderIds.find((id) => id === llmProvider); - if (matchedProvider) { - const knownModels: string[] = PROVIDERS[matchedProvider].models.map((m) => m.id); - if (!knownModels.includes(llmModel)) { - return new Response( - `Invalid llmModel "${llmModel}" for provider "${llmProvider}". ` + - `Allowed models: ${knownModels.join(", ")}`, - { status: 400 } - ); - } - } } else if (llmProvider !== undefined || llmModel !== undefined) { return new Response( "Both llmProvider and llmModel must be provided together for model override", @@ -65,7 +51,7 @@ export async function POST(req: NextRequest) { ? getModelFromOverride(llmProvider, llmModel) : await getConfiguredModelWithProvider(); console.log(`[agent] provider=${providerId} model=${typeof model === 'string' ? model : model.modelId} override=${!!(llmProvider && llmModel)}`); - const useTools = providerSupportsTools(providerId); + const useTools = runtimeProviderSupportsTools(providerId); let systemPrompt: string; let tools; diff --git a/src/app/api/models/route.test.ts b/src/app/api/models/route.test.ts index 4b905168..e1f7917b 100644 --- a/src/app/api/models/route.test.ts +++ b/src/app/api/models/route.test.ts @@ -1,4 +1,7 @@ import { describe, it, expect, vi, beforeEach, afterEach } from "vitest"; +import fs from "fs"; +import os from "os"; +import path from "path"; /** * Unit tests for the /api/models route helper functions. 
@@ -20,15 +23,20 @@ let GET: (req: unknown) => Promise; describe("/api/models", () => { const originalEnv = { ...process.env }; const fetchSpy = vi.fn(); + let tmpDir: string; beforeEach(async () => { // Reset env process.env = { ...originalEnv }; + fetchSpy.mockReset(); + tmpDir = fs.mkdtempSync(path.join(os.tmpdir(), "innoclaw-models-route-")); + vi.spyOn(process, "cwd").mockReturnValue(tmpDir); // Mock global fetch used inside the route handler vi.stubGlobal("fetch", fetchSpy); // Dynamically import so env vars are read at import time + vi.resetModules(); const mod = await import("@/app/api/models/route"); GET = mod.GET as (req: unknown) => Promise; }); @@ -36,6 +44,9 @@ describe("/api/models", () => { afterEach(() => { vi.restoreAllMocks(); process.env = originalEnv; + if (tmpDir) { + fs.rmSync(tmpDir, { recursive: true, force: true }); + } }); it("returns 400 when OPENAI_API_KEY is missing (openai provider)", async () => { @@ -197,4 +208,54 @@ describe("/api/models", () => { }), ); }); + + it("aggregates models across per-model Qwen base URLs from .env.local", async () => { + fs.writeFileSync( + path.join(tmpDir, ".env.local"), + [ + "QWEN_API_KEY=none", + "QWEN_QWEN3_5_35B_BASE_URL=http://106.75.244.215:8000/v1", + "QWEN_QWEN3_5_122B_BASE_URL=http://106.75.244.215:8001/v1", + "", + ].join("\n"), + ); + + fetchSpy + .mockResolvedValueOnce({ + ok: true, + json: async () => ({ + data: [{ id: "qwen3.5-35b", owned_by: "qwen" }], + }), + }) + .mockResolvedValueOnce({ + ok: true, + json: async () => ({ + data: [{ id: "qwen3.5-122b", owned_by: "qwen" }], + }), + }); + + const req = new FakeNextRequest("http://localhost/api/models?provider=qwen"); + const res = await GET(req); + const body = await res.json(); + + expect(res.status).toBe(200); + expect(body.models).toEqual([ + { id: "qwen3.5-122b", name: "qwen3.5-122b" }, + { id: "qwen3.5-35b", name: "qwen3.5-35b" }, + ]); + const requestedUrls = fetchSpy.mock.calls.map(([url]) => url); + expect(requestedUrls).toEqual( + expect.arrayContaining([ + "http://106.75.244.215:8000/v1/models", + "http://106.75.244.215:8001/v1/models", + ]), + ); + for (const [, options] of fetchSpy.mock.calls) { + expect(options).toEqual( + expect.objectContaining({ + headers: { Authorization: "Bearer none" }, + }), + ); + } + }); }); diff --git a/src/app/api/models/route.ts b/src/app/api/models/route.ts index d6ff23cc..2ed78c60 100644 --- a/src/app/api/models/route.ts +++ b/src/app/api/models/route.ts @@ -1,4 +1,11 @@ import { NextRequest, NextResponse } from "next/server"; +import { PROVIDERS } from "@/lib/ai/models"; +import { + getCurrentEnv, + getDiscoveredPerModelBaseUrls, + getVendorBaseUrlEnvKey, + isPerModelBaseUrlProvider, +} from "@/lib/ai/provider-env"; /** * Fetch available models from the configured provider's API. 
@@ -18,8 +25,24 @@ export async function GET(request: NextRequest) { if (provider === "anthropic") { return await fetchAnthropicModels(); } - // Default: OpenAI-compatible - return await fetchOpenAIModels(); + if (provider === "openai") { + return await fetchOpenAICompatibleProviderModels("openai", { + defaultBaseUrl: "https://api.openai.com/v1", + requireApiKey: true, + }); + } + if (provider === "gemini") { + return await fetchOpenAICompatibleProviderModels("gemini", { + requireApiKey: true, + }); + } + if (provider in PROVIDERS && isPerModelBaseUrlProvider(provider)) { + return await fetchOpenAICompatiblePerModelProviderModels(provider); + } + return NextResponse.json( + { error: `Unsupported provider "${provider}"` }, + { status: 400 }, + ); } catch (error) { const message = error instanceof Error ? error.message : "Failed to fetch models"; @@ -31,38 +54,118 @@ export async function GET(request: NextRequest) { /* OpenAI-compatible */ /* ------------------------------------------------------------------ */ -async function fetchOpenAIModels() { - const apiKey = process.env.OPENAI_API_KEY; - if (!apiKey) { - return NextResponse.json( - { error: "OPENAI_API_KEY is not configured" }, - { status: 400 }, - ); - } - - const baseUrl = (process.env.OPENAI_BASE_URL || "https://api.openai.com/v1") - .replace(/\/+$/, ""); +function buildOpenAIModelsUrl(baseUrl: string) { + const trimmed = baseUrl.replace(/\/+$/, ""); + return trimmed.endsWith("/v1") + ? `${trimmed}/models` + : `${trimmed}/v1/models`; +} - // Ensure the URL ends with /models - const modelsUrl = baseUrl.endsWith("/v1") - ? `${baseUrl}/models` - : `${baseUrl}/v1/models`; +async function fetchModelsFromOpenAICompatibleBaseUrl( + baseUrl: string, + apiKey?: string, +) { + const modelsUrl = buildOpenAIModelsUrl(baseUrl); + const headers = apiKey ? { Authorization: `Bearer ${apiKey}` } : undefined; const res = await fetch(modelsUrl, { - headers: { Authorization: `Bearer ${apiKey}` }, + ...(headers ? { headers } : {}), signal: AbortSignal.timeout(15_000), }); if (!res.ok) { + throw new Error(`Provider returned ${res.status}`); + } + + const json = await res.json(); + return formatOpenAIModels(json); +} + +async function fetchOpenAICompatibleProviderModels( + providerId: "openai" | "gemini", + options: { + defaultBaseUrl?: string; + requireApiKey: boolean; + }, +) { + const env = getCurrentEnv(); + const apiKey = env[PROVIDERS[providerId].envKey]; + if (options.requireApiKey && !apiKey) { return NextResponse.json( - { error: `Provider returned ${res.status}` }, - { status: 502 }, + { error: `${PROVIDERS[providerId].envKey} is not configured` }, + { status: 400 }, ); } - const json = await res.json(); - const models = formatOpenAIModels(json); + const baseUrl = + env[getVendorBaseUrlEnvKey(providerId)] || options.defaultBaseUrl; + if (!baseUrl) { + return NextResponse.json( + { error: `${getVendorBaseUrlEnvKey(providerId)} is not configured` }, + { status: 400 }, + ); + } + + const models = await fetchModelsFromOpenAICompatibleBaseUrl(baseUrl, apiKey); + return NextResponse.json({ models }); +} + +async function fetchOpenAICompatiblePerModelProviderModels(providerId: string) { + const env = getCurrentEnv(); + const apiKey = env[PROVIDERS[providerId as keyof typeof PROVIDERS].envKey]; + const vendorBaseUrl = env[getVendorBaseUrlEnvKey(providerId)]?.trim(); + const discovered = getDiscoveredPerModelBaseUrls(providerId, env); + + const baseUrls = Array.from( + new Set( + [ + ...(vendorBaseUrl ? 
[vendorBaseUrl] : []), + ...discovered.map((entry) => entry.baseUrl), + ].filter(Boolean), + ), + ); + + if (baseUrls.length === 0) { + return NextResponse.json( + { + error: + `${getVendorBaseUrlEnvKey(providerId)} or ` + + `${providerId.toUpperCase()}_*_BASE_URL is not configured`, + }, + { status: 400 }, + ); + } + + const settled = await Promise.allSettled( + baseUrls.map((baseUrl) => + fetchModelsFromOpenAICompatibleBaseUrl(baseUrl, apiKey), + ), + ); + + const merged = new Map(); + const errors: string[] = []; + + for (const result of settled) { + if (result.status === "fulfilled") { + for (const model of result.value) { + merged.set(model.id, model); + } + continue; + } + errors.push( + result.reason instanceof Error + ? result.reason.message + : "Failed to fetch models", + ); + } + + if (merged.size === 0 && errors.length > 0) { + return NextResponse.json({ error: errors[0] }, { status: 502 }); + } + const models = Array.from(merged.values()).sort((a, b) => + a.id.localeCompare(b.id), + ); return NextResponse.json({ models }); } @@ -83,7 +186,8 @@ function formatOpenAIModels(json: { /* ------------------------------------------------------------------ */ async function fetchAnthropicModels() { - const apiKey = process.env.ANTHROPIC_API_KEY; + const env = getCurrentEnv(); + const apiKey = env.ANTHROPIC_API_KEY; if (!apiKey) { return NextResponse.json( { error: "ANTHROPIC_API_KEY is not configured" }, @@ -91,7 +195,7 @@ async function fetchAnthropicModels() { ); } - const rawBase = process.env.ANTHROPIC_BASE_URL; + const rawBase = env.ANTHROPIC_BASE_URL; let baseUrl = rawBase ? rawBase.replace(/\/+$/, "") : "https://api.anthropic.com"; diff --git a/src/app/api/paper-study/chat/route.ts b/src/app/api/paper-study/chat/route.ts index 85dae117..5e9d06fc 100644 --- a/src/app/api/paper-study/chat/route.ts +++ b/src/app/api/paper-study/chat/route.ts @@ -1,7 +1,8 @@ import { NextRequest } from "next/server"; import { streamText, convertToModelMessages, UIMessage } from "ai"; import { getConfiguredModelWithProvider, getModelFromOverride, isAIAvailable } from "@/lib/ai/provider"; -import { modelSupportsVision, providerSupportsTools } from "@/lib/ai/models"; +import { modelSupportsVision } from "@/lib/ai/models"; +import { runtimeProviderSupportsTools } from "@/lib/ai/runtime-capabilities"; import { buildPaperChatPrompt, buildPaperChatWithNotesPrompt } from "@/lib/ai/prompts"; import { createPaperChatTools } from "@/lib/ai/tools/paper-chat-tools"; import { buildPaperChatContextMessage, buildPaperModelContext } from "../paper-model-context"; @@ -74,7 +75,7 @@ export async function POST(req: NextRequest) { source: article.source || "", }); - const useTools = providerSupportsTools(providerId); + const useTools = runtimeProviderSupportsTools(providerId); const result = streamText({ model, diff --git a/src/app/api/settings/route.ts b/src/app/api/settings/route.ts index 1ce08071..d0e7e35c 100644 --- a/src/app/api/settings/route.ts +++ b/src/app/api/settings/route.ts @@ -7,6 +7,7 @@ import path from "path"; import { getWorkspaceRoots } from "@/lib/files/filesystem"; import { updateEnvLocal } from "@/lib/env-file"; import { PROVIDERS } from "@/lib/ai/models"; +import { getCurrentEnv } from "@/lib/ai/provider-env"; import { getK8sConfig, SETTINGS_TO_ENV, invalidateK8sConfigCache } from "@/lib/cluster/config"; /** @@ -22,11 +23,12 @@ function baseUrlEnvKey(providerId: string): string { * providerBaseUrls: { [providerId]: string } — the base URL env var value (or "") */ function getProviderEnvInfo() { + 
const env = getCurrentEnv(); const providerKeys: Record = {}; const providerBaseUrls: Record = {}; for (const p of Object.values(PROVIDERS)) { - providerKeys[p.id] = !!process.env[p.envKey]; - providerBaseUrls[p.id] = process.env[baseUrlEnvKey(p.id)] || ""; + providerKeys[p.id] = !!env[p.envKey]; + providerBaseUrls[p.id] = env[baseUrlEnvKey(p.id)] || ""; } return { providerKeys, providerBaseUrls }; } @@ -40,7 +42,8 @@ export async function GET() { settingsMap[s.key] = s.value; } - const hasHfToken = !!settingsMap["hf_token"] || !!process.env.HF_TOKEN; + const env = getCurrentEnv(); + const hasHfToken = !!settingsMap["hf_token"] || !!env.HF_TOKEN; const { providerKeys, providerBaseUrls } = getProviderEnvInfo(); return NextResponse.json({ @@ -53,9 +56,9 @@ export async function GET() { hasOpenAIKey: providerKeys["openai"] ?? false, hasAnthropicKey: providerKeys["anthropic"] ?? false, hasGeminiKey: providerKeys["gemini"] ?? false, - hasGithubToken: !!process.env.GITHUB_TOKEN, + hasGithubToken: !!env.GITHUB_TOKEN, hasHfToken, - hfTokenSource: settingsMap["hf_token"] ? "db" : (process.env.HF_TOKEN ? "env" : null), + hfTokenSource: settingsMap["hf_token"] ? "db" : (env.HF_TOKEN ? "env" : null), hasAIKey: Object.values(providerKeys).some(Boolean), configuredProviders: Object.entries(providerKeys) .filter(([, has]) => has) @@ -63,17 +66,17 @@ export async function GET() { providerKeys, providerBaseUrls, feishuBotEnabled: - process.env.FEISHU_BOT_ENABLED === "true" && - !!process.env.FEISHU_APP_ID && - !!process.env.FEISHU_APP_SECRET && - !!process.env.FEISHU_VERIFICATION_TOKEN, + env.FEISHU_BOT_ENABLED === "true" && + !!env.FEISHU_APP_ID && + !!env.FEISHU_APP_SECRET && + !!env.FEISHU_VERIFICATION_TOKEN, wechatBotEnabled: - process.env.WECHAT_BOT_ENABLED === "true" && - !!process.env.WECHAT_CORP_ID && - !!process.env.WECHAT_CORP_SECRET && - !!process.env.WECHAT_TOKEN && - !!process.env.WECHAT_ENCODING_AES_KEY && - !!process.env.WECHAT_AGENT_ID, + env.WECHAT_BOT_ENABLED === "true" && + !!env.WECHAT_CORP_ID && + !!env.WECHAT_CORP_SECRET && + !!env.WECHAT_TOKEN && + !!env.WECHAT_ENCODING_AES_KEY && + !!env.WECHAT_AGENT_ID, styleTheme: settingsMap["style_theme"] || "default", k8sConfig: await getK8sConfig(), }); diff --git a/src/components/agent/agent-panel.tsx b/src/components/agent/agent-panel.tsx index 9d9afbb7..41ab1da1 100644 --- a/src/components/agent/agent-panel.tsx +++ b/src/components/agent/agent-panel.tsx @@ -50,6 +50,17 @@ import { swrFetcher as fetcher } from "@/lib/fetcher"; import { AgentMessage } from "./agent-message"; type AgentMode = "long-agent" | "agent" | "plan" | "ask"; +type ModelSelection = { provider: string; model: string }; +type ModelOption = { id: string; name: string }; +type ProviderOption = { + id: string; + name: string; + models: ModelOption[]; +}; + +function normalizeModelKey(modelId: string): string { + return modelId.replace(/[^a-zA-Z0-9]/g, "").toLowerCase(); +} /** Pixel threshold for considering the user "at the bottom" of the scroll area */ const BOTTOM_THRESHOLD_PX = 80; @@ -84,6 +95,30 @@ const MODE_PLACEHOLDER_KEYS: Record(null); const [input, setInput] = useState(""); const [mode, setMode] = useState("agent"); - const [selectedProvider, setSelectedProvider] = useState(null); - const [selectedModel, setSelectedModel] = useState(null); + const [userSelection, setUserSelection] = useState( + () => readStoredModelSelection("innoclaw-agent-model-selection") + ); const MODEL_SELECTION_STORAGE_KEY = "innoclaw-agent-model-selection"; @@ -253,71 +289,145 @@ export 
function AgentPanel({ const { data: settings } = useSWR("/api/settings", fetcher); const aiEnabled = settings?.hasAIKey ?? false; - // Initialize model selection from localStorage, then fall back to global settings - useEffect(() => { - if (selectedProvider !== null) return; // already initialized + const configuredProviderIds = useMemo(() => { + const configured = settings?.configuredProviders as string[] | undefined; + if (!configured) return []; + return configured.filter((id): id is ProviderId => Boolean(PROVIDERS[id as ProviderId])); + }, [settings?.configuredProviders]); - // Try to read a stored selection from localStorage - let storedSelection: { provider: string; model: string } | null = null; - try { - if (typeof window !== "undefined" && window.localStorage) { - const stored = window.localStorage.getItem(MODEL_SELECTION_STORAGE_KEY); - if (stored) { + const { data: discoveredModelsByProvider, mutate: refreshDiscoveredModels } = useSWR< + Record + >( + configuredProviderIds.length > 0 + ? (["agent-model-options", ...configuredProviderIds] as const) + : null, + async ([, ...providerIds]) => { + const entries = await Promise.all( + providerIds.map(async (providerId) => { try { - storedSelection = JSON.parse(stored); + const response = await fetch(`/api/models?provider=${encodeURIComponent(providerId)}`); + const data = await response.json().catch(() => ({})); + return [providerId, Array.isArray(data.models) ? data.models : []] as const; } catch { - // Ignore parse errors and treat as no stored selection + return [providerId, []] as const; } - } - } - } catch { - // Ignore storage access errors and fall back to settings + }), + ); + return Object.fromEntries(entries); + }, + ); + + const availableProviders = useMemo(() => { + return configuredProviderIds + .map((id) => { + const provider = PROVIDERS[id]; + if (!provider) return null; + + const knownIds = new Set(provider.models.map((model) => model.id)); + const extraModels = (discoveredModelsByProvider?.[id] ?? []).filter( + (model) => !knownIds.has(model.id), + ); + + return { + id: provider.id, + name: provider.name, + models: [...provider.models, ...extraModels], + }; + }) + .filter((provider): provider is ProviderOption => provider !== null); + }, [configuredProviderIds, discoveredModelsByProvider]); + + const settingsFallback = useMemo(() => { + if (!settings?.llmProvider || !settings?.llmModel) return null; + if ( + configuredProviderIds.length > 0 && + !configuredProviderIds.includes(settings.llmProvider as ProviderId) + ) { + return null; + } + return { + provider: settings.llmProvider as string, + model: settings.llmModel as string, + }; + }, [configuredProviderIds, settings?.llmModel, settings?.llmProvider]); + + const canonicalSelection = useMemo(() => { + const selection = userSelection ?? settingsFallback; + if (!selection) return null; + + const provider = availableProviders.find((entry) => entry.id === selection.provider); + if (!provider) { + return selection; + } + + const matchedModel = provider.models.find((entry) => entry.id === selection.model) + ?? 
provider.models.find( + (entry) => normalizeModelKey(entry.id) === normalizeModelKey(selection.model), + ); + + if (!matchedModel) { + return selection; + } + + if (matchedModel.id === selection.model) { + return selection; } - const configuredProviders = settings?.configuredProviders as string[] | undefined; - - const isValidSelection = (selection: { provider: string; model: string } | null) => { - if (!selection) return false; - const { provider, model } = selection; - if (!provider || !model) return false; - const providerDef = PROVIDERS[provider as ProviderId]; - if (!providerDef) return false; - if (configuredProviders && !configuredProviders.includes(provider)) return false; - const hasModel = providerDef.models.some((m) => m.id === model); - return hasModel; + return { + provider: selection.provider, + model: matchedModel.id, }; + }, [availableProviders, settingsFallback, userSelection]); + + useEffect(() => { + if (!userSelection) return; - if (isValidSelection(storedSelection)) { - setSelectedProvider(storedSelection!.provider); - setSelectedModel(storedSelection!.model); + const providerStillConfigured = + configuredProviderIds.length === 0 || + configuredProviderIds.includes(userSelection.provider as ProviderId); + const providerExists = Boolean(PROVIDERS[userSelection.provider as ProviderId]); + + if (providerExists && providerStillConfigured) { return; - } else if (storedSelection) { - // Clear invalid stored value - try { - if (typeof window !== "undefined" && window.localStorage) { - window.localStorage.removeItem(MODEL_SELECTION_STORAGE_KEY); - } - } catch { - // Ignore storage access errors + } + + setUserSelection(null); + try { + if (typeof window !== "undefined" && window.localStorage) { + window.localStorage.removeItem(MODEL_SELECTION_STORAGE_KEY); } + } catch { + // Ignore storage access errors. } + }, [configuredProviderIds, userSelection]); - // Fall back to global settings if available and valid - if (settings?.llmProvider && settings?.llmModel) { - const fallbackSelection = { - provider: settings.llmProvider as string, - model: settings.llmModel as string, - }; - if (isValidSelection(fallbackSelection)) { - setSelectedProvider(fallbackSelection.provider); - setSelectedModel(fallbackSelection.model); + useEffect(() => { + if (!userSelection || !canonicalSelection) return; + if ( + canonicalSelection.provider === userSelection.provider && + canonicalSelection.model === userSelection.model + ) { + return; + } + + setUserSelection(canonicalSelection); + try { + if (typeof window !== "undefined" && window.localStorage) { + window.localStorage.setItem( + MODEL_SELECTION_STORAGE_KEY, + JSON.stringify(canonicalSelection), + ); } + } catch { + // Ignore storage access errors. } - }, [settings?.llmProvider, settings?.llmModel, settings?.configuredProviders, selectedProvider]); + }, [MODEL_SELECTION_STORAGE_KEY, canonicalSelection, userSelection]); + + const selectedProvider = canonicalSelection?.provider ?? null; + const selectedModel = canonicalSelection?.model ?? 
null; const handleModelChange = useCallback((providerId: string, modelId: string) => { - setSelectedProvider(providerId); - setSelectedModel(modelId); + setUserSelection({ provider: providerId, model: modelId }); try { if (typeof window !== "undefined" && window.localStorage) { window.localStorage.setItem( @@ -332,24 +442,16 @@ export function AgentPanel({ const modelDisplayName = useMemo(() => { if (!selectedProvider || !selectedModel) return t("modelLabel"); - const provider = PROVIDERS[selectedProvider as ProviderId]; - const model = provider?.models.find((m) => m.id === selectedModel); + const provider = availableProviders.find((entry) => entry.id === selectedProvider); + const model = provider?.models.find((entry) => entry.id === selectedModel); return model?.name ?? selectedModel; - }, [selectedProvider, selectedModel, t]); + }, [availableProviders, selectedProvider, selectedModel, t]); const selectedSupportsVision = useMemo(() => { if (!selectedProvider || !selectedModel) return null; return modelSupportsVision(selectedProvider, selectedModel); }, [selectedProvider, selectedModel]); - const availableProviders = useMemo(() => { - const configured = settings?.configuredProviders as string[] | undefined; - if (!configured) return []; - return configured - .map((id: string) => PROVIDERS[id as ProviderId]) - .filter(Boolean); - }, [settings?.configuredProviders]); - // Mutable body object — allows injecting skillId/paramValues before each send const agentBody = useMemo( () => @@ -1193,7 +1295,13 @@ export function AgentPanel({
{/* Model selector */} - + { + if (open) { + void refreshDiscoveredModels(); + } + }} + >
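
Note for reviewers: `src/lib/ai/provider-env.ts` is new in this series (80 lines in the diffstat) but its hunks are not shown above. The sketch below is a minimal reading of the per-model base-URL discovery it provides. The export names match the imports in `src/app/api/models/route.ts`; the key-matching rules are assumptions inferred from the `QWEN_*_BASE_URL` wording in the route's error message and the Qwen fixture in the new route test, not the verified implementation.

```typescript
// Sketch only — the real implementation lives in src/lib/ai/provider-env.ts,
// which is not included in this excerpt. Matching rules here are assumed.

export type DiscoveredPerModelBaseUrl = { envKey: string; baseUrl: string };

// e.g. "qwen" -> "QWEN_BASE_URL" (the vendor-wide override key).
export function getVendorBaseUrlEnvKey(providerId: string): string {
  return `${providerId.toUpperCase()}_BASE_URL`;
}

// Collect per-model overrides such as QWEN_QWEN3_5_35B_BASE_URL, skipping
// the vendor-wide QWEN_BASE_URL key itself.
export function getDiscoveredPerModelBaseUrls(
  providerId: string,
  env: Record<string, string | undefined>,
): DiscoveredPerModelBaseUrl[] {
  const prefix = `${providerId.toUpperCase()}_`;
  const vendorKey = getVendorBaseUrlEnvKey(providerId);
  const discovered: DiscoveredPerModelBaseUrl[] = [];
  for (const [envKey, value] of Object.entries(env)) {
    if (envKey === vendorKey || !envKey.startsWith(prefix) || !envKey.endsWith("_BASE_URL")) {
      continue;
    }
    const baseUrl = value?.trim();
    if (baseUrl) {
      discovered.push({ envKey, baseUrl });
    }
  }
  return discovered;
}
```

On this reading, the `.env.local` fixture in the new test (`QWEN_QWEN3_5_35B_BASE_URL=http://106.75.244.215:8000/v1` and `QWEN_QWEN3_5_122B_BASE_URL=http://106.75.244.215:8001/v1`) discovers two endpoints; the route then queries each base URL's `/models` path (appending `/models` when the base already ends in `/v1`, otherwise `/v1/models`) and merges the responses by model id, which is exactly the aggregation the test asserts.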