diff --git a/crates/forge_app/src/dto/openai/transformers/pipeline.rs b/crates/forge_app/src/dto/openai/transformers/pipeline.rs index 7b1c309c0e..ad1693cbaa 100644 --- a/crates/forge_app/src/dto/openai/transformers/pipeline.rs +++ b/crates/forge_app/src/dto/openai/transformers/pipeline.rs @@ -72,7 +72,9 @@ impl Transformer for ProviderPipeline<'_> { let strict_schema = EnforceStrictToolSchema .pipe(EnforceStrictResponseFormatSchema) .when(move |_| { - provider.id == ProviderId::FIREWORKS_AI || provider.id == ProviderId::OPENCODE_ZEN + provider.id == ProviderId::FIREWORKS_AI + || provider.id == ProviderId::OPENCODE_ZEN + || provider.id == ProviderId::OPENCODE_GO }); let mut combined = zai_thinking diff --git a/crates/forge_domain/src/provider.rs b/crates/forge_domain/src/provider.rs index a65b43e416..371b74b31e 100644 --- a/crates/forge_domain/src/provider.rs +++ b/crates/forge_domain/src/provider.rs @@ -70,6 +70,7 @@ impl ProviderId { pub const MINIMAX: ProviderId = ProviderId(Cow::Borrowed("minimax")); pub const CODEX: ProviderId = ProviderId(Cow::Borrowed("codex")); pub const OPENCODE_ZEN: ProviderId = ProviderId(Cow::Borrowed("opencode_zen")); + pub const OPENCODE_GO: ProviderId = ProviderId(Cow::Borrowed("opencode_go")); pub const FIREWORKS_AI: ProviderId = ProviderId(Cow::Borrowed("fireworks-ai")); pub const NOVITA: ProviderId = ProviderId(Cow::Borrowed("novita")); pub const GOOGLE_AI_STUDIO: ProviderId = ProviderId(Cow::Borrowed("google_ai_studio")); @@ -103,6 +104,7 @@ impl ProviderId { ProviderId::MINIMAX, ProviderId::CODEX, ProviderId::OPENCODE_ZEN, + ProviderId::OPENCODE_GO, ProviderId::FIREWORKS_AI, ProviderId::NOVITA, ProviderId::GOOGLE_AI_STUDIO, @@ -129,6 +131,8 @@ impl ProviderId { "io_intelligence" => "IOIntelligence".to_string(), "minimax" => "MiniMax".to_string(), "codex" => "Codex".to_string(), + "opencode_zen" => "OpenCode Zen".to_string(), + "opencode_go" => "OpenCode Go".to_string(), "fireworks-ai" => "FireworksAI".to_string(), "novita" => 
"Novita".to_string(), "google_ai_studio" => "GoogleAIStudio".to_string(), @@ -174,6 +178,7 @@ impl std::str::FromStr for ProviderId { "io_intelligence" => ProviderId::IO_INTELLIGENCE, "minimax" => ProviderId::MINIMAX, "codex" => ProviderId::CODEX, + "opencode_go" => ProviderId::OPENCODE_GO, "fireworks-ai" => ProviderId::FIREWORKS_AI, "novita" => ProviderId::NOVITA, "google_ai_studio" => ProviderId::GOOGLE_AI_STUDIO, @@ -548,6 +553,8 @@ mod tests { assert_eq!(ProviderId::IO_INTELLIGENCE.to_string(), "IOIntelligence"); assert_eq!(ProviderId::CODEX.to_string(), "Codex"); assert_eq!(ProviderId::FIREWORKS_AI.to_string(), "FireworksAI"); + assert_eq!(ProviderId::OPENCODE_ZEN.to_string(), "OpenCode Zen"); + assert_eq!(ProviderId::OPENCODE_GO.to_string(), "OpenCode Go"); assert_eq!(ProviderId::GOOGLE_AI_STUDIO.to_string(), "GoogleAIStudio"); } @@ -565,12 +572,20 @@ mod tests { assert_eq!(actual, expected); } + #[test] + fn test_opencode_go_from_str() { + let actual = ProviderId::from_str("opencode_go").unwrap(); + let expected = ProviderId::OPENCODE_GO; + assert_eq!(actual, expected); + } + #[test] fn test_codex_in_built_in_providers() { let built_in = ProviderId::built_in_providers(); assert!(built_in.contains(&ProviderId::CODEX)); assert!(built_in.contains(&ProviderId::OPENAI_RESPONSES_COMPATIBLE)); assert!(built_in.contains(&ProviderId::FIREWORKS_AI)); + assert!(built_in.contains(&ProviderId::OPENCODE_GO)); assert!(built_in.contains(&ProviderId::GOOGLE_AI_STUDIO)); } diff --git a/crates/forge_repo/src/provider/chat.rs b/crates/forge_repo/src/provider/chat.rs index 28e208203f..0304273b39 100644 --- a/crates/forge_repo/src/provider/chat.rs +++ b/crates/forge_repo/src/provider/chat.rs @@ -14,6 +14,7 @@ use crate::provider::bedrock::BedrockResponseRepository; use crate::provider::google::GoogleResponseRepository; use crate::provider::openai::OpenAIResponseRepository; use crate::provider::openai_responses::OpenAIResponsesResponseRepository; +use 
crate::provider::opencode_go::OpenCodeGoResponseRepository; use crate::provider::opencode_zen::OpenCodeZenResponseRepository; /// Repository responsible for routing chat requests to the appropriate provider @@ -46,6 +47,8 @@ impl ForgeChatRepository { GoogleResponseRepository::new(infra.clone()).retry_config(retry_config.clone()); let opencode_zen_repo = OpenCodeZenResponseRepository::new(infra.clone()).retry_config(retry_config.clone()); + let opencode_go_repo = + OpenCodeGoResponseRepository::new(infra.clone()).retry_config(retry_config.clone()); let model_cache = Arc::new(CacacheStorage::new( env.cache_dir().join("model_cache"), @@ -60,6 +63,7 @@ impl ForgeChatRepository { bedrock_repo, google_repo, opencode_zen_repo, + opencode_go_repo, }), model_cache, bg_refresh: BgRefresh::default(), @@ -130,6 +134,7 @@ struct ProviderRouter { bedrock_repo: BedrockResponseRepository, google_repo: GoogleResponseRepository, opencode_zen_repo: OpenCodeZenResponseRepository, + opencode_go_repo: OpenCodeGoResponseRepository, } impl ProviderRouter { @@ -151,6 +156,10 @@ impl ProviderRouter { } else if provider.id == ProviderId::CODEX { // All Codex provider models use the Responses API self.codex_repo.chat(model_id, context, provider).await + } else if provider.id == ProviderId::OPENCODE_GO { + self.opencode_go_repo + .chat(model_id, context, provider) + .await } else { self.openai_repo.chat(model_id, context, provider).await } @@ -181,7 +190,13 @@ impl ProviderRouter { async fn models(&self, provider: Provider) -> anyhow::Result<Vec<Model>> { match provider.response { - Some(ProviderResponse::OpenAI) => self.openai_repo.models(provider).await, + Some(ProviderResponse::OpenAI) => { + if provider.id == ProviderId::OPENCODE_GO { + self.opencode_go_repo.models(provider).await + } else { + self.openai_repo.models(provider).await + } + } Some(ProviderResponse::OpenAIResponses) => self.codex_repo.models(provider).await, Some(ProviderResponse::Anthropic) =>
self.anthropic_repo.models(provider).await, Some(ProviderResponse::Bedrock) => self.bedrock_repo.models(provider).await, diff --git a/crates/forge_repo/src/provider/mod.rs b/crates/forge_repo/src/provider/mod.rs index 411bfd3079..9069b662a6 100644 --- a/crates/forge_repo/src/provider/mod.rs +++ b/crates/forge_repo/src/provider/mod.rs @@ -8,6 +8,7 @@ mod google; mod mock_server; mod openai; mod openai_responses; +mod opencode_go; mod opencode_zen; mod provider_repo; mod retry; diff --git a/crates/forge_repo/src/provider/opencode_go.rs b/crates/forge_repo/src/provider/opencode_go.rs new file mode 100644 index 0000000000..80308298c8 --- /dev/null +++ b/crates/forge_repo/src/provider/opencode_go.rs @@ -0,0 +1,97 @@ +use std::sync::Arc; + +use anyhow::Result; +use derive_setters::Setters; +use forge_app::HttpInfra; +use forge_app::domain::{ + ChatCompletionMessage, Context as ChatContext, Model, ModelId, Provider, ProviderResponse, + ResultStream, +}; +use forge_config::RetryConfig; +use forge_domain::ChatRepository; +use url::Url; + +use crate::provider::openai::OpenAIResponseRepository; + +#[derive(Setters)] +#[setters(strip_option, into)] +pub struct OpenCodeGoResponseRepository { + openai_repo: OpenAIResponseRepository, + retry_config: Arc<RetryConfig>, +} + +impl OpenCodeGoResponseRepository { + pub fn new(infra: Arc) -> Self { + Self { + openai_repo: OpenAIResponseRepository::new(infra), + retry_config: Arc::new(RetryConfig::default()), + } + } + + fn build_provider(&self, provider: &Provider) -> Provider { + let mut new_provider = provider.clone(); + + new_provider.url = Url::parse("https://opencode.ai/zen/go/v1/chat/completions").unwrap(); + new_provider.response = Some(ProviderResponse::OpenAI); + + new_provider + } +} + +impl OpenCodeGoResponseRepository { + pub async fn chat( + &self, + model_id: &ModelId, + context: ChatContext, + provider: Provider, + ) -> ResultStream<ChatCompletionMessage, anyhow::Error> { + let adapted_provider = self.build_provider(&provider); + + self.openai_repo + .chat(model_id,
context, adapted_provider) + .await + } + + pub async fn models(&self, provider: Provider) -> Result<Vec<Model>> { + if let Some(models) = provider.models() { + match models { + forge_domain::ModelSource::Hardcoded(models) => Ok(models.clone()), + forge_domain::ModelSource::Url(_) => Ok(vec![]), + } + } else { + Ok(vec![]) + } + } +} + +#[cfg(test)] +mod tests { + use std::str::FromStr; + + use forge_app::domain::ProviderResponse; + use forge_domain::ProviderId; + use pretty_assertions::assert_eq; + use url::Url; + + #[test] + fn test_opencode_go_provider_url() { + let url = Url::parse("https://opencode.ai/zen/go/v1/chat/completions").unwrap(); + assert_eq!( + url.as_str(), + "https://opencode.ai/zen/go/v1/chat/completions" + ); + } + + #[test] + fn test_opencode_go_provider_id_from_str() { + let actual = ProviderId::from_str("opencode_go").unwrap(); + let expected = ProviderId::OPENCODE_GO; + assert_eq!(actual, expected); + } + + #[test] + fn test_opencode_go_response_type() { + let response = ProviderResponse::OpenAI; + assert_eq!(format!("{:?}", response), "OpenAI"); + } +} diff --git a/crates/forge_repo/src/provider/provider.json b/crates/forge_repo/src/provider/provider.json index 73f59a6d65..cd5aa5623d 100644 --- a/crates/forge_repo/src/provider/provider.json +++ b/crates/forge_repo/src/provider/provider.json @@ -2506,6 +2506,76 @@ ], "auth_methods": ["api_key"] }, + { + "id": "opencode_go", + "api_key_vars": "OPENCODE_API_KEY", + "url_param_vars": [], + "response_type": "OpenAI", + "url": "https://opencode.ai/zen/go/v1/chat/completions", + "models": [ + { + "id": "glm-5", + "name": "GLM 5", + "description": "Zhipu AI's flagship model with 204K context, reasoning, and tool calling capabilities", + "context_length": 204800, + "tools_supported": true, + "supports_parallel_tool_calls": true, + "supports_reasoning": true, + "input_modalities": ["text"] + }, + { + "id": "kimi-k2.5", + "name": "Kimi K2.5", + "description": "Moonshot AI's flagship model with 262K context,
vision, and reasoning capabilities", + "context_length": 262144, + "tools_supported": true, + "supports_parallel_tool_calls": true, + "supports_reasoning": true, + "input_modalities": ["text", "image"] + }, + { + "id": "mimo-v2-pro", + "name": "MiMo V2 Pro", + "description": "Xiaomi's flagship foundation model with 1M context, reasoning, and tool calling capabilities", + "context_length": 1000000, + "tools_supported": true, + "supports_parallel_tool_calls": true, + "supports_reasoning": true, + "input_modalities": ["text"] + }, + { + "id": "mimo-v2-omni", + "name": "MiMo V2 Omni", + "description": "Xiaomi's omni-modal model that natively processes image, video, and audio inputs", + "context_length": 262100, + "tools_supported": true, + "supports_parallel_tool_calls": true, + "supports_reasoning": true, + "input_modalities": ["text", "image"] + }, + { + "id": "minimax-m2.7", + "name": "MiniMax M2.7", + "description": "MiniMax's latest model with enhanced reasoning and 204K context", + "context_length": 204800, + "tools_supported": true, + "supports_parallel_tool_calls": true, + "supports_reasoning": true, + "input_modalities": ["text"] + }, + { + "id": "minimax-m2.5", + "name": "MiniMax M2.5", + "description": "MiniMax's model with 204K context and reasoning capabilities", + "context_length": 204800, + "tools_supported": true, + "supports_parallel_tool_calls": true, + "supports_reasoning": true, + "input_modalities": ["text"] + } + ], + "auth_methods": ["api_key"] + }, { "id": "alibaba_coding", "provider_type": "llm",