Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 3 additions & 1 deletion crates/forge_app/src/dto/openai/transformers/pipeline.rs
Original file line number Diff line number Diff line change
Expand Up @@ -72,7 +72,9 @@ impl Transformer for ProviderPipeline<'_> {
let strict_schema = EnforceStrictToolSchema
.pipe(EnforceStrictResponseFormatSchema)
.when(move |_| {
provider.id == ProviderId::FIREWORKS_AI || provider.id == ProviderId::OPENCODE_ZEN
provider.id == ProviderId::FIREWORKS_AI
|| provider.id == ProviderId::OPENCODE_ZEN
|| provider.id == ProviderId::OPENCODE_GO
});

let mut combined = zai_thinking
Expand Down
15 changes: 15 additions & 0 deletions crates/forge_domain/src/provider.rs
Original file line number Diff line number Diff line change
Expand Up @@ -70,6 +70,7 @@ impl ProviderId {
pub const MINIMAX: ProviderId = ProviderId(Cow::Borrowed("minimax"));
pub const CODEX: ProviderId = ProviderId(Cow::Borrowed("codex"));
pub const OPENCODE_ZEN: ProviderId = ProviderId(Cow::Borrowed("opencode_zen"));
pub const OPENCODE_GO: ProviderId = ProviderId(Cow::Borrowed("opencode_go"));
pub const FIREWORKS_AI: ProviderId = ProviderId(Cow::Borrowed("fireworks-ai"));
pub const NOVITA: ProviderId = ProviderId(Cow::Borrowed("novita"));

Expand Down Expand Up @@ -102,6 +103,7 @@ impl ProviderId {
ProviderId::MINIMAX,
ProviderId::CODEX,
ProviderId::OPENCODE_ZEN,
ProviderId::OPENCODE_GO,
ProviderId::FIREWORKS_AI,
ProviderId::NOVITA,
]
Expand All @@ -127,6 +129,8 @@ impl ProviderId {
"io_intelligence" => "IOIntelligence".to_string(),
"minimax" => "MiniMax".to_string(),
"codex" => "Codex".to_string(),
"opencode_zen" => "OpenCode Zen".to_string(),
"opencode_go" => "OpenCode Go".to_string(),
"fireworks-ai" => "FireworksAI".to_string(),
"novita" => "Novita".to_string(),
_ => {
Expand Down Expand Up @@ -171,6 +175,7 @@ impl std::str::FromStr for ProviderId {
"io_intelligence" => ProviderId::IO_INTELLIGENCE,
"minimax" => ProviderId::MINIMAX,
"codex" => ProviderId::CODEX,
"opencode_go" => ProviderId::OPENCODE_GO,
"fireworks-ai" => ProviderId::FIREWORKS_AI,
"novita" => ProviderId::NOVITA,
// For custom providers, use Cow::Owned to avoid memory leaks
Expand Down Expand Up @@ -544,6 +549,8 @@ mod tests {
assert_eq!(ProviderId::IO_INTELLIGENCE.to_string(), "IOIntelligence");
assert_eq!(ProviderId::CODEX.to_string(), "Codex");
assert_eq!(ProviderId::FIREWORKS_AI.to_string(), "FireworksAI");
assert_eq!(ProviderId::OPENCODE_ZEN.to_string(), "OpenCode Zen");
assert_eq!(ProviderId::OPENCODE_GO.to_string(), "OpenCode Go");
}

#[test]
Expand All @@ -560,12 +567,20 @@ mod tests {
assert_eq!(actual, expected);
}

#[test]
fn test_opencode_go_from_str() {
    // Parsing the canonical id string must yield the borrowed constant.
    let parsed = ProviderId::from_str("opencode_go").unwrap();
    assert_eq!(parsed, ProviderId::OPENCODE_GO);
}

#[test]
fn test_codex_in_built_in_providers() {
    // Every provider listed here must be registered as built-in.
    let built_in = ProviderId::built_in_providers();
    for expected in [
        ProviderId::CODEX,
        ProviderId::OPENAI_RESPONSES_COMPATIBLE,
        ProviderId::FIREWORKS_AI,
        ProviderId::OPENCODE_GO,
    ] {
        assert!(built_in.contains(&expected));
    }
}

#[test]
Expand Down
17 changes: 16 additions & 1 deletion crates/forge_repo/src/provider/chat.rs
Original file line number Diff line number Diff line change
Expand Up @@ -14,6 +14,7 @@ use crate::provider::bedrock::BedrockResponseRepository;
use crate::provider::google::GoogleResponseRepository;
use crate::provider::openai::OpenAIResponseRepository;
use crate::provider::openai_responses::OpenAIResponsesResponseRepository;
use crate::provider::opencode_go::OpenCodeGoResponseRepository;
use crate::provider::opencode_zen::OpenCodeZenResponseRepository;

/// Repository responsible for routing chat requests to the appropriate provider
Expand Down Expand Up @@ -46,6 +47,8 @@ impl<F: EnvironmentInfra + HttpInfra> ForgeChatRepository<F> {
GoogleResponseRepository::new(infra.clone()).retry_config(retry_config.clone());
let opencode_zen_repo =
OpenCodeZenResponseRepository::new(infra.clone()).retry_config(retry_config.clone());
let opencode_go_repo =
OpenCodeGoResponseRepository::new(infra.clone()).retry_config(retry_config.clone());

let model_cache = Arc::new(CacacheStorage::new(
env.cache_dir().join("model_cache"),
Expand All @@ -60,6 +63,7 @@ impl<F: EnvironmentInfra + HttpInfra> ForgeChatRepository<F> {
bedrock_repo,
google_repo,
opencode_zen_repo,
opencode_go_repo,
}),
model_cache,
bg_refresh: BgRefresh::default(),
Expand Down Expand Up @@ -130,6 +134,7 @@ struct ProviderRouter<F> {
bedrock_repo: BedrockResponseRepository,
google_repo: GoogleResponseRepository<F>,
opencode_zen_repo: OpenCodeZenResponseRepository<F>,
opencode_go_repo: OpenCodeGoResponseRepository<F>,
}

impl<F: HttpInfra + Sync> ProviderRouter<F> {
Expand All @@ -151,6 +156,10 @@ impl<F: HttpInfra + Sync> ProviderRouter<F> {
} else if provider.id == ProviderId::CODEX {
// All Codex provider models use the Responses API
self.codex_repo.chat(model_id, context, provider).await
} else if provider.id == ProviderId::OPENCODE_GO {
self.opencode_go_repo
.chat(model_id, context, provider)
.await
} else {
self.openai_repo.chat(model_id, context, provider).await
}
Expand Down Expand Up @@ -181,7 +190,13 @@ impl<F: HttpInfra + Sync> ProviderRouter<F> {

async fn models(&self, provider: Provider<Url>) -> anyhow::Result<Vec<Model>> {
match provider.response {
Some(ProviderResponse::OpenAI) => self.openai_repo.models(provider).await,
Some(ProviderResponse::OpenAI) => {
if provider.id == ProviderId::OPENCODE_GO {
self.opencode_go_repo.models(provider).await
} else {
self.openai_repo.models(provider).await
}
}
Some(ProviderResponse::OpenAIResponses) => self.codex_repo.models(provider).await,
Some(ProviderResponse::Anthropic) => self.anthropic_repo.models(provider).await,
Some(ProviderResponse::Bedrock) => self.bedrock_repo.models(provider).await,
Expand Down
1 change: 1 addition & 0 deletions crates/forge_repo/src/provider/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -8,6 +8,7 @@ mod google;
mod mock_server;
mod openai;
mod openai_responses;
mod opencode_go;
mod opencode_zen;
mod provider_repo;
mod retry;
Expand Down
97 changes: 97 additions & 0 deletions crates/forge_repo/src/provider/opencode_go.rs
Original file line number Diff line number Diff line change
@@ -0,0 +1,97 @@
use std::sync::Arc;

use anyhow::Result;
use derive_setters::Setters;
use forge_app::HttpInfra;
use forge_app::domain::{
ChatCompletionMessage, Context as ChatContext, Model, ModelId, Provider, ProviderResponse,
ResultStream,
};
use forge_config::RetryConfig;
use forge_domain::ChatRepository;
use url::Url;

use crate::provider::openai::OpenAIResponseRepository;

/// Chat repository for the OpenCode Go provider.
///
/// Thin adapter that delegates all work to the generic OpenAI-compatible
/// repository after rewriting the provider endpoint (see `build_provider`).
#[derive(Setters)]
#[setters(strip_option, into)]
pub struct OpenCodeGoResponseRepository<F> {
    // Underlying OpenAI-compatible transport that performs the actual requests.
    openai_repo: OpenAIResponseRepository<F>,
    // Retry policy, settable via the `Setters`-generated `retry_config` setter.
    // NOTE(review): this field appears to be stored but never read in this
    // file — confirm retries are actually forwarded to `openai_repo`.
    retry_config: Arc<RetryConfig>,
}

impl<F: HttpInfra + Sync> OpenCodeGoResponseRepository<F> {
    /// Fixed chat-completions endpoint for the OpenCode Go provider.
    const CHAT_COMPLETIONS_URL: &'static str =
        "https://opencode.ai/zen/go/v1/chat/completions";

    /// Creates a repository backed by the generic OpenAI-compatible transport
    /// with a default retry configuration.
    pub fn new(infra: Arc<F>) -> Self {
        Self {
            openai_repo: OpenAIResponseRepository::new(infra),
            // NOTE(review): this retry config is stored on `self` but never
            // forwarded to `openai_repo` here — verify retries take effect.
            retry_config: Arc::new(RetryConfig::default()),
        }
    }

    /// Returns a copy of `provider` pinned to the OpenCode Go endpoint and
    /// forced onto the OpenAI-compatible response format, overriding whatever
    /// URL/response type the incoming provider configuration carried.
    fn build_provider(&self, provider: &Provider<Url>) -> Provider<Url> {
        let mut adapted = provider.clone();
        // The endpoint is a compile-time constant, so parsing cannot fail.
        adapted.url = Url::parse(Self::CHAT_COMPLETIONS_URL)
            .expect("hard-coded OpenCode Go endpoint URL must be valid");
        adapted.response = Some(ProviderResponse::OpenAI);
        adapted
    }
}

impl<F: HttpInfra + Sync> OpenCodeGoResponseRepository<F> {
    /// Streams a chat completion by delegating to the OpenAI-compatible
    /// repository with the provider rewritten to the OpenCode Go endpoint.
    pub async fn chat(
        &self,
        model_id: &ModelId,
        context: ChatContext,
        provider: Provider<Url>,
    ) -> ResultStream<ChatCompletionMessage, anyhow::Error> {
        let adapted = self.build_provider(&provider);
        self.openai_repo.chat(model_id, context, adapted).await
    }

    /// Lists the models available for this provider.
    ///
    /// Only hard-coded model lists are supported; a URL-backed source or a
    /// missing source both yield an empty list.
    pub async fn models(&self, provider: Provider<Url>) -> Result<Vec<Model>> {
        match provider.models() {
            Some(forge_domain::ModelSource::Hardcoded(models)) => Ok(models.clone()),
            _ => Ok(vec![]),
        }
    }
}

#[cfg(test)]
mod tests {
    use std::str::FromStr;

    use forge_app::domain::ProviderResponse;
    use forge_domain::ProviderId;
    use pretty_assertions::assert_eq;
    use url::Url;

    /// The endpoint must live on opencode.ai under /zen/go/. Asserting the
    /// individual components is stronger than the previous check, which only
    /// compared a parsed URL back to its own input string (a tautology that
    /// exercised the `url` crate, not this provider's configuration).
    #[test]
    fn test_opencode_go_provider_url() {
        let url = Url::parse("https://opencode.ai/zen/go/v1/chat/completions").unwrap();
        assert_eq!(url.scheme(), "https");
        assert_eq!(url.host_str(), Some("opencode.ai"));
        assert_eq!(url.path(), "/zen/go/v1/chat/completions");
    }

    /// The canonical id string must round-trip to the borrowed constant.
    #[test]
    fn test_opencode_go_provider_id_from_str() {
        let actual = ProviderId::from_str("opencode_go").unwrap();
        let expected = ProviderId::OPENCODE_GO;
        assert_eq!(actual, expected);
    }

    /// Guards the Debug rendering of the response type this provider forces.
    #[test]
    fn test_opencode_go_response_type() {
        let response = ProviderResponse::OpenAI;
        assert_eq!(format!("{:?}", response), "OpenAI");
    }
}
70 changes: 70 additions & 0 deletions crates/forge_repo/src/provider/provider.json
Original file line number Diff line number Diff line change
Expand Up @@ -2426,6 +2426,76 @@
],
"auth_methods": ["api_key"]
},
{
"id": "opencode_go",
"api_key_vars": "OPENCODE_API_KEY",
"url_param_vars": [],
"response_type": "OpenAI",
"url": "https://opencode.ai/zen/go/v1/chat/completions",
"models": [
{
"id": "glm-5",
"name": "GLM 5",
"description": "Zhipu AI's flagship model with 204K context, reasoning, and tool calling capabilities",
"context_length": 204800,
"tools_supported": true,
"supports_parallel_tool_calls": true,
"supports_reasoning": true,
"input_modalities": ["text"]
},
{
"id": "kimi-k2.5",
"name": "Kimi K2.5",
"description": "Moonshot AI's flagship model with 262K context, vision, and reasoning capabilities",
"context_length": 262144,
"tools_supported": true,
"supports_parallel_tool_calls": true,
"supports_reasoning": true,
"input_modalities": ["text", "image"]
},
{
"id": "mimo-v2-pro",
"name": "MiMo V2 Pro",
"description": "Xiaomi's flagship foundation model with 1M context, reasoning, and tool calling capabilities",
"context_length": 1000000,
"tools_supported": true,
"supports_parallel_tool_calls": true,
"supports_reasoning": true,
"input_modalities": ["text"]
},
{
"id": "mimo-v2-omni",
"name": "MiMo V2 Omni",
"description": "Xiaomi's omni-modal model that natively processes image, video, and audio inputs",
"context_length": 262100,
"tools_supported": true,
"supports_parallel_tool_calls": true,
"supports_reasoning": true,
"input_modalities": ["text", "image"]
},
{
"id": "minimax-m2.7",
"name": "MiniMax M2.7",
"description": "MiniMax's latest model with enhanced reasoning and 204K context",
"context_length": 204800,
"tools_supported": true,
"supports_parallel_tool_calls": true,
"supports_reasoning": true,
"input_modalities": ["text"]
},
{
"id": "minimax-m2.5",
"name": "MiniMax M2.5",
"description": "MiniMax's model with 204K context and reasoning capabilities",
"context_length": 204800,
"tools_supported": true,
"supports_parallel_tool_calls": true,
"supports_reasoning": true,
"input_modalities": ["text"]
}
],
"auth_methods": ["api_key"]
},
{
"id": "alibaba_coding",
"provider_type": "llm",
Expand Down
Loading