Skip to content

Commit 0a19f69

Browse files
committed
base url is working
1 parent aa85ed5 commit 0a19f69

2 files changed

Lines changed: 26 additions & 1 deletion

File tree

eval_protocol/pytest/remote_rollout_processor.py

Lines changed: 17 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,5 @@
 import asyncio
+import base64
 import time
 from typing import Any, Dict, List, Optional, Callable
 
@@ -50,6 +51,11 @@ class RemoteRolloutProcessor(RolloutProcessor):
     By default, fetches traces from the Fireworks tracing proxy using rollout_id tags.
     You can provide a custom output_data_loader for different tracing backends.
 
+    If a `base_url` is provided in `completion_params` (e.g., "https://dev.api.fireworks.ai/inference/v1"),
+    it will be base64-encoded and appended to the model_base_url path as `/encoded_base_url/{encoded}`.
+    This allows routing LLM calls through a metadata gateway that can inject tracing while
+    forwarding to the actual LLM provider endpoint.
+
     See https://evalprotocol.io/tutorial/remote-rollout-processor for documentation.
     """
 
@@ -144,6 +150,13 @@ async def _process_row(row: EvaluationRow) -> EvaluationRow:
                 "Model must be provided in row.input_metadata.completion_params or config.completion_params"
             )
 
+        # Extract base_url from completion_params if provided
+        llm_base_url: Optional[str] = None
+        if row.input_metadata and row.input_metadata.completion_params:
+            llm_base_url = row.input_metadata.completion_params.get("base_url")
+        if llm_base_url is None and config.completion_params:
+            llm_base_url = config.completion_params.get("base_url")
+
         # Strip non-OpenAI fields from messages before sending to remote
         allowed_message_fields = {"role", "content", "tool_calls", "tool_call_id", "name"}
         clean_messages = []
@@ -180,6 +193,10 @@ async def _process_row(row: EvaluationRow) -> EvaluationRow:
                 f"/row_id/{meta.row_id}"
             )
 
+        if llm_base_url:
+            encoded_base_url = base64.urlsafe_b64encode(llm_base_url.encode()).decode()
+            final_model_base_url = f"{final_model_base_url}/encoded_base_url/{encoded_base_url}"
+
         init_payload: InitRequest = InitRequest(
             model=model,
             messages=clean_messages,

tests/remote_server/test_remote_fireworks.py

Lines changed: 9 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -58,7 +58,15 @@ def rows() -> List[EvaluationRow]:
 
 
 @pytest.mark.skipif(os.environ.get("CI") == "true", reason="Only run this test locally (skipped in CI)")
-@pytest.mark.parametrize("completion_params", [{"model": "fireworks_ai/accounts/fireworks/models/gpt-oss-120b"}])
+@pytest.mark.parametrize(
+    "completion_params",
+    [
+        {
+            "model": "fireworks_ai/accounts/pyroworks-dev/deployedModels/qwen-v2p5-7b-mix0elk1",
+            "base_url": "https://dev.api.fireworks.ai/inference/v1/chat/completions",
+        }
+    ],
+)  # TODO: move to its own separate test
 @evaluation_test(
     data_loaders=DynamicDataLoader(
         generators=[rows],

0 commit comments

Comments (0)