From c0cd3eee7696317efe2caa6b830efec4b6f921da Mon Sep 17 00:00:00 2001
From: evalstate <1936278+evalstate@users.noreply.github.com>
Date: Tue, 24 Mar 2026 17:15:39 +0000
Subject: [PATCH 1/9] normalize connection string handling, improve Windows
handling
---
src/fast_agent/acp/slash/handlers/mcp.py | 68 +-
src/fast_agent/agents/smart_agent.py | 62 +-
src/fast_agent/cli/commands/auth.py | 11 +-
.../commands/handlers/mcp_runtime.py | 170 ++---
.../commands/mcp_command_intents.py | 189 +-----
src/fast_agent/mcp/connect_targets.py | 596 ++++++++++++++----
src/fast_agent/ui/command_payloads.py | 59 +-
.../ui/interactive/command_dispatch.py | 15 +-
.../ui/interactive/mcp_connect_flow.py | 23 +-
src/fast_agent/ui/prompt/command_help.py | 3 +-
src/fast_agent/ui/prompt/input.py | 31 +-
src/fast_agent/ui/prompt/keybindings.py | 7 +-
src/fast_agent/ui/prompt/parser.py | 127 ++--
src/fast_agent/utils/commandline.py | 52 ++
src/fast_agent/utils/slash_commands.py | 14 +
.../fast_agent/acp/test_slash_commands_mcp.py | 30 +
.../fast_agent/agents/test_agent_types.py | 12 +
.../agents/test_smart_agent_command_tool.py | 9 +
.../fast_agent/commands/test_auth_command.py | 34 +
.../commands/test_mcp_runtime_handlers.py | 98 +--
.../fast_agent/mcp/test_connect_targets.py | 173 +++++
.../test_config_mcp_target_shorthand.py | 4 +-
.../ui/test_command_intent_contract.py | 85 ++-
.../fast_agent/ui/test_hash_agent_command.py | 17 +-
.../fast_agent/ui/test_parse_mcp_commands.py | 16 +
.../unit/fast_agent/utils/test_commandline.py | 38 ++
26 files changed, 1238 insertions(+), 705 deletions(-)
create mode 100644 src/fast_agent/utils/commandline.py
create mode 100644 src/fast_agent/utils/slash_commands.py
create mode 100644 tests/unit/fast_agent/mcp/test_connect_targets.py
create mode 100644 tests/unit/fast_agent/utils/test_commandline.py
diff --git a/src/fast_agent/acp/slash/handlers/mcp.py b/src/fast_agent/acp/slash/handlers/mcp.py
index a8507b91d..415d59afb 100644
--- a/src/fast_agent/acp/slash/handlers/mcp.py
+++ b/src/fast_agent/acp/slash/handlers/mcp.py
@@ -16,11 +16,9 @@
)
from fast_agent.commands.handlers import mcp_runtime as mcp_runtime_handlers
-from fast_agent.commands.mcp_command_intents import (
- build_mcp_connect_runtime_target,
- parse_mcp_connect_tokens,
- parse_mcp_session_tokens,
-)
+from fast_agent.commands.mcp_command_intents import parse_mcp_session_tokens
+from fast_agent.mcp.connect_targets import parse_connect_command_text, render_connect_request
+from fast_agent.utils.slash_commands import split_subcommand_and_remainder
if TYPE_CHECKING:
from fast_agent.acp.command_io import ACPCommandIO
@@ -45,6 +43,7 @@ def _mcp_usage_text(heading: str) -> str:
"- /mcp list\n"
"- /mcp connect [--name ] [--auth ] [--timeout ] "
"[--oauth|--no-oauth] [--reconnect|--no-reconnect]\n"
+ " Example: /mcp connect \"C:\\Program Files\\Tool\\tool.exe\" --flag\n"
"- /mcp session [list [server]|jar [server]|new [server] [--title ]|"
"use |clear [server|--all]]\n"
"- /mcp disconnect \n"
@@ -95,12 +94,13 @@ async def _send_connect_tool_update(
return
-def _connect_tool_call_title(parsed_connect) -> str:
+def _connect_tool_call_title(request) -> str:
connect_label = "MCP server"
- if parsed_connect.server_name:
- connect_label = f"MCP server '{parsed_connect.server_name}'"
- elif parsed_connect.target_text:
- first_target_token = parsed_connect.target_text.split()[0]
+ if request.target.server_name:
+ connect_label = f"MCP server '{request.target.server_name}'"
+ else:
+ target_text = render_connect_request(request)
+ first_target_token = target_text.split()[0]
connect_label = f"MCP target '{first_target_token}'"
return f"Connect {connect_label}"
@@ -256,19 +256,24 @@ async def _handle_mcp_connect_command(
ctx,
io: "ACPCommandIO",
manager,
- tokens: list[str],
+ remainder: str,
) -> str:
if handler._attach_mcp_server_callback is None:
return "mcp\n\nRuntime MCP server attachment is not available."
- parsed_connect = parse_mcp_connect_tokens(tokens[1:])
- if parsed_connect.error:
- return f"{heading}\n\n{parsed_connect.error}"
+ if not remainder:
+ return (
+ f"{heading}\n\nUsage: /mcp connect [--name ] [--auth ] "
+ "[--timeout ] [--oauth|--no-oauth] [--reconnect|--no-reconnect]"
+ )
+ try:
+ request = parse_connect_command_text(remainder)
+ except ValueError as exc:
+ return f"{heading}\n\n{exc}"
- runtime_target = build_mcp_connect_runtime_target(parsed_connect)
- display_target = build_mcp_connect_runtime_target(parsed_connect, redact_auth=True)
+ display_target = render_connect_request(request, redact_auth=True)
tool_call_id = handler._build_tool_call_id()
oauth_authorization_url: str | None = None
- tool_call_title = _connect_tool_call_title(parsed_connect)
+ tool_call_title = _connect_tool_call_title(request)
async def _send_connect_progress(message: str) -> None:
nonlocal oauth_authorization_url
@@ -302,7 +307,7 @@ async def _send_connect_progress(message: str) -> None:
ctx,
manager=manager,
agent_name=handler.current_agent_name,
- target_text=runtime_target,
+ request=request,
on_progress=_send_connect_progress,
)
except asyncio.CancelledError:
@@ -432,15 +437,8 @@ async def _handle_mcp_reconnect_command(
async def handle_mcp(handler: "SlashCommandHandler", arguments: str | None = None) -> str:
heading = "mcp"
args = (arguments or "").strip() or "list"
-
- try:
- tokens = shlex.split(args)
- except ValueError as exc:
- return f"{heading}\n\nInvalid arguments: {exc}"
-
- if not tokens:
- tokens = ["list"]
- subcmd = tokens[0].lower()
+ subcmd_text, remainder = split_subcommand_and_remainder(args)
+ subcmd = (subcmd_text or "list").lower()
if subcmd in {"help", "--help", "-h"}:
return _mcp_usage_text(heading)
@@ -451,11 +449,25 @@ async def handle_mcp(handler: "SlashCommandHandler", arguments: str | None = Non
command_handlers = {
"list": _handle_mcp_list_command,
- "connect": _handle_mcp_connect_command,
"session": _handle_mcp_session_command,
"disconnect": _handle_mcp_disconnect_command,
"reconnect": _handle_mcp_reconnect_command,
}
+ if subcmd == "connect":
+ return await _handle_mcp_connect_command(
+ handler,
+ heading=heading,
+ ctx=ctx,
+ io=io,
+ manager=manager,
+ remainder=remainder,
+ )
+
+ try:
+ tokens = shlex.split(args)
+ except ValueError as exc:
+ return f"{heading}\n\nInvalid arguments: {exc}"
+
handler_func = command_handlers.get(subcmd)
if handler_func is None:
return _mcp_usage_text(heading)
diff --git a/src/fast_agent/agents/smart_agent.py b/src/fast_agent/agents/smart_agent.py
index af0b04d2b..8201a5b69 100644
--- a/src/fast_agent/agents/smart_agent.py
+++ b/src/fast_agent/agents/smart_agent.py
@@ -68,11 +68,13 @@
from fast_agent.core.logging.logger import get_logger
from fast_agent.core.prompt_templates import enrich_with_environment_context
from fast_agent.core.validation import validate_provider_keys_post_creation
+from fast_agent.mcp.connect_targets import infer_server_name, parse_connect_command_text
from fast_agent.mcp.helpers.content_helpers import get_text
from fast_agent.mcp.prompts.prompt_load import load_prompt
from fast_agent.mcp.ui_mixin import McpUIMixin
from fast_agent.paths import resolve_environment_paths
from fast_agent.tools.function_tool_loader import build_default_function_tool
+from fast_agent.utils.slash_commands import split_subcommand_and_remainder
if TYPE_CHECKING:
from fast_agent.agents.llm_agent import LlmAgent
@@ -667,6 +669,30 @@ async def _run_mcp_slash_command_call(agent: Any, arguments: str) -> str:
)
args = arguments.strip() or "list"
+ subcmd_text, connect_remainder = split_subcommand_and_remainder(args)
+ subcmd = (subcmd_text or "list").lower()
+
+ if subcmd == "connect":
+ if not connect_remainder:
+ raise AgentConfigError(
+ "Invalid /mcp connect arguments",
+ (
+ "Usage: /mcp connect [--name ] [--auth ] "
+ "[--timeout ] [--oauth|--no-oauth] [--reconnect|--no-reconnect]"
+ ),
+ )
+ try:
+ request = parse_connect_command_text(connect_remainder)
+ except ValueError as exc:
+ raise AgentConfigError("Invalid /mcp connect arguments", str(exc)) from exc
+ outcome = await mcp_runtime_handlers.handle_mcp_connect(
+ context,
+ manager=runtime_manager,
+ agent_name=agent_name,
+ request=request,
+ )
+ return _render_smart_slash_outcome(outcome, heading="mcp", io=io)
+
try:
tokens = shlex.split(args)
except ValueError as exc:
@@ -688,25 +714,6 @@ async def _run_mcp_slash_command_call(agent: Any, arguments: str) -> str:
)
return _render_smart_slash_outcome(outcome, heading="mcp", io=io)
- if subcmd == "connect":
- if len(tokens) < 2:
- raise AgentConfigError(
- "Invalid /mcp connect arguments",
- (
- "Usage: /mcp connect [--name ] [--auth ] "
- "[--timeout ] [--oauth|--no-oauth] [--reconnect|--no-reconnect]"
- ),
- )
-
- target_text = " ".join(tokens[1:])
- outcome = await mcp_runtime_handlers.handle_mcp_connect(
- context,
- manager=runtime_manager,
- agent_name=agent_name,
- target_text=target_text,
- )
- return _render_smart_slash_outcome(outcome, heading="mcp", io=io)
-
if subcmd == "disconnect":
if len(tokens) != 2:
raise AgentConfigError(
@@ -844,12 +851,19 @@ async def _apply_runtime_mcp_connections(
target = raw_target.strip()
if not target:
continue
+ try:
+ request = parse_connect_command_text(target)
+ except ValueError as exc:
+ raise AgentConfigError(
+ "Failed to connect MCP server for smart tool call",
+ str(exc),
+ ) from exc
outcome = await mcp_runtime_handlers.handle_mcp_connect(
None,
manager=manager,
agent_name=target_agent_name,
- target_text=target,
+ request=request,
)
errors, target_warnings = _collect_outcome_messages(outcome)
warnings.extend(target_warnings)
@@ -859,13 +873,7 @@ async def _apply_runtime_mcp_connections(
"\n".join(errors),
)
- parsed = mcp_runtime_handlers.parse_connect_input(target)
- mode = mcp_runtime_handlers.infer_connect_mode(parsed.target_text)
- resolved_name = parsed.server_name or mcp_runtime_handlers.infer_server_name(
- parsed.target_text,
- mode,
- )
- connected_names.append(resolved_name)
+ connected_names.append(infer_server_name(request.target))
return _SmartConnectSummary(connected=connected_names, warnings=warnings)
diff --git a/src/fast_agent/cli/commands/auth.py b/src/fast_agent/cli/commands/auth.py
index 47531e007..3d7d662a5 100644
--- a/src/fast_agent/cli/commands/auth.py
+++ b/src/fast_agent/cli/commands/auth.py
@@ -193,13 +193,22 @@ def status(
codex_table = Table(show_header=True, box=None)
codex_table.add_column("Codex OAuth", style="white", header_style="bold")
codex_table.add_column("Token", header_style="bold")
+ codex_table.add_column("Source", header_style="bold")
codex_table.add_column("Expires", header_style="bold")
if not codex_status.get("present"):
token_display = "[dim]Not configured[/dim]"
+ source_display = "[dim]-[/dim]"
expires_display = "[dim]-[/dim]"
else:
token_display = "[bold green]Present[/bold green]"
+ source = codex_status.get("source")
+ if source == "keyring":
+ source_display = "[green]Keyring OAuth[/green]"
+ elif source == "auth.json":
+ source_display = "[green]Codex auth.json[/green]"
+ else:
+ source_display = "[green]OAuth token[/green]"
expires_at = codex_status.get("expires_at")
if expires_at:
expires_display = datetime.fromtimestamp(expires_at).strftime("%Y-%m-%d %H:%M")
@@ -210,7 +219,7 @@ def status(
else:
expires_display = "[green]unknown[/green]"
- codex_table.add_row("Token", token_display, expires_display)
+ codex_table.add_row("Token", token_display, source_display, expires_display)
print_section_header(console, "Codex OAuth", color="blue")
console.print(codex_table)
except Exception:
diff --git a/src/fast_agent/commands/handlers/mcp_runtime.py b/src/fast_agent/commands/handlers/mcp_runtime.py
index 8e50950b4..a15f0e356 100644
--- a/src/fast_agent/commands/handlers/mcp_runtime.py
+++ b/src/fast_agent/commands/handlers/mcp_runtime.py
@@ -3,11 +3,10 @@
from __future__ import annotations
import json
-import math
import os
import re
import shlex
-from dataclasses import dataclass
+from dataclasses import replace
from datetime import datetime
from pathlib import Path
from shutil import get_terminal_size
@@ -17,11 +16,10 @@
from fast_agent.commands.results import CommandOutcome
from fast_agent.mcp.connect_targets import (
+ ParsedMcpConnectRequest,
build_server_config_from_target,
infer_server_name,
-)
-from fast_agent.mcp.connect_targets import (
- infer_connect_mode as infer_connect_mode_shared,
+ render_normalized_target,
)
from fast_agent.mcp.experimental_session_client import ExperimentalSessionClient, SessionJarEntry
from fast_agent.mcp.mcp_aggregator import MCPAttachOptions
@@ -75,17 +73,6 @@ async def clear_cookie(self, server_identifier: str | None) -> str: ...
async def clear_all_cookies(self) -> list[str]: ...
-@dataclass(frozen=True, slots=True)
-class ParsedMcpConnectInput:
- target_text: str
- server_name: str | None
- timeout_seconds: float | None
- trigger_oauth: bool | None
- reconnect_on_disconnect: bool | None
- force_reconnect: bool
- auth_token: str | None
-
-
_AUTH_ENV_BRACED_RE = re.compile(r"^\$\{(?P[A-Za-z_][A-Za-z0-9_]*)(?::(?P.*))?\}$")
_AUTH_ENV_SIMPLE_RE = re.compile(r"^\$(?P[A-Za-z_][A-Za-z0-9_]*)$")
@@ -139,102 +126,26 @@ def _resolve_auth_token_value(raw_value: str) -> str:
return normalized_value
-def infer_connect_mode(target_text: str) -> str:
- return infer_connect_mode_shared(target_text)
-
-
-def _infer_server_name(target_text: str, mode: str) -> str:
- """Backward-compatible private wrapper used by interactive UI code."""
- return infer_server_name(target_text, mode)
-
-
-def _rebuild_target_text(tokens: list[str]) -> str:
- """Rebuild target text while preserving whitespace grouping for later shlex parsing."""
- if not tokens:
- return ""
-
- rebuilt_parts: list[str] = []
- for token in tokens:
- if token == "" or any(char.isspace() for char in token):
- rebuilt_parts.append(shlex.quote(token))
- else:
- rebuilt_parts.append(token)
- return " ".join(rebuilt_parts)
-
-
-def parse_connect_input(target_text: str) -> ParsedMcpConnectInput:
- tokens = shlex.split(target_text)
- target_tokens: list[str] = []
- server_name: str | None = None
- timeout_seconds: float | None = None
- trigger_oauth: bool | None = None
- reconnect_on_disconnect: bool | None = None
- force_reconnect = False
- auth_token: str | None = None
-
- idx = 0
- while idx < len(tokens):
- token = tokens[idx]
- if token in {"--name", "-n"}:
- idx += 1
- if idx >= len(tokens):
- raise ValueError("Missing value for --name")
- server_name = tokens[idx]
- elif token == "--timeout":
- idx += 1
- if idx >= len(tokens):
- raise ValueError("Missing value for --timeout")
- timeout_seconds = float(tokens[idx])
- if not math.isfinite(timeout_seconds) or timeout_seconds <= 0:
- raise ValueError(
- "Invalid value for --timeout: expected a finite number greater than 0"
- )
- elif token == "--oauth":
- trigger_oauth = True
- elif token == "--no-oauth":
- trigger_oauth = False
- elif token == "--reconnect":
- force_reconnect = True
- elif token == "--no-reconnect":
- reconnect_on_disconnect = False
- elif token == "--auth":
- idx += 1
- if idx >= len(tokens):
- raise ValueError("Missing value for --auth")
- auth_token = _resolve_auth_token_value(tokens[idx])
- elif token.startswith("--auth="):
- auth_token = token.split("=", 1)[1]
- if not auth_token:
- raise ValueError("Missing value for --auth")
- auth_token = _resolve_auth_token_value(auth_token)
- else:
- target_tokens.append(token)
- idx += 1
-
- normalized_target = _rebuild_target_text(target_tokens).strip()
- if not normalized_target:
- raise ValueError("Connection target is required")
-
- return ParsedMcpConnectInput(
- target_text=normalized_target,
- server_name=server_name,
- timeout_seconds=timeout_seconds,
- trigger_oauth=trigger_oauth,
- reconnect_on_disconnect=reconnect_on_disconnect,
- force_reconnect=force_reconnect,
- auth_token=auth_token,
+def _resolve_request_auth(request: ParsedMcpConnectRequest) -> ParsedMcpConnectRequest:
+ auth_token = request.options.auth_token
+ if auth_token is None:
+ return request
+ return replace(
+ request,
+ options=replace(
+ request.options,
+ auth_token=_resolve_auth_token_value(auth_token),
+ ),
)
def _build_server_config(
- target_text: str,
- server_name: str,
+ request: ParsedMcpConnectRequest,
*,
auth_token: str | None = None,
) -> tuple[str, MCPServerSettings]:
return build_server_config_from_target(
- target_text,
- server_name=server_name,
+ request.target,
auth_token=auth_token,
)
@@ -290,9 +201,7 @@ async def _resolve_configured_server_alias(
*,
manager: McpRuntimeManager,
agent_name: str,
- target_text: str,
- explicit_server_name: str | None,
- auth_token: str | None,
+ request: ParsedMcpConnectRequest,
) -> str | None:
"""Return configured server name when target text is an alias.
@@ -300,17 +209,16 @@ async def _resolve_configured_server_alias(
--name override or URL auth token is provided.
"""
- if explicit_server_name is not None or auth_token is not None:
+ if request.target.server_name is not None or request.options.auth_token is not None:
return None
- if infer_connect_mode(target_text) != "stdio":
+ if request.target.mode != "stdio":
return None
- tokens = shlex.split(target_text)
- if len(tokens) != 1:
+ if not request.target.command or request.target.args:
return None
- candidate = tokens[0]
+ candidate = request.target.command
if not candidate or candidate.startswith("-"):
return None
@@ -1252,7 +1160,7 @@ async def handle_mcp_connect(
*,
manager: McpRuntimeManager,
agent_name: str,
- target_text: str,
+ request: ParsedMcpConnectRequest,
on_progress: Callable[[str], Awaitable[None]] | None = None,
on_oauth_event: Callable[[OAuthEvent], Awaitable[None]] | None = None,
) -> CommandOutcome:
@@ -1306,7 +1214,7 @@ async def emit_oauth_event(event: OAuthEvent) -> None:
await emit_progress(f"OAuth status: {event.message}")
try:
- parsed = parse_connect_input(target_text)
+ parsed = _resolve_request_auth(request)
except ValueError as exc:
outcome.add_message(f"Invalid MCP connect arguments: {exc}", channel="error")
return outcome
@@ -1314,22 +1222,21 @@ async def emit_oauth_event(event: OAuthEvent) -> None:
configured_alias = await _resolve_configured_server_alias(
manager=manager,
agent_name=agent_name,
- target_text=parsed.target_text,
- explicit_server_name=parsed.server_name,
- auth_token=parsed.auth_token,
+ request=parsed,
)
- mode = "configured" if configured_alias is not None else infer_connect_mode(parsed.target_text)
- server_name = (
- configured_alias or parsed.server_name or infer_server_name(parsed.target_text, mode)
- )
+ target_mode = parsed.target.mode
+ mode = "configured" if configured_alias is not None else target_mode
+ server_name = configured_alias or infer_server_name(parsed.target)
if mode == "configured":
await emit_progress(f"Connecting MCP server '{server_name}' from config file…")
else:
await emit_progress(f"Connecting MCP server '{server_name}' via {mode}…")
- trigger_oauth = True if parsed.trigger_oauth is None else parsed.trigger_oauth
- startup_timeout_seconds = parsed.timeout_seconds
+ trigger_oauth = (
+ True if parsed.options.trigger_oauth is None else parsed.options.trigger_oauth
+ )
+ startup_timeout_seconds = parsed.options.timeout_seconds
if startup_timeout_seconds is None:
# OAuth-backed URL servers often need additional non-callback time for
# metadata discovery and token exchange after the browser callback.
@@ -1341,15 +1248,14 @@ async def emit_oauth_event(event: OAuthEvent) -> None:
config = None
else:
server_name, config = _build_server_config(
- parsed.target_text,
- server_name,
- auth_token=parsed.auth_token,
+ parsed,
+ auth_token=parsed.options.auth_token,
)
attach_options = MCPAttachOptions(
startup_timeout_seconds=startup_timeout_seconds,
trigger_oauth=trigger_oauth,
- force_reconnect=parsed.force_reconnect,
- reconnect_on_disconnect=parsed.reconnect_on_disconnect,
+ force_reconnect=parsed.options.force_reconnect,
+ reconnect_on_disconnect=parsed.options.reconnect_on_disconnect,
oauth_event_handler=emit_oauth_event
if (on_progress is not None or on_oauth_event is not None)
else None,
@@ -1450,7 +1356,7 @@ async def emit_oauth_event(event: OAuthEvent) -> None:
else prompts_added_count
)
- if already_attached and not parsed.force_reconnect:
+ if already_attached and not parsed.options.force_reconnect:
outcome.add_message(
(
f"MCP server '{server_name}' is already attached. "
@@ -1462,10 +1368,12 @@ async def emit_oauth_event(event: OAuthEvent) -> None:
)
await emit_progress(f"MCP server '{server_name}' is already connected.")
else:
- action = "Reconnected" if already_attached and parsed.force_reconnect else "Connected"
+ action = (
+ "Reconnected" if already_attached and parsed.options.force_reconnect else "Connected"
+ )
if mode == "configured":
configured_source = _resolve_configured_source_from_context(ctx, server_name)
- source_text = configured_source or parsed.target_text
+ source_text = configured_source or render_normalized_target(parsed.target)
message_text = f"{action} MCP server '{server_name}' from configuration: {source_text}."
else:
message_text = f"{action} MCP server '{server_name}' ({mode})."
diff --git a/src/fast_agent/commands/mcp_command_intents.py b/src/fast_agent/commands/mcp_command_intents.py
index 07b7f823b..16bb071e3 100644
--- a/src/fast_agent/commands/mcp_command_intents.py
+++ b/src/fast_agent/commands/mcp_command_intents.py
@@ -2,185 +2,9 @@
from __future__ import annotations
-import math
-import shlex
from typing import Literal
-from fast_agent.ui.command_payloads import McpConnectCommand, McpConnectMode, McpSessionCommand
-
-
-def _rebuild_target_text(tokens: list[str]) -> str:
- if not tokens:
- return ""
-
- rebuilt_parts: list[str] = []
- for token in tokens:
- if token == "" or any(char.isspace() for char in token):
- rebuilt_parts.append(shlex.quote(token))
- else:
- rebuilt_parts.append(token)
- return " ".join(rebuilt_parts)
-
-
-def parse_mcp_connect_tokens(connect_tokens: list[str]) -> McpConnectCommand:
- if not connect_tokens:
- return McpConnectCommand(
- target_text="",
- parsed_mode="stdio",
- server_name=None,
- auth_token=None,
- timeout_seconds=None,
- trigger_oauth=None,
- reconnect_on_disconnect=None,
- force_reconnect=False,
- error=(
- "Usage: /mcp connect [--name ] [--auth ] "
- "[--timeout ] [--oauth|--no-oauth] [--reconnect|--no-reconnect]"
- ),
- )
-
- try:
- (
- target_text,
- server_name,
- auth_token,
- timeout_seconds,
- trigger_oauth,
- reconnect_on_disconnect,
- force_reconnect,
- ) = _parse_connect_components(connect_tokens)
- except ValueError as exc:
- return McpConnectCommand(
- target_text="",
- parsed_mode="stdio",
- server_name=None,
- auth_token=None,
- timeout_seconds=None,
- trigger_oauth=None,
- reconnect_on_disconnect=None,
- force_reconnect=False,
- error=str(exc),
- )
-
- return McpConnectCommand(
- target_text=target_text,
- parsed_mode=_infer_connect_mode(target_text),
- server_name=server_name,
- auth_token=auth_token,
- timeout_seconds=timeout_seconds,
- trigger_oauth=trigger_oauth,
- reconnect_on_disconnect=reconnect_on_disconnect,
- force_reconnect=force_reconnect,
- error=None,
- )
-
-
-def _parse_connect_components(
- connect_tokens: list[str],
-) -> tuple[
- str,
- str | None,
- str | None,
- float | None,
- bool | None,
- bool | None,
- bool,
-]:
- target_tokens: list[str] = []
- server_name: str | None = None
- auth_token: str | None = None
- timeout_seconds: float | None = None
- trigger_oauth: bool | None = None
- reconnect_on_disconnect: bool | None = None
- force_reconnect = False
- idx = 0
- while idx < len(connect_tokens):
- token = connect_tokens[idx]
- if token in {"--name", "-n"}:
- idx += 1
- if idx >= len(connect_tokens):
- raise ValueError("Missing value for --name")
- server_name = connect_tokens[idx]
- elif token == "--timeout":
- idx += 1
- if idx >= len(connect_tokens):
- raise ValueError("Missing value for --timeout")
- timeout_seconds = float(connect_tokens[idx])
- if not math.isfinite(timeout_seconds) or timeout_seconds <= 0:
- raise ValueError(
- "Invalid value for --timeout: expected a finite number greater than 0"
- )
- elif token == "--auth":
- idx += 1
- if idx >= len(connect_tokens):
- raise ValueError("Missing value for --auth")
- auth_token = connect_tokens[idx]
- elif token.startswith("--auth="):
- auth_token = token.split("=", 1)[1]
- if not auth_token:
- raise ValueError("Missing value for --auth")
- elif token == "--oauth":
- trigger_oauth = True
- elif token == "--no-oauth":
- trigger_oauth = False
- elif token == "--reconnect":
- force_reconnect = True
- elif token == "--no-reconnect":
- reconnect_on_disconnect = False
- else:
- target_tokens.append(token)
- idx += 1
-
- target_text = _rebuild_target_text(target_tokens).strip()
- if not target_text:
- raise ValueError("Connection target is required")
-
- return (
- target_text,
- server_name,
- auth_token,
- timeout_seconds,
- trigger_oauth,
- reconnect_on_disconnect,
- force_reconnect,
- )
-
-
-def _infer_connect_mode(target_text: str) -> McpConnectMode:
- stripped = target_text.strip().lower()
- if stripped.startswith(("http://", "https://")):
- return "url"
- if stripped.startswith("@"):
- return "npx"
- if stripped.startswith("npx "):
- return "npx"
- if stripped.startswith("uvx "):
- return "uvx"
- return "stdio"
-
-
-def build_mcp_connect_runtime_target(
- command: McpConnectCommand,
- *,
- redact_auth: bool = False,
-) -> str:
- runtime_target = command.target_text
- if command.server_name:
- runtime_target += f" --name {command.server_name}"
- if command.auth_token:
- auth_token = "[REDACTED]" if redact_auth else command.auth_token
- runtime_target += f" --auth {shlex.quote(auth_token)}"
- if command.timeout_seconds is not None:
- runtime_target += f" --timeout {command.timeout_seconds}"
- if command.trigger_oauth is True:
- runtime_target += " --oauth"
- elif command.trigger_oauth is False:
- runtime_target += " --no-oauth"
- if command.reconnect_on_disconnect is False:
- runtime_target += " --no-reconnect"
- if command.force_reconnect:
- runtime_target += " --reconnect"
- return runtime_target
+from fast_agent.ui.command_payloads import McpSessionCommand
def parse_mcp_session_tokens(session_tokens: list[str]) -> McpSessionCommand:
@@ -324,16 +148,15 @@ def _parse_clear_session(args: list[str]) -> McpSessionCommand:
if token.startswith("--"):
parse_error = f"Unknown flag: {token}"
break
- if server_identity is None:
- server_identity = token
- else:
+ if server_identity is not None:
parse_error = f"Unexpected argument: {token}"
break
+ server_identity = token
- if parse_error is None and clear_all and server_identity is not None:
- parse_error = "Use either --all or a specific server, not both"
+ if clear_all and server_identity is not None:
+ parse_error = "Use either a server name or --all"
- if parse_error is None and not clear_all and server_identity is None:
+ if not clear_all and server_identity is None:
clear_all = True
return McpSessionCommand(
diff --git a/src/fast_agent/mcp/connect_targets.py b/src/fast_agent/mcp/connect_targets.py
index 5050f5ca3..3e216e8ca 100644
--- a/src/fast_agent/mcp/connect_targets.py
+++ b/src/fast_agent/mcp/connect_targets.py
@@ -1,17 +1,28 @@
-"""Shared MCP runtime connect target parsing and resolution helpers."""
+"""Canonical MCP connect target parsing and normalization."""
from __future__ import annotations
+import math
import re
-import shlex
-from typing import TYPE_CHECKING, Any, Mapping
-from urllib.parse import urlparse
-
-from fast_agent.cli.commands.url_parser import parse_server_urls
+from dataclasses import dataclass
+from typing import TYPE_CHECKING, Any, Literal, Mapping, Sequence, cast
+
+import mslex
+
+from fast_agent.cli.commands.url_parser import generate_server_name as generate_url_server_name
+from fast_agent.cli.commands.url_parser import parse_server_url, parse_server_urls
+from fast_agent.utils.commandline import (
+ CommandLineSyntax,
+ join_commandline,
+ resolve_commandline_syntax,
+ split_commandline,
+)
if TYPE_CHECKING:
from fast_agent.config import MCPServerSettings
+McpConnectMode = Literal["url", "stdio", "npx", "uvx"]
+McpTransport = Literal["http", "sse", "stdio"]
_FAST_AGENT_CONNECT_FLAG_NAMES: tuple[str, ...] = (
"--auth",
@@ -24,31 +35,76 @@
"--no-reconnect",
)
+_WHOLE_SINGLE_QUOTED_ARG_PATTERN = re.compile(r"(^|\s)'([^']+)'(?=\s|$)")
-def _find_embedded_fast_agent_flag_for_url_target(target_text: str) -> str | None:
- """Return the first fast-agent CLI flag embedded in a URL target string."""
- if infer_connect_mode(target_text) != "url":
- return None
- try:
- tokens = shlex.split(target_text)
- except ValueError:
- # Preserve existing error behavior for invalid shell quoting.
- return None
+def _rewrite_shell_single_quotes_for_windows(text: str) -> str:
+ return _WHOLE_SINGLE_QUOTED_ARG_PATTERN.sub(
+ lambda match: f"{match.group(1)}{mslex.quote(match.group(2))}",
+ text,
+ )
- if len(tokens) <= 1:
- return None
- for token in tokens[1:]:
- if token in _FAST_AGENT_CONNECT_FLAG_NAMES:
- return token
- if token.startswith("--auth="):
- return "--auth"
- if token.startswith("--timeout="):
- return "--timeout"
- if token.startswith("--name="):
- return "--name"
+def _split_connect_command_text(
+ text: str,
+ *,
+ syntax: CommandLineSyntax = "auto",
+) -> list[str]:
+ if syntax == "auto" and _WHOLE_SINGLE_QUOTED_ARG_PATTERN.search(text):
+ # Preserve shell-style single-quoted arguments on Windows without
+ # misparsing apostrophes inside ordinary path/token text.
+ if resolve_commandline_syntax(syntax) == "windows":
+ return split_commandline(
+ _rewrite_shell_single_quotes_for_windows(text),
+ syntax="windows",
+ )
+ return split_commandline(text, syntax="posix")
+ return split_commandline(text, syntax=syntax)
+
+
+@dataclass(frozen=True, slots=True)
+class NormalizedMcpTarget:
+ mode: McpConnectMode
+ transport: McpTransport | None
+ url: str | None
+ command: str | None
+ args: tuple[str, ...]
+ server_name: str | None
+
+
+@dataclass(frozen=True, slots=True)
+class McpConnectOptions:
+ auth_token: str | None
+ timeout_seconds: float | None
+ trigger_oauth: bool | None
+ reconnect_on_disconnect: bool | None
+ force_reconnect: bool
+
+
+@dataclass(frozen=True, slots=True)
+class ParsedMcpConnectRequest:
+ target: NormalizedMcpTarget
+ options: McpConnectOptions
+
+
+def _slugify_server_name(value: str) -> str:
+ normalized = re.sub(r"[^a-zA-Z0-9_-]+", "-", value).strip("-_").lower()
+ return normalized or "mcp-server"
+
+
+def _basenameish(value: str) -> str:
+ return re.split(r"[/\\]", value.strip())[-1]
+
+def _flag_name(token: str) -> str | None:
+ if token in _FAST_AGENT_CONNECT_FLAG_NAMES:
+ return token
+ if token.startswith("--auth="):
+ return "--auth"
+ if token.startswith("--timeout="):
+ return "--timeout"
+ if token.startswith("--name="):
+ return "--name"
return None
@@ -64,103 +120,395 @@ def _build_url_target_flag_error(*, source_path: str, flag: str) -> str:
)
-def infer_connect_mode(target_text: str) -> str:
- """Infer runtime connect mode from a target string."""
- stripped = target_text.strip()
- if stripped.startswith(("http://", "https://")):
+def _validate_timeout(value: str) -> float:
+ timeout_seconds = float(value)
+ if not math.isfinite(timeout_seconds) or timeout_seconds <= 0:
+ raise ValueError(
+ "Invalid value for --timeout: expected a finite number greater than 0"
+ )
+ return timeout_seconds
+
+
+def infer_connect_mode_from_tokens(tokens: Sequence[str]) -> McpConnectMode:
+ if not tokens:
+ raise ValueError("Connection target is required")
+
+ first = tokens[0].strip()
+ lowered = first.lower()
+ if lowered.startswith(("http://", "https://")):
return "url"
- if stripped.startswith("@"):
+ if first.startswith("@"):
return "npx"
- if stripped.startswith("npx "):
+ if lowered == "npx":
return "npx"
- if stripped.startswith("uvx "):
+ if lowered == "uvx":
return "uvx"
return "stdio"
-def _slugify_server_name(value: str) -> str:
- normalized = re.sub(r"[^a-zA-Z0-9_-]+", "-", value).strip("-").lower()
- return normalized or "mcp-server"
+def infer_connect_mode_from_text(
+ text: str,
+ *,
+ syntax: CommandLineSyntax = "auto",
+) -> McpConnectMode:
+ return infer_connect_mode_from_tokens(split_commandline(text, syntax=syntax))
+
+
+def infer_connect_mode(target_text: str) -> McpConnectMode:
+ return infer_connect_mode_from_text(target_text)
-def infer_server_name(target_text: str, mode: str | None = None) -> str:
- """Infer an MCP server name from runtime connect target text."""
- resolved_mode = mode or infer_connect_mode(target_text)
- tokens = shlex.split(target_text)
- if resolved_mode == "url":
- parsed = urlparse(target_text)
- if parsed.hostname:
- return _slugify_server_name(parsed.hostname)
+def infer_transport(target: NormalizedMcpTarget) -> McpTransport | None:
+ if target.transport is not None:
+ return target.transport
+ if target.mode == "url":
+ return None
+ return "stdio"
+
- if resolved_mode in {"npx", "uvx"} and tokens:
+def _normalize_target_tokens(
+ tokens: Sequence[str],
+ *,
+ server_name: str | None = None,
+) -> NormalizedMcpTarget:
+ if not tokens:
+ raise ValueError("Connection target is required")
+
+ mode = infer_connect_mode_from_tokens(tokens)
+ resolved_server_name = server_name.strip() if isinstance(server_name, str) and server_name.strip() else None
+
+ if mode == "url":
+ if len(tokens) != 1:
+ raise ValueError("URL connect targets do not accept extra arguments")
+ if len(parse_server_urls(tokens[0])) != 1:
+ raise ValueError("Singular MCP connect targets do not support multiple URLs")
+ _generated_name, transport, parsed_url = parse_server_url(tokens[0])
+ return NormalizedMcpTarget(
+ mode="url",
+ transport=transport,
+ url=parsed_url,
+ command=None,
+ args=(),
+ server_name=resolved_server_name,
+ )
+
+ if mode == "npx":
if tokens[0].startswith("@"):
- package = tokens[0]
- elif len(tokens) >= 2:
- package = tokens[1]
+ command = "npx"
+ args = tuple(tokens)
else:
- package = tokens[0]
+ command = "npx"
+ args = tuple(tokens[1:])
+ if not args:
+ raise ValueError("Connection target is required")
+ return NormalizedMcpTarget(
+ mode="npx",
+ transport="stdio",
+ url=None,
+ command=command,
+ args=args,
+ server_name=resolved_server_name,
+ )
- if package.startswith("@"):
- package = package.rsplit("@", 1)[0] if package.count("@") > 1 else package
- else:
+ if mode == "uvx":
+ args = tuple(tokens[1:])
+ if not args:
+ raise ValueError("Connection target is required")
+ return NormalizedMcpTarget(
+ mode="uvx",
+ transport="stdio",
+ url=None,
+ command="uvx",
+ args=args,
+ server_name=resolved_server_name,
+ )
+
+ return NormalizedMcpTarget(
+ mode="stdio",
+ transport="stdio",
+ url=None,
+ command=tokens[0],
+ args=tuple(tokens[1:]),
+ server_name=resolved_server_name,
+ )
+
+
+def normalize_connect_target_text(
+ text: str,
+ *,
+ syntax: CommandLineSyntax = "auto",
+ server_name: str | None = None,
+) -> NormalizedMcpTarget:
+ normalized_text = text.strip()
+ if not normalized_text:
+ raise ValueError("Connection target is required")
+ return _normalize_target_tokens(
+ _split_connect_command_text(normalized_text, syntax=syntax),
+ server_name=server_name,
+ )
+
+
+def infer_server_name(target: str | NormalizedMcpTarget) -> str:
+ normalized_target = (
+ normalize_connect_target_text(target) if isinstance(target, str) else target
+ )
+ if normalized_target.server_name:
+ return normalized_target.server_name
+
+ if normalized_target.mode == "url":
+ url = normalized_target.url
+ if not url:
+ return "mcp-server"
+ return generate_url_server_name(url)
+
+ if normalized_target.mode in {"npx", "uvx"}:
+ package = normalized_target.args[0] if normalized_target.args else normalized_target.command or ""
+ if package.startswith("@") and package.count("@") > 1:
+ package = package.rsplit("@", 1)[0]
+ elif not package.startswith("@"):
package = package.split("@", 1)[0]
- package = package.rsplit("/", 1)[-1]
- return _slugify_server_name(package)
+ return _slugify_server_name(package.rsplit("/", 1)[-1])
- if tokens:
- return _slugify_server_name(tokens[0].rsplit("/", 1)[-1])
+ command = normalized_target.command or ""
+ if command:
+ return _slugify_server_name(_basenameish(command))
return "mcp-server"
+def parse_connect_command_tokens(tokens: Sequence[str]) -> ParsedMcpConnectRequest:
+ if not tokens:
+ raise ValueError("Connection target is required")
+
+ target_tokens: list[str] = []
+ server_name: str | None = None
+ auth_token: str | None = None
+ timeout_seconds: float | None = None
+ trigger_oauth: bool | None = None
+ reconnect_on_disconnect: bool | None = None
+ force_reconnect = False
+
+ idx = 0
+ token_list = list(tokens)
+ while idx < len(token_list):
+ token = token_list[idx]
+ if token in {"--name", "-n"}:
+ idx += 1
+ if idx >= len(token_list):
+ raise ValueError("Missing value for --name")
+ server_name = token_list[idx]
+ elif token.startswith("--name="):
+ server_name = token.split("=", 1)[1]
+ if not server_name:
+ raise ValueError("Missing value for --name")
+ elif token == "--auth":
+ idx += 1
+ if idx >= len(token_list):
+ raise ValueError("Missing value for --auth")
+ auth_token = token_list[idx]
+ elif token.startswith("--auth="):
+ auth_token = token.split("=", 1)[1]
+ if not auth_token:
+ raise ValueError("Missing value for --auth")
+ elif token == "--timeout":
+ idx += 1
+ if idx >= len(token_list):
+ raise ValueError("Missing value for --timeout")
+ timeout_seconds = _validate_timeout(token_list[idx])
+ elif token.startswith("--timeout="):
+ timeout_seconds = _validate_timeout(token.split("=", 1)[1])
+ elif token == "--oauth":
+ trigger_oauth = True
+ elif token == "--no-oauth":
+ trigger_oauth = False
+ elif token == "--reconnect":
+ force_reconnect = True
+ elif token == "--no-reconnect":
+ reconnect_on_disconnect = False
+ else:
+ target_tokens.append(token)
+ idx += 1
+
+ return ParsedMcpConnectRequest(
+ target=_normalize_target_tokens(target_tokens, server_name=server_name),
+ options=McpConnectOptions(
+ auth_token=auth_token,
+ timeout_seconds=timeout_seconds,
+ trigger_oauth=trigger_oauth,
+ reconnect_on_disconnect=reconnect_on_disconnect,
+ force_reconnect=force_reconnect,
+ ),
+ )
+
+
+def parse_connect_command_text(
+ text: str,
+ *,
+ syntax: CommandLineSyntax = "auto",
+) -> ParsedMcpConnectRequest:
+ return parse_connect_command_tokens(_split_connect_command_text(text, syntax=syntax))
+
+
+def render_normalized_target(
+ target: NormalizedMcpTarget,
+ *,
+ syntax: CommandLineSyntax = "auto",
+) -> str:
+ return join_commandline(_render_target_argv(target), syntax=syntax)
+
+
+def _render_target_argv(target: NormalizedMcpTarget) -> list[str]:
+ if target.mode == "url":
+ return [target.url] if target.url else []
+
+ if target.mode == "npx" and target.command == "npx" and target.args:
+ if target.args[0].startswith("@"):
+ return list(target.args)
+
+ argv: list[str] = []
+ if target.command:
+ argv.append(target.command)
+ argv.extend(target.args)
+ return argv
+
+
+def render_connect_request(
+ request: ParsedMcpConnectRequest,
+ *,
+ redact_auth: bool = False,
+ syntax: CommandLineSyntax = "auto",
+) -> str:
+ argv = _render_target_argv(request.target)
+ if request.target.server_name:
+ argv.extend(["--name", request.target.server_name])
+ if request.options.auth_token:
+ argv.extend(["--auth", "[REDACTED]" if redact_auth else request.options.auth_token])
+ if request.options.timeout_seconds is not None:
+ argv.extend(["--timeout", str(request.options.timeout_seconds)])
+ if request.options.trigger_oauth is True:
+ argv.append("--oauth")
+ elif request.options.trigger_oauth is False:
+ argv.append("--no-oauth")
+ if request.options.reconnect_on_disconnect is False:
+ argv.append("--no-reconnect")
+ if request.options.force_reconnect:
+ argv.append("--reconnect")
+ return join_commandline(argv, syntax=syntax)
+
+
+def normalize_connect_config_target(
+ *,
+ target: str | None = None,
+ transport: str | None = None,
+ url: str | None = None,
+ command: str | None = None,
+ args: Sequence[str] | None = None,
+ server_name: str | None = None,
+ headers: Mapping[str, str] | None = None,
+ auth: Mapping[str, Any] | None = None,
+ reconnect_on_disconnect: bool | None = None,
+ source_path: str = "target",
+) -> tuple[NormalizedMcpTarget, dict[str, Any]]:
+ overrides: dict[str, Any] = {}
+ if transport is not None:
+ overrides["transport"] = transport
+ if url is not None:
+ overrides["url"] = url
+ if command is not None:
+ overrides["command"] = command
+ if args is not None:
+ overrides["args"] = list(args)
+ if headers is not None:
+ overrides["headers"] = dict(headers)
+ if auth is not None:
+ overrides["auth"] = dict(auth)
+ if reconnect_on_disconnect is not None:
+ overrides["reconnect_on_disconnect"] = reconnect_on_disconnect
+
+ if target is not None:
+ normalized_target_text = target.strip()
+ if not normalized_target_text:
+ raise ValueError(f"`{source_path}` must be a non-empty string")
+ tokens = _split_connect_command_text(normalized_target_text)
+ if infer_connect_mode_from_tokens(tokens) == "url":
+ for token in tokens:
+ flag = _flag_name(token)
+ if flag is not None:
+ raise ValueError(_build_url_target_flag_error(source_path=source_path, flag=flag))
+ return _normalize_target_tokens(tokens, server_name=server_name), overrides
+
+ if url is not None:
+ return _normalize_target_tokens([url], server_name=server_name), overrides
+
+ if command is not None:
+ command_tokens = [command, *(list(args) if args else [])]
+ return _normalize_target_tokens(command_tokens, server_name=server_name), overrides
+
+ raise ValueError(f"`{source_path}` must be a non-empty string")
+
+
def build_server_config_from_target(
- target_text: str,
+ target: str | NormalizedMcpTarget,
*,
server_name: str | None = None,
auth_token: str | None = None,
+ overrides: Mapping[str, Any] | None = None,
) -> tuple[str, MCPServerSettings]:
- """Build a runtime MCP server configuration from a connect target string."""
from fast_agent.config import MCPServerSettings
- normalized_target = target_text.strip()
- if not normalized_target:
- raise ValueError("Connection target is required")
+ normalized_target = (
+ normalize_connect_target_text(target, server_name=server_name)
+ if isinstance(target, str)
+ else target
+ )
+ effective_target = (
+ normalized_target
+ if server_name is None or normalized_target.server_name == server_name
+ else NormalizedMcpTarget(
+ mode=normalized_target.mode,
+ transport=normalized_target.transport,
+ url=normalized_target.url,
+ command=normalized_target.command,
+ args=normalized_target.args,
+ server_name=server_name,
+ )
+ )
- mode = infer_connect_mode(normalized_target)
- resolved_name = (server_name or infer_server_name(normalized_target, mode)).strip()
- if not resolved_name:
- raise ValueError("Server name could not be resolved from connection target")
+ resolved_name = infer_server_name(effective_target)
+ payload: dict[str, Any] = {"name": resolved_name}
- if mode == "url":
- parsed_urls = parse_server_urls(normalized_target, auth_token=auth_token)
- if not parsed_urls:
+ if effective_target.mode == "url":
+ url_value = effective_target.url
+ if not url_value:
raise ValueError("Connection target is required")
- _parsed_name, transport, parsed_url, headers = parsed_urls[0]
- return resolved_name, MCPServerSettings(
- name=resolved_name,
- transport=transport,
- url=parsed_url,
- headers=headers,
+ parsed_urls = parse_server_urls(url_value, auth_token=auth_token)
+ if len(parsed_urls) != 1:
+ raise ValueError("Singular MCP connect targets do not support multiple URLs")
+ _generated_name, transport, parsed_url, headers = parsed_urls[0]
+ payload.update(
+ {
+ "transport": transport,
+ "url": parsed_url,
+ "headers": headers,
+ }
)
-
- tokens = shlex.split(normalized_target)
- if not tokens:
- raise ValueError("Connection target is required")
-
- if mode == "npx" and tokens[0].startswith("@"):
- return resolved_name, MCPServerSettings(
- name=resolved_name,
- transport="stdio",
- command="npx",
- args=tokens,
+ else:
+ if not effective_target.command:
+ raise ValueError("Connection target is required")
+ payload.update(
+ {
+ "transport": "stdio",
+ "command": effective_target.command,
+ "args": list(effective_target.args),
+ }
)
- return resolved_name, MCPServerSettings(
- name=resolved_name,
- transport="stdio",
- command=tokens[0],
- args=tokens[1:],
- )
+ if overrides:
+ payload.update(dict(overrides))
+
+ resolved_settings = MCPServerSettings.model_validate(payload)
+ final_name: str = resolved_settings.name or resolved_name
+ return final_name, resolved_settings
def resolve_target_entry(
@@ -170,34 +518,44 @@ def resolve_target_entry(
overrides: Mapping[str, Any],
source_path: str,
) -> tuple[str, MCPServerSettings]:
- """Resolve target shorthand + explicit overrides into canonical settings."""
- from fast_agent.config import MCPServerSettings
-
- normalized_target = target.strip()
- if not normalized_target:
- raise ValueError(f"`{source_path}` must be a non-empty string")
-
- embedded_flag = _find_embedded_fast_agent_flag_for_url_target(normalized_target)
- if embedded_flag is not None:
- raise ValueError(_build_url_target_flag_error(source_path=source_path, flag=embedded_flag))
+ normalized_target, _normalized_overrides = normalize_connect_config_target(
+ target=target,
+ server_name=default_name,
+ transport=cast("str | None", overrides.get("transport")),
+ url=cast("str | None", overrides.get("url")),
+ command=cast("str | None", overrides.get("command")),
+ args=cast("Sequence[str] | None", overrides.get("args")),
+ headers=cast("Mapping[str, str] | None", overrides.get("headers")),
+ auth=cast("Mapping[str, Any] | None", overrides.get("auth")),
+ reconnect_on_disconnect=cast("bool | None", overrides.get("reconnect_on_disconnect")),
+ source_path=source_path,
+ )
- resolved_default_name = default_name.strip() if isinstance(default_name, str) else None
- resolved_name, derived_settings = build_server_config_from_target(
+ resolved_name, resolved_settings = build_server_config_from_target(
normalized_target,
- server_name=resolved_default_name,
+ auth_token=None,
+ overrides=dict(overrides),
)
-
- merged_payload: dict[str, Any] = derived_settings.model_dump(mode="python")
- merged_payload.update(dict(overrides))
-
- final_name_value = merged_payload.get("name")
- if isinstance(final_name_value, str) and final_name_value.strip():
- final_name = final_name_value.strip()
- elif resolved_default_name:
- final_name = resolved_default_name
- else:
- final_name = resolved_name
- merged_payload["name"] = final_name
-
- resolved_settings = MCPServerSettings.model_validate(merged_payload)
- return final_name, resolved_settings
+ return resolved_name, resolved_settings
+
+
+__all__ = [
+ "McpConnectMode",
+ "McpConnectOptions",
+ "McpTransport",
+ "NormalizedMcpTarget",
+ "ParsedMcpConnectRequest",
+ "build_server_config_from_target",
+ "infer_connect_mode",
+ "infer_connect_mode_from_text",
+ "infer_connect_mode_from_tokens",
+ "infer_server_name",
+ "infer_transport",
+ "normalize_connect_config_target",
+ "normalize_connect_target_text",
+ "parse_connect_command_text",
+ "parse_connect_command_tokens",
+ "render_connect_request",
+ "render_normalized_target",
+ "resolve_target_entry",
+]
diff --git a/src/fast_agent/ui/command_payloads.py b/src/fast_agent/ui/command_payloads.py
index ec725c700..90037881d 100644
--- a/src/fast_agent/ui/command_payloads.py
+++ b/src/fast_agent/ui/command_payloads.py
@@ -1,6 +1,8 @@
from dataclasses import dataclass
from typing import Literal, TypeGuard
+from fast_agent.mcp.connect_targets import ParsedMcpConnectRequest, render_normalized_target
+
class CommandBase:
kind: str
@@ -36,17 +38,58 @@ class McpListCommand(CommandBase):
@dataclass(frozen=True, slots=True)
class McpConnectCommand(CommandBase):
- target_text: str
- parsed_mode: McpConnectMode
- server_name: str | None
- auth_token: str | None
- timeout_seconds: float | None
- trigger_oauth: bool | None
- reconnect_on_disconnect: bool | None
- force_reconnect: bool
+ request: ParsedMcpConnectRequest | None
error: str | None
kind: Literal["mcp_connect"] = "mcp_connect"
+ @property
+ def target_text(self) -> str:
+ if self.request is None:
+ return ""
+ return render_normalized_target(self.request.target)
+
+ @property
+ def parsed_mode(self) -> McpConnectMode:
+ if self.request is None:
+ return "stdio"
+ return self.request.target.mode
+
+ @property
+ def server_name(self) -> str | None:
+ if self.request is None:
+ return None
+ return self.request.target.server_name
+
+ @property
+ def auth_token(self) -> str | None:
+ if self.request is None:
+ return None
+ return self.request.options.auth_token
+
+ @property
+ def timeout_seconds(self) -> float | None:
+ if self.request is None:
+ return None
+ return self.request.options.timeout_seconds
+
+ @property
+ def trigger_oauth(self) -> bool | None:
+ if self.request is None:
+ return None
+ return self.request.options.trigger_oauth
+
+ @property
+ def reconnect_on_disconnect(self) -> bool | None:
+ if self.request is None:
+ return None
+ return self.request.options.reconnect_on_disconnect
+
+ @property
+ def force_reconnect(self) -> bool:
+ if self.request is None:
+ return False
+ return self.request.options.force_reconnect
+
@dataclass(frozen=True, slots=True)
class McpDisconnectCommand(CommandBase):
diff --git a/src/fast_agent/ui/interactive/command_dispatch.py b/src/fast_agent/ui/interactive/command_dispatch.py
index 72b693c70..d8f439ecc 100644
--- a/src/fast_agent/ui/interactive/command_dispatch.py
+++ b/src/fast_agent/ui/interactive/command_dispatch.py
@@ -19,7 +19,6 @@
from fast_agent.commands.handlers import skills as skills_handlers
from fast_agent.commands.handlers import tools as tools_handlers
from fast_agent.commands.handlers.shared import clear_agent_histories
-from fast_agent.commands.mcp_command_intents import build_mcp_connect_runtime_target
from fast_agent.ui import enhanced_prompt
from fast_agent.ui.command_payloads import (
AgentCommand,
@@ -405,24 +404,20 @@ async def _dispatch_mcp_payload(
)
await emit_command_outcome(context, outcome)
return result
- case McpConnectCommand(
- target_text=target_text,
- server_name=server_name,
- error=error,
- ):
+ case McpConnectCommand(request=request, error=error):
context = build_command_context(prompt_provider, agent)
if error:
rich_print(f"[red]{error}[/red]")
return result
- runtime_target = build_mcp_connect_runtime_target(payload)
+ if request is None:
+ rich_print("[red]Connection target is required[/red]")
+ return result
outcome = await handle_mcp_connect(
context=context,
prompt_provider=prompt_provider,
agent=agent,
- runtime_target=runtime_target,
- target_text=target_text,
- server_name=server_name,
+ request=request,
)
if outcome is not None:
await emit_command_outcome(context, outcome)
diff --git a/src/fast_agent/ui/interactive/mcp_connect_flow.py b/src/fast_agent/ui/interactive/mcp_connect_flow.py
index 1b81883cc..2cfc5787d 100644
--- a/src/fast_agent/ui/interactive/mcp_connect_flow.py
+++ b/src/fast_agent/ui/interactive/mcp_connect_flow.py
@@ -12,6 +12,7 @@
from rich.text import Text
from fast_agent.commands.handlers import mcp_runtime as mcp_runtime_handlers
+from fast_agent.mcp.connect_targets import ParsedMcpConnectRequest, infer_server_name
from fast_agent.ui.console import console, ensure_blocking_console
if TYPE_CHECKING:
@@ -25,11 +26,9 @@ async def handle_mcp_connect(
context: "CommandContext",
prompt_provider: "AgentApp",
agent: str,
- runtime_target: str,
- target_text: str,
- server_name: str | None,
+ request: ParsedMcpConnectRequest,
) -> "CommandOutcome | None":
- label = server_name or target_text.split(maxsplit=1)[0]
+ label = request.target.server_name or infer_server_name(request.target)
attached_before_connect: set[str] = set()
try:
attached_before_connect = set(await prompt_provider.list_attached_mcp_servers(agent))
@@ -37,19 +36,7 @@ async def handle_mcp_connect(
attached_before_connect = set()
async def _handle_mcp_connect_cancel() -> None:
- cancel_server_name = server_name
- if not cancel_server_name:
- try:
- parsed_runtime = mcp_runtime_handlers.parse_connect_input(runtime_target)
- cancel_server_name = parsed_runtime.server_name
- if not cancel_server_name:
- mode = mcp_runtime_handlers.infer_connect_mode(parsed_runtime.target_text)
- cancel_server_name = mcp_runtime_handlers._infer_server_name(
- parsed_runtime.target_text,
- mode,
- )
- except Exception:
- cancel_server_name = None
+ cancel_server_name = request.target.server_name or infer_server_name(request.target)
should_detach_on_cancel = bool(cancel_server_name) and (
cancel_server_name not in attached_before_connect
@@ -90,7 +77,7 @@ async def _emit_mcp_progress(message: str) -> None:
context,
manager=prompt_provider,
agent_name=agent,
- target_text=runtime_target,
+ request=request,
on_progress=_emit_mcp_progress,
)
)
diff --git a/src/fast_agent/ui/prompt/command_help.py b/src/fast_agent/ui/prompt/command_help.py
index 88a0dc7c9..e7b5bba3f 100644
--- a/src/fast_agent/ui/prompt/command_help.py
+++ b/src/fast_agent/ui/prompt/command_help.py
@@ -43,6 +43,7 @@ def render_help_lines(*, show_webclear_help: bool) -> list[str]:
" /mcp list - List attached runtime MCP servers",
" /mcp connect - Connect MCP server at runtime",
" [dim]flags: --name --auth --timeout --oauth/--no-oauth --reconnect[/dim]",
+ " [dim]example: /mcp connect \"C:\\Program Files\\Tool\\tool.exe\" --flag[/dim]",
" /mcp disconnect - Disconnect attached MCP server",
" /mcp reconnect - Reconnect attached MCP server",
" /mcp session [server] - List sessions (all connected servers by default; active marker ▶)",
@@ -79,7 +80,7 @@ def render_help_lines(*, show_webclear_help: bool) -> list[str]:
" /agent [name] --dump - Print an AgentCard to screen",
" /reload - Reload AgentCards",
" @agent_name - Switch to agent",
- " #agent_name - Send message to agent, return result to input buffer",
+ " #agent_name - Send message to agent (no space after #); '# Heading' stays plain text",
" STOP - Return control back to the workflow",
" EXIT - Exit fast-agent, terminating any running workflows",
"",
diff --git a/src/fast_agent/ui/prompt/input.py b/src/fast_agent/ui/prompt/input.py
index 795c1beed..bf13fccc6 100644
--- a/src/fast_agent/ui/prompt/input.py
+++ b/src/fast_agent/ui/prompt/input.py
@@ -23,6 +23,7 @@
from rich.text import Text
from fast_agent.agents.agent_types import AgentType
+from fast_agent.mcp.connect_targets import parse_connect_command_text
from fast_agent.mcp.types import McpAgentProtocol
from fast_agent.ui.command_payloads import (
AgentCommand,
@@ -157,16 +158,28 @@ def _mcp_connect_cmd(
force_reconnect: bool,
error: str | None,
) -> McpConnectCommand:
+ del parsed_mode
+ if error or not target_text:
+ return McpConnectCommand(request=None, error=error)
+
+ argv = [target_text]
+ if server_name:
+ argv.extend(["--name", shlex.quote(server_name)])
+ if auth_token:
+ argv.extend(["--auth", shlex.quote(auth_token)])
+ if timeout_seconds is not None:
+ argv.extend(["--timeout", str(timeout_seconds)])
+ if trigger_oauth is True:
+ argv.append("--oauth")
+ elif trigger_oauth is False:
+ argv.append("--no-oauth")
+ if reconnect_on_disconnect is False:
+ argv.append("--no-reconnect")
+ if force_reconnect:
+ argv.append("--reconnect")
return McpConnectCommand(
- target_text=target_text,
- parsed_mode=parsed_mode,
- server_name=server_name,
- auth_token=auth_token,
- timeout_seconds=timeout_seconds,
- trigger_oauth=trigger_oauth,
- reconnect_on_disconnect=reconnect_on_disconnect,
- force_reconnect=force_reconnect,
- error=error,
+ request=parse_connect_command_text(" ".join(argv)),
+ error=None,
)
diff --git a/src/fast_agent/ui/prompt/keybindings.py b/src/fast_agent/ui/prompt/keybindings.py
index 9b05fb864..0d2da4944 100644
--- a/src/fast_agent/ui/prompt/keybindings.py
+++ b/src/fast_agent/ui/prompt/keybindings.py
@@ -12,6 +12,7 @@
from rich import print as rich_print
from fast_agent.ui.prompt.editor import get_text_from_editor
+from fast_agent.ui.prompt.parser import try_parse_hash_agent_command
if TYPE_CHECKING:
from collections.abc import Callable
@@ -30,7 +31,7 @@ def get_line_tokens(line_number):
stripped = line.lstrip()
if stripped.startswith("!"):
return [("class:shell-command", line)]
- if stripped.startswith("#"):
+ if try_parse_hash_agent_command(stripped) is not None:
return [("class:comment-command", line)]
return [("", line)]
@@ -113,7 +114,9 @@ def _should_start_completion(text: str) -> bool:
return True
if stripped.startswith("!"):
return True
- if stripped.startswith(("/", "@", "#")):
+ if stripped.startswith(("/", "@")):
+ return True
+ if try_parse_hash_agent_command(stripped) is not None:
return True
return True
diff --git a/src/fast_agent/ui/prompt/parser.py b/src/fast_agent/ui/prompt/parser.py
index b309404ad..da1f6433a 100644
--- a/src/fast_agent/ui/prompt/parser.py
+++ b/src/fast_agent/ui/prompt/parser.py
@@ -12,14 +12,12 @@
if TYPE_CHECKING:
from collections.abc import Callable
-from fast_agent.commands.mcp_command_intents import (
- parse_mcp_connect_tokens,
- parse_mcp_session_tokens,
-)
+from fast_agent.commands.mcp_command_intents import parse_mcp_session_tokens
from fast_agent.commands.shared_command_intents import (
parse_current_agent_history_intent,
parse_session_command_intent,
)
+from fast_agent.mcp.connect_targets import parse_connect_command_text
from fast_agent.ui.command_payloads import (
AgentCommand,
CardsCommand,
@@ -40,7 +38,6 @@
LoadHistoryCommand,
LoadPromptCommand,
McpConnectCommand,
- McpConnectMode,
McpDisconnectCommand,
McpListCommand,
McpReconnectCommand,
@@ -67,6 +64,7 @@
TitleSessionCommand,
UnknownCommand,
)
+from fast_agent.utils.slash_commands import split_subcommand_and_remainder
def _default_shell_command() -> str:
@@ -89,32 +87,6 @@ def _default_shell_command() -> str:
return "sh"
-def _infer_mcp_connect_mode(target_text: str) -> McpConnectMode:
- stripped = target_text.strip().lower()
- if stripped.startswith(("http://", "https://")):
- return "url"
- if stripped.startswith("@"):
- return "npx"
- if stripped.startswith("npx "):
- return "npx"
- if stripped.startswith("uvx "):
- return "uvx"
- return "stdio"
-
-
-def _rebuild_mcp_target_text(tokens: list[str]) -> str:
- if not tokens:
- return ""
-
- rebuilt_parts: list[str] = []
- for token in tokens:
- if token == "" or any(char.isspace() for char in token):
- rebuilt_parts.append(shlex.quote(token))
- else:
- rebuilt_parts.append(token)
- return " ".join(rebuilt_parts)
-
-
def _parse_quoted_history_target(text: str) -> str | None:
stripped = text.strip()
if not stripped:
@@ -156,6 +128,37 @@ def _parse_hash_agent_command(body: str, *, quiet: bool) -> HashAgentCommand | s
return HashAgentCommand(agent_name=stripped, message="", quiet=quiet)
+def try_parse_hash_agent_command(text: str) -> HashAgentCommand | None:
+ prefix = ""
+ quiet = False
+ if text.startswith("##"):
+ prefix = "##"
+ quiet = True
+ elif text.startswith("#"):
+ prefix = "#"
+ else:
+ return None
+
+ body = text[len(prefix) :]
+ if not body or body[0].isspace():
+ return None
+
+ parsed = _parse_hash_agent_command(body, quiet=quiet)
+ return parsed if isinstance(parsed, HashAgentCommand) else None
+
+
+def _parse_connect_command(remainder: str, *, usage: str) -> McpConnectCommand:
+ if not remainder:
+ return McpConnectCommand(request=None, error=usage)
+ try:
+ return McpConnectCommand(
+ request=parse_connect_command_text(remainder),
+ error=None,
+ )
+ except ValueError as exc:
+ return McpConnectCommand(request=None, error=str(exc))
+
+
def _parse_history_command(remainder: str) -> CommandPayload:
if not remainder:
return ShowHistoryCommand(agent=None)
@@ -424,20 +427,21 @@ def _parse_mcp_command(remainder: str) -> CommandPayload:
if not remainder:
return ShowMcpStatusCommand()
+ subcmd, sub_remainder = split_subcommand_and_remainder(remainder)
+ subcmd = subcmd.lower()
+ if subcmd == "connect":
+ return _parse_connect_command(
+ sub_remainder,
+ usage=(
+ "Usage: /mcp connect [--name ] [--auth ] "
+ "[--timeout ] [--oauth|--no-oauth] [--reconnect|--no-reconnect]"
+ ),
+ )
+
try:
tokens = shlex.split(remainder)
except ValueError as exc:
- return McpConnectCommand(
- target_text="",
- parsed_mode="stdio",
- server_name=None,
- auth_token=None,
- timeout_seconds=None,
- trigger_oauth=None,
- reconnect_on_disconnect=None,
- force_reconnect=False,
- error=f"Invalid arguments: {exc}",
- )
+ return McpConnectCommand(request=None, error=f"Invalid arguments: {exc}")
subcmd = tokens[0].lower() if tokens else ""
if subcmd == "list":
@@ -456,35 +460,13 @@ def _parse_mcp_command(remainder: str) -> CommandPayload:
return McpReconnectCommand(server_name=name, error=error)
if subcmd == "session":
return parse_mcp_session_tokens(tokens[1:])
- if subcmd == "connect":
- return parse_mcp_connect_tokens(tokens[1:])
return UnknownCommand(command="mcp")
def _parse_connect_alias_command(remainder: str) -> McpConnectCommand:
- parsed_mode = _infer_mcp_connect_mode(remainder)
- if not remainder:
- return McpConnectCommand(
- target_text="",
- parsed_mode="stdio",
- server_name=None,
- auth_token=None,
- timeout_seconds=None,
- trigger_oauth=None,
- reconnect_on_disconnect=None,
- force_reconnect=False,
- error="Usage: /connect ",
- )
- return McpConnectCommand(
- target_text=remainder,
- parsed_mode=parsed_mode,
- server_name=None,
- auth_token=None,
- timeout_seconds=None,
- trigger_oauth=None,
- reconnect_on_disconnect=None,
- force_reconnect=False,
- error=None,
+ return _parse_connect_command(
+ remainder,
+ usage="Usage: /connect ",
)
@@ -632,14 +614,9 @@ def parse_special_input(text: str) -> str | CommandPayload:
if cmd_line and cmd_line.startswith("@"):
return SwitchAgentCommand(agent_name=cmd_line[1:].strip())
- if cmd_line and cmd_line.startswith("##"):
- quiet_body = cmd_line[2:]
- if quiet_body and not quiet_body[0].isspace():
- return _parse_hash_agent_command(quiet_body, quiet=True)
- return text
-
- if cmd_line and cmd_line.startswith("#"):
- return _parse_hash_agent_command(cmd_line[1:], quiet=False)
+ parsed_hash_command = try_parse_hash_agent_command(text)
+ if parsed_hash_command is not None:
+ return parsed_hash_command
if cmd_line and cmd_line.startswith("!"):
command = cmd_line[1:].strip()
diff --git a/src/fast_agent/utils/commandline.py b/src/fast_agent/utils/commandline.py
new file mode 100644
index 000000000..6828b7088
--- /dev/null
+++ b/src/fast_agent/utils/commandline.py
@@ -0,0 +1,52 @@
+"""Cross-platform argv split/join helpers."""
+
+from __future__ import annotations
+
+import os
+import shlex
+import subprocess
+from typing import Literal, Sequence
+
+import mslex
+
+CommandLineSyntax = Literal["auto", "posix", "windows"]
+ResolvedCommandLineSyntax = Literal["posix", "windows"]
+
+
def resolve_commandline_syntax(
    syntax: CommandLineSyntax = "auto",
) -> ResolvedCommandLineSyntax:
    """Map a requested syntax (possibly "auto") to a concrete platform syntax.

    "auto" resolves to "windows" when running on Windows (os.name == "nt")
    and to "posix" everywhere else; explicit "posix"/"windows" values pass
    through unchanged.

    Raises:
        ValueError: if *syntax* is not "auto", "posix", or "windows".
    """
    if syntax in {"posix", "windows"}:
        return syntax
    if syntax == "auto":
        return "posix" if os.name != "nt" else "windows"
    raise ValueError(f"Unsupported command-line syntax: {syntax}")
+
+
def split_commandline(
    text: str,
    *,
    syntax: CommandLineSyntax = "auto",
) -> list[str]:
    """Split *text* into argv tokens using platform-appropriate quoting rules.

    Windows syntax is parsed with mslex, POSIX syntax with shlex. Any parser
    failure is re-raised as ValueError so callers only handle one error type.
    """
    resolved_syntax = resolve_commandline_syntax(syntax)
    try:
        if resolved_syntax == "posix":
            return shlex.split(text, posix=True)
        return mslex.split(text)
    except Exception as exc:  # noqa: BLE001 - normalize parsing failures
        raise ValueError(str(exc)) from exc
+
+
def join_commandline(
    argv: Sequence[str],
    *,
    syntax: CommandLineSyntax = "auto",
) -> str:
    """Join *argv* into one command line that round-trips via split_commandline.

    Windows syntax uses subprocess.list2cmdline, POSIX syntax uses shlex.join.
    Every token is coerced to str first; failures are normalized to ValueError.
    """
    resolved_syntax = resolve_commandline_syntax(syntax)
    tokens = [str(part) for part in argv]
    try:
        joiner = subprocess.list2cmdline if resolved_syntax == "windows" else shlex.join
        return joiner(tokens)
    except Exception as exc:  # noqa: BLE001 - normalize join failures
        raise ValueError(str(exc)) from exc
diff --git a/src/fast_agent/utils/slash_commands.py b/src/fast_agent/utils/slash_commands.py
new file mode 100644
index 000000000..fba7ad7a2
--- /dev/null
+++ b/src/fast_agent/utils/slash_commands.py
@@ -0,0 +1,14 @@
+"""Helpers for slash-command routing."""
+
+from __future__ import annotations
+
+
def split_subcommand_and_remainder(text: str) -> tuple[str, str]:
    """Split *text* into its first whitespace-separated word and the remainder.

    Surrounding whitespace is ignored. Blank input yields ("", ""); input with
    a single word yields (word, "").
    """
    trimmed = text.strip()
    if not trimmed:
        return "", ""

    first_word, *remainder = trimmed.split(maxsplit=1)
    return first_word, remainder[0] if remainder else ""
diff --git a/tests/unit/fast_agent/acp/test_slash_commands_mcp.py b/tests/unit/fast_agent/acp/test_slash_commands_mcp.py
index 5fedf26de..09475fa7b 100644
--- a/tests/unit/fast_agent/acp/test_slash_commands_mcp.py
+++ b/tests/unit/fast_agent/acp/test_slash_commands_mcp.py
@@ -324,6 +324,36 @@ async def test_slash_command_mcp_connect_preserves_quoted_target_arguments() ->
assert getattr(server_config, "args", None) == ["--root", "My Folder"]
@pytest.mark.asyncio
async def test_slash_command_mcp_connect_preserves_quoted_windows_path() -> None:
    """A quoted Windows .exe path passed to `/mcp connect` stays one token."""
    app = _App()
    instance = AgentInstance(
        app=cast("AgentApp", app),
        agents={"main": cast("AgentProtocol", _Agent())},
        registry_version=0,
    )
    handler = SlashCommandHandler(
        session_id="s1",
        instance=instance,
        primary_agent_name="main",
        attach_mcp_server_callback=app.attach_mcp_server,
        detach_mcp_server_callback=app.detach_mcp_server,
        list_attached_mcp_servers_callback=app.list_attached_mcp_servers,
        list_configured_detached_mcp_servers_callback=app.list_configured_detached_mcp_servers,
    )

    connected = await handler.execute_command(
        "mcp",
        'connect "C:\\Program Files\\Tool\\tool.exe" --flag --name docs',
    )

    assert "Connected MCP server 'docs'" in connected
    assert app.attached_configs
    # The quoted path becomes the command; --flag stays an arg; --name was
    # consumed as the server name ('docs' in the success message above).
    server_config = app.attached_configs[-1]
    assert getattr(server_config, "command", None) == "C:\\Program Files\\Tool\\tool.exe"
    assert getattr(server_config, "args", None) == ["--flag"]
+
+
@pytest.mark.asyncio
async def test_slash_command_mcp_session_jar() -> None:
app = _App()
diff --git a/tests/unit/fast_agent/agents/test_agent_types.py b/tests/unit/fast_agent/agents/test_agent_types.py
index 7aa883fcd..9c26f0a76 100644
--- a/tests/unit/fast_agent/agents/test_agent_types.py
+++ b/tests/unit/fast_agent/agents/test_agent_types.py
@@ -182,3 +182,15 @@ async def test_apply_runtime_mcp_connections_raises_on_connect_error() -> None:
target_agent_name="main",
mcp_connect=["npx demo-server --name demo"],
)
+
+
@pytest.mark.asyncio
async def test_apply_runtime_mcp_connections_wraps_parse_errors() -> None:
    """A connect-string parse failure surfaces as AgentConfigError (not ValueError)."""
    agent = _FakeMcpAgent(default=True)
    # "--timeout 0" is rejected by the connect parser; the runtime must wrap it.
    with pytest.raises(AgentConfigError, match="Failed to connect MCP server for smart tool call"):
        await _apply_runtime_mcp_connections(
            context=None,
            agents_map=cast("Any", {"main": agent}),
            target_agent_name="main",
            mcp_connect=["npx demo-server --timeout 0"],
        )
diff --git a/tests/unit/fast_agent/agents/test_smart_agent_command_tool.py b/tests/unit/fast_agent/agents/test_smart_agent_command_tool.py
index 255e21152..1d273577b 100644
--- a/tests/unit/fast_agent/agents/test_smart_agent_command_tool.py
+++ b/tests/unit/fast_agent/agents/test_smart_agent_command_tool.py
@@ -61,6 +61,15 @@ async def test_run_slash_command_check_rejects_invalid_argument_syntax(tmp_path:
await _run_slash_command_call(agent, '/check "')
@pytest.mark.asyncio
async def test_run_slash_command_mcp_connect_wraps_parse_errors(tmp_path: Path) -> None:
    """Invalid /mcp connect arguments from the smart tool raise AgentConfigError."""
    settings = Settings(environment_dir=str(tmp_path / ".fast-agent"))
    agent = _SmartAgentStub(settings=settings)

    with pytest.raises(AgentConfigError, match="Invalid /mcp connect arguments"):
        await _run_slash_command_call(agent, "/mcp connect npx demo-server --timeout 0")
+
+
@pytest.mark.asyncio
async def test_run_slash_command_check_returns_markdown_heading(tmp_path: Path) -> None:
settings = Settings(environment_dir=str(tmp_path / ".fast-agent"))
diff --git a/tests/unit/fast_agent/commands/test_auth_command.py b/tests/unit/fast_agent/commands/test_auth_command.py
index 3edee2140..234992eee 100644
--- a/tests/unit/fast_agent/commands/test_auth_command.py
+++ b/tests/unit/fast_agent/commands/test_auth_command.py
@@ -9,6 +9,7 @@
import fast_agent.config as config_module
from fast_agent.cli.commands import auth as auth_command
from fast_agent.config import get_settings, update_global_settings
+from fast_agent.core.keyring_utils import KeyringStatus
def test_auth_status_reports_invalid_settings_yaml_without_traceback(tmp_path: Path) -> None:
@@ -47,3 +48,36 @@ def test_auth_status_reports_invalid_settings_yaml_without_traceback(tmp_path: P
else:
os.environ["ENVIRONMENT_DIR"] = old_env_dir
update_global_settings(old_settings)
+
+
def test_auth_status_shows_codex_source(monkeypatch) -> None:
    """`auth status` reports where the Codex OAuth token came from (auth.json)."""
    monkeypatch.setattr(
        "fast_agent.cli.commands.auth.get_settings_or_exit",
        lambda _config_path=None: get_settings(),
    )
    runner = CliRunner()
    # Healthy keyring with no stored tokens, so only the Codex status below
    # contributes token information to the report.
    monkeypatch.setattr(
        "fast_agent.cli.commands.auth.get_keyring_status",
        lambda: KeyringStatus(name="SecretService Keyring", available=True, writable=True),
    )
    monkeypatch.setattr(
        "fast_agent.cli.commands.auth.list_keyring_tokens",
        lambda: [],
    )
    monkeypatch.setattr(
        "fast_agent.llm.provider.openai.codex_oauth.get_codex_token_status",
        lambda: {
            "present": True,
            "expires_at": None,
            "expired": False,
            "source": "auth.json",
        },
    )

    result = runner.invoke(auth_command.app, ["status"])

    assert result.exit_code == 0, result.output
    output = strip_ansi(result.output)
    assert "Codex OAuth" in output
    assert "Source" in output
    assert "Codex auth.json" in output
diff --git a/tests/unit/fast_agent/commands/test_mcp_runtime_handlers.py b/tests/unit/fast_agent/commands/test_mcp_runtime_handlers.py
index 8e1444bb6..262d8fe5d 100644
--- a/tests/unit/fast_agent/commands/test_mcp_runtime_handlers.py
+++ b/tests/unit/fast_agent/commands/test_mcp_runtime_handlers.py
@@ -7,6 +7,7 @@
from fast_agent.commands.handlers import mcp_runtime
from fast_agent.commands.results import CommandMessage
from fast_agent.config import MCPServerSettings, MCPSettings, Settings
+from fast_agent.mcp.connect_targets import parse_connect_command_text
from fast_agent.mcp.experimental_session_client import SessionJarEntry
from fast_agent.mcp.mcp_aggregator import MCPAttachResult, MCPDetachResult
from fast_agent.mcp.oauth_client import OAuthEvent
@@ -50,6 +51,19 @@ async def display_system_prompt(self, agent_name, system_prompt, *, server_count
del agent_name, system_prompt, server_count
def _request(text: str):
    """Parse raw connect-command text into a request (test shorthand)."""
    # NOTE: the duplicated display_* method bodies that the patch nested here
    # were unreachable (they followed the return) and already exist on the
    # display stub above (see the hunk context); they have been removed.
    return parse_connect_command_text(text)
+
+
class _Provider:
def _agent(self, name: str):
del name
@@ -531,53 +545,57 @@ async def attach_mcp_server(self, agent_name, server_name, server_config=None, o
@pytest.mark.parametrize("raw_timeout", ["nan", "inf", "-inf", "0", "-1"])
-def test_parse_connect_input_rejects_non_finite_or_non_positive_timeout(
+def test_parse_connect_request_rejects_non_finite_or_non_positive_timeout(
raw_timeout: str,
) -> None:
with pytest.raises(ValueError, match="--timeout"):
- mcp_runtime.parse_connect_input(f"npx demo-server --timeout {raw_timeout}")
+ parse_connect_command_text(f"npx demo-server --timeout {raw_timeout}")
-def test_parse_connect_input_resolves_auth_env_reference(monkeypatch) -> None:
+def test_runtime_resolves_auth_env_reference(monkeypatch) -> None:
monkeypatch.setenv("DEMO_TOKEN", "token-from-env")
- parsed = mcp_runtime.parse_connect_input("https://example.com/api --auth ${DEMO_TOKEN}")
+ parsed = mcp_runtime._resolve_request_auth(
+ parse_connect_command_text("https://example.com/api --auth ${DEMO_TOKEN}")
+ )
- assert parsed.auth_token == "token-from-env"
+ assert parsed.options.auth_token == "token-from-env"
-def test_parse_connect_input_resolves_simple_auth_env_reference(monkeypatch) -> None:
+def test_runtime_resolves_simple_auth_env_reference(monkeypatch) -> None:
monkeypatch.setenv("DEMO_TOKEN", "token-from-env")
- parsed = mcp_runtime.parse_connect_input("https://example.com/api --auth $DEMO_TOKEN")
+ parsed = mcp_runtime._resolve_request_auth(
+ parse_connect_command_text("https://example.com/api --auth $DEMO_TOKEN")
+ )
- assert parsed.auth_token == "token-from-env"
+ assert parsed.options.auth_token == "token-from-env"
-def test_parse_connect_input_resolves_auth_env_reference_with_default(monkeypatch) -> None:
+def test_runtime_resolves_auth_env_reference_with_default(monkeypatch) -> None:
monkeypatch.delenv("MISSING_TOKEN", raising=False)
- parsed = mcp_runtime.parse_connect_input(
- "https://example.com/api --auth ${MISSING_TOKEN:default-token}"
+ parsed = mcp_runtime._resolve_request_auth(
+ parse_connect_command_text("https://example.com/api --auth ${MISSING_TOKEN:default-token}")
)
- assert parsed.auth_token == "default-token"
+ assert parsed.options.auth_token == "default-token"
-def test_parse_connect_input_normalizes_bearer_prefix() -> None:
- parsed = mcp_runtime.parse_connect_input(
- "https://example.com/api --auth 'Bearer token-from-cli'"
+def test_runtime_normalizes_bearer_prefix() -> None:
+ parsed = mcp_runtime._resolve_request_auth(
+ parse_connect_command_text("https://example.com/api --auth 'Bearer token-from-cli'")
)
- assert parsed.auth_token == "token-from-cli"
+ assert parsed.options.auth_token == "token-from-cli"
-def test_parse_connect_input_normalizes_bearer_prefix_before_env_resolution() -> None:
+def test_runtime_normalizes_bearer_prefix_before_env_resolution() -> None:
original_token = os.environ.get("DEMO_TOKEN")
os.environ["DEMO_TOKEN"] = "token-from-env"
try:
- parsed = mcp_runtime.parse_connect_input(
- "https://example.com/api --auth 'Bearer $DEMO_TOKEN'"
+ parsed = mcp_runtime._resolve_request_auth(
+ parse_connect_command_text("https://example.com/api --auth 'Bearer $DEMO_TOKEN'")
)
finally:
if original_token is None:
@@ -585,14 +603,16 @@ def test_parse_connect_input_normalizes_bearer_prefix_before_env_resolution() ->
else:
os.environ["DEMO_TOKEN"] = original_token
- assert parsed.auth_token == "token-from-env"
+ assert parsed.options.auth_token == "token-from-env"
-def test_parse_connect_input_rejects_missing_auth_env_reference(monkeypatch) -> None:
+def test_runtime_rejects_missing_auth_env_reference(monkeypatch) -> None:
monkeypatch.delenv("MISSING_TOKEN", raising=False)
with pytest.raises(ValueError, match="Environment variable 'MISSING_TOKEN' is not set"):
- mcp_runtime.parse_connect_input("https://example.com/api --auth ${MISSING_TOKEN}")
+ mcp_runtime._resolve_request_auth(
+ parse_connect_command_text("https://example.com/api --auth ${MISSING_TOKEN}")
+ )
@pytest.mark.asyncio
@@ -604,7 +624,7 @@ async def test_handle_mcp_connect_and_disconnect() -> None:
ctx,
manager=cast("mcp_runtime.McpRuntimeManager", manager),
agent_name="main",
- target_text="npx demo-server --name demo",
+ request=_request("npx demo-server --name demo"),
)
connect_text = "\n".join(str(message.text) for message in connect_outcome.messages)
assert "Connected MCP server" in connect_text
@@ -686,7 +706,7 @@ async def test_handle_mcp_connect_scoped_package_uses_npx_command() -> None:
ctx,
manager=cast("mcp_runtime.McpRuntimeManager", manager),
agent_name="main",
- target_text="@modelcontextprotocol/server-everything",
+ request=_request("@modelcontextprotocol/server-everything"),
)
assert any("Connected MCP server" in str(msg.text) for msg in outcome.messages)
@@ -723,7 +743,7 @@ async def _capture_progress(message: str) -> None:
ctx,
manager=cast("mcp_runtime.McpRuntimeManager", manager),
agent_name="main",
- target_text="docs",
+ request=_request("docs"),
on_progress=_capture_progress,
)
@@ -745,7 +765,7 @@ async def test_handle_mcp_connect_scoped_package_with_args_infers_server_name()
ctx,
manager=cast("mcp_runtime.McpRuntimeManager", manager),
agent_name="main",
- target_text="@modelcontextprotocol/server-filesystem .",
+ request=_request("@modelcontextprotocol/server-filesystem ."),
)
message_text = "\n".join(str(msg.text) for msg in outcome.messages)
@@ -764,7 +784,7 @@ async def test_handle_mcp_connect_preserves_quoted_target_arguments() -> None:
ctx,
manager=cast("mcp_runtime.McpRuntimeManager", manager),
agent_name="main",
- target_text='demo-server --root "My Folder" --name demo',
+ request=_request('demo-server --root "My Folder" --name demo'),
)
assert any("Connected MCP server" in str(msg.text) for msg in outcome.messages)
@@ -782,7 +802,7 @@ async def test_handle_mcp_connect_reports_already_attached() -> None:
ctx,
manager=cast("mcp_runtime.McpRuntimeManager", manager),
agent_name="main",
- target_text="@modelcontextprotocol/server-filesystem .",
+ request=_request("@modelcontextprotocol/server-filesystem ."),
)
message_text = "\n".join(str(msg.text) for msg in outcome.messages)
@@ -798,7 +818,7 @@ async def test_handle_mcp_connect_with_reconnect_reports_reconnected() -> None:
ctx,
manager=cast("mcp_runtime.McpRuntimeManager", manager),
agent_name="main",
- target_text="@modelcontextprotocol/server-filesystem . --reconnect",
+ request=_request("@modelcontextprotocol/server-filesystem . --reconnect"),
)
message_text = "\n".join(str(msg.text) for msg in outcome.messages)
@@ -816,7 +836,7 @@ async def test_handle_mcp_connect_url_uses_cli_url_parsing_for_auth_headers() ->
ctx,
manager=cast("mcp_runtime.McpRuntimeManager", manager),
agent_name="main",
- target_text="https://example.com/api --auth token123",
+ request=_request("https://example.com/api --auth token123"),
)
assert any("Connected MCP server" in str(msg.text) for msg in outcome.messages)
@@ -835,7 +855,7 @@ async def test_handle_mcp_connect_url_auto_appends_mcp_suffix() -> None:
ctx,
manager=cast("mcp_runtime.McpRuntimeManager", manager),
agent_name="main",
- target_text="https://example.com/api",
+ request=_request("https://example.com/api"),
)
assert any("Connected MCP server" in str(msg.text) for msg in outcome.messages)
@@ -852,7 +872,7 @@ async def test_handle_mcp_connect_url_with_query_preserves_explicit_endpoint() -
ctx,
manager=cast("mcp_runtime.McpRuntimeManager", manager),
agent_name="main",
- target_text="https://example.com/api?version=1",
+ request=_request("https://example.com/api?version=1"),
)
assert any(msg.channel == "error" for msg in outcome.messages)
@@ -869,7 +889,7 @@ async def test_handle_mcp_connect_hf_url_adds_hf_auth_from_env(monkeypatch) -> N
ctx,
manager=cast("mcp_runtime.McpRuntimeManager", manager),
agent_name="main",
- target_text="https://demo.hf.space",
+ request=_request("https://demo.hf.space"),
)
assert any("Connected MCP server" in str(msg.text) for msg in outcome.messages)
@@ -892,7 +912,7 @@ async def _capture_progress(message: str) -> None:
ctx,
manager=cast("mcp_runtime.McpRuntimeManager", manager),
agent_name="main",
- target_text="npx demo-server --name demo",
+ request=_request("npx demo-server --name demo"),
on_progress=_capture_progress,
)
@@ -912,7 +932,7 @@ async def test_handle_mcp_connect_enables_oauth_paste_fallback_without_progress_
ctx,
manager=cast("mcp_runtime.McpRuntimeManager", manager),
agent_name="main",
- target_text="npx demo-server --name demo",
+ request=_request("npx demo-server --name demo"),
)
assert manager.last_options is not None
@@ -933,7 +953,7 @@ async def _capture_progress(message: str) -> None:
ctx,
manager=cast("mcp_runtime.McpRuntimeManager", manager),
agent_name="main",
- target_text="https://example.com",
+ request=_request("https://example.com"),
on_progress=_capture_progress,
)
@@ -953,7 +973,7 @@ async def test_handle_mcp_connect_oauth_registration_404_adds_guidance() -> None
ctx,
manager=cast("mcp_runtime.McpRuntimeManager", manager),
agent_name="main",
- target_text="https://api.githubcopilot.com/mcp/",
+ request=_request("https://api.githubcopilot.com/mcp/"),
)
message_text = "\n".join(str(msg.text) for msg in outcome.messages)
@@ -973,7 +993,7 @@ async def test_handle_mcp_connect_defaults_url_oauth_timeout_to_30_seconds() ->
ctx,
manager=cast("mcp_runtime.McpRuntimeManager", manager),
agent_name="main",
- target_text="https://example.com",
+ request=_request("https://example.com"),
)
assert manager.last_options is not None
@@ -989,7 +1009,7 @@ async def test_handle_mcp_connect_defaults_url_no_oauth_timeout_to_10_seconds()
ctx,
manager=cast("mcp_runtime.McpRuntimeManager", manager),
agent_name="main",
- target_text="https://example.com --no-oauth",
+ request=_request("https://example.com --no-oauth"),
)
assert manager.last_options is not None
diff --git a/tests/unit/fast_agent/mcp/test_connect_targets.py b/tests/unit/fast_agent/mcp/test_connect_targets.py
new file mode 100644
index 000000000..0aa99d00f
--- /dev/null
+++ b/tests/unit/fast_agent/mcp/test_connect_targets.py
@@ -0,0 +1,173 @@
+from __future__ import annotations
+
+import pytest
+
+import fast_agent.mcp.connect_targets as connect_targets_module
+from fast_agent.mcp.connect_targets import (
+ build_server_config_from_target,
+ infer_server_name,
+ normalize_connect_config_target,
+ parse_connect_command_text,
+ render_connect_request,
+)
+from fast_agent.utils import commandline
+
+
def _force_windows_commandline(monkeypatch: pytest.MonkeyPatch) -> None:
    # Force both the shared commandline helper and the module under test to
    # resolve "auto" to Windows quoting rules, regardless of the host OS.
    def _resolve_windows_syntax(syntax: commandline.CommandLineSyntax = "auto") -> str:
        return "windows" if syntax == "auto" else syntax

    monkeypatch.setattr(commandline, "resolve_commandline_syntax", _resolve_windows_syntax)
    monkeypatch.setattr(
        connect_targets_module,
        "resolve_commandline_syntax",
        _resolve_windows_syntax,
    )


def test_parse_connect_command_text_preserves_quoted_windows_path() -> None:
    """A quoted Windows executable path stays one stdio command token."""
    request = parse_connect_command_text('"C:\\Program Files\\Tool\\tool.exe" --flag --name docs')

    assert request.target.mode == "stdio"
    assert request.target.command == "C:\\Program Files\\Tool\\tool.exe"
    assert request.target.args == ("--flag",)
    assert request.target.server_name == "docs"


def test_parse_connect_command_text_accepts_single_quoted_args_on_windows(monkeypatch) -> None:
    """Single-quoted values are still honoured under Windows syntax."""
    _force_windows_commandline(monkeypatch)

    request = parse_connect_command_text("https://example.com --auth 'Bearer token-from-cli'")

    assert request.options.auth_token == "Bearer token-from-cli"


def test_parse_connect_command_text_preserves_apostrophes_in_windows_path(monkeypatch) -> None:
    """A bare apostrophe inside a Windows path is not treated as a quote."""
    _force_windows_commandline(monkeypatch)

    request = parse_connect_command_text(r"C:\Users\O'Brien\tool.exe --flag --name docs")

    assert request.target.mode == "stdio"
    assert request.target.command == r"C:\Users\O'Brien\tool.exe"
    assert request.target.args == ("--flag",)
    assert request.target.server_name == "docs"


def test_parse_connect_command_text_preserves_apostrophes_in_windows_tokens(monkeypatch) -> None:
    _force_windows_commandline(monkeypatch)

    request = parse_connect_command_text("https://example.com --auth O'Reilly")

    assert request.options.auth_token == "O'Reilly"


def test_parse_connect_command_text_accepts_mixed_windows_apostrophes_and_single_quotes(
    monkeypatch,
) -> None:
    _force_windows_commandline(monkeypatch)

    request = parse_connect_command_text(r"C:\Users\O'Brien\tool.exe --auth 'Bearer token'")

    assert request.target.mode == "stdio"
    assert request.target.command == r"C:\Users\O'Brien\tool.exe"
    assert request.options.auth_token == "Bearer token"


@pytest.mark.parametrize(
    ("target_text", "mode"),
    [
        ("https://example.com", "url"),
        ("https://example.com/sse", "url"),
        ("@scope/server", "npx"),
        ("npx demo-server", "npx"),
        ("uvx demo-server", "uvx"),
        ("python demo.py", "stdio"),
    ],
)
def test_parse_connect_command_text_infers_mode(target_text: str, mode: str) -> None:
    """Target shape (URL / @scope / npx / uvx / other) determines the mode."""
    request = parse_connect_command_text(target_text)
    assert request.target.mode == mode


def test_render_connect_request_redacts_auth() -> None:
    """Rendering with redact_auth=True masks the token but keeps other flags."""
    request = parse_connect_command_text("https://example.com --auth secret-token --name docs")

    rendered = render_connect_request(request, redact_auth=True)

    assert "secret-token" not in rendered
    assert "[REDACTED]" in rendered
    assert "--name docs" in rendered


def test_parse_connect_command_text_rejects_multiple_urls() -> None:
    with pytest.raises(ValueError, match="multiple URLs"):
        parse_connect_command_text("https://one.example,https://two.example")


def test_infer_server_name_handles_localhost_urls() -> None:
    request = parse_connect_command_text("http://localhost:8080/api")
    assert infer_server_name(request.target).startswith("localhost_8080_")


def test_build_server_config_from_target_handles_scoped_package() -> None:
    """A scoped npm package becomes an npx stdio server named after the package."""
    request = parse_connect_command_text("@modelcontextprotocol/server-filesystem .")

    resolved_name, settings = build_server_config_from_target(request.target)

    assert resolved_name == "server-filesystem"
    assert settings.transport == "stdio"
    assert settings.command == "npx"
    assert settings.args == ["@modelcontextprotocol/server-filesystem", "."]


def test_normalize_connect_config_target_rejects_embedded_fast_agent_flags() -> None:
    # Config targets must be pure targets; fast-agent flags like --auth belong
    # in dedicated config fields, not inside the target string.
    with pytest.raises(ValueError, match="pure target string"):
        normalize_connect_config_target(
            target="https://demo.hf.space --auth token",
            source_path="mcp.targets[0].target",
        )


def test_normalize_connect_config_target_allows_stdio_flags_in_target_args() -> None:
    """Flags after a stdio command are passed through as server args, not parsed."""
    normalized_target, overrides = normalize_connect_config_target(
        target="python server.py --timeout 30 --name workspace",
        source_path="mcp.targets[0].target",
    )

    assert overrides == {}
    assert normalized_target.mode == "stdio"
    assert normalized_target.command == "python"
    assert normalized_target.args == ("server.py", "--timeout", "30", "--name", "workspace")


def test_normalize_connect_config_target_accepts_single_quoted_args_on_windows(
    monkeypatch,
) -> None:
    _force_windows_commandline(monkeypatch)

    normalized_target, _overrides = normalize_connect_config_target(
        target="python -c 'print(1)'",
        source_path="mcp.targets[0].target",
    )

    assert normalized_target.mode == "stdio"
    assert normalized_target.command == "python"
    assert normalized_target.args == ("-c", "print(1)")


def test_infer_server_name_accepts_single_quoted_args_on_windows(monkeypatch) -> None:
    _force_windows_commandline(monkeypatch)

    assert infer_server_name("python -c 'print(1)'") == "python"


def test_build_server_config_from_target_accepts_single_quoted_args_on_windows(
    monkeypatch,
) -> None:
    _force_windows_commandline(monkeypatch)

    resolved_name, settings = build_server_config_from_target("python -c 'print(1)'")

    assert resolved_name == "python"
    assert settings.command == "python"
    assert settings.args == ["-c", "print(1)"]
diff --git a/tests/unit/fast_agent/test_config_mcp_target_shorthand.py b/tests/unit/fast_agent/test_config_mcp_target_shorthand.py
index 7c93a2105..9f053603d 100644
--- a/tests/unit/fast_agent/test_config_mcp_target_shorthand.py
+++ b/tests/unit/fast_agent/test_config_mcp_target_shorthand.py
@@ -105,10 +105,10 @@ def test_config_mcp_targets_list_derives_server_aliases() -> None:
)
assert settings.mcp is not None
- assert "demo-hf-space" in settings.mcp.servers
+ assert "demo_hf_space" in settings.mcp.servers
assert "server-filesystem" in settings.mcp.servers
- remote = settings.mcp.servers["demo-hf-space"]
+ remote = settings.mcp.servers["demo_hf_space"]
assert remote.transport == "http"
assert remote.url == "https://demo.hf.space/mcp"
diff --git a/tests/unit/fast_agent/ui/test_command_intent_contract.py b/tests/unit/fast_agent/ui/test_command_intent_contract.py
index 6ae78d80e..38b175628 100644
--- a/tests/unit/fast_agent/ui/test_command_intent_contract.py
+++ b/tests/unit/fast_agent/ui/test_command_intent_contract.py
@@ -15,7 +15,7 @@
)
from fast_agent.ui.prompt import parse_special_input
-type ExpectedParseResult = str | CommandPayload
+type ExpectedParseResult = str | CommandPayload | dict[str, object]
@pytest.mark.parametrize(
@@ -75,62 +75,46 @@
),
pytest.param(
"/connect https://example.com/mcp",
- McpConnectCommand(
- target_text="https://example.com/mcp",
- parsed_mode="url",
- server_name=None,
- auth_token=None,
- timeout_seconds=None,
- trigger_oauth=None,
- reconnect_on_disconnect=None,
- force_reconnect=False,
- error=None,
- ),
+ {
+ "kind": "mcp_connect",
+ "target_text": "https://example.com/mcp",
+ "parsed_mode": "url",
+ "server_name": None,
+ "error": None,
+ },
id="connect-alias-url",
),
pytest.param(
"/connect @modelcontextprotocol/server-everything",
- McpConnectCommand(
- target_text="@modelcontextprotocol/server-everything",
- parsed_mode="npx",
- server_name=None,
- auth_token=None,
- timeout_seconds=None,
- trigger_oauth=None,
- reconnect_on_disconnect=None,
- force_reconnect=False,
- error=None,
- ),
+ {
+ "kind": "mcp_connect",
+ "target_text": "@modelcontextprotocol/server-everything",
+ "parsed_mode": "npx",
+ "server_name": None,
+ "error": None,
+ },
id="connect-alias-npx-scoped-package",
),
pytest.param(
"/connect uvx demo-server",
- McpConnectCommand(
- target_text="uvx demo-server",
- parsed_mode="uvx",
- server_name=None,
- auth_token=None,
- timeout_seconds=None,
- trigger_oauth=None,
- reconnect_on_disconnect=None,
- force_reconnect=False,
- error=None,
- ),
+ {
+ "kind": "mcp_connect",
+ "target_text": "uvx demo-server",
+ "parsed_mode": "uvx",
+ "server_name": None,
+ "error": None,
+ },
id="connect-alias-uvx",
),
pytest.param(
"/connect python demo_server.py",
- McpConnectCommand(
- target_text="python demo_server.py",
- parsed_mode="stdio",
- server_name=None,
- auth_token=None,
- timeout_seconds=None,
- trigger_oauth=None,
- reconnect_on_disconnect=None,
- force_reconnect=False,
- error=None,
- ),
+ {
+ "kind": "mcp_connect",
+ "target_text": "python demo_server.py",
+ "parsed_mode": "stdio",
+ "server_name": None,
+ "error": None,
+ },
id="connect-alias-stdio",
),
pytest.param(
@@ -159,4 +143,13 @@ def test_parse_special_input_intent_contract(
raw_input: str,
expected: ExpectedParseResult,
) -> None:
- assert parse_special_input(raw_input) == expected
+ actual = parse_special_input(raw_input)
+ if isinstance(expected, dict):
+ assert isinstance(actual, McpConnectCommand)
+ assert actual.kind == expected["kind"]
+ assert actual.target_text == expected["target_text"]
+ assert actual.parsed_mode == expected["parsed_mode"]
+ assert actual.server_name == expected["server_name"]
+ assert actual.error == expected["error"]
+ return
+ assert actual == expected
diff --git a/tests/unit/fast_agent/ui/test_hash_agent_command.py b/tests/unit/fast_agent/ui/test_hash_agent_command.py
index 01dd0304a..06a08cd1d 100644
--- a/tests/unit/fast_agent/ui/test_hash_agent_command.py
+++ b/tests/unit/fast_agent/ui/test_hash_agent_command.py
@@ -17,13 +17,10 @@ def test_parse_hash_agent_preserves_message_spaces(self):
assert result.message == "this is a long message"
assert result.quiet is False
- def test_parse_hash_agent_strips_agent_name(self):
- """Test that agent name is stripped of whitespace."""
+ def test_parse_hash_with_space_after_prefix_is_plain_text(self):
+ """Headings and spaced hashes should stay as text."""
result = parse_special_input("# agent_name message")
- assert isinstance(result, HashAgentCommand)
- assert result.agent_name == "agent_name"
- assert result.message == "message"
- assert result.quiet is False
+ assert result == "# agent_name message"
def test_parse_hash_only_returns_plain_text(self):
"""Test that # alone returns original text."""
@@ -41,6 +38,14 @@ def test_parse_quiet_hash_with_space_returns_plain_text(self):
result = parse_special_input("## heading")
assert result == "## heading"
+ def test_parse_heading_returns_plain_text(self):
+ result = parse_special_input("# Heading")
+ assert result == "# Heading"
+
+ def test_parse_multiline_heading_returns_plain_text(self):
+ result = parse_special_input("# heading\nmore")
+ assert result == "# heading\nmore"
+
def test_parse_hash_agent_multiline_message(self):
"""Test parsing with newlines in message."""
result = parse_special_input("#agent line1\nline2")
diff --git a/tests/unit/fast_agent/ui/test_parse_mcp_commands.py b/tests/unit/fast_agent/ui/test_parse_mcp_commands.py
index 3986f81dd..e2143b680 100644
--- a/tests/unit/fast_agent/ui/test_parse_mcp_commands.py
+++ b/tests/unit/fast_agent/ui/test_parse_mcp_commands.py
@@ -43,6 +43,22 @@ def test_parse_mcp_connect_preserves_quoted_target_arguments() -> None:
assert result.server_name == "docs"
def test_parse_mcp_connect_preserves_quoted_windows_path() -> None:
    """Quoted Windows executable paths survive /mcp connect parsing as one token."""
    result = parse_special_input('/mcp connect "C:\\Program Files\\Tool\\tool.exe" --flag')
    assert isinstance(result, McpConnectCommand)
    assert result.request is not None
    assert result.request.target.command == "C:\\Program Files\\Tool\\tool.exe"
    assert result.request.target.args == ("--flag",)
+
+
def test_connect_alias_matches_mcp_connect() -> None:
    """/connect is a pure alias: it yields the same request as /mcp connect."""
    alias = parse_special_input('/connect demo-server --root "My Folder" --name docs')
    explicit = parse_special_input('/mcp connect demo-server --root "My Folder" --name docs')
    assert isinstance(alias, McpConnectCommand)
    assert isinstance(explicit, McpConnectCommand)
    assert alias.request == explicit.request
+
+
def test_parse_mcp_disconnect() -> None:
result = parse_special_input("/mcp disconnect local")
assert isinstance(result, McpDisconnectCommand)
diff --git a/tests/unit/fast_agent/utils/test_commandline.py b/tests/unit/fast_agent/utils/test_commandline.py
new file mode 100644
index 000000000..97f176b75
--- /dev/null
+++ b/tests/unit/fast_agent/utils/test_commandline.py
@@ -0,0 +1,38 @@
+from __future__ import annotations
+
+from fast_agent.utils.commandline import join_commandline, split_commandline
+
+
+def test_split_commandline_posix_preserves_spaces() -> None:
+ assert split_commandline('demo --root "My Folder"', syntax="posix") == [
+ "demo",
+ "--root",
+ "My Folder",
+ ]
+
+
+def test_join_commandline_posix_quotes_spaces() -> None:
+ rendered = join_commandline(["demo", "--root", "My Folder"], syntax="posix")
+ assert split_commandline(rendered, syntax="posix") == ["demo", "--root", "My Folder"]
+
+
+def test_split_commandline_windows_preserves_quoted_path() -> None:
+ text = '"C:\\Program Files\\Tool\\tool.exe" --flag'
+ assert split_commandline(text, syntax="windows") == [
+ "C:\\Program Files\\Tool\\tool.exe",
+ "--flag",
+ ]
+
+
+def test_join_commandline_windows_round_trips_unc_path_and_empty_arg() -> None:
+ argv = [r"\\server\share\tool.exe", "", "--flag"]
+ rendered = join_commandline(argv, syntax="windows")
+ assert split_commandline(rendered, syntax="windows") == argv
+
+
+def test_split_commandline_windows_handles_backslashes() -> None:
+ text = 'tool.exe "C:\\tmp\\path with spaces\\\\"'
+ assert split_commandline(text, syntax="windows") == [
+ "tool.exe",
+ "C:\\tmp\\path with spaces\\",
+ ]
From fddb6a4c6101c230d3d07ed4a8ee04f4db56b080 Mon Sep 17 00:00:00 2001
From: evalstate <1936278+evalstate@users.noreply.github.com>
Date: Tue, 24 Mar 2026 17:15:56 +0000
Subject: [PATCH 2/9] add mslex
---
pyproject.toml | 1 +
uv.lock | 11 +++++++++++
2 files changed, 12 insertions(+)
diff --git a/pyproject.toml b/pyproject.toml
index 9a70a5460..b563bcb06 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -45,6 +45,7 @@ dependencies = [
"uvloop>=0.22.1; platform_system != 'Windows'",
"multilspy>=0.0.15",
"ruamel.yaml>=0.18.16",
+ "mslex>=1.3.0",
]
[project.optional-dependencies]
diff --git a/uv.lock b/uv.lock
index 152701702..684e4a7b1 100644
--- a/uv.lock
+++ b/uv.lock
@@ -722,6 +722,7 @@ dependencies = [
{ name = "google-genai" },
{ name = "keyring" },
{ name = "mcp" },
+ { name = "mslex" },
{ name = "multilspy" },
{ name = "openai", extra = ["aiohttp"] },
{ name = "opentelemetry-distro" },
@@ -796,6 +797,7 @@ requires-dist = [
{ name = "google-genai", specifier = ">=1.66.0" },
{ name = "keyring", specifier = ">=24.3.1" },
{ name = "mcp", specifier = "==1.26.0" },
+ { name = "mslex", specifier = ">=1.3.0" },
{ name = "multilspy", specifier = ">=0.0.15" },
{ name = "openai", extras = ["aiohttp"], specifier = ">=2.28.0" },
{ name = "opentelemetry-distro", specifier = "==0.60b1" },
@@ -1584,6 +1586,15 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/5e/75/bd9b7bb966668920f06b200e84454c8f3566b102183bc55c5473d96cb2b9/msal_extensions-1.3.1-py3-none-any.whl", hash = "sha256:96d3de4d034504e969ac5e85bae8106c8373b5c6568e4c8fa7af2eca9dbe6bca", size = 20583, upload-time = "2025-03-14T23:51:03.016Z" },
]
+[[package]]
+name = "mslex"
+version = "1.3.0"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/e0/97/7022667073c99a0fe028f2e34b9bf76b49a611afd21b02527fbfd92d4cd5/mslex-1.3.0.tar.gz", hash = "sha256:641c887d1d3db610eee2af37a8e5abda3f70b3006cdfd2d0d29dc0d1ae28a85d", size = 11583, upload-time = "2024-10-16T13:16:18.523Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/64/f2/66bd65ca0139675a0d7b18f0bada6e12b51a984e41a76dbe44761bf1b3ee/mslex-1.3.0-py3-none-any.whl", hash = "sha256:c7074b347201b3466fc077c5692fbce9b5f62a63a51f537a53fbbd02eff2eea4", size = 7820, upload-time = "2024-10-16T13:16:17.566Z" },
+]
+
[[package]]
name = "multidict"
version = "6.7.0"
From 447d874a571ff96fa0fa64af5249aa8e1678153b Mon Sep 17 00:00:00 2001
From: evalstate <1936278+evalstate@users.noreply.github.com>
Date: Tue, 24 Mar 2026 23:10:11 +0000
Subject: [PATCH 3/9] attachment in tui, anthropic via vertex provider.
---
pyproject.toml | 4 +-
src/fast_agent/config.py | 10 +
src/fast_agent/llm/model_display_name.py | 19 +-
src/fast_agent/llm/model_factory.py | 18 ++
.../llm/provider/anthropic/llm_anthropic.py | 110 +++++++++--
.../llm/provider/anthropic/vertex_config.py | 164 +++++++++++++++
.../llm/provider/google/llm_google_native.py | 12 +-
src/fast_agent/llm/provider_key_manager.py | 22 ++-
src/fast_agent/llm/resolved_model.py | 2 +
src/fast_agent/ui/attachment_indicator.py | 79 ++++++++
src/fast_agent/ui/command_payloads.py | 9 +
.../ui/interactive/command_dispatch.py | 49 +++++
src/fast_agent/ui/interactive_prompt.py | 11 +-
src/fast_agent/ui/message_display_helpers.py | 30 ++-
src/fast_agent/ui/model_chip_display.py | 8 +-
src/fast_agent/ui/model_picker.py | 31 ++-
src/fast_agent/ui/model_picker_common.py | 186 +++++++++++++-----
src/fast_agent/ui/prompt/attachment_tokens.py | 103 ++++++++++
src/fast_agent/ui/prompt/command_help.py | 2 +
src/fast_agent/ui/prompt/completer.py | 53 +++++
.../ui/prompt/completion_sources.py | 63 ++++++
src/fast_agent/ui/prompt/input.py | 9 +-
src/fast_agent/ui/prompt/input_toolbar.py | 32 ++-
src/fast_agent/ui/prompt/keybindings.py | 13 ++
src/fast_agent/ui/prompt/parser.py | 17 ++
src/fast_agent/ui/prompt/resource_mentions.py | 54 ++++-
.../ui/test_command_dispatch_flows.py | 54 ++++-
tests/support/command_surface.py | 2 +
.../llm/provider/anthropic/test_vertex.py | 153 ++++++++++++++
.../llm/providers/test_llm_google_vertex.py | 44 +++++
.../unit/fast_agent/llm/test_model_factory.py | 21 ++
.../llm/test_model_selection_catalog.py | 24 +++
.../fast_agent/ui/test_agent_completer.py | 37 ++++
.../ui/test_attachment_indicator.py | 59 ++++++
.../fast_agent/ui/test_attachment_tokens.py | 19 ++
.../ui/test_command_intent_contract.py | 16 ++
.../unit/fast_agent/ui/test_input_toolbar.py | 25 +++
...st_interactive_prompt_resource_mentions.py | 66 +++++++
.../ui/test_message_display_helpers.py | 38 +++-
.../fast_agent/ui/test_model_chip_display.py | 4 +-
.../unit/fast_agent/ui/test_model_display.py | 22 +++
tests/unit/fast_agent/ui/test_model_picker.py | 69 +++++++
.../fast_agent/ui/test_resource_mentions.py | 48 ++++-
uv.lock | 23 ++-
44 files changed, 1719 insertions(+), 115 deletions(-)
create mode 100644 src/fast_agent/llm/provider/anthropic/vertex_config.py
create mode 100644 src/fast_agent/ui/attachment_indicator.py
create mode 100644 src/fast_agent/ui/prompt/attachment_tokens.py
create mode 100644 tests/unit/fast_agent/llm/provider/anthropic/test_vertex.py
create mode 100644 tests/unit/fast_agent/ui/test_attachment_indicator.py
create mode 100644 tests/unit/fast_agent/ui/test_attachment_tokens.py
diff --git a/pyproject.toml b/pyproject.toml
index b563bcb06..e39ba3836 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -22,8 +22,8 @@ dependencies = [
"pyyaml>=6.0.2",
"rich>=14.3.3",
"typer>=0.24.1",
- "anthropic>=0.84.0",
- "openai[aiohttp]>=2.28.0",
+ "anthropic[vertex]>=0.86.0",
+ "openai[aiohttp]>=2.29.0",
"prompt-toolkit>=3.0.52",
"aiohttp>=3.13.2",
"opentelemetry-distro==0.60b1",
diff --git a/src/fast_agent/config.py b/src/fast_agent/config.py
index 2a092ccfa..257dbb8ec 100644
--- a/src/fast_agent/config.py
+++ b/src/fast_agent/config.py
@@ -659,6 +659,15 @@ def _validate_domain_xor(self) -> "AnthropicWebFetchSettings":
return self
+class AnthropicVertexSettings(BaseModel):
+ """Anthropic-on-Vertex configuration."""
+
+ enabled: bool = False
+ project_id: str | None = None
+ location: str | None = None
+ base_url: str | None = None
+
+
class AnthropicSettings(BaseModel):
"""Settings for using Anthropic models in the fast-agent application."""
@@ -690,6 +699,7 @@ class AnthropicSettings(BaseModel):
default="auto",
description="Structured output mode: auto, json, or tool_use",
)
+ vertex_ai: AnthropicVertexSettings = Field(default_factory=AnthropicVertexSettings)
web_search: AnthropicWebSearchSettings = Field(default_factory=AnthropicWebSearchSettings)
web_fetch: AnthropicWebFetchSettings = Field(default_factory=AnthropicWebFetchSettings)
diff --git a/src/fast_agent/llm/model_display_name.py b/src/fast_agent/llm/model_display_name.py
index 67c269c94..5ccaac2ae 100644
--- a/src/fast_agent/llm/model_display_name.py
+++ b/src/fast_agent/llm/model_display_name.py
@@ -1,6 +1,7 @@
from __future__ import annotations
from typing import TYPE_CHECKING
+from urllib.parse import parse_qs, urlsplit
if TYPE_CHECKING:
from fast_agent.llm.resolved_model import ResolvedModelSpec
@@ -40,6 +41,12 @@ def resolve_resolved_model_display_name(
or resolved_model.wire_model_name
)
+ if (
+ resolved_model.provider.value == "anthropic"
+ and resolved_model.model_config.via == "vertex"
+ ):
+ display = f"{display} · Vertex"
+
if max_len is not None and len(display) > max_len:
return display[: max_len - 1] + "…"
return display
@@ -67,4 +74,14 @@ def resolve_model_display_name(
resolved_display = resolve_llm_display_name(llm, max_len=max_len)
if resolved_display is not None:
return resolved_display
- return format_model_display_name(model, max_len=max_len)
+ display = format_model_display_name(model)
+ if display is None:
+ return None
+ if model:
+ query = parse_qs(urlsplit(model).query)
+ via_values = query.get("via") or query.get("source") or []
+ if via_values and via_values[-1].strip().lower() == "vertex":
+ display = f"{display} · Vertex"
+ if max_len is not None and len(display) > max_len:
+ return display[: max_len - 1] + "…"
+ return display
diff --git a/src/fast_agent/llm/model_factory.py b/src/fast_agent/llm/model_factory.py
index f2421fb24..1a64815e0 100644
--- a/src/fast_agent/llm/model_factory.py
+++ b/src/fast_agent/llm/model_factory.py
@@ -14,6 +14,7 @@
from fast_agent.llm.internal.slow import SlowLLM
from fast_agent.llm.model_database import ModelDatabase
from fast_agent.llm.model_overlays import load_model_overlay_registry
+from fast_agent.llm.provider.anthropic.vertex_config import AnthropicRoute
from fast_agent.llm.provider_types import Provider
from fast_agent.llm.reasoning_effort import ReasoningEffortSetting, parse_reasoning_setting
from fast_agent.llm.resolved_model import ResolvedModelSpec, resolve_base_model_params
@@ -35,6 +36,7 @@ class ModelConfig(BaseModel):
provider: Provider
model_name: str
+ via: AnthropicRoute | None = None
reasoning_effort: ReasoningEffortSetting | None = None
text_verbosity: TextVerbosityLevel | None = None
structured_output_mode: StructuredOutputMode | None = None
@@ -55,6 +57,7 @@ class ModelConfig(BaseModel):
class ModelQueryOverrides:
"""Typed query overrides parsed from a model spec query string."""
+ via: AnthropicRoute | None = None
reasoning_effort: ReasoningEffortSetting | None = None
instant: bool | None = None
text_verbosity: TextVerbosityLevel | None = None
@@ -74,6 +77,7 @@ class ModelQueryOverrides:
def with_defaults(self, defaults: Self) -> "ModelQueryOverrides":
"""Return a copy with unset values filled from defaults."""
return ModelQueryOverrides(
+ via=self.via if self.via is not None else defaults.via,
reasoning_effort=(
self.reasoning_effort
if self.reasoning_effort is not None
@@ -128,6 +132,7 @@ def to_model_config(self) -> ModelConfig:
return ModelConfig(
provider=self.provider,
model_name=self.model_name,
+ via=self.query_overrides.via,
reasoning_effort=self.reasoning_effort,
text_verbosity=self.query_overrides.text_verbosity,
structured_output_mode=self.query_overrides.structured_output_mode,
@@ -221,6 +226,7 @@ def _parse_query_overrides(
model_spec: str,
) -> ModelQueryOverrides:
reasoning_effort: ReasoningEffortSetting | None = None
+ via: AnthropicRoute | None = None
text_verbosity: TextVerbosityLevel | None = None
structured_output_mode: StructuredOutputMode | None = None
instant: bool | None = None
@@ -239,6 +245,13 @@ def _parse_query_overrides(
)
reasoning_effort = parsed_reasoning
+ route_values = _collect_query_values(query_params, ("via", "source"))
+ if route_values:
+ raw_value = route_values[-1].strip().lower()
+ if raw_value not in {"direct", "vertex"}:
+ raise ModelConfigError(f"Invalid via query value: '{raw_value}' in '{model_spec}'")
+ via = "vertex" if raw_value == "vertex" else "direct"
+
if "verbosity" in query_params:
raw_value = _collect_query_values(query_params, ("verbosity",))[-1]
parsed_verbosity = parse_text_verbosity(raw_value)
@@ -305,6 +318,7 @@ def _parse_query_overrides(
web_fetch = _parse_on_off_query(raw_value, "web_fetch", model_spec)
return ModelQueryOverrides(
+ via=via,
reasoning_effort=reasoning_effort,
instant=instant,
text_verbosity=text_verbosity,
@@ -678,6 +692,10 @@ def parse_model_spec(
_validate_transport_constraints(provider, model_name, merged_overrides.transport)
_validate_service_tier_constraints(provider, model_name, merged_overrides.service_tier)
+ if merged_overrides.via is not None and provider != Provider.ANTHROPIC:
+ raise ModelConfigError(
+ f"Query parameter 'via' is only supported for Anthropic models, got '{expanded_model_spec}'."
+ )
return ParsedModelSpec(
raw_input=raw_input,
diff --git a/src/fast_agent/llm/provider/anthropic/llm_anthropic.py b/src/fast_agent/llm/provider/anthropic/llm_anthropic.py
index bfb2d028f..b9f101c8b 100644
--- a/src/fast_agent/llm/provider/anthropic/llm_anthropic.py
+++ b/src/fast_agent/llm/provider/anthropic/llm_anthropic.py
@@ -8,7 +8,13 @@
from pathlib import Path
from typing import Any, Mapping, Sequence, Type, Union, cast
-from anthropic import APIError, AsyncAnthropic, AuthenticationError, transform_schema
+from anthropic import (
+ APIError,
+ AsyncAnthropic,
+ AsyncAnthropicVertex,
+ AuthenticationError,
+ transform_schema,
+)
from anthropic.lib.streaming import BetaAsyncMessageStream
from mcp import Tool
from mcp.types import (
@@ -67,6 +73,14 @@
from fast_agent.llm.provider.anthropic.multipart_converter_anthropic import (
AnthropicConverter,
)
+from fast_agent.llm.provider.anthropic.vertex_config import (
+ AnthropicRoute,
+ anthropic_vertex_config,
+ detect_google_adc,
+ resolve_anthropic_route,
+ resolve_anthropic_vertex_location,
+ resolve_anthropic_vertex_project_id,
+)
from fast_agent.llm.provider.anthropic.web_tools import (
build_web_tool_params,
dedupe_preserve_order,
@@ -354,11 +368,17 @@ class AnthropicLLM(FastAgentLLM[MessageParam, Message]):
def __init__(self, **kwargs) -> None:
# Initialize logger - keep it simple without name reference
kwargs.pop("provider", None)
+ route_override = kwargs.pop("via", None)
structured_override = kwargs.pop("structured_output_mode", None)
long_context_requested = kwargs.pop("long_context", False)
web_search_override = kwargs.pop("web_search", None)
web_fetch_override = kwargs.pop("web_fetch", None)
super().__init__(provider=Provider.ANTHROPIC, **kwargs)
+ self._route_override: AnthropicRoute | None = (
+ cast("AnthropicRoute | None", route_override)
+ if route_override in {"direct", "vertex"}
+ else None
+ )
self._structured_output_mode_override: StructuredOutputMode | None = structured_override
self._web_search_override: bool | None = (
bool(web_search_override) if isinstance(web_search_override, bool) else None
@@ -486,6 +506,8 @@ def _list_supported_long_context_models(self) -> list[str]:
def _provider_base_url(self) -> str | None:
assert self.context.config
+ if self._anthropic_route() == "vertex":
+ return self._vertex_cfg().base_url
return self.context.config.anthropic.base_url if self.context.config.anthropic else None
def _provider_default_headers(self) -> dict[str, str] | None:
@@ -495,6 +517,80 @@ def _provider_default_headers(self) -> dict[str, str] | None:
self.context.config.anthropic.default_headers if self.context.config.anthropic else None
)
+ def _provider_api_key(self):
+ from fast_agent.llm.provider_key_manager import ProviderKeyManager
+
+ return ProviderKeyManager.get_api_key(
+ self.provider.config_name,
+ self.context.config,
+ route_hint=self._anthropic_route(),
+ )
+
+ def _vertex_cfg(self):
+ return anthropic_vertex_config(getattr(self.context, "config", None))
+
+ def _anthropic_route(self) -> AnthropicRoute:
+ explicit_route = self._route_override
+ if explicit_route is None:
+ resolved = self._resolved_model_spec
+ if resolved is not None:
+ explicit_route = resolved.model_config.via
+ return resolve_anthropic_route(
+ getattr(self.context, "config", None),
+ explicit_route=explicit_route,
+ )
+
+ def _vertex_project_id(self) -> str:
+ project_id = resolve_anthropic_vertex_project_id(getattr(self.context, "config", None))
+ if project_id is None:
+ raise ProviderKeyError(
+ "Google Cloud project not configured",
+ "Set anthropic.vertex_ai.project_id or configure "
+ "GOOGLE_CLOUD_PROJECT before using Anthropic via Vertex.",
+ )
+ return project_id
+
+ def _vertex_location(self) -> str:
+ location = resolve_anthropic_vertex_location(getattr(self.context, "config", None))
+ if location is None:
+ raise ProviderKeyError(
+ "Google Cloud location not configured",
+ "Set anthropic.vertex_ai.location before using Anthropic via Vertex.",
+ )
+ return location
+
+ def _vertex_credentials(self) -> object:
+ adc_status = detect_google_adc()
+ if not adc_status.available or adc_status.credentials is None:
+ raise ProviderKeyError(
+ "Google ADC not found",
+ "Anthropic via Vertex uses Google Application Default Credentials.\n"
+ "Run `gcloud auth application-default login` or configure a service account.",
+ )
+ return adc_status.credentials
+
+ def _initialize_anthropic_client(self) -> AsyncAnthropic | AsyncAnthropicVertex:
+ base_url = self._base_url()
+ default_headers = self._default_headers()
+
+ if self._anthropic_route() == "vertex":
+ return AsyncAnthropicVertex(
+ project_id=self._vertex_project_id(),
+ region=self._vertex_location(),
+ credentials=cast("Any", self._vertex_credentials()),
+ base_url=base_url,
+ default_headers=default_headers,
+ )
+
+ api_key = self._api_key()
+ if base_url and base_url.endswith("/v1"):
+ base_url = base_url.rstrip("/v1")
+ return AsyncAnthropic(
+ api_key=api_key,
+ base_url=base_url,
+ default_headers=default_headers,
+ )
+
def _get_cache_mode(self) -> str:
"""Get the cache mode configuration."""
cache_mode = "auto" # Default to auto
@@ -1320,7 +1416,7 @@ def _apply_anthropic_cache_plan(
async def _execute_anthropic_stream(
self,
*,
- anthropic: AsyncAnthropic,
+ anthropic: AsyncAnthropic | AsyncAnthropicVertex,
arguments: dict[str, Any],
model: str,
capture_filename: Path | None,
@@ -1566,16 +1662,8 @@ async def _anthropic_completion(
Override this method to use a different LLM.
"""
- api_key = self._api_key()
- base_url = self._base_url()
- if base_url and base_url.endswith("/v1"):
- base_url = base_url.rstrip("/v1")
- default_headers = self._default_headers()
-
try:
- anthropic = AsyncAnthropic(
- api_key=api_key, base_url=base_url, default_headers=default_headers
- )
+ anthropic = self._initialize_anthropic_client()
params = self.get_request_params(request_params)
messages = self._build_request_messages(
params, message_param, pre_messages, history=history
diff --git a/src/fast_agent/llm/provider/anthropic/vertex_config.py b/src/fast_agent/llm/provider/anthropic/vertex_config.py
new file mode 100644
index 000000000..2ea499394
--- /dev/null
+++ b/src/fast_agent/llm/provider/anthropic/vertex_config.py
@@ -0,0 +1,164 @@
+from __future__ import annotations
+
+import os
+from collections.abc import Mapping
+from dataclasses import dataclass
+from typing import Any, Literal
+
+AnthropicRoute = Literal["direct", "vertex"]
+
+_VERTEX_PROJECT_ENV_VARS: tuple[str, ...] = (
+ "ANTHROPIC_VERTEX_PROJECT_ID",
+ "GOOGLE_CLOUD_PROJECT",
+ "GOOGLE_PROJECT_ID",
+ "GCLOUD_PROJECT",
+ "GCP_PROJECT",
+)
+_VERTEX_LOCATION_ENV_VARS: tuple[str, ...] = (
+ "ANTHROPIC_VERTEX_LOCATION",
+ "GOOGLE_CLOUD_LOCATION",
+ "GOOGLE_CLOUD_REGION",
+ "CLOUD_ML_REGION",
+ "VERTEX_REGION",
+)
+_CLOUD_PLATFORM_SCOPE = "https://www.googleapis.com/auth/cloud-platform"
+
+
+@dataclass(frozen=True, slots=True)
+class AnthropicVertexConfig:
+ enabled: bool = False
+ project_id: str | None = None
+ location: str | None = None
+ base_url: str | None = None
+
+
+@dataclass(frozen=True, slots=True)
+class GoogleAdcStatus:
+ available: bool
+ project_id: str | None = None
+ error: Exception | None = None
+ credentials: object | None = None
+
+
+def _get_value(source: Any, key: str) -> Any:
+ if source is None:
+ return None
+ if isinstance(source, Mapping):
+ return source.get(key)
+ return getattr(source, key, None)
+
+
+def _clean_str(value: Any) -> str | None:
+ if not isinstance(value, str):
+ return None
+ stripped = value.strip()
+ return stripped or None
+
+
+def anthropic_vertex_source(config: Any) -> Any:
+ anthropic_cfg = _get_value(config, "anthropic")
+ return _get_value(anthropic_cfg, "vertex_ai")
+
+
+def anthropic_vertex_config(config: Any) -> AnthropicVertexConfig:
+ source = anthropic_vertex_source(config)
+ if source is None:
+ return AnthropicVertexConfig()
+
+ return AnthropicVertexConfig(
+ enabled=bool(_get_value(source, "enabled")),
+ project_id=_clean_str(_get_value(source, "project_id")),
+ location=_clean_str(_get_value(source, "location")),
+ base_url=_clean_str(_get_value(source, "base_url")),
+ )
+
+
+def anthropic_vertex_intent(config: Any) -> bool:
+ cfg = anthropic_vertex_config(config)
+ return bool(cfg.enabled or cfg.project_id or cfg.location or cfg.base_url)
+
+
+def anthropic_vertex_enabled(config: Any) -> bool:
+ return anthropic_vertex_config(config).enabled
+
+
+def detect_google_adc() -> GoogleAdcStatus:
+ try:
+ import google.auth
+
+ credentials, project_id = google.auth.default(scopes=[_CLOUD_PLATFORM_SCOPE])
+ except Exception as exc: # pragma: no cover - exercised via callers
+ return GoogleAdcStatus(available=False, error=exc)
+
+ return GoogleAdcStatus(
+ available=True,
+ project_id=_clean_str(project_id),
+ credentials=credentials,
+ )
+
+
+def resolve_anthropic_vertex_project_id(
+ config: Any,
+ *,
+ adc_status: GoogleAdcStatus | None = None,
+) -> str | None:
+ cfg = anthropic_vertex_config(config)
+ if cfg.project_id is not None:
+ return cfg.project_id
+
+ for env_var in _VERTEX_PROJECT_ENV_VARS:
+ value = _clean_str(os.getenv(env_var))
+ if value is not None:
+ return value
+
+ if adc_status is None:
+ adc_status = detect_google_adc()
+ return adc_status.project_id
+
+
+def resolve_anthropic_vertex_location(config: Any) -> str | None:
+ cfg = anthropic_vertex_config(config)
+ if cfg.location is not None:
+ return cfg.location
+
+ for env_var in _VERTEX_LOCATION_ENV_VARS:
+ value = _clean_str(os.getenv(env_var))
+ if value is not None:
+ return value
+
+ return "global"
+
+
+def resolve_anthropic_route(
+ config: Any,
+ *,
+ explicit_route: AnthropicRoute | None = None,
+) -> AnthropicRoute:
+ if explicit_route is not None:
+ return explicit_route
+ return "direct"
+
+
+def anthropic_vertex_ready(
+ config: Any,
+ *,
+ adc_status: GoogleAdcStatus | None = None,
+) -> tuple[bool, str | None]:
+ if not anthropic_vertex_intent(config):
+ return (False, None)
+
+ if adc_status is None:
+ adc_status = detect_google_adc()
+
+ project_id = resolve_anthropic_vertex_project_id(config, adc_status=adc_status)
+ if project_id is None:
+ return (
+ False,
+ "Google Cloud project not found",
+ )
+ if not adc_status.available:
+ return (
+ False,
+ "Google ADC not found",
+ )
+ return (True, None)
diff --git a/src/fast_agent/llm/provider/google/llm_google_native.py b/src/fast_agent/llm/provider/google/llm_google_native.py
index 103cb5bbc..dd3bfdd89 100644
--- a/src/fast_agent/llm/provider/google/llm_google_native.py
+++ b/src/fast_agent/llm/provider/google/llm_google_native.py
@@ -35,6 +35,7 @@
# Define default model and potentially other Google-specific defaults
DEFAULT_GOOGLE_MODEL = "gemini3"
+_GOOGLE_VERTEX_PARTNER_MODEL_PREFIXES = ("claude",)
# Define Google-specific parameter exclusions if necessary
@@ -184,8 +185,11 @@ def _resolve_model_name(self, model: str) -> str:
* If the caller passes a full publisher resource name, it is respected as-is.
* If Vertex is not enabled, the short id is returned unchanged (Developer API path).
- * If Vertex is enabled and the id contains '-preview-', the suffix is stripped so that
- e.g. 'gemini-2.5-flash-preview-09-2025' becomes 'gemini-2.5-flash'.
+ * If Vertex is enabled, short first-party Google model ids are expanded under
+ `publishers/google`, applying a preview→base fallback so that e.g.
+ 'gemini-2.5-flash-preview-09-2025' becomes 'gemini-2.5-flash'.
+ * Known partner model ids such as Anthropic Claude are left untouched so Vertex can
+ resolve them using the provider-native short model name from the docs.
"""
# Fully-qualified publisher / model resource: do not rewrite.
if model.startswith(("projects/", "publishers/")) or "/publishers/" in model:
@@ -196,6 +200,10 @@ def _resolve_model_name(self, model: str) -> str:
if not (enabled and project_id and location):
return model
+ normalized = model.strip().lower()
+ if normalized.startswith(_GOOGLE_VERTEX_PARTNER_MODEL_PREFIXES):
+ return model
+
# Vertex path: strip any '-preview-…' suffix to fall back to the base model id.
base_model = model.split("-preview-", 1)[0] if "-preview-" in model else model
diff --git a/src/fast_agent/llm/provider_key_manager.py b/src/fast_agent/llm/provider_key_manager.py
index 74d98b039..46e2377e6 100644
--- a/src/fast_agent/llm/provider_key_manager.py
+++ b/src/fast_agent/llm/provider_key_manager.py
@@ -9,6 +9,7 @@
from pydantic import BaseModel
from fast_agent.core.exceptions import ProviderKeyError
+from fast_agent.llm.provider.anthropic.vertex_config import AnthropicRoute, resolve_anthropic_route
from fast_agent.utils.huggingface_hub import get_huggingface_hub_token
PROVIDER_ENVIRONMENT_MAP: dict[str, str] = {
@@ -74,7 +75,12 @@ def _get_provider_config_keys(provider_name: str) -> list[str]:
return keys
@staticmethod
- def get_api_key(provider_name: str, config: Any) -> str:
+ def get_api_key(
+ provider_name: str,
+ config: Any,
+ *,
+ route_hint: str | None = None,
+ ) -> str:
"""
Gets the API key for the specified provider.
@@ -117,6 +123,20 @@ def get_api_key(provider_name: str, config: Any) -> str:
except Exception:
pass
+ if provider_name == "anthropic":
+ try:
+ explicit_route: AnthropicRoute | None
+ if route_hint == "direct":
+ explicit_route = "direct"
+ elif route_hint == "vertex":
+ explicit_route = "vertex"
+ else:
+ explicit_route = None
+ if resolve_anthropic_route(config, explicit_route=explicit_route) == "vertex":
+ return ""
+ except Exception:
+ pass
+
api_key = ProviderKeyManager.get_config_file_key(provider_name, config)
if not api_key:
api_key = ProviderKeyManager.get_env_var(provider_name)
diff --git a/src/fast_agent/llm/resolved_model.py b/src/fast_agent/llm/resolved_model.py
index 9d3c4b4c9..5490002a6 100644
--- a/src/fast_agent/llm/resolved_model.py
+++ b/src/fast_agent/llm/resolved_model.py
@@ -211,6 +211,8 @@ def build_llm_kwargs(self) -> dict[str, object]:
config = self.model_config
kwargs: dict[str, object] = {}
+ if config.via is not None and self.provider == Provider.ANTHROPIC:
+ kwargs["via"] = config.via
if config.reasoning_effort:
kwargs["reasoning_effort"] = config.reasoning_effort
if config.text_verbosity:
diff --git a/src/fast_agent/ui/attachment_indicator.py b/src/fast_agent/ui/attachment_indicator.py
new file mode 100644
index 000000000..4296b2159
--- /dev/null
+++ b/src/fast_agent/ui/attachment_indicator.py
@@ -0,0 +1,79 @@
+"""Current-draft attachment indicator helpers."""
+
+from __future__ import annotations
+
+from dataclasses import dataclass
+from pathlib import Path
+
+from fast_agent.llm.model_database import ModelDatabase
+from fast_agent.mcp.mime_utils import guess_mime_type
+
+ATTACHMENT_GLYPH = "▲"
+ATTACHMENT_SUPPORTED_COLOR = "ansigreen"
+ATTACHMENT_QUESTIONABLE_COLOR = "ansired"
+ATTACHMENT_IDLE_COLOR = "ansibrightblack"
+
+
+@dataclass(frozen=True, slots=True)
+class DraftAttachmentSummary:
+ count: int
+ mime_types: tuple[str, ...]
+ any_questionable: bool
+
+
+def summarize_draft_attachments(
+ text: str,
+ *,
+ model_name: str | None,
+) -> DraftAttachmentSummary | None:
+ from fast_agent.ui.prompt.attachment_tokens import FILE_MENTION_SERVER
+ from fast_agent.ui.prompt.resource_mentions import parse_mentions
+
+ parsed = parse_mentions(text)
+ local_mentions = [
+ mention for mention in parsed.mentions if mention.server_name == FILE_MENTION_SERVER
+ ]
+ if not local_mentions:
+ return None
+
+ mime_types: list[str] = []
+ any_questionable = False
+ for mention in local_mentions:
+ path = Path(mention.resource_uri)
+ if not path.exists():
+ any_questionable = True
+ mime_types.append("application/octet-stream")
+ continue
+ if not path.is_file():
+ any_questionable = True
+ mime_types.append("application/octet-stream")
+ continue
+
+ mime_type = guess_mime_type(str(path))
+ mime_types.append(mime_type)
+ if mime_type == "application/octet-stream":
+ any_questionable = True
+ continue
+ if model_name and not ModelDatabase.supports_mime(model_name, mime_type):
+ any_questionable = True
+
+ return DraftAttachmentSummary(
+ count=len(local_mentions),
+ mime_types=tuple(mime_types),
+ any_questionable=any_questionable,
+ )
+
+
+def render_attachment_indicator(summary: DraftAttachmentSummary | None) -> str | None:
+ if summary is None or summary.count <= 0:
+ return f"<{ATTACHMENT_IDLE_COLOR}> {ATTACHMENT_GLYPH}</{ATTACHMENT_IDLE_COLOR}>"
+
+ if summary.count >= 10:
+ label = f" {ATTACHMENT_GLYPH}+"
+ else:
+ label = f" {ATTACHMENT_GLYPH}{summary.count}"
+
+ color = (
+ ATTACHMENT_QUESTIONABLE_COLOR if summary.any_questionable else ATTACHMENT_SUPPORTED_COLOR
+ )
+ return f"<{color}>{label}</{color}>"
diff --git a/src/fast_agent/ui/command_payloads.py b/src/fast_agent/ui/command_payloads.py
index 90037881d..cb7a5a952 100644
--- a/src/fast_agent/ui/command_payloads.py
+++ b/src/fast_agent/ui/command_payloads.py
@@ -323,6 +323,14 @@ class ShellCommand(CommandBase):
kind: Literal["shell_command"] = "shell_command"
+@dataclass(frozen=True, slots=True)
+class AttachCommand(CommandBase):
+ paths: tuple[str, ...]
+ clear: bool = False
+ error: str | None = None
+ kind: Literal["attach_command"] = "attach_command"
+
+
@dataclass(frozen=True, slots=True)
class ModelReasoningCommand(CommandBase):
value: str | None
@@ -413,6 +421,7 @@ class UnknownCommand(CommandBase):
| ClearSessionsCommand
| PinSessionCommand
| ShellCommand
+ | AttachCommand
| ModelReasoningCommand
| ModelVerbosityCommand
| ModelFastCommand
diff --git a/src/fast_agent/ui/interactive/command_dispatch.py b/src/fast_agent/ui/interactive/command_dispatch.py
index d8f439ecc..3bba73274 100644
--- a/src/fast_agent/ui/interactive/command_dispatch.py
+++ b/src/fast_agent/ui/interactive/command_dispatch.py
@@ -22,6 +22,7 @@
from fast_agent.ui import enhanced_prompt
from fast_agent.ui.command_payloads import (
AgentCommand,
+ AttachCommand,
CardsCommand,
ClearCommand,
ClearSessionsCommand,
@@ -69,6 +70,12 @@
UnknownCommand,
)
from fast_agent.ui.history_display import display_history_show
+from fast_agent.ui.prompt.attachment_tokens import (
+ append_attachment_tokens,
+ build_local_attachment_token,
+ normalize_local_attachment_reference,
+ strip_local_attachment_tokens,
+)
from .command_context import build_command_context, emit_command_outcome
from .mcp_connect_flow import handle_mcp_connect
@@ -131,6 +138,8 @@ async def _dispatch_local_ui_payload(
*,
prompt_provider: "AgentApp",
available_agents_set: set[str],
+ agent_name: str,
+ buffer_prefill: str,
) -> DispatchResult | None:
result = DispatchResult(handled=True)
match payload:
@@ -159,6 +168,43 @@ async def _dispatch_local_ui_payload(
case ShellCommand(command=shell_cmd):
result.shell_execute_cmd = shell_cmd
return result
+ case AttachCommand(paths=paths, clear=clear, error=error):
+ if error:
+ rich_print(f"[red]{error}[/red]")
+ return result
+
+ if clear:
+ result.buffer_prefill = strip_local_attachment_tokens(buffer_prefill)
+ return result
+
+ resolved_paths = list(paths)
+ if not resolved_paths:
+ context = build_command_context(prompt_provider, agent_name)
+ prompted_path = await context.io.prompt_text(
+ "Attach file path:",
+ allow_empty=False,
+ )
+ if not prompted_path:
+ result.buffer_prefill = buffer_prefill
+ return result
+ resolved_paths = [prompted_path]
+
+ tokens: list[str] = []
+ for raw_path in resolved_paths:
+ try:
+ attachment_path = normalize_local_attachment_reference(raw_path)
+ if not attachment_path.exists():
+ raise FileNotFoundError(raw_path)
+ if not attachment_path.is_file():
+ raise IsADirectoryError(raw_path)
+ token = build_local_attachment_token(attachment_path)
+ except Exception as exc:
+ rich_print(f"[red]Unable to attach '{raw_path}': {exc}[/red]")
+ continue
+ tokens.append(token)
+
+ result.buffer_prefill = append_attachment_tokens(buffer_prefill, tokens)
+ return result
case UnknownCommand(command=command):
rich_print(f"[red]Command not found: {command}[/red]")
return result
@@ -728,6 +774,7 @@ async def dispatch_command_payload(
available_agents: list[str],
available_agents_set: set[str],
merge_pinned_agents: Callable[[list[str]], list[str]],
+ buffer_prefill: str = "",
) -> DispatchResult:
del available_agents
@@ -735,6 +782,8 @@ async def dispatch_command_payload(
payload,
prompt_provider=prompt_provider,
available_agents_set=available_agents_set,
+ agent_name=agent,
+ buffer_prefill=buffer_prefill,
)
if local_result is not None:
return local_result
diff --git a/src/fast_agent/ui/interactive_prompt.py b/src/fast_agent/ui/interactive_prompt.py
index a837c8b95..fb7f6b8a0 100644
--- a/src/fast_agent/ui/interactive_prompt.py
+++ b/src/fast_agent/ui/interactive_prompt.py
@@ -573,7 +573,11 @@ def _apply_dispatch_result(
hash_send_quiet=dispatch_result.hash_send_quiet,
shell_execute_cmd=dispatch_result.shell_execute_cmd,
)
- next_buffer_prefill = dispatch_result.buffer_prefill or buffer_prefill
+ next_buffer_prefill = (
+ dispatch_result.buffer_prefill
+ if dispatch_result.buffer_prefill is not None
+ else buffer_prefill
+ )
should_continue = dispatch_result.handled and not pending.has_pending_execution()
return next_state, pending, next_buffer_prefill, should_continue
@@ -732,6 +736,7 @@ async def _process_turn_command_phase(
agent_names=agent_names,
pinned_agent=pinned_agent,
),
+ buffer_prefill=buffer_prefill,
)
except KeyboardInterrupt:
self._handle_ctrl_c_interrupt(
@@ -877,14 +882,14 @@ async def _resolve_prompt_payload(
agent_for_mentions = prompt_provider._agent(agent_name)
except Exception:
rich_print(f"[red]Unable to resolve resource mentions: agent '{agent_name}' unavailable[/red]")
- return None
+ return user_input
try:
resolved_mentions = await resolve_mentions(agent_for_mentions, parsed_mentions)
return build_prompt_with_resources(user_input, resolved_mentions)
except Exception as exc:
rich_print(f"[red]Failed to resolve resource mentions: {exc}[/red]")
- return None
+ return user_input
async def _send_regular_message(
self,
diff --git a/src/fast_agent/ui/message_display_helpers.py b/src/fast_agent/ui/message_display_helpers.py
index 2731f6bd5..796d7e78d 100644
--- a/src/fast_agent/ui/message_display_helpers.py
+++ b/src/fast_agent/ui/message_display_helpers.py
@@ -28,7 +28,8 @@ def extract_user_attachments(message: PromptMessageExtended) -> list[str]:
label = content.name or content.mimeType or "resource"
attachments.append(label)
elif is_image_content(content):
- attachments.append("image")
+ source_uri = _content_source_uri(content)
+ attachments.append(f"image ({source_uri})" if source_uri else "image")
elif is_resource_content(content):
# EmbeddedResource: show name or uri
from mcp.types import EmbeddedResource
@@ -39,6 +40,29 @@ def extract_user_attachments(message: PromptMessageExtended) -> list[str]:
return attachments
+def _content_source_uri(content: object) -> str | None:
+ meta = getattr(content, "meta", None)
+ if not isinstance(meta, dict):
+ return None
+ source_uri = meta.get("fast_agent_source_uri")
+ return source_uri if isinstance(source_uri, str) and source_uri else None
+
+
+def _message_display_text(message: PromptMessageExtended) -> str:
+ from mcp.types import TextContent
+
+ for content in message.content:
+ if not isinstance(content, TextContent):
+ continue
+ meta = getattr(content, "meta", None)
+ if isinstance(meta, dict):
+ original_text = meta.get("fast_agent_original_text")
+ if isinstance(original_text, str):
+ return original_text
+ return content.text
+ return message.last_text() or ""
+
+
def build_user_message_display(
messages: Sequence[PromptMessageExtended],
) -> tuple[str, list[str] | None]:
@@ -47,7 +71,7 @@ def build_user_message_display(
if len(messages) == 1:
message = messages[0]
- message_text = message.last_text() or ""
+ message_text = _message_display_text(message)
attachments = extract_user_attachments(message)
return message_text, attachments or None
@@ -56,7 +80,7 @@ def build_user_message_display(
attachments = extract_user_attachments(message)
if attachments:
lines.append(f"🔗 {', '.join(attachments)}")
- message_text = message.last_text() or ""
+ message_text = _message_display_text(message)
if message_text:
lines.append(message_text)
if index < len(messages):
diff --git a/src/fast_agent/ui/model_chip_display.py b/src/fast_agent/ui/model_chip_display.py
index 742748285..ea602bf67 100644
--- a/src/fast_agent/ui/model_chip_display.py
+++ b/src/fast_agent/ui/model_chip_display.py
@@ -11,10 +11,16 @@ def render_model_chip(
web_search_indicator: str | None = None,
web_fetch_indicator: str | None = None,
service_tier_indicator: str | None = None,
+ attachment_indicator: str | None = None,
) -> str:
indicators = "".join(
indicator
- for indicator in (service_tier_indicator, web_search_indicator, web_fetch_indicator)
+ for indicator in (
+ service_tier_indicator,
+ web_search_indicator,
+ web_fetch_indicator,
+ attachment_indicator,
+ )
if indicator is not None
)
return f"{indicators}"
diff --git a/src/fast_agent/ui/model_picker.py b/src/fast_agent/ui/model_picker.py
index afdfee7a0..8596f415f 100644
--- a/src/fast_agent/ui/model_picker.py
+++ b/src/fast_agent/ui/model_picker.py
@@ -28,7 +28,7 @@
build_snapshot,
find_provider,
model_identity,
- model_options_for_provider,
+ model_options_for_option,
provider_activation_action,
)
from fast_agent.ui.picker_theme import build_picker_style
@@ -166,13 +166,9 @@ def _provider_activation_action(
@property
def current_models(self) -> list[ModelOption]:
- if self.current_provider.overlay_group:
- return self._overlay_models()
- provider = self.current_provider.provider
- assert provider is not None
- return model_options_for_provider(
+ return model_options_for_option(
self.snapshot,
- provider,
+ self.current_provider,
source=self.state.source,
)
@@ -240,16 +236,11 @@ def _apply_initial_model_selection(self) -> None:
self.current_provider.option_key,
)
for source in ("curated", "all"):
- if provider_option.overlay_group:
- models = self._overlay_models()
- else:
- provider = provider_option.provider
- assert provider is not None
- models = model_options_for_provider(
- self.snapshot,
- provider,
- source=source,
- )
+ models = model_options_for_option(
+ self.snapshot,
+ provider_option,
+ source=source,
+ )
match_index = _find_initial_model_index(models, self._initial_model_spec)
if match_index is None:
continue
@@ -299,6 +290,8 @@ def _provider_availability_label(self, option: ProviderOption) -> str:
return "none yet"
if option.active:
return "available"
+ if option.disabled_reason is not None:
+ return "disabled"
if self._provider_activation_action(option) is not None:
return "sign in required"
return "not configured"
@@ -311,6 +304,8 @@ def _provider_availability_style(
return "inactive"
if option.active:
return "active"
+ if option.disabled_reason is not None:
+ return "attention"
if self._provider_activation_action(option) is not None:
return "attention"
return "inactive"
@@ -468,6 +463,8 @@ def _render_status_bar(self) -> StyleFragments:
warning = ""
if self._provider_requires_docs_only():
warning = " · see docs"
+ elif provider.disabled_reason is not None:
+ warning = f" · {provider.disabled_reason}"
elif self._provider_activation_action(provider) is not None:
warning = " · press Enter to log in"
diff --git a/src/fast_agent/ui/model_picker_common.py b/src/fast_agent/ui/model_picker_common.py
index d9f98f3c1..0ebc86506 100644
--- a/src/fast_agent/ui/model_picker_common.py
+++ b/src/fast_agent/ui/model_picker_common.py
@@ -10,6 +10,10 @@
from fast_agent.llm.model_factory import ModelFactory
from fast_agent.llm.model_overlays import load_model_overlay_registry
from fast_agent.llm.model_selection import CatalogModelEntry, ModelSelectionCatalog
+from fast_agent.llm.provider.anthropic.vertex_config import (
+ anthropic_vertex_intent,
+ anthropic_vertex_ready,
+)
from fast_agent.llm.provider_key_manager import ProviderKeyManager
from fast_agent.llm.provider_types import Provider
from fast_agent.llm.reasoning_effort import available_reasoning_values, format_reasoning_setting
@@ -49,6 +53,7 @@
GENERIC_CUSTOM_MODEL_SENTINEL = "generic.__custom__"
CODEX_LOGIN_SENTINEL = "codexresponses.__login__"
+ANTHROPIC_VERTEX_PROVIDER_KEY = "anthropic-vertex"
@dataclass(frozen=True)
@@ -59,6 +64,7 @@ class ProviderOption:
key: str | None = None
display_name: str | None = None
overlay_group: bool = False
+ disabled_reason: str | None = None
@property
def option_key(self) -> str:
@@ -145,6 +151,126 @@ def _provider_is_active(provider: Provider, config_payload: dict[str, Any]) -> b
return False
+def _force_anthropic_vertex_route(model_spec: str) -> str:
+ return _update_query_param(model_spec, key="via", value="vertex")
+
+
+def _catalog_options_from_entries(
+ entries: tuple[CatalogModelEntry, ...],
+ *,
+ provider: Provider,
+ source: ModelSource,
+ spec_transform: Any = None,
+) -> list[ModelOption]:
+ transform = spec_transform or (lambda value: value)
+
+ curated_options: list[ModelOption] = []
+ for entry in entries:
+ spec = transform(entry.model)
+ tags: list[str] = []
+ if entry.local:
+ tags.append("local")
+ if entry.fast:
+ tags.append("fast")
+ if not entry.current:
+ tags.append("legacy")
+
+ suffix = f" ({', '.join(tags)})" if tags else ""
+ entry_label = entry.display_label or entry.alias
+ label = f"{entry_label:<19} → {spec}{suffix}"
+ if entry.description:
+ label = f"{label} — {entry.description}"
+ curated_options.append(
+ ModelOption(
+ spec=spec,
+ label=label,
+ preset_token=entry.alias,
+ fast=entry.fast,
+ curated=entry.current,
+ )
+ )
+
+ if source == "curated":
+ return curated_options
+
+ seen_identities: set[tuple[Provider, str]] = set()
+ options: list[ModelOption] = list(curated_options)
+ for curated in curated_options:
+ identity = model_identity(curated.spec)
+ if identity is not None:
+ seen_identities.add(identity)
+
+ for spec in _static_provider_models(provider):
+ transformed_spec = transform(spec)
+ identity = model_identity(transformed_spec)
+ if identity is not None and identity in seen_identities:
+ continue
+ if identity is not None:
+ seen_identities.add(identity)
+ options.append(ModelOption(spec=transformed_spec, label=f"{transformed_spec} (catalog)"))
+
+ return options
+
+
+def model_options_for_option(
+ snapshot: ModelPickerSnapshot,
+ option: ProviderOption,
+ *,
+ source: ModelSource,
+) -> list[ModelOption]:
+ if option.overlay_group:
+ return _catalog_options_from_entries(
+ option.curated_entries,
+ provider=Provider.ANTHROPIC,
+ source="curated",
+ )
+
+ provider = option.provider
+ assert provider is not None
+ spec_transform = (
+ _force_anthropic_vertex_route
+ if option.option_key == ANTHROPIC_VERTEX_PROVIDER_KEY
+ else None
+ )
+ return _catalog_options_from_entries(
+ option.curated_entries,
+ provider=provider,
+ source=source,
+ spec_transform=spec_transform,
+ )
+
+
+def _anthropic_vertex_provider_option(
+ *,
+ entries: tuple[CatalogModelEntry, ...],
+ config_payload: dict[str, Any],
+) -> ProviderOption | None:
+ if not anthropic_vertex_intent(config_payload):
+ return None
+
+ vertex_ready, disabled_reason = anthropic_vertex_ready(config_payload)
+ rewritten_entries = tuple(
+ CatalogModelEntry(
+ alias=entry.alias,
+ model=_force_anthropic_vertex_route(entry.model),
+ current=entry.current,
+ fast=entry.fast,
+ local=entry.local,
+ display_label=entry.display_label,
+ description=entry.description,
+ )
+ for entry in entries
+ )
+ return ProviderOption(
+ provider=Provider.ANTHROPIC,
+ active=vertex_ready,
+ curated_entries=rewritten_entries,
+ key=ANTHROPIC_VERTEX_PROVIDER_KEY,
+ display_name="Anthropic (Vertex)",
+ disabled_reason=disabled_reason,
+ )
+
+
def build_snapshot(
config_path: str | Path | None = None,
*,
@@ -214,6 +340,13 @@ def build_snapshot(
curated_entries=entries,
)
)
+ if provider == Provider.ANTHROPIC:
+ vertex_option = _anthropic_vertex_provider_option(
+ entries=entries,
+ config_payload=config_payload,
+ )
+ if vertex_option is not None:
+ providers.append(vertex_option)
return ModelPickerSnapshot(providers=tuple(providers), config_payload=config_payload)
@@ -286,7 +419,7 @@ def find_provider(snapshot: ModelPickerSnapshot, provider_name: str) -> Provider
def build_provider_label(option: ProviderOption) -> str:
- status = "active" if option.active else "inactive"
+ status = "active" if option.active else "disabled" if option.disabled_reason else "inactive"
curated_count = len(option.curated_entries)
if option.overlay_group:
entry_text = "overlay" if curated_count == 1 else "overlays"
@@ -386,52 +519,11 @@ def model_options_for_provider(
activation_action=activation_action,
)
]
-
- curated_options: list[ModelOption] = []
- for entry in provider_option.curated_entries:
- tags: list[str] = []
- if entry.local:
- tags.append("local")
- if entry.fast:
- tags.append("fast")
- if not entry.current:
- tags.append("legacy")
-
- suffix = f" ({', '.join(tags)})" if tags else ""
- entry_label = entry.display_label or entry.alias
- label = f"{entry_label:<19} → {entry.model}{suffix}"
- if entry.description:
- label = f"{label} — {entry.description}"
- curated_options.append(
- ModelOption(
- spec=entry.model,
- label=label,
- preset_token=entry.alias,
- fast=entry.fast,
- curated=entry.current,
- )
- )
-
- if source == "curated":
- return curated_options
-
- seen_identities: set[tuple[Provider, str]] = set()
- options: list[ModelOption] = list(curated_options)
-
- for curated in curated_options:
- identity = model_identity(curated.spec)
- if identity is not None:
- seen_identities.add(identity)
-
- for spec in _static_provider_models(provider):
- identity = model_identity(spec)
- if identity is not None and identity in seen_identities:
- continue
- if identity is not None:
- seen_identities.add(identity)
- options.append(ModelOption(spec=spec, label=f"{spec} (catalog)"))
-
- return options
+ return _catalog_options_from_entries(
+ provider_option.curated_entries,
+ provider=provider,
+ source=source,
+ )
def model_capabilities(model_spec: str) -> ModelCapabilities:
diff --git a/src/fast_agent/ui/prompt/attachment_tokens.py b/src/fast_agent/ui/prompt/attachment_tokens.py
new file mode 100644
index 000000000..36a4978a1
--- /dev/null
+++ b/src/fast_agent/ui/prompt/attachment_tokens.py
@@ -0,0 +1,103 @@
+"""Helpers for inline local attachment tokens."""
+
+from __future__ import annotations
+
+import os
+import re
+from pathlib import Path
+from urllib.parse import quote, unquote, urlparse
+from urllib.request import url2pathname
+
+FILE_MENTION_SERVER = "file"
+_LOCAL_ATTACHMENT_TOKEN_RE = re.compile(r"(?P<prefix>^|\s)(?P<token>\^file:[^\s]+)")
+_LOCAL_ATTACHMENT_BODY_RE = r"\^file:[^\s]+"
+
+
+def normalize_local_attachment_reference(
+ reference: str,
+ *,
+ cwd: Path | None = None,
+) -> Path:
+ """Normalize a ``^file:...`` payload into an absolute local path."""
+ raw_value = reference.strip()
+ if not raw_value:
+ raise ValueError("Attachment path is empty")
+
+ decoded_value = unquote(raw_value)
+ path_value = os.path.expandvars(decoded_value)
+
+ if path_value.lower().startswith("file://"):
+ parsed = urlparse(path_value)
+ if parsed.scheme.lower() != "file":
+ raise ValueError(f"Unsupported attachment URI scheme: {parsed.scheme}")
+ uri_path = parsed.path
+ if parsed.netloc and parsed.netloc.lower() != "localhost":
+ uri_path = f"//{parsed.netloc}{uri_path}"
+ if not uri_path:
+ raise ValueError("Attachment URI path is empty")
+ resolved_path = Path(url2pathname(uri_path))
+ else:
+ resolved_path = Path(os.path.expanduser(path_value))
+
+ if not resolved_path.is_absolute():
+ resolved_path = (cwd or Path.cwd()) / resolved_path
+
+ return resolved_path.resolve(strict=False)
+
+
+def encode_local_attachment_reference(path_text: str) -> str:
+ """Percent-encode a token path while keeping it compact and path-like."""
+ normalized = path_text.replace("\\", "/")
+ return quote(normalized, safe="/._~-:")
+
+
+def build_local_attachment_token(path: str | Path) -> str:
+ """Build a canonical ``^file:...`` token for a local path."""
+ if not isinstance(path, Path):
+ path = normalize_local_attachment_reference(path)
+ normalized = path.resolve(strict=False)
+ return f"^{FILE_MENTION_SERVER}:{encode_local_attachment_reference(normalized.as_posix())}"
+
+
+def strip_local_attachment_tokens(text: str) -> str:
+ """Remove inline local attachment tokens while preserving other text."""
+ stripped = re.sub(
+ rf"(^|\n)[ \t]*{_LOCAL_ATTACHMENT_BODY_RE}[ \t]*(?:\n|$)",
+ lambda match: match.group(1),
+ text,
+ flags=re.MULTILINE,
+ )
+    stripped = re.sub(
+        rf"(?P<lead>[ \t]){_LOCAL_ATTACHMENT_BODY_RE}(?P<trail>[ \t])",
+        r"\g<lead>",
+        stripped,
+    )
+    stripped = re.sub(
+        rf"(?P<lead>[ \t]+){_LOCAL_ATTACHMENT_BODY_RE}(?=$|\n)",
+        "",
+        stripped,
+    )
+    stripped = re.sub(
+        rf"(?:(?<=^)|(?<=\s)){_LOCAL_ATTACHMENT_BODY_RE}(?P<trail>[ \t]+)",
+        "",
+        stripped,
+        flags=re.MULTILINE,
+    )
+ stripped = re.sub(
+ rf"(?:(?<=^)|(?<=\s)){_LOCAL_ATTACHMENT_BODY_RE}",
+ "",
+ stripped,
+ flags=re.MULTILINE,
+ )
+ return stripped
+
+
+def append_attachment_tokens(text: str, tokens: list[str]) -> str:
+ """Append attachment tokens to existing draft text."""
+ if not tokens:
+ return text
+ if not text:
+ return " ".join(tokens)
+ if text[-1].isspace():
+ return f"{text}{' '.join(tokens)}"
+ return f"{text} {' '.join(tokens)}"
diff --git a/src/fast_agent/ui/prompt/command_help.py b/src/fast_agent/ui/prompt/command_help.py
index e7b5bba3f..c2934487f 100644
--- a/src/fast_agent/ui/prompt/command_help.py
+++ b/src/fast_agent/ui/prompt/command_help.py
@@ -9,6 +9,7 @@ def render_help_lines(*, show_webclear_help: bool) -> list[str]:
" /help - Show this help",
" /system - Show the current system prompt",
" /prompt - Load a Prompt File or use MCP Prompt",
+ " /attach [path ...|clear] - Stage or clear local ^file: attachments",
" /usage - Show current usage statistics",
" /skills - List local skills for the manager directory",
" /skills available - Browse marketplace skills before installing",
@@ -94,6 +95,7 @@ def render_help_lines(*, show_webclear_help: bool) -> list[str]:
" F7 - Cycle verbosity (when supported)",
" F8 - Toggle web search (when supported)",
" F9 - Toggle web fetch (when supported)",
+ " F10 - Remove staged local ^file: attachments from the draft",
" Ctrl+T - Toggle multiline mode",
" Ctrl+E - Edit in external editor",
" Ctrl+Y - Copy last assistant response to clipboard",
diff --git a/src/fast_agent/ui/prompt/completer.py b/src/fast_agent/ui/prompt/completer.py
index af9a5c8e7..9228f50fd 100644
--- a/src/fast_agent/ui/prompt/completer.py
+++ b/src/fast_agent/ui/prompt/completer.py
@@ -11,6 +11,7 @@
from dataclasses import dataclass
from pathlib import Path
from typing import TYPE_CHECKING, Any, TypeVar
+from urllib.parse import unquote
from mcp.types import ResourceTemplate
from prompt_toolkit.completion import Completer, Completion
@@ -21,6 +22,10 @@
from fast_agent.config import get_settings
from fast_agent.llm.reasoning_effort import available_reasoning_values
from fast_agent.llm.text_verbosity import available_text_verbosity_values
+from fast_agent.ui.prompt.attachment_tokens import (
+ FILE_MENTION_SERVER,
+ encode_local_attachment_reference,
+)
from fast_agent.ui.prompt.resource_mentions import template_argument_names
if TYPE_CHECKING:
@@ -90,6 +95,7 @@ def __init__(
"(/cards, /cards add, /cards remove, /cards update, /cards publish, /cards registry)"
),
"prompt": "Load a Prompt File or use MCP Prompt",
+ "attach": "Stage local file attachment token(s) for the next prompt",
"system": "Show the current system prompt",
"usage": "Show current usage statistics",
"markdown": "Show last assistant message without markdown formatting",
@@ -653,6 +659,49 @@ def _complete_shell_paths(self, partial: str, delete_len: int, max_results: int
except (PermissionError, FileNotFoundError, NotADirectoryError):
pass
+ def _complete_local_attachment_paths(self, partial: str) -> list[Completion]:
+ decoded_partial = unquote(partial)
+ if decoded_partial.lower().startswith("file://"):
+ from fast_agent.ui.prompt.attachment_tokens import normalize_local_attachment_reference
+
+ try:
+ decoded_partial = str(normalize_local_attachment_reference(decoded_partial))
+ except ValueError:
+ return []
+
+ resolved = self._resolve_completion_search(decoded_partial)
+ if not resolved:
+ return []
+
+ search_dir = resolved.search_dir
+ prefix = resolved.prefix
+ completion_prefix = resolved.completion_prefix
+ completions: list[Completion] = []
+ try:
+ for entry in sorted(search_dir.iterdir()):
+ name = entry.name
+ if name.startswith(".") and not prefix.startswith("."):
+ continue
+ if not name.lower().startswith(prefix.lower()):
+ continue
+
+ completion_text = f"{completion_prefix}{name}" if completion_prefix else name
+ if entry.is_dir():
+ completion_text += "/"
+
+ completions.append(
+ Completion(
+ encode_local_attachment_reference(completion_text),
+ start_position=-len(partial),
+ display=name + ("/" if entry.is_dir() else ""),
+ display_meta="directory" if entry.is_dir() else "file",
+ )
+ )
+ except (PermissionError, FileNotFoundError, NotADirectoryError):
+ return []
+
+ return completions
+
def _complete_subcommands(
self,
parts: Sequence[str],
@@ -1267,6 +1316,7 @@ def _mention_completions(self, text_before_cursor: str) -> list[Completion] | No
return list(cached)
server_names = self._run_async_completion(self._list_connected_resource_servers()) or []
+ server_names = list(dict.fromkeys([*server_names, FILE_MENTION_SERVER]))
partial = context.partial.lower()
completions = [
Completion(
@@ -1285,6 +1335,9 @@ def _mention_completions(self, text_before_cursor: str) -> list[Completion] | No
return []
if context.kind == "resource":
+ if context.server_name == FILE_MENTION_SERVER:
+ return self._complete_local_attachment_paths(context.partial)
+
cache_key = (
"resource",
self.current_agent,
diff --git a/src/fast_agent/ui/prompt/completion_sources.py b/src/fast_agent/ui/prompt/completion_sources.py
index 63f697e5b..db43d6255 100644
--- a/src/fast_agent/ui/prompt/completion_sources.py
+++ b/src/fast_agent/ui/prompt/completion_sources.py
@@ -2,6 +2,7 @@
from __future__ import annotations
+import shlex
from typing import TYPE_CHECKING
from prompt_toolkit.completion import Completion
@@ -184,6 +185,64 @@ def _prompt_command_completions(
return list(completer._complete_history_files(partial))
+def _attach_command_completions(
+ completer: "AgentCompleter",
+ text: str,
+ text_lower: str,
+) -> list[Completion] | None:
+ if not text_lower.startswith("/attach "):
+ return None
+
+ remainder = text[len("/attach ") :]
+ if not remainder:
+ results = [
+ Completion(
+ "clear",
+ start_position=0,
+ display="clear",
+ display_meta="remove staged local attachments from the next draft buffer",
+ )
+ ]
+ results.extend(list(completer._complete_shell_paths("", 0)))
+ return results
+
+ try:
+ parts = shlex.split(remainder)
+ except ValueError:
+ return []
+
+ if remainder.endswith((" ", "\t")):
+ partial = ""
+ token_count = len(parts)
+ else:
+ partial = parts[-1] if parts else remainder
+ token_count = len(parts) if parts else 1
+
+ results: list[Completion] = []
+ if token_count <= 1 and "clear".startswith(partial.lower()):
+ results.append(
+ Completion(
+ "clear",
+ start_position=-len(partial),
+ display="clear",
+ display_meta="remove staged local attachments from the next draft buffer",
+ )
+ )
+ for completion in completer._complete_shell_paths(partial, len(partial)):
+ completion_text = completion.text
+ if any(char.isspace() for char in completion_text):
+ completion_text = shlex.quote(completion_text)
+ results.append(
+ Completion(
+ completion_text,
+ start_position=completion.start_position,
+ display=completion.display,
+ display_meta=completion.display_meta,
+ )
+ )
+ return results
+
+
def _session_delete_completions(
completer: "AgentCompleter",
partial: str,
@@ -948,6 +1007,10 @@ def command_completions(
if prompt_result is not None:
return prompt_result
+ attach_result = _attach_command_completions(completer, text, text_lower)
+ if attach_result is not None:
+ return attach_result
+
session_result = _session_command_completions(completer, text, text_lower)
if session_result is not None:
return session_result
diff --git a/src/fast_agent/ui/prompt/input.py b/src/fast_agent/ui/prompt/input.py
index bf13fccc6..707efb109 100644
--- a/src/fast_agent/ui/prompt/input.py
+++ b/src/fast_agent/ui/prompt/input.py
@@ -482,6 +482,7 @@ def _build_toolbar(
toolbar_color: str,
agent_provider: "AgentApp | None",
shell_context: ShellInputContext,
+ session_factory: "Callable[[], PromptSession]",
) -> "Callable[[], HTML]":
shell_state = ShellToolbarState(
enabled=shell_context.enabled,
@@ -491,6 +492,10 @@ def _build_toolbar(
def get_toolbar() -> HTML:
global _copy_notice
+ try:
+ current_input_text = session_factory().default_buffer.text
+ except Exception:
+ current_input_text = ""
result = render_input_toolbar(
agent_name=agent_name,
toolbar_color=toolbar_color,
@@ -501,6 +506,7 @@ def get_toolbar() -> HTML:
copy_notice=_copy_notice,
copy_notice_until=_copy_notice_until,
shell_path_switch_delay_seconds=_SHELL_PATH_SWITCH_DELAY_SECONDS,
+ current_input_text=current_input_text,
)
shell_state.show_path_segment = result.show_shell_path_segment
if result.clear_copy_notice:
@@ -704,7 +710,7 @@ def _show_input_help_banner(
rich_print(
"""[dim]Use '/' for commands, '!' for shell. '#' to query, '@' to switch agents\n"""
"""CTRL+T multiline, CTRL+Y copy last message, CTRL+E external editor.\n"""
- """CTRL+Space or Tab for path completion. '^' for resource attach.[/dim]"""
+ """CTRL+Space or Tab for path completion. Use /attach, `^file:`, or F10 for attachments.[/dim]"""
)
@@ -919,6 +925,7 @@ def session_factory() -> PromptSession:
toolbar_color=toolbar_color,
agent_provider=agent_provider,
shell_context=shell_context,
+ session_factory=session_factory,
)
session = create_prompt_session(
history=agent_histories[agent_name],
diff --git a/src/fast_agent/ui/prompt/input_toolbar.py b/src/fast_agent/ui/prompt/input_toolbar.py
index ff504b83e..76a6a3c0e 100644
--- a/src/fast_agent/ui/prompt/input_toolbar.py
+++ b/src/fast_agent/ui/prompt/input_toolbar.py
@@ -14,6 +14,10 @@
from fast_agent.llm.model_info import ModelInfo
from fast_agent.llm.provider_types import Provider
from fast_agent.ui import notification_tracker
+from fast_agent.ui.attachment_indicator import (
+ render_attachment_indicator,
+ summarize_draft_attachments,
+)
from fast_agent.ui.model_chip_display import render_model_chip
from fast_agent.ui.prompt.alert_flags import _resolve_alert_flags_from_history
from fast_agent.ui.prompt.toolbar import (
@@ -53,6 +57,7 @@ class ToolbarRenderResult:
@dataclass(slots=True)
class ToolbarAgentState:
agent: object | None = None
+ model_name: str | None = None
model_display: str | None = None
tdv_segment: str | None = None
turn_count: int = 0
@@ -98,6 +103,7 @@ def render_input_toolbar(
copy_notice: str | None,
copy_notice_until: float,
shell_path_switch_delay_seconds: float,
+ current_input_text: str = "",
) -> ToolbarRenderResult:
mode_style, mode_text = _resolve_toolbar_mode(multiline_mode)
shortcut_text = ""
@@ -107,7 +113,11 @@ def render_input_toolbar(
toolbar_color,
agent_state.agent,
)
- middle = _build_middle_segment(agent_state, shortcut_text)
+ attachment_summary = summarize_draft_attachments(
+ current_input_text,
+ model_name=agent_state.model_name,
+ )
+ middle = _build_middle_segment(agent_state, shortcut_text, attachment_summary=attachment_summary)
notification_segment = _build_notification_segment()
copy_notice_segment, clear_copy_notice = _build_copy_notice_segment(
copy_notice,
@@ -165,6 +175,7 @@ def _resolve_toolbar_agent_state(
tdv_segment = _resolve_tdv_segment(agent, model_name, llm)
return ToolbarAgentState(
agent=agent,
+ model_name=model_name,
model_display=model_display,
tdv_segment=tdv_segment,
turn_count=turn_count,
@@ -369,7 +380,12 @@ def _style_tdv_flag(letter: str, supported: bool, alert_flags: set[str]) -> str:
return f""
-def _build_middle_segment(agent_state: ToolbarAgentState, shortcut_text: str) -> str:
+def _build_middle_segment(
+ agent_state: ToolbarAgentState,
+ shortcut_text: str,
+ *,
+ attachment_summary=None,
+) -> str:
middle_segments: list[str] = []
if agent_state.model_display:
model_prefix = ""
@@ -378,17 +394,21 @@ def _build_middle_segment(agent_state: ToolbarAgentState, shortcut_text: str) ->
elif agent_state.is_overlay_model:
model_prefix = "▼"
model_label = f"{model_prefix}{agent_state.model_display}"
- gauge_segment = f" {agent_state.model_gauges}" if agent_state.model_gauges else ""
+ attachment_indicator = render_attachment_indicator(attachment_summary)
model_chip = render_model_chip(
model_label=model_label,
web_search_indicator=agent_state.web_search_indicator,
web_fetch_indicator=agent_state.web_fetch_indicator,
service_tier_indicator=agent_state.service_tier_indicator,
)
+ prefix = ""
if agent_state.tdv_segment:
- middle_segments.append(f"{agent_state.tdv_segment}{gauge_segment} {model_chip}")
- else:
- middle_segments.append(f"{gauge_segment} {model_chip}")
+ prefix += agent_state.tdv_segment
+ if attachment_indicator:
+ prefix += attachment_indicator
+ if agent_state.model_gauges:
+ prefix += agent_state.model_gauges
+ middle_segments.append(f"{prefix} {model_chip}" if prefix else model_chip)
context_chip = _format_context_usage_percent_for_toolbar(agent_state.context_pct)
middle_segments.append(
diff --git a/src/fast_agent/ui/prompt/keybindings.py b/src/fast_agent/ui/prompt/keybindings.py
index 0d2da4944..1cee3509f 100644
--- a/src/fast_agent/ui/prompt/keybindings.py
+++ b/src/fast_agent/ui/prompt/keybindings.py
@@ -11,6 +11,7 @@
from prompt_toolkit.lexers import Lexer
from rich import print as rich_print
+from fast_agent.ui.prompt.attachment_tokens import strip_local_attachment_tokens
from fast_agent.ui.prompt.editor import get_text_from_editor
from fast_agent.ui.prompt.parser import try_parse_hash_agent_command
@@ -187,6 +188,18 @@ def _(event) -> None:
if _invoke_callback(on_cycle_web_fetch, event):
return
+ @kb.add("f10")
+ def _(event) -> None:
+ cleared = strip_local_attachment_tokens(event.current_buffer.text)
+ if cleared == event.current_buffer.text:
+ return
+ event.current_buffer.text = cleared
+ event.current_buffer.cursor_position = len(cleared)
+ if event.app:
+ event.app.invalidate()
+ elif app:
+ app.invalidate()
+
@kb.add("c-m", filter=Condition(lambda: _has_any_completions()), eager=True)
@kb.add("enter", filter=Condition(lambda: _has_any_completions()), eager=True)
def _(event) -> None:
diff --git a/src/fast_agent/ui/prompt/parser.py b/src/fast_agent/ui/prompt/parser.py
index da1f6433a..782286c36 100644
--- a/src/fast_agent/ui/prompt/parser.py
+++ b/src/fast_agent/ui/prompt/parser.py
@@ -20,6 +20,7 @@
from fast_agent.mcp.connect_targets import parse_connect_command_text
from fast_agent.ui.command_payloads import (
AgentCommand,
+ AttachCommand,
CardsCommand,
ClearCommand,
ClearSessionsCommand,
@@ -159,6 +160,21 @@ def _parse_connect_command(remainder: str, *, usage: str) -> McpConnectCommand:
return McpConnectCommand(request=None, error=str(exc))
+def _parse_attach_command(remainder: str) -> AttachCommand:
+ if not remainder:
+ return AttachCommand(paths=())
+
+ try:
+ tokens = shlex.split(remainder)
+ except ValueError as exc:
+ return AttachCommand(paths=(), error=str(exc))
+
+ if len(tokens) == 1 and tokens[0].lower() == "clear":
+ return AttachCommand(paths=(), clear=True)
+
+ return AttachCommand(paths=tuple(tokens))
+
+
def _parse_history_command(remainder: str) -> CommandPayload:
if not remainder:
return ShowHistoryCommand(agent=None)
@@ -588,6 +604,7 @@ def _parse_slash_command(cmd_line: str) -> str | CommandPayload:
"mcp": _parse_mcp_command,
"connect": _parse_connect_alias_command,
"prompt": _parse_prompt_command,
+ "attach": _parse_attach_command,
}
parser = command_parsers.get(cmd)
if parser is not None:
diff --git a/src/fast_agent/ui/prompt/resource_mentions.py b/src/fast_agent/ui/prompt/resource_mentions.py
index 428a516df..076cf1607 100644
--- a/src/fast_agent/ui/prompt/resource_mentions.py
+++ b/src/fast_agent/ui/prompt/resource_mentions.py
@@ -4,12 +4,18 @@
import re
from dataclasses import dataclass
+from pathlib import Path
from typing import TYPE_CHECKING, Any
from urllib.parse import quote
from mcp.types import ContentBlock, EmbeddedResource, ReadResourceResult, TextContent
+from fast_agent.mcp.mcp_content import MCPFile, MCPImage
from fast_agent.mcp.prompt_message_extended import PromptMessageExtended
+from fast_agent.ui.prompt.attachment_tokens import (
+ FILE_MENTION_SERVER,
+ normalize_local_attachment_reference,
+)
if TYPE_CHECKING:
from collections.abc import Sequence
@@ -45,7 +51,7 @@ class ResolvedMentions:
text: str
cleaned_text: str
mentions: list[ParsedMention]
- resources: list[EmbeddedResource]
+ resources: list[ContentBlock]
class ResourceMentionError(ValueError):
@@ -235,8 +241,11 @@ def _parse_token(token: str, *, start: int, end: int) -> ParsedMention | None:
if not server_name or not resource_expr:
return None
- template_uri, args = _parse_template_args(resource_expr)
- resource_uri = _render_template_uri(template_uri, args)
+ if server_name == FILE_MENTION_SERVER:
+ resource_uri = str(normalize_local_attachment_reference(resource_expr))
+ else:
+ template_uri, args = _parse_template_args(resource_expr)
+ resource_uri = _render_template_uri(template_uri, args)
return ParsedMention(
raw=token,
@@ -264,7 +273,7 @@ def parse_mentions(text: str) -> ParsedMentions:
parsed: ParsedMention | None
try:
parsed = _parse_token(token, start=token_start, end=token_end)
- except ResourceMentionError as exc:
+ except (ResourceMentionError, ValueError) as exc:
parsed = None
warnings.append(f"Malformed resource mention '{token}': {exc}")
@@ -307,14 +316,20 @@ async def resolve_mentions(agent: Any, parsed: ParsedMentions) -> ResolvedMentio
resources=[],
)
+ remote_mentions = [mention for mention in parsed.mentions if mention.server_name != FILE_MENTION_SERVER]
get_resource = getattr(agent, "get_resource", None)
- if not callable(get_resource):
+ if remote_mentions and not callable(get_resource):
raise ResourceMentionError("Current agent does not support MCP resources")
- resources: list[EmbeddedResource] = []
+ resources: list[ContentBlock] = []
failures: list[str] = []
for mention in parsed.mentions:
try:
+ if mention.server_name == FILE_MENTION_SERVER:
+ resources.append(_resolve_local_content_block(mention.resource_uri))
+ continue
+ if not callable(get_resource):
+ raise ResourceMentionError("Current agent does not support MCP resources")
result: ReadResourceResult = await get_resource(
mention.resource_uri,
namespace=mention.server_name,
@@ -341,7 +356,11 @@ def build_prompt_with_resources(
) -> PromptMessageExtended:
"""Build PromptMessageExtended with text content and embedded resources."""
text = resolved.cleaned_text if resolved.mentions else original_text
- content: list[ContentBlock] = [TextContent(type="text", text=text)]
+ text_content = TextContent(type="text", text=text)
+ text_meta = dict(getattr(text_content, "meta", None) or {})
+ text_meta["fast_agent_original_text"] = original_text
+ text_content.meta = text_meta
+ content: list[ContentBlock] = [text_content]
content.extend(resolved.resources)
return PromptMessageExtended(role="user", content=content)
@@ -349,3 +368,24 @@ def build_prompt_with_resources(
def mentions_in_text(text: str) -> Sequence[ParsedMention]:
"""Convenience helper primarily for tests."""
return parse_mentions(text).mentions
+
+
+def _resolve_local_content_block(path_text: str) -> ContentBlock:
+ path = Path(path_text)
+ if not path.exists():
+ raise FileNotFoundError(path)
+ if not path.is_file():
+ raise IsADirectoryError(path)
+
+ message = MCPImage(path=path) if _is_image_path(path) else MCPFile(path=path)
+ content = message["content"]
+ meta = dict(getattr(content, "meta", None) or {})
+ meta["fast_agent_source_uri"] = path.as_uri()
+ content.meta = meta
+ return content
+
+
+def _is_image_path(path: Path) -> bool:
+ from fast_agent.mcp.mime_utils import guess_mime_type, is_image_mime_type
+
+ return is_image_mime_type(guess_mime_type(str(path)))
diff --git a/tests/integration/ui/test_command_dispatch_flows.py b/tests/integration/ui/test_command_dispatch_flows.py
index f9d485244..a3967b171 100644
--- a/tests/integration/ui/test_command_dispatch_flows.py
+++ b/tests/integration/ui/test_command_dispatch_flows.py
@@ -19,7 +19,6 @@
if TYPE_CHECKING:
from pathlib import Path
-
@pytest.mark.integration
@pytest.mark.asyncio
async def test_dispatch_session_flow_updates_session_state(tmp_path: Path) -> None:
@@ -128,3 +127,56 @@ async def test_dispatch_hash_agent_sets_message_handoff() -> None:
assert result.hash_send_target == "review"
assert result.hash_send_message == "please assess this change"
assert result.hash_send_quiet is True
+
+
+@pytest.mark.integration
+@pytest.mark.asyncio
+async def test_dispatch_attach_command_prefills_buffer_with_file_tokens(tmp_path: Path) -> None:
+ attachment = tmp_path / "scan.pdf"
+ attachment.write_bytes(b"%PDF-1.4")
+
+ provider = CommandSurfaceProvider({"main": CommandSurfaceAgent(name="main")})
+ owner = CommandSurfaceOwner(agent_types=provider.agent_types())
+
+ result = await dispatch_tui_command(
+ f"/attach {attachment}",
+ owner=owner,
+ prompt_provider=provider,
+ buffer_prefill="summarize this",
+ )
+
+ assert result.buffer_prefill is not None
+ assert result.buffer_prefill.startswith("summarize this ^file:")
+ assert str(attachment) in result.buffer_prefill
+
+
+@pytest.mark.integration
+@pytest.mark.asyncio
+async def test_dispatch_attach_clear_removes_only_local_tokens() -> None:
+ provider = CommandSurfaceProvider({"main": CommandSurfaceAgent(name="main")})
+ owner = CommandSurfaceOwner(agent_types=provider.agent_types())
+
+ result = await dispatch_tui_command(
+ "/attach clear",
+ owner=owner,
+ prompt_provider=provider,
+ buffer_prefill="compare ^file:/tmp/a.png with ^demo:file:///tmp/ref keep this",
+ )
+
+ assert result.buffer_prefill == "compare with ^demo:file:///tmp/ref keep this"
+
+
+@pytest.mark.integration
+@pytest.mark.asyncio
+async def test_dispatch_attach_command_rejects_directories(tmp_path: Path) -> None:
+ provider = CommandSurfaceProvider({"main": CommandSurfaceAgent(name="main")})
+ owner = CommandSurfaceOwner(agent_types=provider.agent_types())
+
+ result = await dispatch_tui_command(
+ f"/attach {tmp_path}",
+ owner=owner,
+ prompt_provider=provider,
+ buffer_prefill="draft",
+ )
+
+ assert result.buffer_prefill == "draft"
diff --git a/tests/support/command_surface.py b/tests/support/command_surface.py
index c71e263d2..48d57e26b 100644
--- a/tests/support/command_surface.py
+++ b/tests/support/command_surface.py
@@ -223,6 +223,7 @@ async def dispatch_tui_command(
owner: CommandSurfaceOwner,
prompt_provider: CommandSurfaceProvider,
agent_name: str = "main",
+ buffer_prefill: str = "",
) -> DispatchResult:
parsed = parse_special_input(raw_input)
assert is_command_payload(parsed)
@@ -234,6 +235,7 @@ async def dispatch_tui_command(
available_agents=prompt_provider.agent_names(),
available_agents_set=set(prompt_provider.agent_names()),
merge_pinned_agents=merge_pinned_agents,
+ buffer_prefill=buffer_prefill,
)
diff --git a/tests/unit/fast_agent/llm/provider/anthropic/test_vertex.py b/tests/unit/fast_agent/llm/provider/anthropic/test_vertex.py
new file mode 100644
index 000000000..a14bbea58
--- /dev/null
+++ b/tests/unit/fast_agent/llm/provider/anthropic/test_vertex.py
@@ -0,0 +1,153 @@
+import types
+
+import pytest
+
+from fast_agent.config import AnthropicSettings, Settings
+from fast_agent.context import Context
+from fast_agent.core.exceptions import ProviderKeyError
+from fast_agent.llm.provider.anthropic.llm_anthropic import AnthropicLLM
+from fast_agent.llm.provider.anthropic.vertex_config import GoogleAdcStatus
+from fast_agent.llm.provider_key_manager import ProviderKeyManager
+
+
+def _build_llm(config: Settings, *, via: str | None = None) -> AnthropicLLM:
+ kwargs = {"context": Context(config=config), "model": "claude-sonnet-4-6"}
+ if via is not None:
+ kwargs["via"] = via
+ return AnthropicLLM(**kwargs)
+
+
+def test_vertex_cfg_accepts_model_object() -> None:
+ anthropic = AnthropicSettings()
+ setattr(
+ anthropic,
+ "vertex_ai",
+ types.SimpleNamespace(
+ enabled=True,
+ project_id="proj",
+ location="global",
+ base_url="https://vertex.example",
+ ),
+ )
+ config = Settings(anthropic=anthropic)
+
+ llm = _build_llm(config)
+ vertex_cfg = llm._vertex_cfg()
+
+ assert vertex_cfg.enabled is True
+ assert vertex_cfg.project_id == "proj"
+ assert vertex_cfg.location == "global"
+ assert vertex_cfg.base_url == "https://vertex.example"
+
+
+def test_provider_key_manager_allows_vertex_route_without_api_key() -> None:
+ config = Settings.model_validate(
+ {
+ "anthropic": {
+ "vertex_ai": {
+ "enabled": True,
+ "project_id": "proj",
+ "location": "global",
+ }
+ }
+ }
+ )
+
+ assert ProviderKeyManager.get_api_key("anthropic", config, route_hint="vertex") == ""
+ with pytest.raises(ProviderKeyError):
+ ProviderKeyManager.get_api_key("anthropic", config)
+ with pytest.raises(ProviderKeyError):
+ ProviderKeyManager.get_api_key("anthropic", config, route_hint="direct")
+
+
+def test_initialize_anthropic_client_uses_vertex(monkeypatch) -> None:
+ config = Settings.model_validate(
+ {
+ "anthropic": {
+ "default_headers": {"X-Test": "vertex"},
+ "vertex_ai": {
+ "project_id": "proj",
+ "location": "global",
+ "base_url": "https://vertex.example",
+ },
+ }
+ }
+ )
+ llm = _build_llm(config, via="vertex")
+
+ called: dict[str, object] = {}
+
+ class FakeVertexClient:
+ def __init__(self, **kwargs) -> None:
+ called.update(kwargs)
+
+ monkeypatch.setattr(
+ "fast_agent.llm.provider.anthropic.llm_anthropic.AsyncAnthropicVertex",
+ FakeVertexClient,
+ )
+ monkeypatch.setattr(
+ "fast_agent.llm.provider.anthropic.llm_anthropic.detect_google_adc",
+ lambda: GoogleAdcStatus(available=True, project_id="proj", credentials=object()),
+ )
+
+ client = llm._initialize_anthropic_client()
+
+ assert isinstance(client, FakeVertexClient)
+ assert called["project_id"] == "proj"
+ assert called["region"] == "global"
+ assert called["base_url"] == "https://vertex.example"
+ assert called["default_headers"] == {"X-Test": "vertex"}
+ assert "api_key" not in called
+
+
+def test_initialize_anthropic_client_uses_direct_sdk(monkeypatch) -> None:
+ config = Settings.model_validate(
+ {
+ "anthropic": {
+ "api_key": "sk-ant",
+ "base_url": "https://api.anthropic.example/v1",
+ "default_headers": {"X-Test": "direct"},
+ }
+ }
+ )
+ llm = _build_llm(config)
+
+ called: dict[str, object] = {}
+
+ class FakeClient:
+ def __init__(self, **kwargs) -> None:
+ called.update(kwargs)
+
+ monkeypatch.setattr(
+ "fast_agent.llm.provider.anthropic.llm_anthropic.AsyncAnthropic",
+ FakeClient,
+ )
+
+ client = llm._initialize_anthropic_client()
+
+ assert isinstance(client, FakeClient)
+ assert called["api_key"] == "sk-ant"
+ assert called["base_url"] == "https://api.anthropic.example"
+ assert called["default_headers"] == {"X-Test": "direct"}
+
+
+def test_vertex_client_requires_google_adc(monkeypatch) -> None:
+ config = Settings.model_validate(
+ {
+ "anthropic": {
+ "vertex_ai": {
+ "project_id": "proj",
+ "location": "global",
+ }
+ }
+ }
+ )
+ llm = _build_llm(config, via="vertex")
+
+ monkeypatch.setattr(
+ "fast_agent.llm.provider.anthropic.llm_anthropic.detect_google_adc",
+ lambda: GoogleAdcStatus(available=False, error=RuntimeError("missing")),
+ )
+
+ with pytest.raises(ProviderKeyError, match="Google ADC not found"):
+ llm._initialize_anthropic_client()
diff --git a/tests/unit/fast_agent/llm/providers/test_llm_google_vertex.py b/tests/unit/fast_agent/llm/providers/test_llm_google_vertex.py
index 3f8da1a7d..9a9229323 100644
--- a/tests/unit/fast_agent/llm/providers/test_llm_google_vertex.py
+++ b/tests/unit/fast_agent/llm/providers/test_llm_google_vertex.py
@@ -66,6 +66,50 @@ def test_vertex_cfg_accepts_dict_and_provider_key_manager_allows_adc() -> None:
assert ProviderKeyManager.get_api_key("google", config) == ""
+def test_vertex_partner_model_names_are_not_rewritten_to_google_publisher() -> None:
+ """Vertex partner models should keep the provider-native model id."""
+ config = Settings.model_validate(
+ {
+ "google": {
+ "vertex_ai": {
+ "enabled": True,
+ "project_id": "proj",
+ "location": "global",
+ }
+ }
+ }
+ )
+
+ llm = _build_llm(config)
+
+ assert llm._resolve_model_name("claude-sonnet-4-6") == "claude-sonnet-4-6"
+ assert (
+ llm._resolve_model_name("publishers/anthropic/models/claude-sonnet-4-6")
+ == "publishers/anthropic/models/claude-sonnet-4-6"
+ )
+
+
+def test_vertex_first_party_non_gemini_models_are_rewritten_to_google_publisher() -> None:
+ config = Settings.model_validate(
+ {
+ "google": {
+ "vertex_ai": {
+ "enabled": True,
+ "project_id": "proj",
+ "location": "global",
+ }
+ }
+ }
+ )
+
+ llm = _build_llm(config)
+
+ assert (
+ llm._resolve_model_name("text-embedding-005")
+ == "projects/proj/locations/global/publishers/google/models/text-embedding-005"
+ )
+
+
def test_initialize_google_client_prefers_vertex_with_dict_config(monkeypatch) -> None:
"""Ensure dict-based vertex config builds a Vertex client (ADC, no API key)."""
config = Settings.model_validate(
diff --git a/tests/unit/fast_agent/llm/test_model_factory.py b/tests/unit/fast_agent/llm/test_model_factory.py
index 0154bf389..22e90fc35 100644
--- a/tests/unit/fast_agent/llm/test_model_factory.py
+++ b/tests/unit/fast_agent/llm/test_model_factory.py
@@ -124,6 +124,27 @@ def test_model_query_structured_tool_use():
assert config.structured_output_mode == "tool_use"
+def test_model_query_via_vertex():
+ config = ModelFactory.parse_model_string("claude-sonnet-4-6?via=vertex")
+
+ assert config.provider == Provider.ANTHROPIC
+ assert config.model_name == "claude-sonnet-4-6"
+ assert config.via == "vertex"
+
+
+def test_model_query_source_alias_maps_to_via():
+ config = ModelFactory.parse_model_string("sonnet?source=vertex")
+
+ assert config.provider == Provider.ANTHROPIC
+ assert config.model_name == "claude-sonnet-4-6"
+ assert config.via == "vertex"
+
+
+def test_model_query_via_rejected_for_non_anthropic_model():
+ with pytest.raises(ModelConfigError, match="only supported for Anthropic"):
+ ModelFactory.parse_model_string("openai.gpt-4.1?via=vertex")
+
+
def test_model_query_text_verbosity():
config = ModelFactory.parse_model_string("gpt-5?verbosity=med&reasoning=high")
assert config.provider == Provider.RESPONSES
diff --git a/tests/unit/fast_agent/llm/test_model_selection_catalog.py b/tests/unit/fast_agent/llm/test_model_selection_catalog.py
index 9bd7be272..86ac7e2a3 100644
--- a/tests/unit/fast_agent/llm/test_model_selection_catalog.py
+++ b/tests/unit/fast_agent/llm/test_model_selection_catalog.py
@@ -8,6 +8,7 @@
from fast_agent.llm.model_database import ModelDatabase
from fast_agent.llm.model_overlays import load_model_overlay_registry
from fast_agent.llm.model_selection import ModelSelectionCatalog
+from fast_agent.llm.provider.anthropic.vertex_config import GoogleAdcStatus
from fast_agent.llm.provider_types import Provider
if TYPE_CHECKING:
@@ -132,6 +133,29 @@ def test_configured_providers_reads_config_keys() -> None:
assert Provider.RESPONSES in providers
+def test_configured_providers_does_not_treat_anthropic_vertex_as_base_provider(
+ monkeypatch,
+) -> None:
+ monkeypatch.setattr(
+ "fast_agent.llm.provider.anthropic.vertex_config.detect_google_adc",
+ lambda: GoogleAdcStatus(available=True, project_id="proj", credentials=object()),
+ )
+
+ providers = ModelSelectionCatalog.configured_providers(
+ {
+ "anthropic": {
+ "vertex_ai": {
+ "enabled": True,
+ "project_id": "proj",
+ "location": "global",
+ }
+ }
+ }
+ )
+
+ assert Provider.ANTHROPIC not in providers
+
+
def test_configured_providers_reads_environment_keys() -> None:
original = os.environ.get("OPENAI_API_KEY")
diff --git a/tests/unit/fast_agent/ui/test_agent_completer.py b/tests/unit/fast_agent/ui/test_agent_completer.py
index 3aa7ee805..5a84b66ec 100644
--- a/tests/unit/fast_agent/ui/test_agent_completer.py
+++ b/tests/unit/fast_agent/ui/test_agent_completer.py
@@ -1435,6 +1435,7 @@ def test_resource_mention_server_completion_filters_connected_resource_servers()
names = [c.text for c in completions]
assert "demo:" in names
+ assert "file:" in names
assert "offline:" not in names
assert "nores:" not in names
@@ -1454,6 +1455,42 @@ def test_resource_mention_resource_and_template_completion() -> None:
assert "repo://items/{id}{" in names
+def test_resource_mention_local_file_completion_encodes_spaces() -> None:
+ with tempfile.TemporaryDirectory() as tmpdir:
+ base = Path(tmpdir)
+ (base / "two words.txt").write_text("hi", encoding="utf-8")
+
+ completer = AgentCompleter(agents=["agent1"])
+ original_cwd = os.getcwd()
+ try:
+ os.chdir(tmpdir)
+ doc = Document("^file:./two", cursor_position=len("^file:./two"))
+ completions = list(completer.get_completions(doc, None))
+ finally:
+ os.chdir(original_cwd)
+
+ assert any(completion.text == "./two%20words.txt" for completion in completions)
+
+
+def test_attach_command_completion_offers_clear_and_paths() -> None:
+ with tempfile.TemporaryDirectory() as tmpdir:
+ base = Path(tmpdir)
+ (base / "report.pdf").write_bytes(b"%PDF-1.4")
+ (base / "two words.pdf").write_bytes(b"%PDF-1.4")
+
+ completer = AgentCompleter(agents=["agent1"])
+ original_cwd = os.getcwd()
+ try:
+ os.chdir(tmpdir)
+ doc = Document("/attach t", cursor_position=len("/attach t"))
+ completions = list(completer.get_completions(doc, None))
+ finally:
+ os.chdir(original_cwd)
+
+ names = [completion.text for completion in completions]
+ assert "'two words.pdf'" in names
+
+
def test_resource_mention_argument_value_completion() -> None:
completer = AgentCompleter(
agents=["agent1"],
diff --git a/tests/unit/fast_agent/ui/test_attachment_indicator.py b/tests/unit/fast_agent/ui/test_attachment_indicator.py
new file mode 100644
index 000000000..9521519e9
--- /dev/null
+++ b/tests/unit/fast_agent/ui/test_attachment_indicator.py
@@ -0,0 +1,59 @@
+from __future__ import annotations
+
+from fast_agent.ui.attachment_indicator import (
+ ATTACHMENT_GLYPH,
+ ATTACHMENT_IDLE_COLOR,
+ ATTACHMENT_QUESTIONABLE_COLOR,
+ ATTACHMENT_SUPPORTED_COLOR,
+ DraftAttachmentSummary,
+ render_attachment_indicator,
+ summarize_draft_attachments,
+)
+
+
+def test_summarize_draft_attachments_marks_supported_local_file(tmp_path) -> None:
+ image = tmp_path / "image.png"
+ image.write_bytes(b"\x89PNG\r\n\x1a\n")
+
+ summary = summarize_draft_attachments(
+ f"describe ^file:{image}",
+ model_name="gpt-4.1",
+ )
+
+ assert summary is not None
+ assert summary.count == 1
+ assert summary.any_questionable is False
+ assert summary.mime_types == ("image/png",)
+
+
+def test_summarize_draft_attachments_marks_missing_file_questionable() -> None:
+ summary = summarize_draft_attachments(
+ "describe ^file:/tmp/does-not-exist.png",
+ model_name="gpt-4.1",
+ )
+
+ assert summary is not None
+ assert summary.count == 1
+ assert summary.any_questionable is True
+
+
+def test_render_attachment_indicator_uses_red_count_for_questionable_summary() -> None:
+ indicator = render_attachment_indicator(
+ DraftAttachmentSummary(count=2, mime_types=("image/png",), any_questionable=True)
+ )
+
+ assert indicator == f""
+
+
+def test_render_attachment_indicator_formats_supported_indicator() -> None:
+ indicator = render_attachment_indicator(
+ DraftAttachmentSummary(count=1, mime_types=("image/png",), any_questionable=False)
+ )
+
+ assert indicator == f""
+
+
+def test_render_attachment_indicator_formats_idle_indicator() -> None:
+ indicator = render_attachment_indicator(None)
+
+ assert indicator == f""
diff --git a/tests/unit/fast_agent/ui/test_attachment_tokens.py b/tests/unit/fast_agent/ui/test_attachment_tokens.py
new file mode 100644
index 000000000..9f939916a
--- /dev/null
+++ b/tests/unit/fast_agent/ui/test_attachment_tokens.py
@@ -0,0 +1,19 @@
+from __future__ import annotations
+
+from fast_agent.ui.prompt.attachment_tokens import strip_local_attachment_tokens
+
+
+def test_strip_local_attachment_tokens_preserves_multiline_whitespace() -> None:
+ text = "line one\n code block\n^file:/tmp/a.png\nline two"
+
+ stripped = strip_local_attachment_tokens(text)
+
+ assert stripped == "line one\n code block\nline two"
+
+
+def test_strip_local_attachment_tokens_collapses_only_attachment_gap_between_words() -> None:
+ text = "compare ^file:/tmp/a.png with this"
+
+ stripped = strip_local_attachment_tokens(text)
+
+ assert stripped == "compare with this"
diff --git a/tests/unit/fast_agent/ui/test_command_intent_contract.py b/tests/unit/fast_agent/ui/test_command_intent_contract.py
index 38b175628..c9193b23f 100644
--- a/tests/unit/fast_agent/ui/test_command_intent_contract.py
+++ b/tests/unit/fast_agent/ui/test_command_intent_contract.py
@@ -3,6 +3,7 @@
import pytest
from fast_agent.ui.command_payloads import (
+ AttachCommand,
CommandPayload,
HashAgentCommand,
HistoryShowCommand,
@@ -21,6 +22,21 @@
@pytest.mark.parametrize(
("raw_input", "expected"),
[
+ pytest.param(
+ "/attach",
+ AttachCommand(paths=(), clear=False, error=None),
+ id="attach-open-prompt",
+ ),
+ pytest.param(
+ '/attach "./report one.pdf" ../two.png',
+ AttachCommand(paths=("./report one.pdf", "../two.png"), clear=False, error=None),
+ id="attach-paths",
+ ),
+ pytest.param(
+ "/attach clear",
+ AttachCommand(paths=(), clear=True, error=None),
+ id="attach-clear",
+ ),
pytest.param(
"/history analyst",
ShowHistoryCommand(agent="analyst"),
diff --git a/tests/unit/fast_agent/ui/test_input_toolbar.py b/tests/unit/fast_agent/ui/test_input_toolbar.py
index 8ca64502f..11adda319 100644
--- a/tests/unit/fast_agent/ui/test_input_toolbar.py
+++ b/tests/unit/fast_agent/ui/test_input_toolbar.py
@@ -1,3 +1,4 @@
+from fast_agent.ui.attachment_indicator import DraftAttachmentSummary
from fast_agent.ui.prompt.input_toolbar import ToolbarAgentState, _build_middle_segment
@@ -27,3 +28,27 @@ def test_build_middle_segment_prefixes_codex_before_overlay() -> None:
assert "∞gpt-5-codex" in middle
assert "▼gpt-5-codex" not in middle
+
+
+def test_build_middle_segment_renders_attachment_indicator() -> None:
+ middle = _build_middle_segment(
+ ToolbarAgentState(
+ model_display="gpt-4.1",
+ model_name="gpt-4.1",
+ model_gauges="RG",
+ tdv_segment="TVD",
+ service_tier_indicator="FAST",
+ web_search_indicator="WEB",
+ turn_count=3,
+ ),
+ shortcut_text="",
+ attachment_summary=DraftAttachmentSummary(
+ count=2,
+ mime_types=("image/png",),
+ any_questionable=False,
+ ),
+ )
+
+ assert "▲2" in middle
+ assert middle.index("TVD") < middle.index("▲2") < middle.index("RG") < middle.index("gpt-4.1")
+ assert middle.index("gpt-4.1") < middle.index("FAST") < middle.index("WEB")
diff --git a/tests/unit/fast_agent/ui/test_interactive_prompt_resource_mentions.py b/tests/unit/fast_agent/ui/test_interactive_prompt_resource_mentions.py
index 52b175bca..c5761ea6e 100644
--- a/tests/unit/fast_agent/ui/test_interactive_prompt_resource_mentions.py
+++ b/tests/unit/fast_agent/ui/test_interactive_prompt_resource_mentions.py
@@ -1,5 +1,6 @@
from __future__ import annotations
+import base64
from typing import Any, cast
import pytest
@@ -29,6 +30,11 @@ async def get_resource(self, resource_uri: str, namespace: str | None = None):
)
+class _LocalMentionAgent:
+ def __init__(self) -> None:
+ self.message_history = []
+
+
class _MentionAgentApp:
def __init__(self) -> None:
self._agent_obj = _MentionAgent()
@@ -87,3 +93,63 @@ async def fake_send(payload, _agent_name: str) -> str:
assert isinstance(payload, PromptMessageExtended)
assert any(isinstance(item, EmbeddedResource) for item in payload.content)
assert payload.first_text() == "Summarize"
+
+
+@pytest.mark.asyncio
+async def test_prompt_loop_materializes_local_file_mentions(
+ monkeypatch,
+ tmp_path,
+) -> None:
+ image_path = tmp_path / "pixel.png"
+ image_path.write_bytes(
+ base64.b64decode(
+ "iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAQAAAC1HAwCAAAAC0lEQVR42mP8/x8AAusB9s2nRwAAAABJRU5ErkJggg=="
+ )
+ )
+ inputs = iter([f"Compare ^file:{image_path}", "STOP"])
+
+ async def fake_get_enhanced_input(*_args: Any, **_kwargs: Any) -> str:
+ return next(inputs)
+
+ monkeypatch.setattr(interactive_prompt, "get_enhanced_input", fake_get_enhanced_input)
+
+ sent_payloads: list[str | PromptMessageExtended] = []
+
+ async def fake_send(payload, _agent_name: str) -> str:
+ sent_payloads.append(payload)
+ return "ok"
+
+ prompt_ui = InteractivePrompt()
+ app = _MentionAgentApp()
+ app._agent_obj = _LocalMentionAgent()
+
+ await prompt_ui.prompt_loop(
+ send_func=fake_send,
+ default_agent="agent1",
+ available_agents=["agent1"],
+ prompt_provider=cast("Any", app),
+ )
+
+ assert len(sent_payloads) == 1
+ payload = sent_payloads[0]
+ assert isinstance(payload, PromptMessageExtended)
+ assert payload.first_text() == "Compare"
+ assert any(getattr(item, "type", None) == "image" for item in payload.content)
+
+
+@pytest.mark.asyncio
+async def test_resolve_prompt_payload_falls_back_to_plain_text_on_resolution_error(
+ tmp_path,
+) -> None:
+ missing = tmp_path / "missing.png"
+ prompt_ui = InteractivePrompt()
+ app = _MentionAgentApp()
+ app._agent_obj = _LocalMentionAgent()
+
+ payload = await prompt_ui._resolve_prompt_payload(
+ prompt_provider=cast("Any", app),
+ agent_name="agent1",
+ user_input=f"can you see ^file:{missing}",
+ )
+
+ assert payload == f"can you see ^file:{missing}"
diff --git a/tests/unit/fast_agent/ui/test_message_display_helpers.py b/tests/unit/fast_agent/ui/test_message_display_helpers.py
index cbe3640c9..765618615 100644
--- a/tests/unit/fast_agent/ui/test_message_display_helpers.py
+++ b/tests/unit/fast_agent/ui/test_message_display_helpers.py
@@ -1,9 +1,11 @@
-from mcp.types import CallToolRequest, CallToolRequestParams
+from mcp.types import CallToolRequest, CallToolRequestParams, ImageContent
from fast_agent.types import PromptMessageExtended
from fast_agent.types.llm_stop_reason import LlmStopReason
from fast_agent.ui.message_display_helpers import (
build_tool_use_additional_message,
+ build_user_message_display,
+ extract_user_attachments,
resolve_highlight_index,
tool_use_requests_file_read_access,
tool_use_requests_shell_access,
@@ -100,3 +102,37 @@ def test_resolve_highlight_index_handles_empty_candidate_list() -> None:
def test_resolve_highlight_index_returns_none_without_items() -> None:
assert resolve_highlight_index(None, "shell") is None
+
+
+def test_extract_user_attachments_includes_local_image_source_uri() -> None:
+ image = ImageContent(
+ type="image",
+ data="ZmFrZQ==",
+ mimeType="image/png",
+ )
+ image.meta = {"fast_agent_source_uri": "file:///tmp/photo.png"}
+ message = PromptMessageExtended(
+ role="user",
+ content=[image],
+ )
+
+ assert extract_user_attachments(message) == ["image (file:///tmp/photo.png)"]
+
+
+def test_build_user_message_display_prefers_original_text_metadata() -> None:
+ image = ImageContent(type="image", data="ZmFrZQ==", mimeType="image/png")
+ image.meta = {"fast_agent_source_uri": "file:///tmp/photo.png"}
+ text = PromptMessageExtended.model_validate(
+ {
+ "role": "user",
+ "content": [{"type": "text", "text": "can you see"}],
+ }
+ )
+ text.content[0].meta = {"fast_agent_original_text": "can you see ^file:/tmp/photo.png"}
+
+ message = PromptMessageExtended(role="user", content=[text.content[0], image])
+
+ message_text, attachments = build_user_message_display([message])
+
+ assert message_text == "can you see ^file:/tmp/photo.png"
+ assert attachments == ["image (file:///tmp/photo.png)"]
diff --git a/tests/unit/fast_agent/ui/test_model_chip_display.py b/tests/unit/fast_agent/ui/test_model_chip_display.py
index e60422fd0..615dd28ba 100644
--- a/tests/unit/fast_agent/ui/test_model_chip_display.py
+++ b/tests/unit/fast_agent/ui/test_model_chip_display.py
@@ -3,6 +3,7 @@
WEB_INDICATOR = ""
WEB_FETCH_INDICATOR = ""
SERVICE_TIER_INDICATOR = ""
+ATTACHMENT_INDICATOR = ""
def test_render_model_chip_places_indicators_after_model_label() -> None:
@@ -11,11 +12,12 @@ def test_render_model_chip_places_indicators_after_model_label() -> None:
web_search_indicator=WEB_INDICATOR,
service_tier_indicator=SERVICE_TIER_INDICATOR,
web_fetch_indicator=WEB_FETCH_INDICATOR,
+ attachment_indicator=ATTACHMENT_INDICATOR,
)
assert chip == (
f""
- f"{SERVICE_TIER_INDICATOR}{WEB_INDICATOR}{WEB_FETCH_INDICATOR}"
+ f"{SERVICE_TIER_INDICATOR}{WEB_INDICATOR}{WEB_FETCH_INDICATOR}{ATTACHMENT_INDICATOR}"
)
diff --git a/tests/unit/fast_agent/ui/test_model_display.py b/tests/unit/fast_agent/ui/test_model_display.py
index da2bba613..677e5b8ea 100644
--- a/tests/unit/fast_agent/ui/test_model_display.py
+++ b/tests/unit/fast_agent/ui/test_model_display.py
@@ -50,6 +50,24 @@ def test_resolve_llm_display_name_uses_wire_model_name_for_anthropic_presets() -
assert resolve_llm_display_name(_StubLLM(resolved_model)) == "claude-sonnet-4-6"
+def test_resolve_llm_display_name_marks_anthropic_vertex_route() -> None:
+ resolved_model = ResolvedModelSpec(
+ raw_input="sonnet?via=vertex",
+ selected_model_name="sonnet?via=vertex",
+ source="preset",
+ model_config=ModelConfig(
+ provider=Provider.ANTHROPIC,
+ model_name="claude-sonnet-4-6",
+ via="vertex",
+ ),
+ provider=Provider.ANTHROPIC,
+ wire_model_name="claude-sonnet-4-6",
+ )
+
+ assert resolved_model.display_name == "claude-sonnet-4-6 · Vertex"
+ assert resolve_llm_display_name(_StubLLM(resolved_model)) == "claude-sonnet-4-6 · Vertex"
+
+
def test_resolve_llm_display_name_uses_wire_model_name_for_provider_routed_presets() -> None:
resolved_model = ResolvedModelSpec(
raw_input="glm",
@@ -90,6 +108,10 @@ def test_resolve_model_display_name_formats_raw_model_strings() -> None:
== "Kimi-K2-Instruct-0905"
)
assert resolve_model_display_name("zai-org/GLM-5:novita") == "GLM-5"
+ assert (
+ resolve_model_display_name("claude-sonnet-4-6?via=vertex")
+ == "claude-sonnet-4-6 · Vertex"
+ )
def test_resolve_llm_display_name_uses_overlay_name() -> None:
diff --git a/tests/unit/fast_agent/ui/test_model_picker.py b/tests/unit/fast_agent/ui/test_model_picker.py
index 67c19b7f4..c59e9d0b8 100644
--- a/tests/unit/fast_agent/ui/test_model_picker.py
+++ b/tests/unit/fast_agent/ui/test_model_picker.py
@@ -1,5 +1,6 @@
from __future__ import annotations
+import types
from typing import Any, cast
from prompt_toolkit.data_structures import Point
@@ -11,10 +12,12 @@
from fast_agent.llm.provider_types import Provider
from fast_agent.ui.model_picker import _find_initial_model_index, _SplitListPicker
from fast_agent.ui.model_picker_common import (
+ ANTHROPIC_VERTEX_PROVIDER_KEY,
GENERIC_CUSTOM_MODEL_SENTINEL,
ModelOption,
ModelPickerSnapshot,
ProviderOption,
+ build_snapshot,
model_options_for_provider,
provider_activation_action,
)
@@ -289,3 +292,69 @@ def __init__(self, app: _FakeApp) -> None:
assert app.result is not None
assert app.result.selected_model == "haikutiny"
assert app.result.resolved_model == "haikutiny"
+
+
+def test_snapshot_adds_anthropic_vertex_group_when_ready(monkeypatch) -> None:
+ monkeypatch.setattr(
+ "fast_agent.llm.provider.anthropic.vertex_config.detect_google_adc",
+ lambda: types.SimpleNamespace(
+ available=True,
+ project_id="proj",
+ credentials=object(),
+ ),
+ )
+
+ snapshot = build_snapshot(
+ config_payload={
+ "anthropic": {
+ "vertex_ai": {
+ "enabled": True,
+ "project_id": "proj",
+ "location": "global",
+ }
+ }
+ }
+ )
+
+ option = next(
+ provider
+ for provider in snapshot.providers
+ if provider.option_key == ANTHROPIC_VERTEX_PROVIDER_KEY
+ )
+
+ assert option.active is True
+ assert option.option_display_name == "Anthropic (Vertex)"
+ assert all("?via=vertex" in entry.model for entry in option.curated_entries)
+
+
+def test_snapshot_disables_anthropic_vertex_group_when_adc_missing(monkeypatch) -> None:
+ monkeypatch.setattr(
+ "fast_agent.llm.provider.anthropic.vertex_config.detect_google_adc",
+ lambda: types.SimpleNamespace(
+ available=False,
+ project_id=None,
+ error=RuntimeError("missing"),
+ credentials=None,
+ ),
+ )
+
+ snapshot = build_snapshot(
+ config_payload={
+ "anthropic": {
+ "vertex_ai": {
+ "enabled": True,
+ "project_id": "proj",
+ "location": "global",
+ }
+ }
+ }
+ )
+
+ option = next(
+ provider
+ for provider in snapshot.providers
+ if provider.option_key == ANTHROPIC_VERTEX_PROVIDER_KEY
+ )
+
+ assert option.active is False
+ assert option.disabled_reason == "Google ADC not found"
diff --git a/tests/unit/fast_agent/ui/test_resource_mentions.py b/tests/unit/fast_agent/ui/test_resource_mentions.py
index cbd226abe..aa1b71c1f 100644
--- a/tests/unit/fast_agent/ui/test_resource_mentions.py
+++ b/tests/unit/fast_agent/ui/test_resource_mentions.py
@@ -1,7 +1,9 @@
from __future__ import annotations
+import base64
+
import pytest
-from mcp.types import ReadResourceResult, TextResourceContents
+from mcp.types import EmbeddedResource, ImageContent, ReadResourceResult, TextResourceContents
from pydantic import AnyUrl
from fast_agent.ui.prompt.resource_mentions import (
@@ -75,6 +77,21 @@ def test_parse_mentions_records_template_warning_on_missing_args() -> None:
assert parsed.warnings
+def test_parse_mentions_normalizes_local_file_paths(
+ tmp_path,
+ monkeypatch: pytest.MonkeyPatch,
+) -> None:
+ report = tmp_path / "report.pdf"
+ report.write_bytes(b"%PDF-1.4")
+ monkeypatch.chdir(tmp_path)
+
+ parsed = parse_mentions("Summarize ^file:./report.pdf")
+
+ assert len(parsed.mentions) == 1
+ assert parsed.mentions[0].server_name == "file"
+ assert parsed.mentions[0].resource_uri == str(report.resolve())
+
+
@pytest.mark.asyncio
async def test_resolve_mentions_builds_embedded_resources() -> None:
parsed = parse_mentions("Read ^demo:file:///tmp/notes.txt")
@@ -86,6 +103,35 @@ async def test_resolve_mentions_builds_embedded_resources() -> None:
assert len(prompt.content) == 2
+@pytest.mark.asyncio
+async def test_resolve_mentions_builds_local_file_resource_without_agent_support(tmp_path) -> None:
+ notes = tmp_path / "notes.txt"
+ notes.write_text("hello", encoding="utf-8")
+ parsed = parse_mentions(f"Read ^file:{notes}")
+
+ resolved = await resolve_mentions(object(), parsed)
+ prompt = build_prompt_with_resources(parsed.text, resolved)
+
+ assert any(isinstance(item, EmbeddedResource) for item in prompt.content)
+
+
+@pytest.mark.asyncio
+async def test_resolve_mentions_builds_local_image_content(tmp_path) -> None:
+ image_path = tmp_path / "pixel.png"
+ image_path.write_bytes(
+ base64.b64decode(
+ "iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAQAAAC1HAwCAAAAC0lEQVR42mP8/x8AAusB9s2nRwAAAABJRU5ErkJggg=="
+ )
+ )
+ parsed = parse_mentions(f"^file:{image_path}")
+
+ resolved = await resolve_mentions(object(), parsed)
+ prompt = build_prompt_with_resources(parsed.text, resolved)
+
+ assert len(prompt.content) == 2
+ assert isinstance(prompt.content[1], ImageContent)
+
+
@pytest.mark.asyncio
async def test_resolve_mentions_raises_on_resource_errors() -> None:
class _FailingAgent:
diff --git a/uv.lock b/uv.lock
index 684e4a7b1..a3b6324db 100644
--- a/uv.lock
+++ b/uv.lock
@@ -158,7 +158,7 @@ wheels = [
[[package]]
name = "anthropic"
-version = "0.84.0"
+version = "0.86.0"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "anyio" },
@@ -170,9 +170,14 @@ dependencies = [
{ name = "sniffio" },
{ name = "typing-extensions" },
]
-sdist = { url = "https://files.pythonhosted.org/packages/04/ea/0869d6df9ef83dcf393aeefc12dd81677d091c6ffc86f783e51cf44062f2/anthropic-0.84.0.tar.gz", hash = "sha256:72f5f90e5aebe62dca316cb013629cfa24996b0f5a4593b8c3d712bc03c43c37", size = 539457, upload-time = "2026-02-25T05:22:38.54Z" }
+sdist = { url = "https://files.pythonhosted.org/packages/37/7a/8b390dc47945d3169875d342847431e5f7d5fa716b2e37494d57cfc1db10/anthropic-0.86.0.tar.gz", hash = "sha256:60023a7e879aa4fbb1fed99d487fe407b2ebf6569603e5047cfe304cebdaa0e5", size = 583820, upload-time = "2026-03-18T18:43:08.017Z" }
wheels = [
- { url = "https://files.pythonhosted.org/packages/64/ca/218fa25002a332c0aa149ba18ffc0543175998b1f65de63f6d106689a345/anthropic-0.84.0-py3-none-any.whl", hash = "sha256:861c4c50f91ca45f942e091d83b60530ad6d4f98733bfe648065364da05d29e7", size = 455156, upload-time = "2026-02-25T05:22:40.468Z" },
+ { url = "https://files.pythonhosted.org/packages/63/5f/67db29c6e5d16c8c9c4652d3efb934d89cb750cad201539141781d8eae14/anthropic-0.86.0-py3-none-any.whl", hash = "sha256:9d2bbd339446acce98858c5627d33056efe01f70435b22b63546fe7edae0cd57", size = 469400, upload-time = "2026-03-18T18:43:06.526Z" },
+]
+
+[package.optional-dependencies]
+vertex = [
+ { name = "google-auth", extra = ["requests"] },
]
[[package]]
@@ -714,7 +719,7 @@ dependencies = [
{ name = "a2a-sdk" },
{ name = "agent-client-protocol" },
{ name = "aiohttp" },
- { name = "anthropic" },
+ { name = "anthropic", extra = ["vertex"] },
{ name = "deprecated" },
{ name = "email-validator" },
{ name = "fastapi" },
@@ -785,7 +790,7 @@ requires-dist = [
{ name = "a2a-sdk", specifier = ">=0.3.16" },
{ name = "agent-client-protocol", specifier = ">=0.8.1" },
{ name = "aiohttp", specifier = ">=3.13.2" },
- { name = "anthropic", specifier = ">=0.84.0" },
+ { name = "anthropic", extras = ["vertex"], specifier = ">=0.86.0" },
{ name = "azure-identity", marker = "extra == 'all-providers'", specifier = ">=1.14.0" },
{ name = "azure-identity", marker = "extra == 'azure'", specifier = ">=1.14.0" },
{ name = "boto3", marker = "extra == 'all-providers'", specifier = ">=1.35.0" },
@@ -799,7 +804,7 @@ requires-dist = [
{ name = "mcp", specifier = "==1.26.0" },
{ name = "mslex", specifier = ">=1.3.0" },
{ name = "multilspy", specifier = ">=0.0.15" },
- { name = "openai", extras = ["aiohttp"], specifier = ">=2.28.0" },
+ { name = "openai", extras = ["aiohttp"], specifier = ">=2.29.0" },
{ name = "opentelemetry-distro", specifier = "==0.60b1" },
{ name = "opentelemetry-exporter-otlp-proto-http", specifier = "==1.39.1" },
{ name = "opentelemetry-instrumentation-anthropic", marker = "python_full_version >= '3.10' and python_full_version < '4'", specifier = "==0.52.1" },
@@ -1702,7 +1707,7 @@ wheels = [
[[package]]
name = "openai"
-version = "2.28.0"
+version = "2.29.0"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "anyio" },
@@ -1714,9 +1719,9 @@ dependencies = [
{ name = "tqdm" },
{ name = "typing-extensions" },
]
-sdist = { url = "https://files.pythonhosted.org/packages/56/87/eb0abb4ef88ddb95b3c13149384c4c288f584f3be17d6a4f63f8c3e3c226/openai-2.28.0.tar.gz", hash = "sha256:bb7fdff384d2a787fa82e8822d1dd3c02e8cf901d60f1df523b7da03cbb6d48d", size = 670334, upload-time = "2026-03-13T19:56:27.306Z" }
+sdist = { url = "https://files.pythonhosted.org/packages/b4/15/203d537e58986b5673e7f232453a2a2f110f22757b15921cbdeea392e520/openai-2.29.0.tar.gz", hash = "sha256:32d09eb2f661b38d3edd7d7e1a2943d1633f572596febe64c0cd370c86d52bec", size = 671128, upload-time = "2026-03-17T17:53:49.599Z" }
wheels = [
- { url = "https://files.pythonhosted.org/packages/c0/5a/df122348638885526e53140e9c6b0d844af7312682b3bde9587eebc28b47/openai-2.28.0-py3-none-any.whl", hash = "sha256:79aa5c45dba7fef84085701c235cf13ba88485e1ef4f8dfcedc44fc2a698fc1d", size = 1141218, upload-time = "2026-03-13T19:56:25.46Z" },
+ { url = "https://files.pythonhosted.org/packages/d0/b1/35b6f9c8cf9318e3dbb7146cc82dab4cf61182a8d5406fc9b50864362895/openai-2.29.0-py3-none-any.whl", hash = "sha256:b7c5de513c3286d17c5e29b92c4c98ceaf0d775244ac8159aeb1bddf840eb42a", size = 1141533, upload-time = "2026-03-17T17:53:47.348Z" },
]
[package.optional-dependencies]
From fc4af5f62a4a2bb1dc5bdd8850e8b912f6745791 Mon Sep 17 00:00:00 2001
From: evalstate <1936278+evalstate@users.noreply.github.com>
Date: Tue, 24 Mar 2026 23:23:06 +0000
Subject: [PATCH 4/9] update test for attachment indicator
---
tests/unit/fast_agent/ui/test_attachment_indicator.py | 10 +++++++---
1 file changed, 7 insertions(+), 3 deletions(-)
diff --git a/tests/unit/fast_agent/ui/test_attachment_indicator.py b/tests/unit/fast_agent/ui/test_attachment_indicator.py
index 9521519e9..912e1efb7 100644
--- a/tests/unit/fast_agent/ui/test_attachment_indicator.py
+++ b/tests/unit/fast_agent/ui/test_attachment_indicator.py
@@ -42,7 +42,9 @@ def test_render_attachment_indicator_uses_red_count_for_questionable_summary() -
DraftAttachmentSummary(count=2, mime_types=("image/png",), any_questionable=True)
)
- assert indicator == f""
+ assert indicator == (
+ f""
+ )
def test_render_attachment_indicator_formats_supported_indicator() -> None:
@@ -50,10 +52,12 @@ def test_render_attachment_indicator_formats_supported_indicator() -> None:
DraftAttachmentSummary(count=1, mime_types=("image/png",), any_questionable=False)
)
- assert indicator == f""
+ assert indicator == (
+ f""
+ )
def test_render_attachment_indicator_formats_idle_indicator() -> None:
indicator = render_attachment_indicator(None)
- assert indicator == f""
+ assert indicator == f""
From 63cf31dda47cd2f671bfd9fad24a07189122f4cd Mon Sep 17 00:00:00 2001
From: evalstate <1936278+evalstate@users.noreply.github.com>
Date: Wed, 25 Mar 2026 19:49:13 +0000
Subject: [PATCH 5/9] improve README
---
README.md | 81 ++++++++++++++++++++++++++++++++++++++++++++++---------
1 file changed, 68 insertions(+), 13 deletions(-)
diff --git a/README.md b/README.md
index 54d224f47..0828854a6 100644
--- a/README.md
+++ b/README.md
@@ -7,10 +7,65 @@
-## Overview
+## Start Here
> [!TIP]
-> Please see : https://fast-agent.ai for latest documentation. There is also an LLMs.txt [here](https://fast-agent.ai/llms.txt)
+> Please see https://fast-agent.ai for latest documentation.
+
+**`fast-agent`** is a flexible way to interact with LLMs, excellent for use as a Coding Agent, Development Toolkit, Evaluation or Workflow platform.
+
+To start an interactive session with shell support, install [uv](https://astral.sh/uv) and run
+
+```bash
+uvx fast-agent-mcp@latest -x
+```
+
+To start coding with Hugging Face inference providers or use your OpenAI Codex plan:
+
+```bash
+# Code with Hugging Face Inference Providers
+uvx fast-agent-mcp@latest --pack hf-dev
+
+# Code with Codex (agents optimized for OpenAI)
+uvx fast-agent-mcp@latest --pack codex
+```
+
+Enter a shell with `!`, or run shell commands e.g. `! cd web && npm run build`.
+
+Manage skills with the `/skills` command, and connect to MCP Servers with `/connect`. The default **`fast-agent`** registry contains skills to let you set up LSP, Agent and Tool Hooks, Compaction strategies, Automation and more.
+
+```bash
+# /connect supports stdio or streamable http (with OAuth)
+
+# Start a STDIO server
+/connect @modelcontextprotocol/server-everything
+
+# Connect to a Streamable HTTP Server
+/connect https://huggingface.co/mcp
+```
+
+It's recommended to install **`fast-agent`** to set up the shell aliases and other tooling.
+
+```bash
+# Install fast-agent
+uv tool install -U fast-agent-mcp
+
+# Run fast-agent with opus, shell support and subagent/smart mode
+fast-agent --model opus -x --smart
+```
+
+Use local models with the generic provider, or automatically create the correct configuration for `llama.cpp`:
+
+```bash
+fast-agent model llamacpp
+```
+
+Any **`fast-agent`** setup or program can be used with any ACP client - the simplest way is to use `fast-agent-acp`:
+
+```bash
+# Run fast-agent inside Toad
+toad acp "fast-agent-acp -x --model sonnet"
+```
**`fast-agent`** enables you to create and interact with sophisticated multimodal Agents and Workflows in minutes. It is the first framework with complete, end-to-end tested MCP Feature support including Sampling and Elicitations.
@@ -23,32 +78,32 @@ The simple declarative syntax lets you concentrate on composing your Prompts and
Model support is comprehensive with native support for Anthropic, OpenAI and Google providers as well as Azure, Ollama, Deepseek and dozens of others via TensorZero. Structured Outputs, PDF and Vision support is simple to use and well tested. Passthrough and Playback LLMs enable rapid development and test of Python glue-code for your applications.
Recent features include:
- - Agent Skills (SKILL.md)
- - MCP-UI Support |
- - OpenAI Apps SDK (Skybridge)
- - Shell Mode
- - Advanced MCP Transport Diagnsotics
- - MCP Elicitations
-
+- Agent Skills (SKILL.md)
+- MCP-UI Support
+- OpenAI Apps SDK (Skybridge)
+- Shell Mode
+- Advanced MCP Transport Diagnostics
+- MCP Elicitations
+
`fast-agent` is the only tool that allows you to inspect Streamable HTTP Transport usage - a critical feature for ensuring reliable, compliant deployments. OAuth is supported with KeyRing storage for secrets. Use the `fast-agent auth` command to manage.
-
-
-
-
> [!IMPORTANT]
>
> Documentation is included as a submodule. When cloning, use `--recurse-submodules` to get everything:
+>
> ```bash
> git clone --recurse-submodules https://github.com/evalstate/fast-agent.git
> ```
+>
> Or if you've already cloned:
+>
> ```bash
> git submodule update --init --recursive
> ```
+>
> The documentation source is also available at: https://github.com/evalstate/fast-agent-docs
### Agent Application Development
From 373f6af393a4366278539ad846d87900b48ad9f1 Mon Sep 17 00:00:00 2001
From: evalstate <1936278+evalstate@users.noreply.github.com>
Date: Fri, 27 Mar 2026 00:04:26 +0000
Subject: [PATCH 6/9] vertex provider
---
src/fast_agent/acp/server/agent_acp_server.py | 5 +-
src/fast_agent/agents/llm_agent.py | 6 +-
src/fast_agent/agents/llm_decorator.py | 24 +-
src/fast_agent/cli/commands/check_config.py | 25 +-
src/fast_agent/cli/runtime/agent_setup.py | 8 +-
src/fast_agent/commands/handlers/model.py | 4 +-
.../commands/handlers/models_manager.py | 18 +-
src/fast_agent/llm/model_database.py | 194 ++++++++++++---
src/fast_agent/llm/model_display_name.py | 24 +-
src/fast_agent/llm/model_factory.py | 61 +++--
src/fast_agent/llm/model_info.py | 102 ++++++--
src/fast_agent/llm/model_overlays.py | 6 +-
src/fast_agent/llm/model_selection.py | 24 ++
.../anthropic/README_anth_multipart.md | 5 +-
.../llm/provider/anthropic/llm_anthropic.py | 227 ++++++++++++------
.../anthropic/llm_anthropic_vertex.py | 85 +++++++
.../multipart_converter_anthropic.py | 71 ++++++
.../llm/provider/anthropic/vertex_config.py | 16 +-
.../openai/multipart_converter_openai.py | 13 +-
.../llm/provider/openai/responses_content.py | 73 +++++-
src/fast_agent/llm/provider_key_manager.py | 37 ++-
src/fast_agent/llm/provider_types.py | 1 +
src/fast_agent/llm/resolved_model.py | 12 +-
src/fast_agent/llm/usage_tracking.py | 16 +-
src/fast_agent/mcp/helpers/content_helpers.py | 18 +-
src/fast_agent/mcp/mime_utils.py | 27 +++
src/fast_agent/ui/attachment_indicator.py | 42 +++-
src/fast_agent/ui/context_usage_display.py | 41 ++++
.../ui/interactive/command_dispatch.py | 21 +-
src/fast_agent/ui/model_picker_common.py | 89 +++----
src/fast_agent/ui/prompt/attachment_tokens.py | 40 ++-
src/fast_agent/ui/prompt/command_help.py | 4 +-
src/fast_agent/ui/prompt/completer.py | 29 ++-
.../ui/prompt/completion_sources.py | 27 ++-
src/fast_agent/ui/prompt/input.py | 2 +-
src/fast_agent/ui/prompt/input_toolbar.py | 20 +-
src/fast_agent/ui/prompt/keybindings.py | 10 +-
src/fast_agent/ui/prompt/parser.py | 5 +-
src/fast_agent/ui/prompt/resource_mentions.py | 22 +-
src/fast_agent/ui/prompt/toolbar.py | 11 +-
.../agents/test_llm_agent_web_metadata.py | 35 +++
.../agents/test_llm_content_filter.py | 97 ++++++++
.../fast_agent/commands/test_model_handler.py | 25 ++
.../commands/test_models_manager_handler.py | 40 ++-
.../test_runtime_model_picker_bootstrap.py | 15 ++
.../provider/anthropic/test_file_uploads.py | 129 ++++++++++
.../anthropic/test_reasoning_defaults.py | 61 ++++-
.../llm/provider/anthropic/test_vertex.py | 115 +++++++--
.../test_multipart_converter_anthropic.py | 42 ++++
.../test_multipart_converter_openai.py | 19 ++
.../llm/providers/test_responses_helpers.py | 80 +++++-
.../fast_agent/llm/test_model_database.py | 65 +++++
.../unit/fast_agent/llm/test_model_factory.py | 24 +-
.../fast_agent/llm/test_model_info_caps.py | 58 +++++
tests/unit/fast_agent/mcp/test_mime_utils.py | 19 ++
.../fast_agent/ui/test_agent_completer.py | 64 ++++-
.../ui/test_attachment_indicator.py | 52 ++++
.../fast_agent/ui/test_attachment_tokens.py | 27 ++-
.../ui/test_command_intent_contract.py | 22 ++
.../unit/fast_agent/ui/test_model_display.py | 11 +-
tests/unit/fast_agent/ui/test_model_picker.py | 2 +-
.../fast_agent/ui/test_model_picker_common.py | 7 +
tests/unit/fast_agent/ui/test_prompt_lexer.py | 33 +++
.../fast_agent/ui/test_resource_mentions.py | 52 +++-
64 files changed, 2122 insertions(+), 437 deletions(-)
create mode 100644 src/fast_agent/llm/provider/anthropic/llm_anthropic_vertex.py
create mode 100644 src/fast_agent/ui/context_usage_display.py
create mode 100644 tests/unit/fast_agent/llm/provider/anthropic/test_file_uploads.py
create mode 100644 tests/unit/fast_agent/ui/test_prompt_lexer.py
diff --git a/src/fast_agent/acp/server/agent_acp_server.py b/src/fast_agent/acp/server/agent_acp_server.py
index 49c769451..41dc6d3c0 100644
--- a/src/fast_agent/acp/server/agent_acp_server.py
+++ b/src/fast_agent/acp/server/agent_acp_server.py
@@ -452,10 +452,11 @@ def _build_auth_required_data(
from fast_agent.llm.provider_key_manager import ProviderKeyManager
env_var = ProviderKeyManager.get_env_key_name(provider_name)
- data["envVars"] = [env_var]
+ if env_var:
+ data["envVars"] = [env_var]
if isinstance(provider_display_name, str) and provider_display_name:
data["provider"] = provider_display_name
- if not data["details"]:
+ if not data["details"] and env_var:
data["details"] = (
f"Add the {provider_display_name} credentials to "
f"{ACP_AUTH_CONFIG_FILE} or set {env_var}."
diff --git a/src/fast_agent/agents/llm_agent.py b/src/fast_agent/agents/llm_agent.py
index e6a77883d..dbc3045c6 100644
--- a/src/fast_agent/agents/llm_agent.py
+++ b/src/fast_agent/agents/llm_agent.py
@@ -38,6 +38,7 @@
web_tool_badges,
)
from fast_agent.ui.console_display import ConsoleDisplay
+from fast_agent.ui.context_usage_display import format_compact_context_usage_percent
from fast_agent.ui.interactive_diagnostics import write_interactive_trace
from fast_agent.ui.message_display_helpers import (
build_tool_use_additional_message,
@@ -370,8 +371,9 @@ def _resolve_assistant_display_model(
context_percentage = (
usage_accumulator.context_usage_percentage if usage_accumulator else None
)
- if context_percentage is not None:
- display_model = f"{display_model} ({context_percentage:.1f}%)"
+ context_label = format_compact_context_usage_percent(context_percentage)
+ if context_label is not None:
+ display_model = f"{display_model} ({context_label})"
return display_model
diff --git a/src/fast_agent/agents/llm_decorator.py b/src/fast_agent/agents/llm_decorator.py
index 2d4b2e73f..ab0069957 100644
--- a/src/fast_agent/agents/llm_decorator.py
+++ b/src/fast_agent/agents/llm_decorator.py
@@ -65,7 +65,6 @@
StreamingAgentProtocol,
ToolRunnerHookCapable,
)
-from fast_agent.llm.model_database import ModelDatabase
from fast_agent.llm.provider_types import Provider
from fast_agent.llm.stream_types import StreamChunk
from fast_agent.llm.usage_tracking import UsageAccumulator
@@ -958,7 +957,7 @@ def _filter_block_list(
for block in blocks or []:
mime_type, category = self._extract_block_metadata(block)
- if self._block_supported(mime_type, category):
+ if self._block_supported(block, mime_type, category):
kept.append(block)
else:
removed_block = _RemovedBlock(
@@ -985,25 +984,30 @@ def _filter_block_list(
return kept
- def _block_supported(self, mime_type: str | None, category: str) -> bool:
+ def _block_supported(
+ self,
+ block: ContentBlock,
+ mime_type: str | None,
+ category: str,
+ ) -> bool:
"""Determine if the current model can process a content block."""
if category == "text":
return True
- model_name = self.llm.model_name if self.llm else None
- if not model_name:
+ model_info = self.llm.model_info if self.llm else None
+ if not model_info:
return False
+ resource_source = "link" if isinstance(block, ResourceLink) else "embedded"
+
if mime_type:
- return ModelDatabase.supports_mime(model_name, mime_type)
+ return model_info.supports_mime(mime_type, resource_source=resource_source)
if category == "vision":
- return ModelDatabase.supports_any_mime(
- model_name, ["image/jpeg", "image/png", "image/webp"]
- )
+ return model_info.supports_vision
if category == "document":
- return ModelDatabase.supports_mime(model_name, "application/pdf")
+ return model_info.supports_document
return False
diff --git a/src/fast_agent/cli/commands/check_config.py b/src/fast_agent/cli/commands/check_config.py
index 54976b408..80426dfd7 100644
--- a/src/fast_agent/cli/commands/check_config.py
+++ b/src/fast_agent/cli/commands/check_config.py
@@ -68,6 +68,10 @@ class ProviderCatalogScope:
display_name="Anthropic",
providers=(Provider.ANTHROPIC,),
),
+ "anthropic-vertex": ProviderCatalogScope(
+ display_name="Anthropic (Vertex)",
+ providers=(Provider.ANTHROPIC_VERTEX,),
+ ),
"google": ProviderCatalogScope(
display_name="Google",
providers=(Provider.GOOGLE,),
@@ -98,11 +102,13 @@ class ProviderCatalogScope:
"hf": "huggingface",
"codex-responses": "codexresponses",
"codex_responses": "codexresponses",
+ "anthropicvertex": "anthropic-vertex",
}
_PROVIDER_CATALOG_VISIBLE_CHOICES: tuple[str, ...] = (
"openai",
"anthropic",
+ "anthropic-vertex",
"google",
"deepseek",
"aliyun",
@@ -266,7 +272,7 @@ def _empty_api_key_results() -> dict[str, dict[str, str]]:
return {
provider.config_name: {"env": "", "config": ""}
for provider in Provider
- if provider != Provider.FAST_AGENT
+ if provider not in {Provider.FAST_AGENT, Provider.ANTHROPIC_VERTEX}
}
@@ -388,7 +394,7 @@ def check_api_keys(secrets_summary: dict, config_summary: dict) -> dict:
for provider_name, status in results.items():
env_key_name = ProviderKeyManager.get_env_key_name(provider_name)
- env_key_value = os.environ.get(env_key_name)
+ env_key_value = os.environ.get(env_key_name) if env_key_name else None
if env_key_value:
status["env"] = _mask_configured_secret(env_key_value)
@@ -1785,6 +1791,8 @@ def _should_warn_for_provider(
vertex_cfg = google_cfg.get("vertex_ai", {}) if isinstance(google_cfg, dict) else {}
if isinstance(vertex_cfg, dict) and vertex_cfg.get("enabled") is True:
return False
+ if provider == Provider.ANTHROPIC_VERTEX:
+ return False
return True
@@ -2021,11 +2029,14 @@ def _render_check_summary_guidance(context: _CheckSummaryContext) -> None:
)
console.print("1. Add keys to fastagent.secrets.yaml")
env_vars = ", ".join(
- [
- ProviderKeyManager.get_env_key_name(p.config_name)
- for p in Provider
- if p != Provider.FAST_AGENT
- ]
+ filter(
+ None,
+ (
+ ProviderKeyManager.get_env_key_name(p.config_name)
+ for p in Provider
+ if p != Provider.FAST_AGENT
+ ),
+ )
)
console.print(f"2. Or set environment variables ({env_vars})")
diff --git a/src/fast_agent/cli/runtime/agent_setup.py b/src/fast_agent/cli/runtime/agent_setup.py
index 0724db3a1..9d36e5e45 100644
--- a/src/fast_agent/cli/runtime/agent_setup.py
+++ b/src/fast_agent/cli/runtime/agent_setup.py
@@ -25,6 +25,7 @@
from fast_agent.ui.interactive_diagnostics import write_interactive_trace
from fast_agent.ui.model_picker_common import (
has_explicit_provider_prefix,
+ infer_initial_picker_provider,
normalize_generic_model_spec,
)
from fast_agent.utils.async_utils import suppress_known_runtime_warnings
@@ -133,9 +134,8 @@ def _resolve_model_picker_initial_selection(
return "overlays", initial_model_spec
provider_name: str | None = None
- identity = model_identity(initial_model_spec)
- if identity is not None:
- provider_name = identity[0].config_name
+ if model_identity(initial_model_spec) is not None:
+ provider_name = infer_initial_picker_provider(initial_model_spec)
return provider_name, initial_model_spec
try:
@@ -148,7 +148,7 @@ def _resolve_model_picker_initial_selection(
resolved_identity = model_identity(resolved_model_spec)
if resolved_identity is not None:
- provider_name = resolved_identity[0].config_name
+ provider_name = infer_initial_picker_provider(resolved_model_spec)
return provider_name, resolved_model_spec
return None, initial_model_spec
diff --git a/src/fast_agent/commands/handlers/model.py b/src/fast_agent/commands/handlers/model.py
index 591d9fdd4..bb341a1c4 100644
--- a/src/fast_agent/commands/handlers/model.py
+++ b/src/fast_agent/commands/handlers/model.py
@@ -44,6 +44,7 @@
format_text_verbosity,
parse_text_verbosity,
)
+from fast_agent.ui.model_picker_common import infer_initial_picker_provider
if TYPE_CHECKING:
from fast_agent.commands.context import CommandContext
@@ -182,8 +183,7 @@ def _resolve_toggle_to_default(
def _resolve_model_switch_initial_provider(llm: "FastAgentLLMProtocol") -> str | None:
if llm.resolved_model.overlay is not None:
return "overlays"
- config_name = llm.provider.config_name
- return config_name.strip() if config_name.strip() else None
+ return infer_initial_picker_provider(llm.resolved_model.selected_model_name)
async def handle_model_switch(
diff --git a/src/fast_agent/commands/handlers/models_manager.py b/src/fast_agent/commands/handlers/models_manager.py
index abd65e5b6..2d60395a7 100644
--- a/src/fast_agent/commands/handlers/models_manager.py
+++ b/src/fast_agent/commands/handlers/models_manager.py
@@ -25,6 +25,7 @@
from fast_agent.llm.model_selection import ModelSelectionCatalog
from fast_agent.llm.provider_types import Provider
from fast_agent.ui.a3_headers import build_a3_section_header
+from fast_agent.ui.model_picker_common import infer_initial_picker_provider
if TYPE_CHECKING:
from pathlib import Path
@@ -905,22 +906,7 @@ def _normalize_interactive_reference_token(token: str) -> str:
def _infer_initial_provider_name(model_spec: str | None) -> str | None:
- if model_spec is None:
- return None
-
- normalized = model_spec.strip()
- if not normalized:
- return None
-
- try:
- parsed = ModelFactory.parse_model_string(
- normalized,
- presets=ModelFactory.MODEL_PRESETS,
- )
- except Exception:
- return None
-
- return parsed.provider.config_name
+ return infer_initial_picker_provider(model_spec)
async def _prompt_for_reference_token(
diff --git a/src/fast_agent/llm/model_database.py b/src/fast_agent/llm/model_database.py
index 25501088b..0c10c94b2 100644
--- a/src/fast_agent/llm/model_database.py
+++ b/src/fast_agent/llm/model_database.py
@@ -16,6 +16,9 @@
ReasoningEffortSpec,
)
from fast_agent.llm.text_verbosity import TextVerbositySpec
+from fast_agent.mcp.mime_utils import DOCUMENT_MIME_TYPES
+
+ResourceSource = Literal["embedded", "link"]
class ModelParameters(BaseModel):
@@ -99,9 +102,22 @@ class ModelDatabase:
_RUNTIME_MODEL_PARAMS: dict[str, ModelParameters] = {}
# Common parameter sets
- OPENAI_MULTIMODAL = ["text/plain", "image/jpeg", "image/png", "image/webp", "application/pdf"]
+ OPENAI_MULTIMODAL = [
+ "text/plain",
+ "image/jpeg",
+ "image/png",
+ "image/webp",
+ *DOCUMENT_MIME_TYPES,
+ ]
OPENAI_VISION = ["text/plain", "image/jpeg", "image/png", "image/webp"]
ANTHROPIC_MULTIMODAL = [
+ "text/plain",
+ "image/jpeg",
+ "image/png",
+ "image/webp",
+ *DOCUMENT_MIME_TYPES,
+ ]
+ ANTHROPIC_VERTEX_MULTIMODAL = [
"text/plain",
"image/jpeg",
"image/png",
@@ -801,14 +817,26 @@ class ModelDatabase:
# aliyun modern
"qwen3-max": ALIYUN_QWEN3_MODERN,
}
+ _PROVIDER_MODEL_OVERRIDES: dict[tuple[Provider, str], ModelParameters] = {}
+ _PROVIDER_WIRE_MODEL_NAMES: dict[tuple[Provider, str], str] = {}
@classmethod
- def get_model_params(cls, model: str) -> ModelParameters | None:
+ def get_model_params(
+ cls,
+ model: str,
+ *,
+ provider: Provider | None = None,
+ ) -> ModelParameters | None:
"""Get model parameters for a given model name"""
if not model:
return None
+ effective_provider = provider or cls.get_default_provider(model)
normalized = cls.normalize_model_name(model)
+ if effective_provider is not None:
+ provider_override = cls._PROVIDER_MODEL_OVERRIDES.get((effective_provider, normalized))
+ if provider_override is not None:
+ return provider_override
params = cls.MODELS.get(normalized)
if params is not None:
return params
@@ -869,25 +897,32 @@ def normalize_model_name(cls, model: str) -> str:
return model_spec.strip().lower()
@classmethod
- def get_context_window(cls, model: str) -> int | None:
+ def get_context_window(cls, model: str, *, provider: Provider | None = None) -> int | None:
"""Get context window size for a model"""
- params = cls.get_model_params(model)
+ params = cls.get_model_params(model, provider=provider)
return params.context_window if params else None
@classmethod
- def get_max_output_tokens(cls, model: str) -> int | None:
+ def get_max_output_tokens(cls, model: str, *, provider: Provider | None = None) -> int | None:
"""Get maximum output tokens for a model"""
- params = cls.get_model_params(model)
+ params = cls.get_model_params(model, provider=provider)
return params.max_output_tokens if params else None
@classmethod
- def get_tokenizes(cls, model: str) -> list[str] | None:
+ def get_tokenizes(cls, model: str, *, provider: Provider | None = None) -> list[str] | None:
"""Get supported tokenization types for a model"""
- params = cls.get_model_params(model)
+ params = cls.get_model_params(model, provider=provider)
return params.tokenizes if params else None
@classmethod
- def supports_mime(cls, model: str, mime_type: str) -> bool:
+ def supports_mime(
+ cls,
+ model: str,
+ mime_type: str,
+ *,
+ provider: Provider | None = None,
+ resource_source: ResourceSource | None = None,
+ ) -> bool:
"""
Return True if the given model supports the provided MIME type.
@@ -896,7 +931,7 @@ def supports_mime(cls, model: str, mime_type: str) -> bool:
"""
from fast_agent.mcp.mime_utils import normalize_mime_type
- tokenizes = cls.get_tokenizes(model) or []
+ tokenizes = cls.get_tokenizes(model, provider=provider) or []
# Normalize the candidate and the database entries to lowercase
normalized_supported = [t.lower() for t in tokenizes]
@@ -911,76 +946,129 @@ def supports_mime(cls, model: str, mime_type: str) -> bool:
if not normalized:
return False
+ if (
+ resource_source == "link"
+ and provider in {Provider.ANTHROPIC, Provider.ANTHROPIC_VERTEX}
+ and normalized in DOCUMENT_MIME_TYPES
+ and normalized != "application/pdf"
+ ):
+ return False
+
return normalized.lower() in normalized_supported
@classmethod
- def supports_any_mime(cls, model: str, mime_types: list[str]) -> bool:
+ def supports_any_mime(
+ cls,
+ model: str,
+ mime_types: list[str],
+ *,
+ provider: Provider | None = None,
+ resource_source: ResourceSource | None = None,
+ ) -> bool:
"""Return True if the model supports any of the provided MIME types."""
- return any(cls.supports_mime(model, m) for m in mime_types)
+ return any(
+ cls.supports_mime(
+ model,
+ m,
+ provider=provider,
+ resource_source=resource_source,
+ )
+ for m in mime_types
+ )
@classmethod
- def get_json_mode(cls, model: str) -> str | None:
+ def get_json_mode(cls, model: str, *, provider: Provider | None = None) -> str | None:
"""Get supported json mode (structured output) for a model"""
- params = cls.get_model_params(model)
+ params = cls.get_model_params(model, provider=provider)
return params.json_mode if params else None
@classmethod
- def get_reasoning(cls, model: str) -> str | None:
+ def get_reasoning(cls, model: str, *, provider: Provider | None = None) -> str | None:
"""Get supported reasoning output style for a model"""
- params = cls.get_model_params(model)
+ params = cls.get_model_params(model, provider=provider)
return params.reasoning if params else None
@classmethod
- def get_reasoning_effort_spec(cls, model: str) -> ReasoningEffortSpec | None:
+ def get_reasoning_effort_spec(
+ cls,
+ model: str,
+ *,
+ provider: Provider | None = None,
+ ) -> ReasoningEffortSpec | None:
"""Get reasoning effort capabilities for a model, if defined."""
- params = cls.get_model_params(model)
+ params = cls.get_model_params(model, provider=provider)
return params.reasoning_effort_spec if params else None
@classmethod
- def get_text_verbosity_spec(cls, model: str) -> TextVerbositySpec | None:
+ def get_text_verbosity_spec(
+ cls,
+ model: str,
+ *,
+ provider: Provider | None = None,
+ ) -> TextVerbositySpec | None:
"""Get text verbosity capabilities for a model, if defined."""
- params = cls.get_model_params(model)
+ params = cls.get_model_params(model, provider=provider)
return params.text_verbosity_spec if params else None
@classmethod
- def get_stream_mode(cls, model: str | None) -> Literal["openai", "manual"]:
+ def get_stream_mode(
+ cls,
+ model: str | None,
+ *,
+ provider: Provider | None = None,
+ ) -> Literal["openai", "manual"]:
"""Return preferred streaming accumulation strategy for a model."""
if not model:
return "openai"
- params = cls.get_model_params(model)
+ params = cls.get_model_params(model, provider=provider)
return params.stream_mode if params else "openai"
@classmethod
- def get_default_max_tokens(cls, model: str) -> int:
+ def get_default_max_tokens(cls, model: str, *, provider: Provider | None = None) -> int:
"""Get default max_tokens for RequestParams based on model"""
if not model:
return 2048 # Fallback when no model specified
- params = cls.get_model_params(model)
+ params = cls.get_model_params(model, provider=provider)
if params:
return params.max_output_tokens
return 2048 # Fallback for unknown models
@classmethod
- def get_default_temperature(cls, model: str | None) -> float | None:
+ def get_default_temperature(
+ cls,
+ model: str | None,
+ *,
+ provider: Provider | None = None,
+ ) -> float | None:
"""Get default temperature for RequestParams based on model metadata."""
if not model:
return None
- params = cls.get_model_params(model)
+ params = cls.get_model_params(model, provider=provider)
return params.default_temperature if params else None
@classmethod
- def get_cache_ttl(cls, model: str) -> Literal["5m", "1h"] | None:
+ def get_cache_ttl(
+ cls,
+ model: str,
+ *,
+ provider: Provider | None = None,
+ ) -> Literal["5m", "1h"] | None:
"""Get cache TTL for a model, or None if not supported"""
- params = cls.get_model_params(model)
+ params = cls.get_model_params(model, provider=provider)
return params.cache_ttl if params else None
@classmethod
- def get_long_context_window(cls, model: str) -> int | None:
+ def get_long_context_window(
+ cls,
+ model: str,
+ *,
+ provider: Provider | None = None,
+ ) -> int | None:
"""Get optional long-context override window for a model."""
- params = cls.get_model_params(model)
+ params = cls.get_model_params(model, provider=provider)
return params.long_context_window if params else None
@classmethod
@@ -1036,23 +1124,43 @@ def supports_response_websocket_provider(cls, model: str, provider: Provider) ->
return provider in providers
@classmethod
- def get_anthropic_web_search_version(cls, model: str) -> str | None:
+ def get_anthropic_web_search_version(
+ cls,
+ model: str,
+ *,
+ provider: Provider | None = None,
+ ) -> str | None:
"""Get Anthropic web_search tool version for a model, if available."""
- params = cls.get_model_params(model)
+ params = cls.get_model_params(model, provider=provider)
return params.anthropic_web_search_version if params else None
@classmethod
- def get_anthropic_web_fetch_version(cls, model: str) -> str | None:
+ def get_anthropic_web_fetch_version(
+ cls,
+ model: str,
+ *,
+ provider: Provider | None = None,
+ ) -> str | None:
"""Get Anthropic web_fetch tool version for a model, if available."""
- params = cls.get_model_params(model)
+ params = cls.get_model_params(model, provider=provider)
return params.anthropic_web_fetch_version if params else None
@classmethod
- def get_anthropic_required_betas(cls, model: str) -> tuple[str, ...] | None:
+ def get_anthropic_required_betas(
+ cls,
+ model: str,
+ *,
+ provider: Provider | None = None,
+ ) -> tuple[str, ...] | None:
"""Get Anthropic beta headers required for model-specific capabilities."""
- params = cls.get_model_params(model)
+ params = cls.get_model_params(model, provider=provider)
return params.anthropic_required_betas if params else None
+ @classmethod
+ def resolve_wire_model_name(cls, *, provider: Provider, model_name: str) -> str:
+ normalized = cls.normalize_model_name(model_name)
+ return cls._PROVIDER_WIRE_MODEL_NAMES.get((provider, normalized), model_name.strip())
+
@classmethod
def list_long_context_models(cls) -> list[str]:
"""List model names that support explicit long-context overrides."""
@@ -1172,3 +1280,17 @@ def is_fast_model(cls, model: str) -> bool:
def list_fast_models(cls) -> list[str]:
"""List model names marked as fast in metadata."""
return sorted(name for name, params in cls.MODELS.items() if params.fast)
+
+
+ModelDatabase._PROVIDER_MODEL_OVERRIDES.update(
+ {
+ (Provider.ANTHROPIC_VERTEX, model_name): params.model_copy(
+ update={
+ "tokenizes": ModelDatabase.ANTHROPIC_VERTEX_MULTIMODAL,
+ "anthropic_web_fetch_version": None,
+ }
+ )
+ for model_name, params in ModelDatabase.MODELS.items()
+ if params.default_provider == Provider.ANTHROPIC
+ }
+)
diff --git a/src/fast_agent/llm/model_display_name.py b/src/fast_agent/llm/model_display_name.py
index 5ccaac2ae..ec5269e94 100644
--- a/src/fast_agent/llm/model_display_name.py
+++ b/src/fast_agent/llm/model_display_name.py
@@ -1,7 +1,8 @@
from __future__ import annotations
from typing import TYPE_CHECKING
-from urllib.parse import parse_qs, urlsplit
+
+from fast_agent.llm.provider_types import Provider
if TYPE_CHECKING:
from fast_agent.llm.resolved_model import ResolvedModelSpec
@@ -12,6 +13,15 @@ def format_model_display_name(model: str | None, *, max_len: int | None = None)
return model
trimmed = model.rstrip("/").partition("?")[0]
+ for provider in Provider:
+ dotted_prefix = f"{provider.config_name}."
+ slash_prefix = f"{provider.config_name}/"
+ if trimmed.startswith(dotted_prefix):
+ trimmed = trimmed[len(dotted_prefix) :]
+ break
+ if trimmed.startswith(slash_prefix):
+ trimmed = trimmed[len(slash_prefix) :]
+ break
if "/" in trimmed:
display = trimmed.split("/")[-1] or trimmed
else:
@@ -41,10 +51,7 @@ def resolve_resolved_model_display_name(
or resolved_model.wire_model_name
)
- if (
- resolved_model.provider.value == "anthropic"
- and resolved_model.model_config.via == "vertex"
- ):
+ if resolved_model.provider == Provider.ANTHROPIC_VERTEX:
display = f"{display} · Vertex"
if max_len is not None and len(display) > max_len:
@@ -78,9 +85,10 @@ def resolve_model_display_name(
if display is None:
return None
if model:
- query = parse_qs(urlsplit(model).query)
- via_values = query.get("via") or query.get("source") or []
- if via_values and via_values[-1].strip().lower() == "vertex":
+ trimmed = model.partition("?")[0].strip()
+ if trimmed.startswith(f"{Provider.ANTHROPIC_VERTEX.config_name}.") or trimmed.startswith(
+ f"{Provider.ANTHROPIC_VERTEX.config_name}/"
+ ):
display = f"{display} · Vertex"
if max_len is not None and len(display) > max_len:
return display[: max_len - 1] + "…"
diff --git a/src/fast_agent/llm/model_factory.py b/src/fast_agent/llm/model_factory.py
index 1a64815e0..e95c0b246 100644
--- a/src/fast_agent/llm/model_factory.py
+++ b/src/fast_agent/llm/model_factory.py
@@ -14,7 +14,6 @@
from fast_agent.llm.internal.slow import SlowLLM
from fast_agent.llm.model_database import ModelDatabase
from fast_agent.llm.model_overlays import load_model_overlay_registry
-from fast_agent.llm.provider.anthropic.vertex_config import AnthropicRoute
from fast_agent.llm.provider_types import Provider
from fast_agent.llm.reasoning_effort import ReasoningEffortSetting, parse_reasoning_setting
from fast_agent.llm.resolved_model import ResolvedModelSpec, resolve_base_model_params
@@ -36,7 +35,6 @@ class ModelConfig(BaseModel):
provider: Provider
model_name: str
- via: AnthropicRoute | None = None
reasoning_effort: ReasoningEffortSetting | None = None
text_verbosity: TextVerbosityLevel | None = None
structured_output_mode: StructuredOutputMode | None = None
@@ -57,7 +55,6 @@ class ModelConfig(BaseModel):
class ModelQueryOverrides:
"""Typed query overrides parsed from a model spec query string."""
- via: AnthropicRoute | None = None
reasoning_effort: ReasoningEffortSetting | None = None
instant: bool | None = None
text_verbosity: TextVerbosityLevel | None = None
@@ -77,7 +74,6 @@ class ModelQueryOverrides:
def with_defaults(self, defaults: Self) -> "ModelQueryOverrides":
"""Return a copy with unset values filled from defaults."""
return ModelQueryOverrides(
- via=self.via if self.via is not None else defaults.via,
reasoning_effort=(
self.reasoning_effort
if self.reasoning_effort is not None
@@ -132,7 +128,6 @@ def to_model_config(self) -> ModelConfig:
return ModelConfig(
provider=self.provider,
model_name=self.model_name,
- via=self.query_overrides.via,
reasoning_effort=self.reasoning_effort,
text_verbosity=self.query_overrides.text_verbosity,
structured_output_mode=self.query_overrides.structured_output_mode,
@@ -225,8 +220,37 @@ def _parse_query_overrides(
query_params: Mapping[str, list[str]],
model_spec: str,
) -> ModelQueryOverrides:
+ supported_keys = {
+ "reasoning",
+ "verbosity",
+ "structured",
+ "instant",
+ "context",
+ "transport",
+ "service_tier",
+ "web_search",
+ "web_fetch",
+ "temperature",
+ "temp",
+ "top_p",
+ "topP",
+ "top_k",
+ "topK",
+ "min_p",
+ "minP",
+ "presence_penalty",
+ "presencePenalty",
+ "repetition_penalty",
+ "repetitionPenalty",
+ }
+ unsupported_keys = sorted(set(query_params) - supported_keys)
+ if unsupported_keys:
+ joined = ", ".join(f"'{key}'" for key in unsupported_keys)
+ raise ModelConfigError(
+ f"Unsupported model query parameter(s) {joined} in '{model_spec}'"
+ )
+
reasoning_effort: ReasoningEffortSetting | None = None
- via: AnthropicRoute | None = None
text_verbosity: TextVerbosityLevel | None = None
structured_output_mode: StructuredOutputMode | None = None
instant: bool | None = None
@@ -245,13 +269,6 @@ def _parse_query_overrides(
)
reasoning_effort = parsed_reasoning
- route_values = _collect_query_values(query_params, ("via", "source"))
- if route_values:
- raw_value = route_values[-1].strip().lower()
- if raw_value not in {"direct", "vertex"}:
- raise ModelConfigError(f"Invalid via query value: '{raw_value}' in '{model_spec}'")
- via = "vertex" if raw_value == "vertex" else "direct"
-
if "verbosity" in query_params:
raw_value = _collect_query_values(query_params, ("verbosity",))[-1]
parsed_verbosity = parse_text_verbosity(raw_value)
@@ -318,7 +335,6 @@ def _parse_query_overrides(
web_fetch = _parse_on_off_query(raw_value, "web_fetch", model_spec)
return ModelQueryOverrides(
- via=via,
reasoning_effort=reasoning_effort,
instant=instant,
text_verbosity=text_verbosity,
@@ -692,11 +708,6 @@ def parse_model_spec(
_validate_transport_constraints(provider, model_name, merged_overrides.transport)
_validate_service_tier_constraints(provider, model_name, merged_overrides.service_tier)
- if merged_overrides.via is not None and provider != Provider.ANTHROPIC:
- raise ModelConfigError(
- f"Query parameter 'via' is only supported for Anthropic models, got '{expanded_model_spec}'."
- )
-
return ParsedModelSpec(
raw_input=raw_input,
expanded_input=expanded_model_spec,
@@ -758,6 +769,10 @@ def resolve_model_spec(
provider=parsed.provider,
model_name=parsed.model_name,
)
+ wire_model_name = ModelDatabase.resolve_wire_model_name(
+ provider=parsed.provider,
+ model_name=parsed.model_name,
+ )
return ResolvedModelSpec(
raw_input=model_string,
@@ -765,7 +780,7 @@ def resolve_model_spec(
source=source,
model_config=model_config,
provider=model_config.provider,
- wire_model_name=model_config.model_name,
+ wire_model_name=wire_model_name,
overlay=selected_overlay,
model_params=model_params,
)
@@ -832,6 +847,12 @@ def _load_provider_class(cls, provider: Provider) -> type:
from fast_agent.llm.provider.anthropic.llm_anthropic import AnthropicLLM
return AnthropicLLM
+ if provider == Provider.ANTHROPIC_VERTEX:
+ from fast_agent.llm.provider.anthropic.llm_anthropic_vertex import (
+ AnthropicVertexLLM,
+ )
+
+ return AnthropicVertexLLM
if provider == Provider.OPENAI:
from fast_agent.llm.provider.openai.llm_openai import OpenAILLM
diff --git a/src/fast_agent/llm/model_info.py b/src/fast_agent/llm/model_info.py
index 6d42cb30c..37d9fa0c5 100644
--- a/src/fast_agent/llm/model_info.py
+++ b/src/fast_agent/llm/model_info.py
@@ -10,9 +10,10 @@
from dataclasses import dataclass
from typing import TYPE_CHECKING
-from fast_agent.llm.model_database import ModelDatabase
+from fast_agent.llm.model_database import ModelDatabase, ResourceSource
from fast_agent.llm.model_factory import ModelFactory
from fast_agent.llm.provider_types import Provider
+from fast_agent.mcp.mime_utils import DOCUMENT_MIME_TYPES, normalize_mime_type
if TYPE_CHECKING:
# Import behind TYPE_CHECKING to avoid import cycles at runtime
@@ -32,30 +33,95 @@ class ModelInfo:
json_mode: str | None
reasoning: str | None
+ def _supports_provider_document_mime(
+ self,
+ normalized: str | None,
+ *,
+ resource_source: ResourceSource | None = None,
+ ) -> bool | None:
+ if not normalized or normalized not in DOCUMENT_MIME_TYPES:
+ return None
+
+ if (
+ resource_source == "link"
+ and self.provider in {Provider.ANTHROPIC, Provider.ANTHROPIC_VERTEX}
+ and normalized != "application/pdf"
+ ):
+ return False
+
+ multimodal_tokens = [mime.lower() for mime in (self.tokenizes or [])]
+ has_multimodal_io = any(mime.startswith("image/") for mime in multimodal_tokens)
+
+ if self.provider in {
+ Provider.RESPONSES,
+ Provider.OPENRESPONSES,
+ Provider.CODEX_RESPONSES,
+ Provider.ANTHROPIC,
+ }:
+ return has_multimodal_io
+
+ if self.provider in {
+ Provider.OPENAI,
+ Provider.AZURE,
+ Provider.ALIYUN,
+ Provider.GOOGLE_OAI,
+ }:
+ return normalized == "application/pdf"
+
+ return None
+
+ def supports_mime(
+ self,
+ mime_type: str,
+ *,
+ resource_source: ResourceSource | None = None,
+ ) -> bool:
+ tokenizes = [mime.lower() for mime in (self.tokenizes or [])]
+ mt = (mime_type or "").strip().lower()
+ if mt.endswith("/*") and "/" in mt:
+ prefix = mt.split("/", 1)[0] + "/"
+ if any(supported.startswith(prefix) for supported in tokenizes):
+ return True
+
+ normalized = normalize_mime_type(mime_type)
+ provider_override = self._supports_provider_document_mime(
+ normalized,
+ resource_source=resource_source,
+ )
+ if provider_override is not None:
+ return provider_override
+ if normalized and normalized.lower() in tokenizes:
+ return True
+
+ return ModelDatabase.supports_mime(
+ self.name,
+ mime_type,
+ provider=self.provider,
+ resource_source=resource_source,
+ )
+
+ def supports_any_mime(
+ self,
+ mime_types: list[str],
+ *,
+ resource_source: ResourceSource | None = None,
+ ) -> bool:
+ return any(
+ self.supports_mime(mime_type, resource_source=resource_source)
+ for mime_type in mime_types
+ )
+
@property
def supports_text(self) -> bool:
- if "text/plain" in (self.tokenizes or []):
- return True
- return ModelDatabase.supports_mime(self.name, "text/plain")
+ return self.supports_mime("text/plain")
@property
def supports_document(self) -> bool:
- # Document support currently keyed off PDF support
- if "application/pdf" in (self.tokenizes or []):
- return True
- return ModelDatabase.supports_mime(self.name, "pdf")
+ return self.supports_any_mime(list(DOCUMENT_MIME_TYPES))
@property
def supports_vision(self) -> bool:
- # Any common image format indicates vision support
- tokenizes = self.tokenizes or []
- if any(mt in tokenizes for mt in ("image/jpeg", "image/png", "image/webp")):
- return True
-
- return any(
- ModelDatabase.supports_mime(self.name, mt)
- for mt in ("image/jpeg", "image/png", "image/webp")
- )
+ return self.supports_any_mime(["image/jpeg", "image/png", "image/webp"])
@property
def tdv_flags(self) -> tuple[bool, bool, bool]:
@@ -84,7 +150,7 @@ def from_resolved_model(
@classmethod
def from_name(cls, name: str, provider: Provider | None = None) -> "ModelInfo" | None:
canonical_name = ModelFactory.MODEL_PRESETS.get(name, name)
- params = ModelDatabase.get_model_params(canonical_name)
+ params = ModelDatabase.get_model_params(canonical_name, provider=provider)
if not params:
# Unknown model: return a conservative default that supports text only.
# This matches the desired behavior for TDV display fallbacks.
diff --git a/src/fast_agent/llm/model_overlays.py b/src/fast_agent/llm/model_overlays.py
index 8a3633ddf..93ec36c9d 100644
--- a/src/fast_agent/llm/model_overlays.py
+++ b/src/fast_agent/llm/model_overlays.py
@@ -52,11 +52,7 @@ def _overlay_model_key(provider: Provider, model_name: str) -> str:
def _existing_model_params(provider: Provider, model_name: str) -> ModelParameters | None:
- normalized = _overlay_model_key(provider, model_name)
- params = ModelDatabase.MODELS.get(normalized)
- if params is not None:
- return params
- return ModelDatabase._RUNTIME_MODEL_PARAMS.get(normalized)
+ return ModelDatabase.get_model_params(model_name, provider=provider)
class ModelOverlayConnection(BaseModel):
diff --git a/src/fast_agent/llm/model_selection.py b/src/fast_agent/llm/model_selection.py
index ce46d39c4..00a970d7f 100644
--- a/src/fast_agent/llm/model_selection.py
+++ b/src/fast_agent/llm/model_selection.py
@@ -78,6 +78,15 @@ class ModelSelectionCatalog:
CatalogModelEntry(alias="haiku", model="claude-haiku-4-5", fast=True),
CatalogModelEntry(alias="opus", model="claude-opus-4-6"),
),
+ Provider.ANTHROPIC_VERTEX: (
+ CatalogModelEntry(alias="sonnet", model="anthropic-vertex.claude-sonnet-4-6"),
+ CatalogModelEntry(
+ alias="haiku",
+ model="anthropic-vertex.claude-haiku-4-5",
+ fast=True,
+ ),
+ CatalogModelEntry(alias="opus", model="anthropic-vertex.claude-opus-4-6"),
+ ),
Provider.GOOGLE: (
CatalogModelEntry(
alias="gemini3-flash",
@@ -593,6 +602,14 @@ def configured_providers(
):
provider_name = provider.config_name
+ if provider == Provider.ANTHROPIC_VERTEX:
+ from fast_agent.llm.provider.anthropic.vertex_config import anthropic_vertex_ready
+
+ ready, _ = anthropic_vertex_ready(config_payload)
+ if ready:
+ providers.append(provider)
+ continue
+
# Google Vertex can run without an API key.
if provider == Provider.GOOGLE and cls._google_vertex_enabled(config_payload):
providers.append(provider)
@@ -647,6 +664,13 @@ def _list_static_models_for_provider(
).entries_for_provider(provider)
]
models = ModelDatabase.list_models()
+ if provider == Provider.ANTHROPIC_VERTEX:
+ static_models = [
+ f"{provider.config_name}.{model}"
+ for model in models
+ if ModelDatabase.get_default_provider(model) == Provider.ANTHROPIC
+ ]
+ return ModelSelectionCatalog._dedupe_preserve_order([*overlay_models, *static_models])
static_models = [
model for model in models if ModelDatabase.get_default_provider(model) == provider
]
diff --git a/src/fast_agent/llm/provider/anthropic/README_anth_multipart.md b/src/fast_agent/llm/provider/anthropic/README_anth_multipart.md
index fa57b3961..4c5f4f6be 100644
--- a/src/fast_agent/llm/provider/anthropic/README_anth_multipart.md
+++ b/src/fast_agent/llm/provider/anthropic/README_anth_multipart.md
@@ -15,7 +15,8 @@ This converter transforms MCP (Model Context Protocol) user messages to Anthropi
| `TextContent` | `text` | Supported for user messages |
| `ImageContent` | `image` | Limited to jpeg, png, gif, webp formats |
| `EmbeddedResource` (text) | `document` | Converted to text documents with extracted filename as title |
-| `EmbeddedResource` (PDF) | `document` | Supported for PDFs only |
+| `EmbeddedResource` (PDF) | `document` | Converted as a native PDF document block |
+| `EmbeddedResource` (DOCX/XLSX/PPTX) | `document` | Direct Anthropic uses Files API upload + `file` document source |
| `EmbeddedResource` (image) | `image` | Must be in supported image formats |
| `EmbeddedResource` (with image URI) | `image` with URL source | HTTP(S) URIs in image resources are directly used as image URLs |
@@ -27,9 +28,11 @@ This converter transforms MCP (Model Context Protocol) user messages to Anthropi
- **Missing MIME Types**: When not provided, MIME types are guessed from file extensions
- **Filenames**: Simple filenames and full URIs are both supported for resources
- **Titles**: Document titles are extracted from the filename portion of URIs
+- **Remote URLs**: Unknown HTTP(S) URLs stay `application/octet-stream`; only known image URLs are promoted to image inputs
## Limitations
- Only supports MIME types allowed by Anthropic's API
+- Native binary document conversion without upload is PDF-specific; DOCX/XLSX/PPTX require the direct Anthropic Files API path and are not supported as linked URLs
- Cannot convert unsupported image formats (only forwards supported formats)
- This converter currently focuses on user message conversions (assistant message conversion handled separately)
diff --git a/src/fast_agent/llm/provider/anthropic/llm_anthropic.py b/src/fast_agent/llm/provider/anthropic/llm_anthropic.py
index b9f101c8b..720a95506 100644
--- a/src/fast_agent/llm/provider/anthropic/llm_anthropic.py
+++ b/src/fast_agent/llm/provider/anthropic/llm_anthropic.py
@@ -1,4 +1,6 @@
import asyncio
+import base64
+import hashlib
import inspect
import json
import os
@@ -11,17 +13,18 @@
from anthropic import (
APIError,
AsyncAnthropic,
- AsyncAnthropicVertex,
AuthenticationError,
transform_schema,
)
from anthropic.lib.streaming import BetaAsyncMessageStream
from mcp import Tool
from mcp.types import (
+ BlobResourceContents,
CallToolRequest,
CallToolRequestParams,
CallToolResult,
ContentBlock,
+ EmbeddedResource,
TextContent,
)
from opentelemetry import trace
@@ -71,16 +74,9 @@
)
from fast_agent.llm.provider.anthropic.cache_planner import AnthropicCachePlanner
from fast_agent.llm.provider.anthropic.multipart_converter_anthropic import (
+ ANTHROPIC_FILE_ID_META_KEY,
AnthropicConverter,
)
-from fast_agent.llm.provider.anthropic.vertex_config import (
- AnthropicRoute,
- anthropic_vertex_config,
- detect_google_adc,
- resolve_anthropic_route,
- resolve_anthropic_vertex_location,
- resolve_anthropic_vertex_project_id,
-)
from fast_agent.llm.provider.anthropic.web_tools import (
build_web_tool_params,
dedupe_preserve_order,
@@ -102,6 +98,7 @@
from fast_agent.llm.structured_output_mode import StructuredOutputMode
from fast_agent.llm.tool_tracking import ToolCallTracker
from fast_agent.llm.usage_tracking import TurnUsage
+from fast_agent.mcp.mime_utils import DOCUMENT_MIME_TYPES, guess_mime_type, normalize_mime_type
from fast_agent.types import PromptMessageExtended
from fast_agent.types.llm_stop_reason import LlmStopReason
from fast_agent.utils.type_narrowing import is_str_object_dict
@@ -368,17 +365,11 @@ class AnthropicLLM(FastAgentLLM[MessageParam, Message]):
def __init__(self, **kwargs) -> None:
# Initialize logger - keep it simple without name reference
kwargs.pop("provider", None)
- route_override = kwargs.pop("via", None)
structured_override = kwargs.pop("structured_output_mode", None)
long_context_requested = kwargs.pop("long_context", False)
web_search_override = kwargs.pop("web_search", None)
web_fetch_override = kwargs.pop("web_fetch", None)
- super().__init__(provider=Provider.ANTHROPIC, **kwargs)
- self._route_override: AnthropicRoute | None = (
- cast("AnthropicRoute | None", route_override)
- if route_override in {"direct", "vertex"}
- else None
- )
+ super().__init__(provider=self.provider_identity(), **kwargs)
self._structured_output_mode_override: StructuredOutputMode | None = structured_override
self._web_search_override: bool | None = (
bool(web_search_override) if isinstance(web_search_override, bool) else None
@@ -386,6 +377,7 @@ def __init__(self, **kwargs) -> None:
self._web_fetch_override: bool | None = (
bool(web_fetch_override) if isinstance(web_fetch_override, bool) else None
)
+ self._file_id_cache: dict[str, str] = {}
raw_setting = kwargs.get("reasoning_effort", None)
reasoning_source: str | None = None
@@ -498,6 +490,10 @@ def _initialize_default_params(self, kwargs: dict) -> RequestParams:
"""Initialize Anthropic-specific default parameters"""
return self._initialize_default_params_with_model_fallback(kwargs, DEFAULT_ANTHROPIC_MODEL)
+ @classmethod
+ def provider_identity(cls) -> Provider:
+ return Provider.ANTHROPIC
+
def _list_supported_long_context_models(self) -> list[str]:
"""Return models that support explicit long-context overrides."""
from fast_agent.llm.model_database import ModelDatabase
@@ -506,8 +502,6 @@ def _list_supported_long_context_models(self) -> list[str]:
def _provider_base_url(self) -> str | None:
assert self.context.config
- if self._anthropic_route() == "vertex":
- return self._vertex_cfg().base_url
return self.context.config.anthropic.base_url if self.context.config.anthropic else None
def _provider_default_headers(self) -> dict[str, str] | None:
@@ -523,65 +517,12 @@ def _provider_api_key(self):
return ProviderKeyManager.get_api_key(
self.provider.config_name,
self.context.config,
- route_hint=self._anthropic_route(),
)
- def _vertex_cfg(self):
- return anthropic_vertex_config(getattr(self.context, "config", None))
-
- def _anthropic_route(self) -> AnthropicRoute:
- explicit_route = self._route_override
- if explicit_route is None:
- resolved = self._resolved_model_spec
- if resolved is not None:
- explicit_route = resolved.model_config.via
- return resolve_anthropic_route(
- getattr(self.context, "config", None),
- explicit_route=explicit_route,
- )
-
- def _vertex_project_id(self) -> str:
- project_id = resolve_anthropic_vertex_project_id(getattr(self.context, "config", None))
- if project_id is None:
- raise ProviderKeyError(
- "Google Cloud project not configured",
- "Set anthropic.vertex_ai.project_id or configure "
- "GOOGLE_CLOUD_PROJECT before using Anthropic via Vertex.",
- )
- return project_id
-
- def _vertex_location(self) -> str:
- location = resolve_anthropic_vertex_location(getattr(self.context, "config", None))
- if location is None:
- raise ProviderKeyError(
- "Google Cloud location not configured",
- "Set anthropic.vertex_ai.location before using Anthropic via Vertex.",
- )
- return location
-
- def _vertex_credentials(self) -> object:
- adc_status = detect_google_adc()
- if not adc_status.available or adc_status.credentials is None:
- raise ProviderKeyError(
- "Google ADC not found",
- "Anthropic via Vertex uses Google Application Default Credentials.\n"
- "Run `gcloud auth application-default login` or configure a service account.",
- )
- return adc_status.credentials
-
- def _initialize_anthropic_client(self) -> AsyncAnthropic | AsyncAnthropicVertex:
+ def _initialize_anthropic_client(self) -> Any:
base_url = self._base_url()
default_headers = self._default_headers()
- if self._anthropic_route() == "vertex":
- return AsyncAnthropicVertex(
- project_id=self._vertex_project_id(),
- region=self._vertex_location(),
- credentials=cast("Any", self._vertex_credentials()),
- base_url=base_url,
- default_headers=default_headers,
- )
-
api_key = self._api_key()
if base_url and base_url.endswith("/v1"):
base_url = base_url.rstrip("/v1")
@@ -591,6 +532,19 @@ def _initialize_anthropic_client(self) -> AsyncAnthropic | AsyncAnthropicVertex:
default_headers=default_headers,
)
+ def supports_files_api(self) -> bool:
+ return True
+
+ def supports_document_uploads(self) -> bool:
+ return self.supports_files_api()
+
+ def supports_web_tools(self) -> bool:
+ return True
+
+ def supports_direct_anthropic_beta(self, feature: str) -> bool:
+ del feature
+ return True
+
def _get_cache_mode(self) -> str:
"""Get the cache mode configuration."""
cache_mode = "auto" # Default to auto
@@ -598,6 +552,91 @@ def _get_cache_mode(self) -> str:
cache_mode = self.context.config.anthropic.cache_mode
return cache_mode
+ @staticmethod
+ def _anthropic_file_cache_key(data: bytes, filename: str, mime_type: str) -> str:
+ digest = hashlib.sha256(data).hexdigest()
+ return f"{mime_type}:{filename}:{digest}"
+
+ async def _upload_anthropic_file_bytes(
+ self,
+ anthropic: Any,
+ *,
+ data: bytes,
+ filename: str,
+ mime_type: str,
+ ) -> str | None:
+ files_api = getattr(getattr(anthropic, "beta", None), "files", None)
+ upload = getattr(files_api, "upload", None)
+ if not callable(upload):
+ return None
+
+ cache_key = self._anthropic_file_cache_key(data, filename, mime_type)
+ cached = self._file_id_cache.get(cache_key)
+ if cached:
+ return cached
+
+ file_metadata = await upload(file=(filename, data, mime_type))
+ file_id = getattr(file_metadata, "id", None)
+ if not isinstance(file_id, str) or not file_id:
+ return None
+
+ self._file_id_cache[cache_key] = file_id
+ return file_id
+
+ async def _prepare_anthropic_file_resources(
+ self,
+ anthropic: Any,
+ messages: Sequence[PromptMessageExtended],
+ ) -> None:
+ if not self.supports_document_uploads():
+ return
+
+ from fast_agent.mcp.resource_utils import extract_title_from_uri
+
+ for message in messages:
+ for content in message.content:
+ if not isinstance(content, EmbeddedResource):
+ continue
+
+ resource = content.resource
+ if not isinstance(resource, BlobResourceContents):
+ continue
+
+ mime_type = normalize_mime_type(resource.mimeType)
+ if not mime_type and getattr(resource, "uri", None):
+ mime_type = guess_mime_type(str(resource.uri))
+ if mime_type not in DOCUMENT_MIME_TYPES or mime_type == "application/pdf":
+ continue
+
+ meta = dict(getattr(resource, "meta", None) or {})
+ existing = meta.get(ANTHROPIC_FILE_ID_META_KEY)
+ if isinstance(existing, str) and existing:
+ continue
+
+ try:
+ data = base64.b64decode(resource.blob)
+ except Exception:
+ logger.warning(
+ "Unable to decode Anthropic document upload bytes",
+ data={"mime_type": mime_type, "uri": str(getattr(resource, "uri", ""))},
+ )
+ continue
+
+ filename = (
+ extract_title_from_uri(resource.uri) if getattr(resource, "uri", None) else None
+ ) or "document"
+ file_id = await self._upload_anthropic_file_bytes(
+ anthropic,
+ data=data,
+ filename=filename,
+ mime_type=mime_type,
+ )
+ if not file_id:
+ continue
+
+ meta[ANTHROPIC_FILE_ID_META_KEY] = file_id
+ resource.meta = meta
+
def _get_cache_ttl(self) -> str:
"""Get the cache TTL configuration ('5m' or '1h')."""
cache_ttl = "5m" # Default to 5 minutes
@@ -708,7 +747,9 @@ def _resolve_structured_output_mode(
json_mode = self._get_model_json_mode(model)
if json_mode == "schema":
- return "json"
+ if self.supports_direct_anthropic_beta("structured_output"):
+ return "json"
+ return "tool_use"
return "tool_use"
def _build_output_format(self, structured_model: Type[ModelT]) -> dict[str, Any]:
@@ -748,6 +789,9 @@ async def _prepare_tools(
]
def _prepare_web_tools(self, model: str) -> tuple[list[ToolParam], tuple[str, ...]]:
+ if not self.supports_web_tools():
+ return [], ()
+
anthropic_settings = self.context.config.anthropic if self.context.config else None
resolved = resolve_web_tools(
anthropic_settings,
@@ -764,6 +808,8 @@ def _prepare_web_tools(self, model: str) -> tuple[list[ToolParam], tuple[str, ..
@property
def web_tools_enabled(self) -> tuple[bool, bool]:
"""Return (search_enabled, fetch_enabled) for toolbar display."""
+ if not self.supports_web_tools():
+ return False, False
anthropic_settings = self.context.config.anthropic if self.context.config else None
resolved = resolve_web_tools(
anthropic_settings,
@@ -774,6 +820,8 @@ def web_tools_enabled(self) -> tuple[bool, bool]:
@property
def web_search_supported(self) -> bool:
+ if not self.supports_web_tools():
+ return False
model_name = self.model_name
if not model_name:
return False
@@ -789,6 +837,8 @@ def set_web_search_enabled(self, value: bool | None) -> None:
@property
def web_fetch_supported(self) -> bool:
+ if not self.supports_web_tools():
+ return False
model_name = self.model_name
if not model_name:
return False
@@ -1372,15 +1422,21 @@ def _resolve_anthropic_beta_flags(
) -> list[str]:
beta_flags: list[str] = []
adaptive_thinking = self._supports_adaptive_thinking(model)
- if structured_mode:
+ if structured_mode == "json" and self.supports_direct_anthropic_beta("structured_output"):
beta_flags.append(STRUCTURED_OUTPUT_BETA)
- if thinking_enabled and request_tools and not adaptive_thinking:
+ if (
+ thinking_enabled
+ and request_tools
+ and not adaptive_thinking
+ and self.supports_direct_anthropic_beta("interleaved_thinking")
+ ):
beta_flags.append(INTERLEAVED_THINKING_BETA)
- if self._long_context:
+ if self._long_context and self.supports_direct_anthropic_beta("long_context"):
beta_flags.append(LONG_CONTEXT_BETA)
- if request_tools:
+ if request_tools and self.supports_direct_anthropic_beta("fine_grained_tool_streaming"):
beta_flags.append(FINE_GRAINED_TOOL_STREAMING_BETA)
- beta_flags.extend(web_tool_betas)
+ if self.supports_direct_anthropic_beta("web_tools"):
+ beta_flags.extend(web_tool_betas)
return dedupe_preserve_order(beta_flags)
def _apply_anthropic_cache_plan(
@@ -1416,7 +1472,7 @@ def _apply_anthropic_cache_plan(
async def _execute_anthropic_stream(
self,
*,
- anthropic: AsyncAnthropic | AsyncAnthropicVertex,
+ anthropic: Any,
arguments: dict[str, Any],
model: str,
capture_filename: Path | None,
@@ -1665,6 +1721,15 @@ async def _anthropic_completion(
try:
anthropic = self._initialize_anthropic_client()
params = self.get_request_params(request_params)
+ messages_to_prepare: list[PromptMessageExtended] = []
+ if history:
+ messages_to_prepare.extend(history)
+ if current_extended is not None:
+ messages_to_prepare.append(current_extended)
+ if messages_to_prepare:
+ await self._prepare_anthropic_file_resources(anthropic, messages_to_prepare)
+ if current_extended is not None:
+ message_param = AnthropicConverter.convert_to_anthropic(current_extended)
messages = self._build_request_messages(
params, message_param, pre_messages, history=history
)
@@ -1754,7 +1819,9 @@ async def _anthropic_completion(
):
try:
turn_usage = TurnUsage.from_anthropic(
- response.usage, model or DEFAULT_ANTHROPIC_MODEL
+ response.usage,
+ model or DEFAULT_ANTHROPIC_MODEL,
+ provider=self.provider,
)
self._finalize_turn_usage(turn_usage)
except Exception as e:
diff --git a/src/fast_agent/llm/provider/anthropic/llm_anthropic_vertex.py b/src/fast_agent/llm/provider/anthropic/llm_anthropic_vertex.py
new file mode 100644
index 000000000..07e48879c
--- /dev/null
+++ b/src/fast_agent/llm/provider/anthropic/llm_anthropic_vertex.py
@@ -0,0 +1,85 @@
+from __future__ import annotations
+
+from typing import Any, cast
+
+from anthropic import AsyncAnthropicVertex
+
+from fast_agent.core.exceptions import ProviderKeyError
+from fast_agent.llm.provider.anthropic.llm_anthropic import AnthropicLLM
+from fast_agent.llm.provider.anthropic.vertex_config import (
+ anthropic_vertex_config,
+ detect_google_adc,
+ resolve_anthropic_vertex_location,
+ resolve_anthropic_vertex_project_id,
+)
+from fast_agent.llm.provider_types import Provider
+
+
+class AnthropicVertexLLM(AnthropicLLM):
+ @classmethod
+ def provider_identity(cls) -> Provider:
+ return Provider.ANTHROPIC_VERTEX
+
+ def _vertex_cfg(self):
+ return anthropic_vertex_config(getattr(self.context, "config", None))
+
+ def _provider_base_url(self) -> str | None:
+ return self._vertex_cfg().base_url
+
+ def _provider_api_key(self) -> str:
+ return ""
+
+ def _vertex_project_id(self) -> str:
+ project_id = resolve_anthropic_vertex_project_id(getattr(self.context, "config", None))
+ if project_id is None:
+ raise ProviderKeyError(
+ "Google Cloud project not configured",
+ "Set anthropic.vertex_ai.project_id or configure "
+ "GOOGLE_CLOUD_PROJECT before using Anthropic via Vertex.",
+ )
+ return project_id
+
+ def _vertex_location(self) -> str:
+ location = resolve_anthropic_vertex_location(getattr(self.context, "config", None))
+ if location is None:
+ raise ProviderKeyError(
+ "Google Cloud location not configured",
+ "Set anthropic.vertex_ai.location before using Anthropic via Vertex.",
+ )
+ return location
+
+ def _vertex_credentials(self) -> object:
+ adc_status = detect_google_adc()
+ if not adc_status.available or adc_status.credentials is None:
+ raise ProviderKeyError(
+ "Google ADC not found",
+ "Anthropic via Vertex uses Google Application Default Credentials.\n"
+ "Run `gcloud auth application-default login` or configure a service account.",
+ )
+ return adc_status.credentials
+
+ def _initialize_anthropic_client(self) -> AsyncAnthropicVertex:
+ return AsyncAnthropicVertex(
+ project_id=self._vertex_project_id(),
+ region=self._vertex_location(),
+ credentials=cast("Any", self._vertex_credentials()),
+ base_url=self._base_url(),
+ default_headers=self._default_headers(),
+ )
+
+ def supports_files_api(self) -> bool:
+ return False
+
+ def supports_document_uploads(self) -> bool:
+ return False
+
+ def supports_web_tools(self) -> bool:
+ return True
+
+ def supports_direct_anthropic_beta(self, feature: str) -> bool:
+ return feature in {
+ "interleaved_thinking",
+ "long_context",
+ "fine_grained_tool_streaming",
+ "web_tools",
+ }
diff --git a/src/fast_agent/llm/provider/anthropic/multipart_converter_anthropic.py b/src/fast_agent/llm/provider/anthropic/multipart_converter_anthropic.py
index 77e19437a..be3a6f82a 100644
--- a/src/fast_agent/llm/provider/anthropic/multipart_converter_anthropic.py
+++ b/src/fast_agent/llm/provider/anthropic/multipart_converter_anthropic.py
@@ -13,6 +13,9 @@
from anthropic.types.beta import (
BetaContentBlockParam as ContentBlockParam,
)
+from anthropic.types.beta import (
+ BetaFileDocumentSourceParam as FileDocumentSourceParam,
+)
from anthropic.types.beta import (
BetaImageBlockParam as ImageBlockParam,
)
@@ -59,6 +62,7 @@
EmbeddedResource,
ImageContent,
PromptMessage,
+ ResourceLink,
TextContent,
TextResourceContents,
)
@@ -77,9 +81,11 @@
get_text,
is_image_content,
is_resource_content,
+ is_resource_link,
is_text_content,
)
from fast_agent.mcp.mime_utils import (
+ DOCUMENT_MIME_TYPES,
guess_mime_type,
is_image_mime_type,
is_text_mime_type,
@@ -87,6 +93,7 @@
from fast_agent.types import PromptMessageExtended
_logger = get_logger("multipart_converter_anthropic")
+ANTHROPIC_FILE_ID_META_KEY = "fast_agent_anthropic_file_id"
# Validate and normalize replay blocks against *input* content block params.
# Using output block schemas preserves output-only fields (for example
@@ -482,6 +489,10 @@ def _convert_content_items(
# Handle embedded resource
block = AnthropicConverter._convert_embedded_resource(content_item, document_mode)
anthropic_blocks.append(block)
+ elif is_resource_link(content_item):
+ anthropic_blocks.append(
+ AnthropicConverter._convert_resource_link(content_item, document_mode)
+ )
return anthropic_blocks
@@ -513,6 +524,20 @@ def _convert_embedded_resource(
from fast_agent.mcp.resource_utils import extract_title_from_uri
title = extract_title_from_uri(uri) if uri else "resource"
+ meta = getattr(resource_content, "meta", None)
+ file_id = meta.get(ANTHROPIC_FILE_ID_META_KEY) if isinstance(meta, dict) else None
+
+ if (
+ isinstance(file_id, str)
+ and file_id
+ and mime_type in DOCUMENT_MIME_TYPES
+ and mime_type != "application/pdf"
+ ):
+ return DocumentBlockParam(
+ type="document",
+ title=title,
+ source=FileDocumentSourceParam(type="file", file_id=file_id),
+ )
# Convert based on MIME type
if mime_type == "image/svg+xml":
@@ -608,6 +633,52 @@ def _convert_embedded_resource(
f"Unsupported resource ({mime_type})", resource
)
+ @staticmethod
+ def _convert_resource_link(
+ resource: ResourceLink,
+ document_mode: bool = True,
+ ) -> ContentBlockParam:
+ """Convert ResourceLink to an Anthropic block when URL sources are supported."""
+ del document_mode
+ uri_str = str(resource.uri) if resource.uri else None
+ parsed_uri = urlparse(uri_str) if uri_str else None
+ is_url: bool = bool(parsed_uri and parsed_uri.scheme in ("http", "https"))
+ mime_type = resource.mimeType or (guess_mime_type(uri_str) if uri_str else None) or ""
+
+ from fast_agent.mcp.resource_utils import extract_title_from_uri
+
+ title = (
+ extract_title_from_uri(resource.uri)
+ if resource.uri
+ else (resource.name or "resource")
+ )
+
+ if is_url and is_image_mime_type(mime_type):
+ assert uri_str is not None
+ if not AnthropicConverter._is_supported_image_type(mime_type):
+ return TextBlockParam(
+ type="text",
+ text=f"Image with unsupported format '{mime_type}'",
+ )
+ return ImageBlockParam(
+ type="image",
+ source=URLImageSourceParam(type="url", url=uri_str),
+ )
+
+ if is_url and mime_type == "application/pdf":
+ assert uri_str is not None
+ return DocumentBlockParam(
+ type="document",
+ title=title,
+ source=URLPDFSourceParam(type="url", url=uri_str),
+ )
+
+ text = get_text(resource)
+ if text:
+ return TextBlockParam(type="text", text=text)
+
+ return TextBlockParam(type="text", text=f"[Resource link: {title}]")
+
@staticmethod
def _determine_mime_type(
resource: Union[TextResourceContents, BlobResourceContents],
diff --git a/src/fast_agent/llm/provider/anthropic/vertex_config.py b/src/fast_agent/llm/provider/anthropic/vertex_config.py
index 2ea499394..3db03e17e 100644
--- a/src/fast_agent/llm/provider/anthropic/vertex_config.py
+++ b/src/fast_agent/llm/provider/anthropic/vertex_config.py
@@ -3,9 +3,7 @@
import os
from collections.abc import Mapping
from dataclasses import dataclass
-from typing import Any, Literal
-
-AnthropicRoute = Literal["direct", "vertex"]
+from typing import Any
_VERTEX_PROJECT_ENV_VARS: tuple[str, ...] = (
"ANTHROPIC_VERTEX_PROJECT_ID",
@@ -127,18 +125,6 @@ def resolve_anthropic_vertex_location(config: Any) -> str | None:
return value
return "global"
-
-
-def resolve_anthropic_route(
- config: Any,
- *,
- explicit_route: AnthropicRoute | None = None,
-) -> AnthropicRoute:
- if explicit_route is not None:
- return explicit_route
- return "direct"
-
-
def anthropic_vertex_ready(
config: Any,
*,
diff --git a/src/fast_agent/llm/provider/openai/multipart_converter_openai.py b/src/fast_agent/llm/provider/openai/multipart_converter_openai.py
index 5c3dfa5d5..05ef8d33d 100644
--- a/src/fast_agent/llm/provider/openai/multipart_converter_openai.py
+++ b/src/fast_agent/llm/provider/openai/multipart_converter_openai.py
@@ -195,9 +195,16 @@ def _convert_content_to_message(
content_blocks.append(block)
elif is_resource_link(item):
- text = get_text(item)
- if text:
- content_blocks.append({"type": "text", "text": text})
+ uri = getattr(item, "uri", None)
+ mime_type = getattr(item, "mimeType", None)
+ if uri and mime_type and OpenAIConverter._is_supported_image_type(mime_type):
+ content_blocks.append(
+ {"type": "image_url", "image_url": {"url": str(uri)}}
+ )
+ else:
+ text = get_text(item)
+ if text:
+ content_blocks.append({"type": "text", "text": text})
else:
_logger.warning(f"Unsupported content type: {type(item)}")
diff --git a/src/fast_agent/llm/provider/openai/responses_content.py b/src/fast_agent/llm/provider/openai/responses_content.py
index 5e945acc7..cae217833 100644
--- a/src/fast_agent/llm/provider/openai/responses_content.py
+++ b/src/fast_agent/llm/provider/openai/responses_content.py
@@ -19,6 +19,7 @@
is_resource_link,
is_text_content,
)
+from fast_agent.mcp.mime_utils import is_image_mime_type, is_text_mime_type
from fast_agent.tools.apply_patch_tool import (
extract_apply_patch_input,
)
@@ -209,10 +210,6 @@ def _build_reasoning_summary_payload(
return []
return [{"type": "summary_text", "text": summary_text}]
- @staticmethod
- def _is_image_mime_type(mime_type: str | None) -> bool:
- return bool(mime_type) and mime_type.lower().startswith("image/")
-
@staticmethod
def _content_mime_type(content: ContentBlock) -> str | None:
mime_type = getattr(content, "mimeType", None)
@@ -222,20 +219,42 @@ def _content_mime_type(content: ContentBlock) -> str | None:
@staticmethod
def _content_filename(content: ContentBlock) -> str | None:
- if not isinstance(content, EmbeddedResource):
- return None
- uri = getattr(content.resource, "uri", None)
+ uri = getattr(content, "uri", None)
+ if isinstance(content, EmbeddedResource):
+ uri = getattr(content.resource, "uri", None)
if not uri:
return None
uri_str = str(uri)
filename = uri_str.rsplit("/", 1)[-1] if "/" in uri_str else uri_str
return filename or None
+ def _content_to_input_text_part(self, content: ContentBlock) -> dict[str, Any] | None:
+ if not is_resource_content(content):
+ return None
+
+ mime_type = self._content_mime_type(content) or "text/plain"
+ if not is_text_mime_type(mime_type):
+ return None
+
+ text = get_text(content)
+ if text is None:
+ return None
+
+ filename = self._content_filename(content) or "resource"
+ return {
+ "type": "input_text",
+ "text": (
+ f'<file name="{filename}">\n'
+ f"{text}\n"
+ f"</file>"
+ ),
+ }
+
def _content_to_input_part(self, content: ContentBlock) -> dict[str, Any] | None:
mime_type = self._content_mime_type(content)
data = get_image_data(content)
if data:
- if self._is_image_mime_type(mime_type):
+ if mime_type and is_image_mime_type(mime_type):
return {"type": "input_image", "image_url": f"data:{mime_type};base64,{data}"}
if mime_type:
input_part: dict[str, Any] = {"type": "input_file", "file_data": data}
@@ -248,9 +267,15 @@ def _content_to_input_part(self, content: ContentBlock) -> dict[str, Any] | None
if is_resource_content(content):
resource_uri = get_resource_uri(content)
if resource_uri:
- if self._is_image_mime_type(mime_type):
+ if mime_type and is_image_mime_type(mime_type):
return {"type": "input_image", "image_url": resource_uri}
return {"type": "input_file", "file_url": resource_uri}
+ if is_resource_link(content):
+ resource_uri = getattr(content, "uri", None)
+ if resource_uri:
+ if mime_type and is_image_mime_type(mime_type):
+ return {"type": "input_image", "image_url": str(resource_uri)}
+ return {"type": "input_file", "file_url": str(resource_uri)}
return None
@@ -325,7 +350,13 @@ def _convert_content_parts(
parts.append({"type": text_type, "text": text})
continue
- if is_image_content(item) or is_resource_content(item):
+ if is_resource_content(item):
+ text_part = self._content_to_input_text_part(item)
+ if text_part:
+ parts.append(text_part)
+ continue
+
+ if is_image_content(item) or is_resource_content(item) or is_resource_link(item):
input_part = self._content_to_input_part(item)
if input_part:
parts.append(input_part)
@@ -350,12 +381,17 @@ def _convert_content_parts(
def _content_to_image_url(self, item: ContentBlock) -> str | None:
data = get_image_data(item)
if not data:
+ if is_resource_link(item):
+ mime_type = self._content_mime_type(item)
+ uri = getattr(item, "uri", None)
+ if uri and mime_type and is_image_mime_type(mime_type):
+ return str(uri)
return None
mime_type = getattr(item, "mimeType", None)
if not mime_type and is_resource_content(item):
resource = getattr(item, "resource", None)
mime_type = getattr(resource, "mimeType", None) if resource else None
- if not self._is_image_mime_type(mime_type):
+ if not mime_type or not is_image_mime_type(mime_type):
return None
return f"data:{mime_type};base64,{data}"
@@ -454,15 +490,26 @@ def _tool_result_to_text(self, result: Any) -> str:
if text is not None:
chunks.append(text)
continue
- if is_image_content(item) or is_resource_content(item):
+ if is_image_content(item) or is_resource_content(item) or is_resource_link(item):
image_url = self._content_to_image_url(item)
if image_url:
chunks.append(f"<image: {image_url}>")
continue
+ input_part = self._content_to_input_part(item)
+ if input_part and input_part.get("type") == "input_file":
+ file_url = input_part.get("file_url")
+ if isinstance(file_url, str):
+ chunks.append(f"[Resource]({file_url})")
+ continue
resource_uri = get_resource_uri(item)
if resource_uri:
chunks.append(f"[Resource]({resource_uri})")
continue
+ if is_resource_link(item):
+ uri = getattr(item, "uri", None)
+ if uri:
+ chunks.append(f"[Resource]({uri})")
+ continue
chunks.append(f"[Unsupported content: {type(item).__name__}]")
return "\n".join(chunk for chunk in chunks if chunk)
@@ -470,7 +517,7 @@ def _tool_result_to_input_parts(self, result: Any) -> list[dict[str, Any]]:
contents = getattr(result, "content", None) or []
parts: list[dict[str, Any]] = []
for item in contents:
- if is_image_content(item) or is_resource_content(item):
+ if is_image_content(item) or is_resource_content(item) or is_resource_link(item):
input_part = self._content_to_input_part(item)
if input_part:
parts.append(input_part)
diff --git a/src/fast_agent/llm/provider_key_manager.py b/src/fast_agent/llm/provider_key_manager.py
index 46e2377e6..1e965f6c7 100644
--- a/src/fast_agent/llm/provider_key_manager.py
+++ b/src/fast_agent/llm/provider_key_manager.py
@@ -9,7 +9,6 @@
from pydantic import BaseModel
from fast_agent.core.exceptions import ProviderKeyError
-from fast_agent.llm.provider.anthropic.vertex_config import AnthropicRoute, resolve_anthropic_route
from fast_agent.utils.huggingface_hub import get_huggingface_hub_token
PROVIDER_ENVIRONMENT_MAP: dict[str, str] = {
@@ -30,6 +29,7 @@
"responses": ("openai",),
}
API_KEY_HINT_TEXT = "<your-api-key-here>"
+API_KEYLESS_PROVIDERS: frozenset[str] = frozenset({"anthropic-vertex"})
class ProviderKeyManager:
@@ -41,10 +41,15 @@ class ProviderKeyManager:
@staticmethod
def get_env_var(provider_name: str) -> str | None:
- return os.getenv(ProviderKeyManager.get_env_key_name(provider_name))
+ env_key_name = ProviderKeyManager.get_env_key_name(provider_name)
+ if not env_key_name:
+ return None
+ return os.getenv(env_key_name)
@staticmethod
- def get_env_key_name(provider_name: str) -> str:
+ def get_env_key_name(provider_name: str) -> str | None:
+ if provider_name.lower() in API_KEYLESS_PROVIDERS:
+ return None
return PROVIDER_ENVIRONMENT_MAP.get(provider_name, f"{provider_name.upper()}_API_KEY")
@staticmethod
@@ -78,8 +83,6 @@ def _get_provider_config_keys(provider_name: str) -> list[str]:
def get_api_key(
provider_name: str,
config: Any,
- *,
- route_hint: str | None = None,
) -> str:
"""
Gets the API key for the specified provider.
@@ -123,19 +126,8 @@ def get_api_key(
except Exception:
pass
- if provider_name == "anthropic":
- try:
- explicit_route: AnthropicRoute | None
- if route_hint == "direct":
- explicit_route = "direct"
- elif route_hint == "vertex":
- explicit_route = "vertex"
- else:
- explicit_route = None
- if resolve_anthropic_route(config, explicit_route=explicit_route) == "vertex":
- return ""
- except Exception:
- pass
+ if provider_name == "anthropic-vertex":
+ return ""
api_key = ProviderKeyManager.get_config_file_key(provider_name, config)
if not api_key:
@@ -173,11 +165,16 @@ def get_api_key(
f"'{provider_name}' is not a valid provider name.",
)
+ env_key_name = ProviderKeyManager.get_env_key_name(provider_name)
+ env_hint = (
+ f" or set the {env_key_name} environment variable."
+ if env_key_name
+ else "."
+ )
raise ProviderKeyError(
f"{display_name} API key not configured",
f"The {display_name} API key is required but not set.\n"
- f"Add it to your configuration file under {provider_name}.api_key "
- f"or set the {ProviderKeyManager.get_env_key_name(provider_name)} environment variable.",
+ f"Add it to your configuration file under {provider_name}.api_key{env_hint}",
)
return api_key
diff --git a/src/fast_agent/llm/provider_types.py b/src/fast_agent/llm/provider_types.py
index 84ee871e6..4bedb44cd 100644
--- a/src/fast_agent/llm/provider_types.py
+++ b/src/fast_agent/llm/provider_types.py
@@ -22,6 +22,7 @@ def config_name(self) -> str:
return self._value_
ANTHROPIC = ("anthropic", "Anthropic")
+ ANTHROPIC_VERTEX = ("anthropic-vertex", "Anthropic (Vertex)")
DEEPSEEK = ("deepseek", "Deepseek")
FAST_AGENT = ("fast-agent", "fast-agent-internal")
GENERIC = ("generic", "Generic")
diff --git a/src/fast_agent/llm/resolved_model.py b/src/fast_agent/llm/resolved_model.py
index 5490002a6..f3d1f5495 100644
--- a/src/fast_agent/llm/resolved_model.py
+++ b/src/fast_agent/llm/resolved_model.py
@@ -211,8 +211,6 @@ def build_llm_kwargs(self) -> dict[str, object]:
config = self.model_config
kwargs: dict[str, object] = {}
- if config.via is not None and self.provider == Provider.ANTHROPIC:
- kwargs["via"] = config.via
if config.reasoning_effort:
kwargs["reasoning_effort"] = config.reasoning_effort
if config.text_verbosity:
@@ -300,12 +298,4 @@ def resolve_base_model_params(
model_name: str,
) -> ModelParameters | None:
"""Resolve base model metadata without preferring overlay runtime mutations."""
- normalized = model_name.strip().lower()
- if provider == Provider.HUGGINGFACE and ":" in normalized:
- normalized = normalized.rsplit(":", 1)[0]
-
- static_params = ModelDatabase.MODELS.get(normalized)
- if static_params is not None:
- return static_params
-
- return ModelDatabase._RUNTIME_MODEL_PARAMS.get(normalized)
+ return ModelDatabase.get_model_params(model_name, provider=provider)
diff --git a/src/fast_agent/llm/usage_tracking.py b/src/fast_agent/llm/usage_tracking.py
index d2689d171..c00ccb62c 100644
--- a/src/fast_agent/llm/usage_tracking.py
+++ b/src/fast_agent/llm/usage_tracking.py
@@ -29,6 +29,8 @@
from fast_agent.llm.model_database import ModelDatabase
from fast_agent.llm.provider_types import Provider
+_ANTHROPIC_USAGE_PROVIDERS = {Provider.ANTHROPIC, Provider.ANTHROPIC_VERTEX}
+
# Fast-agent specific usage type for synthetic providers
class FastAgentUsage(BaseModel):
@@ -107,7 +109,7 @@ def effective_input_tokens(self) -> int:
"""Input tokens actually processed (new tokens, not from cache)"""
# For Anthropic: input_tokens already excludes cached content
# For other providers: subtract cache hits from input_tokens
- if self.provider == Provider.ANTHROPIC:
+ if self.provider in _ANTHROPIC_USAGE_PROVIDERS:
return self.input_tokens
else:
return max(0, self.input_tokens - self.cache_usage.cache_hit_tokens)
@@ -117,7 +119,7 @@ def effective_input_tokens(self) -> int:
def display_input_tokens(self) -> int:
"""Input tokens to display for 'Last turn' (total submitted tokens)"""
# For Anthropic: input_tokens excludes cache, so add cache tokens
- if self.provider == Provider.ANTHROPIC:
+ if self.provider in _ANTHROPIC_USAGE_PROVIDERS:
return (
self.input_tokens
+ self.cache_usage.cache_read_tokens
@@ -133,7 +135,13 @@ def set_tool_calls(self, count: int) -> None:
object.__setattr__(self, "tool_calls", count)
@classmethod
- def from_anthropic(cls, usage: AnthropicUsage, model: str) -> "TurnUsage":
+ def from_anthropic(
+ cls,
+ usage: AnthropicUsage,
+ model: str,
+ *,
+ provider: Provider = Provider.ANTHROPIC,
+ ) -> "TurnUsage":
# Extract cache tokens with proper null handling
cache_creation_tokens = getattr(usage, "cache_creation_input_tokens", 0) or 0
cache_read_tokens = getattr(usage, "cache_read_input_tokens", 0) or 0
@@ -148,7 +156,7 @@ def from_anthropic(cls, usage: AnthropicUsage, model: str) -> "TurnUsage":
thinking_tokens = getattr(usage, "thinking_tokens", 0) or 0
return cls(
- provider=Provider.ANTHROPIC,
+ provider=provider,
model=model,
input_tokens=usage.input_tokens,
output_tokens=usage.output_tokens,
diff --git a/src/fast_agent/mcp/helpers/content_helpers.py b/src/fast_agent/mcp/helpers/content_helpers.py
index 215c975ec..ab374bf3e 100644
--- a/src/fast_agent/mcp/helpers/content_helpers.py
+++ b/src/fast_agent/mcp/helpers/content_helpers.py
@@ -133,9 +133,9 @@ def text_content(text: str) -> TextContent:
def _infer_mime_type(url: str, default: str = "application/octet-stream") -> str:
"""Infer MIME type from URL using the mimetypes database."""
- from urllib.parse import urlparse
+ from urllib.parse import parse_qs, urlparse
- from fast_agent.mcp.mime_utils import guess_mime_type
+ from fast_agent.mcp.mime_utils import guess_mime_type, normalize_mime_type
# Special case: YouTube URLs (Google has native support)
parsed = urlparse(url.lower())
@@ -145,8 +145,18 @@ def _infer_mime_type(url: str, default: str = "application/octet-stream") -> str
mime = guess_mime_type(url)
# guess_mime_type returns "application/octet-stream" for unknown types
- if mime == "application/octet-stream":
- return default
+ if mime != "application/octet-stream":
+ return mime
+
+ query_args = parse_qs(parsed.query)
+ for key in ("format", "fm", "ext", "mime"):
+ values = query_args.get(key)
+ if not values:
+ continue
+ normalized = normalize_mime_type(values[0])
+ if normalized:
+ return normalized
+
return mime
diff --git a/src/fast_agent/mcp/mime_utils.py b/src/fast_agent/mcp/mime_utils.py
index 09bc5cd59..30570b924 100644
--- a/src/fast_agent/mcp/mime_utils.py
+++ b/src/fast_agent/mcp/mime_utils.py
@@ -8,6 +8,18 @@
# Extend with additional types that might be missing
mimetypes.add_type("text/x-python", ".py")
mimetypes.add_type("image/webp", ".webp")
+mimetypes.add_type("application/msword", ".doc")
+mimetypes.add_type("application/vnd.ms-excel", ".xls")
+mimetypes.add_type("application/vnd.ms-powerpoint", ".ppt")
+mimetypes.add_type(
+ "application/vnd.openxmlformats-officedocument.wordprocessingml.document", ".docx"
+)
+mimetypes.add_type(
+ "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet", ".xlsx"
+)
+mimetypes.add_type(
+ "application/vnd.openxmlformats-officedocument.presentationml.presentation", ".pptx"
+)
# Known text-based MIME types not starting with "text/"
TEXT_MIME_TYPES = {
@@ -30,6 +42,16 @@
# Common text-based MIME type patterns
TEXT_MIME_PATTERNS = ("+xml", "+json", "+yaml", "+text")
+DOCUMENT_MIME_TYPES = (
+ "application/pdf",
+ "application/msword",
+ "application/vnd.ms-excel",
+ "application/vnd.ms-powerpoint",
+ "application/vnd.openxmlformats-officedocument.wordprocessingml.document",
+ "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet",
+ "application/vnd.openxmlformats-officedocument.presentationml.presentation",
+)
+
def guess_mime_type(file_path: str) -> str:
"""
@@ -69,6 +91,11 @@ def is_image_mime_type(mime_type: str) -> bool:
return mime_type.startswith("image/") and mime_type != "image/svg+xml"
+def is_document_mime_type(mime_type: str) -> bool:
+ """Check if a MIME type represents a document attachment."""
+ return mime_type in DOCUMENT_MIME_TYPES
+
+
# Common reference mapping and normalization helpers
_MIME_ALIASES = {
# Friendly or non-standard labels
diff --git a/src/fast_agent/ui/attachment_indicator.py b/src/fast_agent/ui/attachment_indicator.py
index 4296b2159..fabf4324d 100644
--- a/src/fast_agent/ui/attachment_indicator.py
+++ b/src/fast_agent/ui/attachment_indicator.py
@@ -4,10 +4,15 @@
from dataclasses import dataclass
from pathlib import Path
+from typing import TYPE_CHECKING
from fast_agent.llm.model_database import ModelDatabase
+from fast_agent.mcp.helpers.content_helpers import resource_link
from fast_agent.mcp.mime_utils import guess_mime_type
+if TYPE_CHECKING:
+ from fast_agent.llm.provider_types import Provider
+
ATTACHMENT_GLYPH = "▲"
ATTACHMENT_SUPPORTED_COLOR = "ansigreen"
ATTACHMENT_QUESTIONABLE_COLOR = "ansired"
@@ -25,20 +30,38 @@ def summarize_draft_attachments(
text: str,
*,
model_name: str | None,
+ provider: Provider | None = None,
) -> DraftAttachmentSummary | None:
- from fast_agent.ui.prompt.attachment_tokens import FILE_MENTION_SERVER
+ from fast_agent.ui.prompt.attachment_tokens import FILE_MENTION_SERVER, URL_MENTION_SERVER
from fast_agent.ui.prompt.resource_mentions import parse_mentions
parsed = parse_mentions(text)
- local_mentions = [
- mention for mention in parsed.mentions if mention.server_name == FILE_MENTION_SERVER
+ attachment_mentions = [
+ mention
+ for mention in parsed.mentions
+ if mention.server_name in {FILE_MENTION_SERVER, URL_MENTION_SERVER}
]
- if not local_mentions:
+ if not attachment_mentions:
return None
mime_types: list[str] = []
any_questionable = False
- for mention in local_mentions:
+ for mention in attachment_mentions:
+ if mention.server_name == URL_MENTION_SERVER:
+ mime_type = resource_link(mention.resource_uri).mimeType or "application/octet-stream"
+ mime_types.append(mime_type)
+ if mime_type == "application/octet-stream":
+ any_questionable = True
+ continue
+ if model_name and not ModelDatabase.supports_mime(
+ model_name,
+ mime_type,
+ provider=provider,
+ resource_source="link",
+ ):
+ any_questionable = True
+ continue
+
path = Path(mention.resource_uri)
if not path.exists():
any_questionable = True
@@ -54,11 +77,16 @@ def summarize_draft_attachments(
if mime_type == "application/octet-stream":
any_questionable = True
continue
- if model_name and not ModelDatabase.supports_mime(model_name, mime_type):
+ if model_name and not ModelDatabase.supports_mime(
+ model_name,
+ mime_type,
+ provider=provider,
+ resource_source="embedded",
+ ):
any_questionable = True
return DraftAttachmentSummary(
- count=len(local_mentions),
+ count=len(attachment_mentions),
mime_types=tuple(mime_types),
any_questionable=any_questionable,
)
diff --git a/src/fast_agent/ui/context_usage_display.py b/src/fast_agent/ui/context_usage_display.py
new file mode 100644
index 000000000..109684949
--- /dev/null
+++ b/src/fast_agent/ui/context_usage_display.py
@@ -0,0 +1,41 @@
+"""Shared compact context-usage helpers for UI surfaces."""
+
+from __future__ import annotations
+
+
+def resolve_context_usage_percent(
+ *,
+ context_pct: float | None,
+ usage_accumulator: object | None,
+ fallback_window_size: int | float | None = None,
+) -> float | None:
+ """Resolve context usage percent from an accumulator when needed."""
+ if context_pct is not None or usage_accumulator is None:
+ return context_pct
+
+ try:
+ window_size = getattr(usage_accumulator, "context_window_size", None)
+ if not isinstance(window_size, (int, float)) or window_size <= 0:
+ window_size = fallback_window_size
+ if not isinstance(window_size, (int, float)) or window_size <= 0:
+ return None
+
+ current_context_tokens = getattr(usage_accumulator, "current_context_tokens", None)
+ if not isinstance(current_context_tokens, (int, float)):
+ return None
+ return (current_context_tokens / window_size) * 100
+ except Exception:
+ return None
+
+
+def format_compact_context_usage_percent(pct: float | None) -> str | None:
+ """Format context usage with stable width for compact displays."""
+ if pct is None:
+ return None
+
+ safe_pct = max(pct, 0.0)
+ if safe_pct >= 100.0:
+ return "100%+"
+ if safe_pct < 10.0:
+ return f"{min(safe_pct, 9.99):.2f}%"
+ return f"{min(safe_pct, 99.9):.1f}%"
diff --git a/src/fast_agent/ui/interactive/command_dispatch.py b/src/fast_agent/ui/interactive/command_dispatch.py
index 3bba73274..2ab9e252e 100644
--- a/src/fast_agent/ui/interactive/command_dispatch.py
+++ b/src/fast_agent/ui/interactive/command_dispatch.py
@@ -73,7 +73,9 @@
from fast_agent.ui.prompt.attachment_tokens import (
append_attachment_tokens,
build_local_attachment_token,
+ build_remote_attachment_token,
normalize_local_attachment_reference,
+ normalize_remote_attachment_reference,
strip_local_attachment_tokens,
)
@@ -181,7 +183,7 @@ async def _dispatch_local_ui_payload(
if not resolved_paths:
context = build_command_context(prompt_provider, agent_name)
prompted_path = await context.io.prompt_text(
- "Attach file path:",
+ "Attach file path or URL:",
allow_empty=False,
)
if not prompted_path:
@@ -192,12 +194,17 @@ async def _dispatch_local_ui_payload(
tokens: list[str] = []
for raw_path in resolved_paths:
try:
- attachment_path = normalize_local_attachment_reference(raw_path)
- if not attachment_path.exists():
- raise FileNotFoundError(raw_path)
- if not attachment_path.is_file():
- raise IsADirectoryError(raw_path)
- token = build_local_attachment_token(attachment_path)
+ if raw_path.strip().lower().startswith(("http://", "https://")):
+ token = build_remote_attachment_token(
+ normalize_remote_attachment_reference(raw_path)
+ )
+ else:
+ attachment_path = normalize_local_attachment_reference(raw_path)
+ if not attachment_path.exists():
+ raise FileNotFoundError(raw_path)
+ if not attachment_path.is_file():
+ raise IsADirectoryError(raw_path)
+ token = build_local_attachment_token(attachment_path)
except Exception as exc:
rich_print(f"[red]Unable to attach '{raw_path}': {exc}[/red]")
continue
diff --git a/src/fast_agent/ui/model_picker_common.py b/src/fast_agent/ui/model_picker_common.py
index 0ebc86506..951286fb3 100644
--- a/src/fast_agent/ui/model_picker_common.py
+++ b/src/fast_agent/ui/model_picker_common.py
@@ -31,6 +31,7 @@
Provider.OPENRESPONSES,
Provider.CODEX_RESPONSES,
Provider.ANTHROPIC,
+ Provider.ANTHROPIC_VERTEX,
Provider.HUGGINGFACE,
Provider.OPENAI,
Provider.GENERIC,
@@ -113,6 +114,10 @@ class ModelPickerSnapshot:
def _provider_is_active(provider: Provider, config_payload: dict[str, Any]) -> bool:
+ if provider == Provider.ANTHROPIC_VERTEX:
+ ready, _ = anthropic_vertex_ready(config_payload)
+ return ready
+
config_key = ProviderKeyManager.get_config_file_key(provider.config_name, config_payload)
if config_key:
return True
@@ -150,11 +155,6 @@ def _provider_is_active(provider: Provider, config_payload: dict[str, Any]) -> b
return False
-
-def _force_anthropic_vertex_route(model_spec: str) -> str:
- return _update_query_param(model_spec, key="via", value="vertex")
-
-
def _catalog_options_from_entries(
entries: tuple[CatalogModelEntry, ...],
*,
@@ -227,47 +227,10 @@ def model_options_for_option(
provider = option.provider
assert provider is not None
- spec_transform = (
- _force_anthropic_vertex_route
- if option.option_key == ANTHROPIC_VERTEX_PROVIDER_KEY
- else None
- )
return _catalog_options_from_entries(
option.curated_entries,
provider=provider,
source=source,
- spec_transform=spec_transform,
- )
-
-
-def _anthropic_vertex_provider_option(
- *,
- entries: tuple[CatalogModelEntry, ...],
- config_payload: dict[str, Any],
-) -> ProviderOption | None:
- if not anthropic_vertex_intent(config_payload):
- return None
-
- vertex_ready, disabled_reason = anthropic_vertex_ready(config_payload)
- rewritten_entries = tuple(
- CatalogModelEntry(
- alias=entry.alias,
- model=_force_anthropic_vertex_route(entry.model),
- current=entry.current,
- fast=entry.fast,
- local=entry.local,
- display_label=entry.display_label,
- description=entry.description,
- )
- for entry in entries
- )
- return ProviderOption(
- provider=Provider.ANTHROPIC,
- active=vertex_ready,
- curated_entries=rewritten_entries,
- key=ANTHROPIC_VERTEX_PROVIDER_KEY,
- display_name="Anthropic (Vertex)",
- disabled_reason=disabled_reason,
)
@@ -320,6 +283,8 @@ def build_snapshot(
)
for provider in PICKER_PROVIDER_ORDER:
+ if provider == Provider.ANTHROPIC_VERTEX and not anthropic_vertex_intent(config_payload):
+ continue
entries = tuple(
entry
for entry in ModelSelectionCatalog.list_entries(
@@ -338,15 +303,13 @@ def build_snapshot(
provider=provider,
active=provider in active_providers,
curated_entries=entries,
+ disabled_reason=(
+ anthropic_vertex_ready(config_payload)[1]
+ if provider == Provider.ANTHROPIC_VERTEX and provider not in active_providers
+ else None
+ ),
)
)
- if provider == Provider.ANTHROPIC:
- vertex_option = _anthropic_vertex_provider_option(
- entries=entries,
- config_payload=config_payload,
- )
- if vertex_option is not None:
- providers.append(vertex_option)
return ModelPickerSnapshot(providers=tuple(providers), config_payload=config_payload)
@@ -460,6 +423,26 @@ def normalize_generic_model_spec(raw_model: str) -> str | None:
return f"generic.{candidate}"
+def infer_initial_picker_provider(model_spec: str | None) -> str | None:
+ if model_spec is None:
+ return None
+
+ normalized = model_spec.strip()
+ if not normalized:
+ return None
+
+ try:
+ parsed = ModelFactory.parse_model_string(
+ normalized,
+ presets=ModelFactory.MODEL_PRESETS,
+ )
+ except Exception:
+ return None
+
+ config_name = parsed.provider.config_name.strip()
+ return config_name or None
+
+
def provider_activation_action(
snapshot: ModelPickerSnapshot,
provider: Provider,
@@ -481,7 +464,13 @@ def model_identity(model_spec: str) -> tuple[Provider, str] | None:
def _static_provider_models(provider: Provider) -> list[str]:
models: list[str] = []
for model in ModelDatabase.list_models():
- if ModelDatabase.get_default_provider(model) != provider:
+ default_provider = ModelDatabase.get_default_provider(model)
+ if provider == Provider.ANTHROPIC_VERTEX:
+ if default_provider != Provider.ANTHROPIC:
+ continue
+ models.append(f"{provider.config_name}.{model}")
+ continue
+ if default_provider != provider:
continue
models.append(f"{provider.config_name}.{model}")
return models
diff --git a/src/fast_agent/ui/prompt/attachment_tokens.py b/src/fast_agent/ui/prompt/attachment_tokens.py
index 36a4978a1..132ec8166 100644
--- a/src/fast_agent/ui/prompt/attachment_tokens.py
+++ b/src/fast_agent/ui/prompt/attachment_tokens.py
@@ -1,4 +1,4 @@
-"""Helpers for inline local attachment tokens."""
+"""Helpers for inline attachment tokens."""
from __future__ import annotations
@@ -9,8 +9,9 @@
from urllib.request import url2pathname
FILE_MENTION_SERVER = "file"
-_LOCAL_ATTACHMENT_TOKEN_RE = re.compile(r"(?P<lead>^|\s)(?P<token>\^file:[^\s]+)")
-_LOCAL_ATTACHMENT_BODY_RE = r"\^file:[^\s]+"
+URL_MENTION_SERVER = "url"
+_ATTACHMENT_TOKEN_RE = re.compile(r"(?P<lead>^|\s)(?P<token>\^(?:file|url):[^\s]+)")
+_ATTACHMENT_BODY_RE = r"\^(?:file|url):[^\s]+"
def normalize_local_attachment_reference(
@@ -45,6 +46,21 @@ def normalize_local_attachment_reference(
return resolved_path.resolve(strict=False)
+def normalize_remote_attachment_reference(reference: str) -> str:
+ """Normalize an HTTP(S) attachment reference into a remote URL."""
+ raw_value = reference.strip()
+ if not raw_value:
+ raise ValueError("Attachment URL is empty")
+
+ parsed = urlparse(raw_value)
+ scheme = parsed.scheme.lower()
+ if scheme not in ("http", "https"):
+ raise ValueError(f"Unsupported attachment URI scheme: {parsed.scheme or ''}")
+ if not parsed.netloc:
+ raise ValueError("Attachment URL is missing host")
+ return raw_value
+
+
def encode_local_attachment_reference(path_text: str) -> str:
"""Percent-encode a token path while keeping it compact and path-like."""
normalized = path_text.replace("\\", "/")
@@ -59,32 +75,38 @@ def build_local_attachment_token(path: str | Path) -> str:
return f"^{FILE_MENTION_SERVER}:{encode_local_attachment_reference(normalized.as_posix())}"
+def build_remote_attachment_token(url: str) -> str:
+ """Build a canonical ``^url:...`` token for a remote URL."""
+ normalized = normalize_remote_attachment_reference(url)
+ return f"^{URL_MENTION_SERVER}:{quote(normalized, safe='/._~-:?&=#%')}"
+
+
def strip_local_attachment_tokens(text: str) -> str:
- """Remove inline local attachment tokens while preserving other text."""
+ """Remove inline attachment tokens while preserving other text."""
stripped = re.sub(
- rf"(^|\n)[ \t]*{_LOCAL_ATTACHMENT_BODY_RE}[ \t]*(?:\n|$)",
+ rf"(^|\n)[ \t]*{_ATTACHMENT_BODY_RE}[ \t]*(?:\n|$)",
lambda match: match.group(1),
text,
flags=re.MULTILINE,
)
stripped = re.sub(
-        rf"(?P<lead>[ \t]){_LOCAL_ATTACHMENT_BODY_RE}(?P<trail>[ \t])",
+        rf"(?P<lead>[ \t]){_ATTACHMENT_BODY_RE}(?P<trail>[ \t])",
         r"\g<lead>",
stripped,
)
stripped = re.sub(
-        rf"(?P<lead>[ \t]+){_LOCAL_ATTACHMENT_BODY_RE}(?=$|\n)",
+        rf"(?P<lead>[ \t]+){_ATTACHMENT_BODY_RE}(?=$|\n)",
"",
stripped,
)
stripped = re.sub(
-        rf"(?:(?<=^)|(?<=\s)){_LOCAL_ATTACHMENT_BODY_RE}(?P<trail>[ \t]+)",
+        rf"(?:(?<=^)|(?<=\s)){_ATTACHMENT_BODY_RE}(?P<trail>[ \t]+)",
"",
stripped,
flags=re.MULTILINE,
)
stripped = re.sub(
- rf"(?:(?<=^)|(?<=\s)){_LOCAL_ATTACHMENT_BODY_RE}",
+ rf"(?:(?<=^)|(?<=\s)){_ATTACHMENT_BODY_RE}",
"",
stripped,
flags=re.MULTILINE,
diff --git a/src/fast_agent/ui/prompt/command_help.py b/src/fast_agent/ui/prompt/command_help.py
index c2934487f..8fd507d89 100644
--- a/src/fast_agent/ui/prompt/command_help.py
+++ b/src/fast_agent/ui/prompt/command_help.py
@@ -9,7 +9,7 @@ def render_help_lines(*, show_webclear_help: bool) -> list[str]:
" /help - Show this help",
" /system - Show the current system prompt",
" /prompt - Load a Prompt File or use MCP Prompt",
- " /attach [path ...|clear] - Stage or clear local ^file: attachments",
+ " /attach [path|url ...|clear] - Stage or clear file/^file: or URL/^url: attachments",
" /usage - Show current usage statistics",
" /skills - List local skills for the manager directory",
" /skills available - Browse marketplace skills before installing",
@@ -95,7 +95,7 @@ def render_help_lines(*, show_webclear_help: bool) -> list[str]:
" F7 - Cycle verbosity (when supported)",
" F8 - Toggle web search (when supported)",
" F9 - Toggle web fetch (when supported)",
- " F10 - Remove staged local ^file: attachments from the draft",
+ " F10 - Clear staged ^file:/^url: attachments",
" Ctrl+T - Toggle multiline mode",
" Ctrl+E - Edit in external editor",
" Ctrl+Y - Copy last assistant response to clipboard",
diff --git a/src/fast_agent/ui/prompt/completer.py b/src/fast_agent/ui/prompt/completer.py
index 9228f50fd..676fccab1 100644
--- a/src/fast_agent/ui/prompt/completer.py
+++ b/src/fast_agent/ui/prompt/completer.py
@@ -24,6 +24,7 @@
from fast_agent.llm.text_verbosity import available_text_verbosity_values
from fast_agent.ui.prompt.attachment_tokens import (
FILE_MENTION_SERVER,
+ URL_MENTION_SERVER,
encode_local_attachment_reference,
)
from fast_agent.ui.prompt.resource_mentions import template_argument_names
@@ -95,7 +96,7 @@ def __init__(
"(/cards, /cards add, /cards remove, /cards update, /cards publish, /cards registry)"
),
"prompt": "Load a Prompt File or use MCP Prompt",
- "attach": "Stage local file attachment token(s) for the next prompt",
+ "attach": "Stage file path or remote URL attachment token(s) for the next prompt",
"system": "Show the current system prompt",
"usage": "Show current usage statistics",
"markdown": "Show last assistant message without markdown formatting",
@@ -1316,14 +1317,24 @@ def _mention_completions(self, text_before_cursor: str) -> list[Completion] | No
return list(cached)
server_names = self._run_async_completion(self._list_connected_resource_servers()) or []
- server_names = list(dict.fromkeys([*server_names, FILE_MENTION_SERVER]))
+ server_names = list(
+ dict.fromkeys([*server_names, FILE_MENTION_SERVER, URL_MENTION_SERVER])
+ )
partial = context.partial.lower()
completions = [
Completion(
f"{server_name}:",
start_position=-len(context.partial),
display=server_name,
- display_meta="connected mcp server (resources)",
+ display_meta=(
+ "local file attachment"
+ if server_name == FILE_MENTION_SERVER
+ else (
+ "remote URL attachment"
+ if server_name == URL_MENTION_SERVER
+ else "connected mcp server (resources)"
+ )
+ ),
)
for server_name in server_names
if not partial or server_name.lower().startswith(partial)
@@ -1337,6 +1348,18 @@ def _mention_completions(self, text_before_cursor: str) -> list[Completion] | No
if context.kind == "resource":
if context.server_name == FILE_MENTION_SERVER:
return self._complete_local_attachment_paths(context.partial)
+ if context.server_name == URL_MENTION_SERVER:
+ prefix = context.partial.lower()
+ return [
+ Completion(
+ scheme,
+ start_position=-len(context.partial),
+ display=scheme,
+ display_meta="remote URL attachment",
+ )
+ for scheme in ("https://", "http://")
+ if not prefix or scheme.startswith(prefix)
+ ]
cache_key = (
"resource",
diff --git a/src/fast_agent/ui/prompt/completion_sources.py b/src/fast_agent/ui/prompt/completion_sources.py
index db43d6255..eb33ab183 100644
--- a/src/fast_agent/ui/prompt/completion_sources.py
+++ b/src/fast_agent/ui/prompt/completion_sources.py
@@ -2,13 +2,13 @@
from __future__ import annotations
-import shlex
from typing import TYPE_CHECKING
from prompt_toolkit.completion import Completion
from fast_agent.agents.agent_types import AgentType
from fast_agent.llm.model_selection import ModelSelectionCatalog
+from fast_agent.utils.commandline import join_commandline, split_commandline
if TYPE_CHECKING:
from fast_agent.ui.prompt.completer import AgentCompleter
@@ -200,14 +200,20 @@ def _attach_command_completions(
"clear",
start_position=0,
display="clear",
- display_meta="remove staged local attachments from the next draft buffer",
+ display_meta="remove staged file or URL attachments from the next draft buffer",
+ ),
+ Completion(
+ "https://",
+ start_position=0,
+ display="https://",
+ display_meta="stage a remote URL attachment for the next prompt",
)
]
results.extend(list(completer._complete_shell_paths("", 0)))
return results
try:
- parts = shlex.split(remainder)
+ parts = split_commandline(remainder)
except ValueError:
return []
@@ -225,13 +231,20 @@ def _attach_command_completions(
"clear",
start_position=-len(partial),
display="clear",
- display_meta="remove staged local attachments from the next draft buffer",
+ display_meta="remove staged file or URL attachments from the next draft buffer",
+ )
+ )
+ if token_count <= 1 and "https://".startswith(partial.lower()):
+ results.append(
+ Completion(
+ "https://",
+ start_position=-len(partial),
+ display="https://",
+ display_meta="stage a remote URL attachment for the next prompt",
)
)
for completion in completer._complete_shell_paths(partial, len(partial)):
- completion_text = completion.text
- if any(char.isspace() for char in completion_text):
- completion_text = shlex.quote(completion_text)
+ completion_text = join_commandline([completion.text])
results.append(
Completion(
completion_text,
diff --git a/src/fast_agent/ui/prompt/input.py b/src/fast_agent/ui/prompt/input.py
index 707efb109..486ab6af9 100644
--- a/src/fast_agent/ui/prompt/input.py
+++ b/src/fast_agent/ui/prompt/input.py
@@ -710,7 +710,7 @@ def _show_input_help_banner(
rich_print(
"""[dim]Use '/' for commands, '!' for shell. '#' to query, '@' to switch agents\n"""
"""CTRL+T multiline, CTRL+Y copy last message, CTRL+E external editor.\n"""
- """CTRL+Space or Tab for path completion. Use /attach, `^file:`, or F10 for attachments.[/dim]"""
+ """CTRL+Space or Tab for path completion. Use /attach, `^file:`, or `^url:` for attachments. F10 to clear.[/dim]"""
)
diff --git a/src/fast_agent/ui/prompt/input_toolbar.py b/src/fast_agent/ui/prompt/input_toolbar.py
index 76a6a3c0e..66af4ab23 100644
--- a/src/fast_agent/ui/prompt/input_toolbar.py
+++ b/src/fast_agent/ui/prompt/input_toolbar.py
@@ -18,6 +18,7 @@
render_attachment_indicator,
summarize_draft_attachments,
)
+from fast_agent.ui.context_usage_display import resolve_context_usage_percent
from fast_agent.ui.model_chip_display import render_model_chip
from fast_agent.ui.prompt.alert_flags import _resolve_alert_flags_from_history
from fast_agent.ui.prompt.toolbar import (
@@ -108,6 +109,7 @@ def render_input_toolbar(
mode_style, mode_text = _resolve_toolbar_mode(multiline_mode)
shortcut_text = ""
agent_state = _resolve_toolbar_agent_state(agent_name, agent_provider)
+ active_llm = resolve_active_llm(agent_provider, agent_name)
agent_identity_segment = _format_toolbar_agent_identity(
agent_name,
toolbar_color,
@@ -116,6 +118,7 @@ def render_input_toolbar(
attachment_summary = summarize_draft_attachments(
current_input_text,
model_name=agent_state.model_name,
+ provider=getattr(active_llm, "provider", None),
)
middle = _build_middle_segment(agent_state, shortcut_text, attachment_summary=attachment_summary)
notification_segment = _build_notification_segment()
@@ -331,17 +334,12 @@ def _resolve_context_pct(
return context_pct
info = _resolve_model_info(model_name, llm)
- try:
- window_size = getattr(usage_accumulator, "context_window_size", None)
- if (not window_size or window_size <= 0) and info:
- window_size = info.context_window
- if window_size and window_size > 0:
- current_context_tokens = getattr(usage_accumulator, "current_context_tokens", None)
- if isinstance(current_context_tokens, int | float):
- return (current_context_tokens / window_size) * 100
- except Exception:
- return None
- return None
+ fallback_window_size = info.context_window if info else None
+ return resolve_context_usage_percent(
+ context_pct=context_pct,
+ usage_accumulator=usage_accumulator,
+ fallback_window_size=fallback_window_size,
+ )
def _resolve_tdv_segment(
diff --git a/src/fast_agent/ui/prompt/keybindings.py b/src/fast_agent/ui/prompt/keybindings.py
index 1cee3509f..6473da850 100644
--- a/src/fast_agent/ui/prompt/keybindings.py
+++ b/src/fast_agent/ui/prompt/keybindings.py
@@ -27,12 +27,16 @@ class ShellPrefixLexer(Lexer):
"""Lexer that highlights shell (!) and comment (#) commands."""
def lex_document(self, document):
+ first_line = document.lines[0] if document.lines else ""
+ first_stripped = first_line.lstrip()
+ first_line_is_shell = first_stripped.startswith("!")
+ first_line_is_hash_command = try_parse_hash_agent_command(first_stripped) is not None
+
def get_line_tokens(line_number):
line = document.lines[line_number]
- stripped = line.lstrip()
- if stripped.startswith("!"):
+ if line_number == 0 and first_line_is_shell:
return [("class:shell-command", line)]
- if try_parse_hash_agent_command(stripped) is not None:
+ if line_number == 0 and first_line_is_hash_command:
return [("class:comment-command", line)]
return [("", line)]
diff --git a/src/fast_agent/ui/prompt/parser.py b/src/fast_agent/ui/prompt/parser.py
index 782286c36..56f49c038 100644
--- a/src/fast_agent/ui/prompt/parser.py
+++ b/src/fast_agent/ui/prompt/parser.py
@@ -65,6 +65,7 @@
TitleSessionCommand,
UnknownCommand,
)
+from fast_agent.utils.commandline import split_commandline
from fast_agent.utils.slash_commands import split_subcommand_and_remainder
@@ -165,7 +166,7 @@ def _parse_attach_command(remainder: str) -> AttachCommand:
return AttachCommand(paths=())
try:
- tokens = shlex.split(remainder)
+ tokens = split_commandline(remainder)
except ValueError as exc:
return AttachCommand(paths=(), error=str(exc))
@@ -631,7 +632,7 @@ def parse_special_input(text: str) -> str | CommandPayload:
if cmd_line and cmd_line.startswith("@"):
return SwitchAgentCommand(agent_name=cmd_line[1:].strip())
- parsed_hash_command = try_parse_hash_agent_command(text)
+ parsed_hash_command = try_parse_hash_agent_command(cmd_line.lstrip())
if parsed_hash_command is not None:
return parsed_hash_command
diff --git a/src/fast_agent/ui/prompt/resource_mentions.py b/src/fast_agent/ui/prompt/resource_mentions.py
index 076cf1607..6702e5210 100644
--- a/src/fast_agent/ui/prompt/resource_mentions.py
+++ b/src/fast_agent/ui/prompt/resource_mentions.py
@@ -10,11 +10,14 @@
from mcp.types import ContentBlock, EmbeddedResource, ReadResourceResult, TextContent
+from fast_agent.mcp.helpers.content_helpers import image_link, resource_link
from fast_agent.mcp.mcp_content import MCPFile, MCPImage
from fast_agent.mcp.prompt_message_extended import PromptMessageExtended
from fast_agent.ui.prompt.attachment_tokens import (
FILE_MENTION_SERVER,
+ URL_MENTION_SERVER,
normalize_local_attachment_reference,
+ normalize_remote_attachment_reference,
)
if TYPE_CHECKING:
@@ -243,6 +246,8 @@ def _parse_token(token: str, *, start: int, end: int) -> ParsedMention | None:
if server_name == FILE_MENTION_SERVER:
resource_uri = str(normalize_local_attachment_reference(resource_expr))
+ elif server_name == URL_MENTION_SERVER:
+ resource_uri = normalize_remote_attachment_reference(resource_expr)
else:
template_uri, args = _parse_template_args(resource_expr)
resource_uri = _render_template_uri(template_uri, args)
@@ -316,7 +321,11 @@ async def resolve_mentions(agent: Any, parsed: ParsedMentions) -> ResolvedMentio
resources=[],
)
- remote_mentions = [mention for mention in parsed.mentions if mention.server_name != FILE_MENTION_SERVER]
+ remote_mentions = [
+ mention
+ for mention in parsed.mentions
+ if mention.server_name not in {FILE_MENTION_SERVER, URL_MENTION_SERVER}
+ ]
get_resource = getattr(agent, "get_resource", None)
if remote_mentions and not callable(get_resource):
raise ResourceMentionError("Current agent does not support MCP resources")
@@ -328,6 +337,9 @@ async def resolve_mentions(agent: Any, parsed: ParsedMentions) -> ResolvedMentio
if mention.server_name == FILE_MENTION_SERVER:
resources.append(_resolve_local_content_block(mention.resource_uri))
continue
+ if mention.server_name == URL_MENTION_SERVER:
+ resources.append(_resolve_remote_content_block(mention.resource_uri))
+ continue
if not callable(get_resource):
raise ResourceMentionError("Current agent does not support MCP resources")
result: ReadResourceResult = await get_resource(
@@ -385,6 +397,14 @@ def _resolve_local_content_block(path_text: str) -> ContentBlock:
return content
+def _resolve_remote_content_block(url: str) -> ContentBlock:
+ inferred = resource_link(url)
+ mime_type = inferred.mimeType or "application/octet-stream"
+ if mime_type.startswith("image/"):
+ return image_link(url, mime_type=mime_type)
+ return inferred
+
+
def _is_image_path(path: Path) -> bool:
from fast_agent.mcp.mime_utils import guess_mime_type, is_image_mime_type
diff --git a/src/fast_agent/ui/prompt/toolbar.py b/src/fast_agent/ui/prompt/toolbar.py
index 07bcfc530..0e6270b62 100644
--- a/src/fast_agent/ui/prompt/toolbar.py
+++ b/src/fast_agent/ui/prompt/toolbar.py
@@ -10,6 +10,7 @@
from prompt_toolkit.formatted_text.utils import fragment_list_width
from fast_agent.agents.agent_types import AgentType
+from fast_agent.ui.context_usage_display import format_compact_context_usage_percent
from fast_agent.ui.gauge_glyph_palette import (
PAIRED_REASONING_GAUGE_GLYPHS,
PAIRED_VERBOSITY_GAUGE_GLYPHS,
@@ -28,15 +29,7 @@
def _format_context_usage_percent_for_toolbar(pct: float | None) -> str | None:
"""Format context usage for toolbar display with stable width."""
- if pct is None:
- return None
-
- safe_pct = max(pct, 0.0)
- if safe_pct >= 100.0:
- return "100%+"
- if safe_pct < 10.0:
- return f"{min(safe_pct, 9.99):.2f}%"
- return f"{min(safe_pct, 99.9):.1f}%"
+ return format_compact_context_usage_percent(pct)
def _left_truncate_with_ellipsis(text: str, max_length: int) -> str:
diff --git a/tests/unit/fast_agent/agents/test_llm_agent_web_metadata.py b/tests/unit/fast_agent/agents/test_llm_agent_web_metadata.py
index 423d6c0aa..d72e6eb16 100644
--- a/tests/unit/fast_agent/agents/test_llm_agent_web_metadata.py
+++ b/tests/unit/fast_agent/agents/test_llm_agent_web_metadata.py
@@ -293,6 +293,41 @@ async def test_show_assistant_message_places_websocket_indicator_before_context_
assert call.get("model") == "gpt-5.3-codex ↔ (10.0%)"
+@pytest.mark.unit
+@pytest.mark.asyncio
+async def test_show_assistant_message_uses_compact_context_format_for_low_usage() -> None:
+ agent = LlmAgent(AgentConfig("websocket-indicator-low-context"))
+ capture_display = _CaptureDisplay()
+ agent.display = capture_display
+ llm = ResponsesLLM(provider=Provider.RESPONSES, model="gpt-5.3-codex")
+ llm._record_ws_turn_outcome("reused")
+ llm.usage_accumulator.set_context_window_size(1000)
+ llm.usage_accumulator.add_turn(
+ TurnUsage.from_fast_agent(
+ FastAgentUsage(input_chars=9, output_chars=1, model_type="test"),
+ model="gpt-5.3-codex",
+ )
+ )
+ agent._llm = llm
+
+ tool_call = CallToolRequest(
+ method="tools/call",
+ params=CallToolRequestParams(name="demo-tool", arguments={}),
+ )
+ message = PromptMessageExtended(
+ role="assistant",
+ content=[TextContent(type="text", text="need tool")],
+ tool_calls={"call_1": tool_call},
+ stop_reason=LlmStopReason.TOOL_USE,
+ )
+
+ await agent.show_assistant_message(message)
+
+ assert len(capture_display.calls) == 1
+ call = capture_display.calls[0]
+ assert call.get("model") == "gpt-5.3-codex ↔ (1.00%)"
+
+
@pytest.mark.unit
@pytest.mark.asyncio
async def test_generate_impl_hides_summary_for_tool_use_turn() -> None:
diff --git a/tests/unit/fast_agent/agents/test_llm_content_filter.py b/tests/unit/fast_agent/agents/test_llm_content_filter.py
index d30988994..869de73f4 100644
--- a/tests/unit/fast_agent/agents/test_llm_content_filter.py
+++ b/tests/unit/fast_agent/agents/test_llm_content_filter.py
@@ -6,6 +6,7 @@
CallToolResult,
EmbeddedResource,
ImageContent,
+ ResourceLink,
TextContent,
)
from pydantic import AnyUrl
@@ -19,6 +20,7 @@
)
from fast_agent.interfaces import FastAgentLLMProtocol
from fast_agent.llm.model_factory import ModelConfig
+from fast_agent.llm.model_info import ModelInfo
from fast_agent.llm.provider_types import Provider
from fast_agent.llm.resolved_model import ResolvedModelSpec
from fast_agent.types import PromptMessageExtended, text_content
@@ -87,6 +89,39 @@ def model_info(self):
return ModelInfo.from_name(model_name, self.provider)
+class OverlayVisionStubLLM(RecordingStubLLM):
+ @property
+ def provider(self) -> Provider:
+ return Provider.OPENRESPONSES
+
+ @property
+ def model_info(self):
+ model_name = self.model_name or "overlay-model"
+ return ModelInfo(
+ name=model_name,
+ provider=self.provider,
+ context_window=75264,
+ max_output_tokens=2048,
+ tokenizes=["text/plain", "image/jpeg", "image/png", "image/webp"],
+ json_mode=None,
+ reasoning=None,
+ )
+
+
+class AnthropicStubLLM(RecordingStubLLM):
+ def __init__(self, model_name: str = "claude-sonnet-4-5") -> None:
+ super().__init__(model_name=model_name)
+ self._provider = Provider.ANTHROPIC
+ self._resolved_model = ResolvedModelSpec(
+ raw_input=model_name,
+ selected_model_name=model_name,
+ source="direct",
+ model_config=ModelConfig(provider=Provider.ANTHROPIC, model_name=model_name),
+ provider=Provider.ANTHROPIC,
+ wire_model_name=model_name,
+ )
+
+
def make_decorator(model_name: str = "passthrough") -> tuple[LlmDecorator, RecordingStubLLM]:
config = AgentConfig(name="tester", model=model_name)
decorator = LlmDecorator(config=config)
@@ -95,6 +130,24 @@ def make_decorator(model_name: str = "passthrough") -> tuple[LlmDecorator, Recor
return decorator, stub
+def make_overlay_vision_decorator() -> tuple[LlmDecorator, OverlayVisionStubLLM]:
+ model_name = "unsloth/Qwen3.5-9B-GGUF"
+ config = AgentConfig(name="tester", model=model_name)
+ decorator = LlmDecorator(config=config)
+ stub = OverlayVisionStubLLM(model_name=model_name)
+ decorator._llm = stub
+ return decorator, stub
+
+
+def make_anthropic_decorator() -> tuple[LlmDecorator, AnthropicStubLLM]:
+ model_name = "claude-sonnet-4-5"
+ config = AgentConfig(name="tester", model=model_name)
+ decorator = LlmDecorator(config=config)
+ stub = AnthropicStubLLM(model_name=model_name)
+ decorator._llm = stub
+ return decorator, stub
+
+
def _parse_meta_categories(blocks) -> set[str]:
categories: set[str] = set()
for block in blocks or []:
@@ -175,6 +228,25 @@ async def test_sanitizes_image_content_for_text_only_model():
assert _parse_alert_flags(alert_blocks) == {"V"}
+@pytest.mark.asyncio
+async def test_overlay_model_info_keeps_supported_image_content():
+ decorator, stub = make_overlay_vision_decorator()
+
+ image_block = ImageContent(type="image", data="AAA", mimeType="image/png")
+ message = PromptMessageExtended(role="user", content=[image_block])
+
+ _, summary = decorator._sanitize_messages_for_llm([message])
+ assert summary is None
+
+ await decorator.generate_impl([message])
+
+ assert stub.generated_messages is not None
+ sent_message = stub.generated_messages[0]
+ assert len(sent_message.content) == 1
+ assert isinstance(sent_message.content[0], ImageContent)
+ assert sent_message.channels is None or FAST_AGENT_ERROR_CHANNEL not in sent_message.channels
+
+
@pytest.mark.asyncio
async def test_removes_unsupported_tool_result_content():
decorator, stub = make_decorator("passthrough")
@@ -220,6 +292,31 @@ async def test_removes_unsupported_tool_result_content():
assert _parse_alert_flags(alert_blocks) == {"D"}
+@pytest.mark.asyncio
+async def test_removes_remote_office_document_links_for_anthropic() -> None:
+ decorator, stub = make_anthropic_decorator()
+
+ resource = ResourceLink(
+ type="resource_link",
+ uri=AnyUrl("https://example.com/report.docx"),
+ mimeType="application/vnd.openxmlformats-officedocument.wordprocessingml.document",
+ name="report.docx",
+ )
+ message = PromptMessageExtended(role="user", content=[resource])
+
+ _, summary = decorator._sanitize_messages_for_llm([message])
+ assert summary is not None
+ assert "document" in summary.message.lower()
+
+ await decorator.generate_impl([message])
+
+ assert stub.generated_messages is not None
+ sent_message = stub.generated_messages[0]
+ assert len(sent_message.content) == 1
+ assert isinstance(sent_message.content[0], TextContent)
+ assert "removed" in sent_message.content[0].text.lower()
+
+
@pytest.mark.asyncio
async def test_metadata_clears_when_supported_content_only():
decorator, stub = make_decorator("passthrough")
diff --git a/tests/unit/fast_agent/commands/test_model_handler.py b/tests/unit/fast_agent/commands/test_model_handler.py
index c51ca0702..d9736e147 100644
--- a/tests/unit/fast_agent/commands/test_model_handler.py
+++ b/tests/unit/fast_agent/commands/test_model_handler.py
@@ -733,6 +733,31 @@ async def test_model_switch_reopens_overlay_selection_on_overlay_provider() -> N
assert any("already active" in str(message.text) for message in outcome.messages)
+@pytest.mark.asyncio
+async def test_model_switch_reopens_vertex_selection_for_anthropic_vertex_model() -> None:
+ llm = _StubLLM(
+ "claude-sonnet-4-6",
+ provider=Provider.ANTHROPIC_VERTEX,
+ selected_model_name="anthropic-vertex.claude-sonnet-4-6",
+ )
+ agent = _StubAgent(llm)
+ provider = _StubAgentProvider(agent)
+ io = _StubIO(model_selection_response="anthropic-vertex.claude-sonnet-4-6")
+ ctx = CommandContext(
+ agent_provider=provider,
+ current_agent_name="test",
+ io=io,
+ settings=Settings(),
+ )
+
+ outcome = await handle_model_switch(ctx, agent_name="test", value=None)
+
+ assert io.last_initial_provider == "anthropic-vertex"
+ assert io.last_default_model == "anthropic-vertex.claude-sonnet-4-6"
+ assert outcome.reset_session is False
+ assert any("already active" in str(message.text) for message in outcome.messages)
+
+
@pytest.mark.asyncio
async def test_model_switch_does_not_reset_session_when_model_is_already_active() -> None:
llm = _StubLLM("gpt-5-mini")
diff --git a/tests/unit/fast_agent/commands/test_models_manager_handler.py b/tests/unit/fast_agent/commands/test_models_manager_handler.py
index f823f2747..d7a8bc71a 100644
--- a/tests/unit/fast_agent/commands/test_models_manager_handler.py
+++ b/tests/unit/fast_agent/commands/test_models_manager_handler.py
@@ -63,6 +63,8 @@ def __init__(
self._selection_responses = list(selection_responses or [])
self._model_selection_responses = list(model_selection_responses or [])
self.emitted_messages: list[_HasText] = []
+ self.last_initial_provider: str | None = None
+ self.last_default_model: str | None = None
async def emit(self, message: object) -> None:
assert hasattr(message, "text")
@@ -99,7 +101,8 @@ async def prompt_model_selection(
initial_provider: str | None = None,
default_model: str | None = None,
) -> str | None:
- del initial_provider, default_model
+ self.last_initial_provider = initial_provider
+ self.last_default_model = default_model
if self._model_selection_responses:
return self._model_selection_responses.pop(0)
return None
@@ -549,6 +552,41 @@ async def test_models_aliases_set_uses_model_selector_for_existing_alias(tmp_pat
assert "new: claude-haiku-4-5" in rendered
+@pytest.mark.asyncio
+async def test_models_aliases_set_reopens_vertex_selection_for_vertex_model(tmp_path: Path) -> None:
+ workspace = tmp_path / "workspace"
+ env_dir = workspace / ".fast-agent"
+ workspace.mkdir(parents=True)
+ _write_yaml(
+ env_dir / "fastagent.config.yaml",
+ {
+ "model_references": {
+ "system": {
+ "fast": "anthropic-vertex.claude-sonnet-4-6",
+ }
+ }
+ },
+ )
+
+ io = _StubCommandIO(model_selection_responses=["anthropic-vertex.claude-sonnet-4-6"])
+
+ previous_cwd = Path.cwd()
+ try:
+ os.chdir(workspace)
+ outcome = await models_manager.handle_models_command(
+ _context_with_io(Settings(environment_dir=str(env_dir)), io),
+ agent_name="main",
+ action="references",
+ argument="set $system.fast",
+ )
+ finally:
+ os.chdir(previous_cwd)
+
+ assert io.last_initial_provider == "anthropic-vertex"
+ assert io.last_default_model == "anthropic-vertex.claude-sonnet-4-6"
+ assert "no changes" in str(outcome.messages[0].text)
+
+
@pytest.mark.asyncio
async def test_models_aliases_set_can_create_new_alias_interactively(tmp_path: Path) -> None:
workspace = tmp_path / "workspace"
diff --git a/tests/unit/fast_agent/commands/test_runtime_model_picker_bootstrap.py b/tests/unit/fast_agent/commands/test_runtime_model_picker_bootstrap.py
index 956fdca9d..f4f7d7eee 100644
--- a/tests/unit/fast_agent/commands/test_runtime_model_picker_bootstrap.py
+++ b/tests/unit/fast_agent/commands/test_runtime_model_picker_bootstrap.py
@@ -272,6 +272,21 @@ def test_resolve_model_picker_initial_selection_uses_last_used_alias() -> None:
assert model_spec == "claude-haiku-4-5"
+def test_resolve_model_picker_initial_selection_uses_vertex_group_for_anthropic_vertex() -> None:
+ provider, model_spec = _resolve_model_picker_initial_selection(
+ settings=Settings(
+ model_references={
+ "system": {
+ "last_used": "anthropic-vertex.claude-sonnet-4-6",
+ }
+ }
+ )
+ )
+
+ assert provider == "anthropic-vertex"
+ assert model_spec == "anthropic-vertex.claude-sonnet-4-6"
+
+
def test_resolve_model_picker_initial_selection_preserves_overlay_alias(tmp_path: Path) -> None:
env_dir = tmp_path / ".fast-agent"
overlays_dir = env_dir / "model-overlays"
diff --git a/tests/unit/fast_agent/llm/provider/anthropic/test_file_uploads.py b/tests/unit/fast_agent/llm/provider/anthropic/test_file_uploads.py
new file mode 100644
index 000000000..110a85dcf
--- /dev/null
+++ b/tests/unit/fast_agent/llm/provider/anthropic/test_file_uploads.py
@@ -0,0 +1,129 @@
+from __future__ import annotations
+
+import base64
+from types import SimpleNamespace
+
+import pytest
+from mcp.types import BlobResourceContents, EmbeddedResource
+from pydantic import AnyUrl
+
+from fast_agent.config import AnthropicSettings, Settings
+from fast_agent.context import Context
+from fast_agent.llm.provider.anthropic.llm_anthropic import AnthropicLLM
+from fast_agent.llm.provider.anthropic.multipart_converter_anthropic import (
+ ANTHROPIC_FILE_ID_META_KEY,
+)
+from fast_agent.mcp.prompt_message_extended import PromptMessageExtended
+
+
+def _make_llm(model: str = "claude-sonnet-4-5") -> AnthropicLLM:
+ settings = Settings()
+ settings.anthropic = AnthropicSettings(api_key="test-key")
+ context = Context(config=settings)
+ return AnthropicLLM(context=context, model=model, name="test-agent")
+
+
+class _FakeFilesApi:
+ def __init__(self) -> None:
+ self.calls: list[tuple[str | None, bytes, str | None]] = []
+
+ async def upload(self, *, file):
+ if isinstance(file, tuple):
+ if len(file) == 3:
+ filename, data, mime_type = file
+ elif len(file) == 2:
+ filename, data = file
+ mime_type = None
+ else:
+ raise AssertionError(f"Unexpected file tuple: {file}")
+ else:
+ filename, data, mime_type = None, file, None
+
+ assert isinstance(data, bytes)
+ self.calls.append((filename, data, mime_type))
+ return SimpleNamespace(id=f"file_{len(self.calls)}")
+
+
+class _FakeAnthropic:
+ def __init__(self) -> None:
+ self.beta = SimpleNamespace(files=_FakeFilesApi())
+
+
+@pytest.mark.asyncio
+async def test_prepare_anthropic_file_resources_uploads_office_documents() -> None:
+ llm = _make_llm()
+ anthropic = _FakeAnthropic()
+ docx_bytes = b"PK\x03\x04docx"
+ resource = BlobResourceContents(
+ uri=AnyUrl("file:///tmp/report.docx"),
+ mimeType="application/vnd.openxmlformats-officedocument.wordprocessingml.document",
+ blob=base64.b64encode(docx_bytes).decode("ascii"),
+ )
+ message = PromptMessageExtended(
+ role="user",
+ content=[EmbeddedResource(type="resource", resource=resource)],
+ )
+
+ await llm._prepare_anthropic_file_resources(anthropic, [message])
+
+ meta = dict(resource.meta or {})
+ assert meta[ANTHROPIC_FILE_ID_META_KEY] == "file_1"
+ assert anthropic.beta.files.calls == [("report.docx", docx_bytes, resource.mimeType)]
+
+
+@pytest.mark.asyncio
+async def test_prepare_anthropic_file_resources_caches_repeated_uploads() -> None:
+ llm = _make_llm()
+ anthropic = _FakeAnthropic()
+ docx_bytes = b"PK\x03\x04docx"
+ blob = base64.b64encode(docx_bytes).decode("ascii")
+
+ first = BlobResourceContents(
+ uri=AnyUrl("file:///tmp/report.docx"),
+ mimeType="application/vnd.openxmlformats-officedocument.wordprocessingml.document",
+ blob=blob,
+ )
+ second = BlobResourceContents(
+ uri=AnyUrl("file:///tmp/report.docx"),
+ mimeType="application/vnd.openxmlformats-officedocument.wordprocessingml.document",
+ blob=blob,
+ )
+ messages = [
+ PromptMessageExtended(role="user", content=[EmbeddedResource(type="resource", resource=first)]),
+ PromptMessageExtended(
+ role="user", content=[EmbeddedResource(type="resource", resource=second)]
+ ),
+ ]
+
+ await llm._prepare_anthropic_file_resources(anthropic, messages)
+
+ assert len(anthropic.beta.files.calls) == 1
+ assert dict(first.meta or {})[ANTHROPIC_FILE_ID_META_KEY] == "file_1"
+ assert dict(second.meta or {})[ANTHROPIC_FILE_ID_META_KEY] == "file_1"
+
+
+@pytest.mark.asyncio
+async def test_prepare_anthropic_file_resources_infers_document_mime_from_uri() -> None:
+ llm = _make_llm()
+ anthropic = _FakeAnthropic()
+ docx_bytes = b"PK\x03\x04docx"
+ resource = BlobResourceContents(
+ uri=AnyUrl("file:///tmp/report.docx"),
+ blob=base64.b64encode(docx_bytes).decode("ascii"),
+ )
+ message = PromptMessageExtended(
+ role="user",
+ content=[EmbeddedResource(type="resource", resource=resource)],
+ )
+
+ await llm._prepare_anthropic_file_resources(anthropic, [message])
+
+ meta = dict(resource.meta or {})
+ assert meta[ANTHROPIC_FILE_ID_META_KEY] == "file_1"
+ assert anthropic.beta.files.calls == [
+ (
+ "report.docx",
+ docx_bytes,
+ "application/vnd.openxmlformats-officedocument.wordprocessingml.document",
+ )
+ ]
diff --git a/tests/unit/fast_agent/llm/provider/anthropic/test_reasoning_defaults.py b/tests/unit/fast_agent/llm/provider/anthropic/test_reasoning_defaults.py
index 93f1489e8..e72004792 100644
--- a/tests/unit/fast_agent/llm/provider/anthropic/test_reasoning_defaults.py
+++ b/tests/unit/fast_agent/llm/provider/anthropic/test_reasoning_defaults.py
@@ -5,7 +5,11 @@
from fast_agent.config import AnthropicSettings, Settings
from fast_agent.context import Context
from fast_agent.llm.model_database import ModelDatabase
-from fast_agent.llm.provider.anthropic.llm_anthropic import AnthropicLLM
+from fast_agent.llm.provider.anthropic.llm_anthropic import (
+ FINE_GRAINED_TOOL_STREAMING_BETA,
+ STRUCTURED_OUTPUT_BETA,
+ AnthropicLLM,
+)
from fast_agent.llm.reasoning_effort import is_auto_reasoning
from fast_agent.llm.request_params import RequestParams
@@ -210,6 +214,17 @@ def test_json_structured_output_uses_output_config_format():
assert "schema" in args["output_config"]["format"]
+def test_auto_structured_output_mode_prefers_json_when_direct_beta_supported():
+ llm = _make_llm("claude-opus-4-6", reasoning=False)
+
+ structured_mode = llm._resolve_structured_output_mode(
+ "claude-opus-4-6",
+ _StructuredResponse,
+ )
+
+ assert structured_mode == "json"
+
+
def test_json_structured_output_merges_with_adaptive_effort():
llm = _make_llm("claude-opus-4-6", reasoning="max")
@@ -228,3 +243,47 @@ def test_json_structured_output_merges_with_adaptive_effort():
assert args["thinking"] == {"type": "adaptive"}
assert args["output_config"]["effort"] == "max"
assert args["output_config"]["format"]["type"] == "json_schema"
+
+
+def test_structured_output_json_adds_structured_output_beta() -> None:
+ llm = _make_llm("claude-opus-4-6")
+
+ beta_flags = llm._resolve_anthropic_beta_flags(
+ model="claude-opus-4-6",
+ structured_mode="json",
+ thinking_enabled=False,
+ request_tools=[],
+ web_tool_betas=[],
+ )
+
+ assert beta_flags == [STRUCTURED_OUTPUT_BETA]
+
+
+def test_structured_output_tool_use_does_not_add_structured_output_beta() -> None:
+ llm = _make_llm("claude-opus-4-6")
+
+ beta_flags = llm._resolve_anthropic_beta_flags(
+ model="claude-opus-4-6",
+ structured_mode="tool_use",
+ thinking_enabled=False,
+ request_tools=[],
+ web_tool_betas=[],
+ )
+
+ assert beta_flags == []
+
+
+def test_structured_output_modes_still_preserve_other_beta_flags() -> None:
+ llm = _make_llm("claude-opus-4-6")
+
+ beta_flags = llm._resolve_anthropic_beta_flags(
+ model="claude-opus-4-6",
+ structured_mode="json",
+ thinking_enabled=False,
+ request_tools=[{"name": "demo", "description": "", "input_schema": {"type": "object"}}],
+ web_tool_betas=["web-beta"],
+ )
+
+ assert FINE_GRAINED_TOOL_STREAMING_BETA in beta_flags
+ assert STRUCTURED_OUTPUT_BETA in beta_flags
+ assert "web-beta" in beta_flags
diff --git a/tests/unit/fast_agent/llm/provider/anthropic/test_vertex.py b/tests/unit/fast_agent/llm/provider/anthropic/test_vertex.py
index a14bbea58..4f97eaae0 100644
--- a/tests/unit/fast_agent/llm/provider/anthropic/test_vertex.py
+++ b/tests/unit/fast_agent/llm/provider/anthropic/test_vertex.py
@@ -1,20 +1,31 @@
import types
import pytest
+from pydantic import BaseModel
from fast_agent.config import AnthropicSettings, Settings
from fast_agent.context import Context
from fast_agent.core.exceptions import ProviderKeyError
+from fast_agent.llm.provider.anthropic.beta_types import ToolParam
from fast_agent.llm.provider.anthropic.llm_anthropic import AnthropicLLM
-from fast_agent.llm.provider.anthropic.vertex_config import GoogleAdcStatus
+from fast_agent.llm.provider.anthropic.llm_anthropic_vertex import AnthropicVertexLLM
+from fast_agent.llm.provider.anthropic.vertex_config import (
+ GoogleAdcStatus,
+ anthropic_vertex_config,
+)
from fast_agent.llm.provider_key_manager import ProviderKeyManager
-def _build_llm(config: Settings, *, via: str | None = None) -> AnthropicLLM:
- kwargs = {"context": Context(config=config), "model": "claude-sonnet-4-6"}
- if via is not None:
- kwargs["via"] = via
- return AnthropicLLM(**kwargs)
+class _StructuredResponse(BaseModel):
+ answer: str
+
+
+def _build_direct_llm(config: Settings) -> AnthropicLLM:
+ return AnthropicLLM(context=Context(config=config), model="claude-sonnet-4-6")
+
+
+def _build_vertex_llm(config: Settings) -> AnthropicVertexLLM:
+ return AnthropicVertexLLM(context=Context(config=config), model="claude-sonnet-4-6")
def test_vertex_cfg_accepts_model_object() -> None:
@@ -31,8 +42,7 @@ def test_vertex_cfg_accepts_model_object() -> None:
)
config = Settings(anthropic=anthropic)
- llm = _build_llm(config)
- vertex_cfg = llm._vertex_cfg()
+ vertex_cfg = anthropic_vertex_config(config)
assert vertex_cfg.enabled is True
assert vertex_cfg.project_id == "proj"
@@ -53,11 +63,9 @@ def test_provider_key_manager_allows_vertex_route_without_api_key() -> None:
}
)
- assert ProviderKeyManager.get_api_key("anthropic", config, route_hint="vertex") == ""
+ assert ProviderKeyManager.get_api_key("anthropic-vertex", config) == ""
with pytest.raises(ProviderKeyError):
ProviderKeyManager.get_api_key("anthropic", config)
- with pytest.raises(ProviderKeyError):
- ProviderKeyManager.get_api_key("anthropic", config, route_hint="direct")
def test_initialize_anthropic_client_uses_vertex(monkeypatch) -> None:
@@ -73,7 +81,7 @@ def test_initialize_anthropic_client_uses_vertex(monkeypatch) -> None:
}
}
)
- llm = _build_llm(config, via="vertex")
+ llm = _build_vertex_llm(config)
called: dict[str, object] = {}
@@ -82,11 +90,11 @@ def __init__(self, **kwargs) -> None:
called.update(kwargs)
monkeypatch.setattr(
- "fast_agent.llm.provider.anthropic.llm_anthropic.AsyncAnthropicVertex",
+ "fast_agent.llm.provider.anthropic.llm_anthropic_vertex.AsyncAnthropicVertex",
FakeVertexClient,
)
monkeypatch.setattr(
- "fast_agent.llm.provider.anthropic.llm_anthropic.detect_google_adc",
+ "fast_agent.llm.provider.anthropic.llm_anthropic_vertex.detect_google_adc",
lambda: GoogleAdcStatus(available=True, project_id="proj", credentials=object()),
)
@@ -110,7 +118,7 @@ def test_initialize_anthropic_client_uses_direct_sdk(monkeypatch) -> None:
}
}
)
- llm = _build_llm(config)
+ llm = _build_direct_llm(config)
called: dict[str, object] = {}
@@ -142,12 +150,85 @@ def test_vertex_client_requires_google_adc(monkeypatch) -> None:
}
}
)
- llm = _build_llm(config, via="vertex")
+ llm = _build_vertex_llm(config)
monkeypatch.setattr(
- "fast_agent.llm.provider.anthropic.llm_anthropic.detect_google_adc",
+ "fast_agent.llm.provider.anthropic.llm_anthropic_vertex.detect_google_adc",
lambda: GoogleAdcStatus(available=False, error=RuntimeError("missing")),
)
with pytest.raises(ProviderKeyError, match="Google ADC not found"):
llm._initialize_anthropic_client()
+
+
+def test_vertex_beta_support_is_selective() -> None:
+ llm = AnthropicVertexLLM(
+ context=Context(
+ config=Settings.model_validate(
+ {
+ "anthropic": {
+ "vertex_ai": {
+ "project_id": "proj",
+ "location": "global",
+ }
+ }
+ }
+ )
+ ),
+ model="claude-sonnet-4-5",
+ long_context=True,
+ )
+ request_tools = [ToolParam(name="demo", description="", input_schema={})]
+
+ beta_flags = llm._resolve_anthropic_beta_flags(
+ model="claude-sonnet-4-5",
+ structured_mode="json",
+ thinking_enabled=False,
+ request_tools=request_tools,
+ web_tool_betas=("code-execution-web-tools-2026-02-09",),
+ )
+
+ assert "structured-outputs-2025-11-13" not in beta_flags
+ assert "context-1m-2025-08-07" in beta_flags
+ assert "fine-grained-tool-streaming-2025-05-14" in beta_flags
+ assert "code-execution-web-tools-2026-02-09" in beta_flags
+
+
+def test_vertex_supports_web_search_but_not_web_fetch() -> None:
+ llm = _build_vertex_llm(
+ Settings.model_validate(
+ {
+ "anthropic": {
+ "vertex_ai": {
+ "project_id": "proj",
+ "location": "global",
+ }
+ }
+ }
+ )
+ )
+
+ assert llm.web_search_supported is True
+ assert llm.web_fetch_supported is False
+
+
+def test_vertex_auto_structured_output_mode_falls_back_to_tool_use() -> None:
+ llm = _build_vertex_llm(
+ Settings.model_validate(
+ {
+ "anthropic": {
+ "vertex_ai": {
+ "project_id": "proj",
+ "location": "global",
+ }
+ }
+ }
+ )
+ )
+
+ structured_mode = llm._resolve_structured_output_mode(
+ "claude-sonnet-4-6",
+ _StructuredResponse,
+ )
+
+ assert structured_mode == "tool_use"
diff --git a/tests/unit/fast_agent/llm/providers/test_multipart_converter_anthropic.py b/tests/unit/fast_agent/llm/providers/test_multipart_converter_anthropic.py
index 39bea073a..94c4dce7b 100644
--- a/tests/unit/fast_agent/llm/providers/test_multipart_converter_anthropic.py
+++ b/tests/unit/fast_agent/llm/providers/test_multipart_converter_anthropic.py
@@ -12,6 +12,7 @@
EmbeddedResource,
ImageContent,
PromptMessage,
+ ResourceLink,
TextContent,
TextResourceContents,
)
@@ -23,6 +24,7 @@
ANTHROPIC_THINKING_BLOCKS,
)
from fast_agent.llm.provider.anthropic.multipart_converter_anthropic import (
+ ANTHROPIC_FILE_ID_META_KEY,
AnthropicConverter,
)
from fast_agent.mcp.prompt_message_extended import PromptMessageExtended
@@ -173,6 +175,46 @@ def test_embedded_resource_image_url_conversion(self):
"https://example.com/image.jpg",
)
+ def test_resource_link_image_url_conversion(self):
+ """Test conversion of image ResourceLink to Anthropic image block."""
+ resource = ResourceLink(
+ type="resource_link",
+ uri=AnyUrl("https://example.com/image.jpg"),
+ mimeType="image/jpeg",
+ name="image.jpg",
+ )
+ multipart = PromptMessageExtended(role="user", content=[resource])
+
+ anthropic_msg = AnthropicConverter.convert_to_anthropic(multipart)
+
+ self.assertEqual(anthropic_msg["role"], "user")
+ self.assertEqual(len(content_blocks(anthropic_msg)), 1)
+ self.assertEqual(content_blocks(anthropic_msg)[0]["type"], "image")
+ self.assertEqual(block_source(content_blocks(anthropic_msg)[0])["type"], "url")
+ self.assertEqual(
+ block_source(content_blocks(anthropic_msg)[0])["url"],
+ "https://example.com/image.jpg",
+ )
+
+ def test_embedded_resource_office_document_uses_uploaded_file_source(self):
+ """Test office documents use Anthropic file document source when pre-uploaded."""
+ resource = BlobResourceContents(
+ uri=AnyUrl("file:///tmp/report.docx"),
+ mimeType="application/vnd.openxmlformats-officedocument.wordprocessingml.document",
+ blob=PDF_BASE64,
+ )
+ resource.meta = {ANTHROPIC_FILE_ID_META_KEY: "file_abc123"}
+ embedded_resource = EmbeddedResource(type="resource", resource=resource)
+ multipart = PromptMessageExtended(role="user", content=[embedded_resource])
+
+ anthropic_msg = AnthropicConverter.convert_to_anthropic(multipart)
+
+ self.assertEqual(anthropic_msg["role"], "user")
+ self.assertEqual(len(content_blocks(anthropic_msg)), 1)
+ self.assertEqual(content_blocks(anthropic_msg)[0]["type"], "document")
+ self.assertEqual(block_source(content_blocks(anthropic_msg)[0])["type"], "file")
+ self.assertEqual(block_source(content_blocks(anthropic_msg)[0])["file_id"], "file_abc123")
+
def test_assistant_role_restrictions(self):
"""Test that assistant messages can only contain text blocks."""
# Create mixed content for assistant
diff --git a/tests/unit/fast_agent/llm/providers/test_multipart_converter_openai.py b/tests/unit/fast_agent/llm/providers/test_multipart_converter_openai.py
index b9859fa1e..596474d0c 100644
--- a/tests/unit/fast_agent/llm/providers/test_multipart_converter_openai.py
+++ b/tests/unit/fast_agent/llm/providers/test_multipart_converter_openai.py
@@ -206,6 +206,25 @@ def test_linked_resource_conversion(self):
self.assertIn("text/plain", text_part(openai_msg))
self.assertIn("test://example.com/document.txt", text_part(openai_msg))
+ def test_image_resource_link_conversion(self):
+ """Test conversion of image ResourceLink to OpenAI image_url content."""
+ resource_link = ResourceLink(
+ uri=AnyUrl("https://example.com/image.jpg"),
+ type="resource_link",
+ mimeType="image/jpeg",
+ name="image.jpg",
+ )
+ multipart = PromptMessageExtended(role="user", content=[resource_link])
+
+ openai_msgs = OpenAIConverter.convert_to_openai(multipart)
+ self.assertEqual(len(openai_msgs), 1)
+ openai_msg = openai_msgs[0]
+
+ self.assertEqual(openai_msg["role"], "user")
+ self.assertEqual(len(content_parts(openai_msg)), 1)
+ self.assertEqual(content_parts(openai_msg)[0]["type"], "image_url")
+ self.assertEqual(image_url_part(openai_msg)["url"], "https://example.com/image.jpg")
+
def test_multiple_content_blocks(self):
"""Test conversion of messages with multiple content blocks."""
# Create multiple content blocks
diff --git a/tests/unit/fast_agent/llm/providers/test_responses_helpers.py b/tests/unit/fast_agent/llm/providers/test_responses_helpers.py
index e1870403d..f87e0d6eb 100644
--- a/tests/unit/fast_agent/llm/providers/test_responses_helpers.py
+++ b/tests/unit/fast_agent/llm/providers/test_responses_helpers.py
@@ -5,10 +5,19 @@
from typing import Any, Literal
import pytest
-from mcp.types import CallToolRequest, CallToolRequestParams, ImageContent, TextContent
+from mcp.types import (
+ BlobResourceContents,
+ CallToolRequest,
+ CallToolRequestParams,
+ EmbeddedResource,
+ ImageContent,
+ ResourceLink,
+ TextContent,
+ TextResourceContents,
+)
from openai import AsyncOpenAI
from openai.types.responses import ResponseFunctionToolCall
-from pydantic import ValidationError
+from pydantic import AnyUrl, ValidationError
from fast_agent.config import (
CodexResponsesSettings,
@@ -630,6 +639,73 @@ def test_convert_content_parts_text_and_image():
assert parts[1]["image_url"].startswith("data:image/png;base64,")
+def test_convert_content_parts_embedded_text_resource_inlines_as_input_text():
+ harness = _ContentHarness()
+ resource = EmbeddedResource(
+ type="resource",
+ resource=TextResourceContents(
+ uri=AnyUrl("file:///tmp/example.py"),
+ mimeType="text/x-python",
+ text="print('hello')",
+ ),
+ )
+
+ parts = harness._convert_content_parts([resource], role="user")
+
+ assert parts == [
+ {
+ "type": "input_text",
+ "text": (
+                '<fastagent:file uri="file:///tmp/example.py" mimeType="text/x-python">\n'
+                "print('hello')\n"
+                "</fastagent:file>"
+ ),
+ }
+ ]
+
+
+def test_convert_content_parts_office_resource_stays_as_input_file():
+ harness = _ContentHarness()
+ docx_data = base64.b64encode(b"PK\x03\x04docx-bytes").decode("ascii")
+ resource = EmbeddedResource(
+ type="resource",
+ resource=BlobResourceContents(
+ uri=AnyUrl("file:///tmp/example.docx"),
+ mimeType="application/vnd.openxmlformats-officedocument.wordprocessingml.document",
+ blob=docx_data,
+ ),
+ )
+
+ parts = harness._convert_content_parts([resource], role="user")
+
+ assert parts == [
+ {
+ "type": "input_file",
+ "file_data": docx_data,
+ "filename": "example.docx",
+ }
+ ]
+
+
+def test_convert_content_parts_image_resource_link_uses_remote_input_image():
+ harness = _ContentHarness()
+ resource = ResourceLink(
+ type="resource_link",
+ uri=AnyUrl("https://example.com/image.png"),
+ mimeType="image/png",
+ name="image.png",
+ )
+
+ parts = harness._convert_content_parts([resource], role="user")
+
+ assert parts == [
+ {
+ "type": "input_image",
+ "image_url": "https://example.com/image.png",
+ }
+ ]
+
+
@pytest.mark.asyncio
async def test_normalize_input_file_data_to_file_id():
harness = _FileHarness()
diff --git a/tests/unit/fast_agent/llm/test_model_database.py b/tests/unit/fast_agent/llm/test_model_database.py
index 8c83dbbf7..b2f2a928b 100644
--- a/tests/unit/fast_agent/llm/test_model_database.py
+++ b/tests/unit/fast_agent/llm/test_model_database.py
@@ -104,6 +104,59 @@ def test_model_database_anthropic_web_tool_versions_unknown_model():
assert ModelDatabase.get_anthropic_required_betas("unknown-model") is None
+def test_model_database_anthropic_vertex_caps_are_provider_aware() -> None:
+ assert (
+ ModelDatabase.get_anthropic_web_search_version(
+ "claude-sonnet-4-6",
+ provider=Provider.ANTHROPIC_VERTEX,
+ )
+ == "web_search_20260209"
+ )
+ assert (
+ ModelDatabase.get_anthropic_web_fetch_version(
+ "claude-sonnet-4-6",
+ provider=Provider.ANTHROPIC_VERTEX,
+ )
+ is None
+ )
+ assert ModelDatabase.get_anthropic_required_betas(
+ "claude-sonnet-4-6",
+ provider=Provider.ANTHROPIC_VERTEX,
+ ) == ("code-execution-web-tools-2026-02-09",)
+ assert (
+ ModelDatabase.get_long_context_window(
+ "claude-sonnet-4-5",
+ provider=Provider.ANTHROPIC_VERTEX,
+ )
+ == 1_000_000
+ )
+ assert not ModelDatabase.supports_mime(
+ "claude-sonnet-4-6",
+ "application/vnd.openxmlformats-officedocument.wordprocessingml.document",
+ provider=Provider.ANTHROPIC_VERTEX,
+ )
+ assert ModelDatabase.supports_mime(
+ "claude-sonnet-4-6",
+ "application/pdf",
+ provider=Provider.ANTHROPIC_VERTEX,
+ )
+
+
+def test_model_database_anthropic_linked_office_docs_are_not_supported() -> None:
+ assert not ModelDatabase.supports_mime(
+ "claude-sonnet-4-5",
+ "application/vnd.openxmlformats-officedocument.wordprocessingml.document",
+ provider=Provider.ANTHROPIC,
+ resource_source="link",
+ )
+ assert ModelDatabase.supports_mime(
+ "claude-sonnet-4-5",
+ "image/png",
+ provider=Provider.ANTHROPIC,
+ resource_source="link",
+ )
+
+
def test_model_database_max_tokens():
"""Test that ModelDatabase returns expected max tokens"""
# Test known models with different max_output_tokens (no cap)
@@ -147,10 +200,22 @@ def test_model_database_supports_mime_basic():
assert ModelDatabase.supports_mime(
"claude-sonnet-4-0", "document/pdf"
) # alias -> application/pdf
+ assert ModelDatabase.supports_mime(
+ "claude-sonnet-4-0",
+ "application/vnd.openxmlformats-officedocument.wordprocessingml.document",
+ )
+ assert ModelDatabase.supports_mime(
+ "gpt-4o",
+ "application/vnd.openxmlformats-officedocument.presentationml.presentation",
+ )
# Text-only models should not support images
assert not ModelDatabase.supports_mime("deepseek-chat", "image/png")
assert not ModelDatabase.supports_mime("deepseek-chat", "pdf")
+ assert not ModelDatabase.supports_mime(
+ "deepseek-chat",
+ "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet",
+ )
# Wildcard checks
assert ModelDatabase.supports_mime("gpt-4o", "image/*")
diff --git a/tests/unit/fast_agent/llm/test_model_factory.py b/tests/unit/fast_agent/llm/test_model_factory.py
index 22e90fc35..40857d11a 100644
--- a/tests/unit/fast_agent/llm/test_model_factory.py
+++ b/tests/unit/fast_agent/llm/test_model_factory.py
@@ -124,25 +124,21 @@ def test_model_query_structured_tool_use():
assert config.structured_output_mode == "tool_use"
-def test_model_query_via_vertex():
- config = ModelFactory.parse_model_string("claude-sonnet-4-6?via=vertex")
-
- assert config.provider == Provider.ANTHROPIC
- assert config.model_name == "claude-sonnet-4-6"
- assert config.via == "vertex"
+def test_model_query_unknown_parameter_is_rejected() -> None:
+ with pytest.raises(ModelConfigError, match="Unsupported model query parameter"):
+ ModelFactory.parse_model_string("claude-sonnet-4-6?routing=vertex")
-def test_model_query_source_alias_maps_to_via():
- config = ModelFactory.parse_model_string("sonnet?source=vertex")
+def test_explicit_anthropic_vertex_provider_namespace() -> None:
+ config = ModelFactory.parse_model_string("anthropic-vertex.claude-sonnet-4-6")
- assert config.provider == Provider.ANTHROPIC
+ assert config.provider == Provider.ANTHROPIC_VERTEX
assert config.model_name == "claude-sonnet-4-6"
- assert config.via == "vertex"
-def test_model_query_via_rejected_for_non_anthropic_model():
- with pytest.raises(ModelConfigError, match="only supported for Anthropic"):
- ModelFactory.parse_model_string("openai.gpt-4.1?via=vertex")
+def test_model_query_unknown_parameter_rejected_for_non_anthropic_model():
+ with pytest.raises(ModelConfigError, match="Unsupported model query parameter"):
+ ModelFactory.parse_model_string("openai.gpt-4.1?routing=vertex")
def test_model_query_text_verbosity():
@@ -554,6 +550,8 @@ def test_curated_catalog_aliases_are_parseable():
for entry in ModelSelectionCatalog.list_current_entries():
if "?" in entry.model:
continue
+ if entry.model.startswith("anthropic-vertex."):
+ continue
alias_config = ModelFactory.parse_model_string(entry.alias)
model_config = ModelFactory.parse_model_string(entry.model)
diff --git a/tests/unit/fast_agent/llm/test_model_info_caps.py b/tests/unit/fast_agent/llm/test_model_info_caps.py
index 8d54d184c..572be6678 100644
--- a/tests/unit/fast_agent/llm/test_model_info_caps.py
+++ b/tests/unit/fast_agent/llm/test_model_info_caps.py
@@ -106,3 +106,61 @@ def test_codexspark_alias_is_text_only() -> None:
assert info is not None
assert info.name == "codexresponses.gpt-5.3-codex-spark"
assert info.tdv_flags == (True, False, False)
+
+
+def test_model_info_supports_overlay_tokenizes() -> None:
+ info = ModelInfo(
+ name="unsloth/Qwen3.5-9B-GGUF",
+ provider=Provider.OPENRESPONSES,
+ context_window=75264,
+ max_output_tokens=2048,
+ tokenizes=["text/plain", "image/jpeg", "image/png", "image/webp"],
+ json_mode=None,
+ reasoning=None,
+ )
+
+ assert info.supports_mime("image/png")
+ assert info.supports_vision
+
+
+def test_model_info_openai_chat_documents_remain_pdf_only() -> None:
+ info = ModelInfo.from_name("gpt-4o", provider=Provider.OPENAI)
+
+ assert info is not None
+ assert info.supports_mime("application/pdf")
+ assert not info.supports_mime(
+ "application/vnd.openxmlformats-officedocument.wordprocessingml.document"
+ )
+
+
+def test_model_info_responses_models_support_office_documents() -> None:
+ info = ModelInfo.from_name("o4-mini", provider=Provider.RESPONSES)
+
+ assert info is not None
+ assert info.supports_mime(
+ "application/vnd.openxmlformats-officedocument.wordprocessingml.document"
+ )
+
+
+def test_model_info_anthropic_models_support_office_documents() -> None:
+ info = ModelInfo.from_name("claude-sonnet-4-5", provider=Provider.ANTHROPIC)
+
+ assert info is not None
+ assert info.supports_mime(
+ "application/vnd.openxmlformats-officedocument.wordprocessingml.document"
+ )
+ assert not info.supports_mime(
+ "application/vnd.openxmlformats-officedocument.wordprocessingml.document",
+ resource_source="link",
+ )
+ assert info.supports_mime("image/png", resource_source="link")
+
+
+def test_model_info_anthropic_vertex_models_do_not_support_office_documents() -> None:
+ info = ModelInfo.from_name("claude-sonnet-4-5", provider=Provider.ANTHROPIC_VERTEX)
+
+ assert info is not None
+ assert info.supports_mime("application/pdf")
+ assert not info.supports_mime(
+ "application/vnd.openxmlformats-officedocument.wordprocessingml.document"
+ )
diff --git a/tests/unit/fast_agent/mcp/test_mime_utils.py b/tests/unit/fast_agent/mcp/test_mime_utils.py
index 25389df6a..25fe458f1 100644
--- a/tests/unit/fast_agent/mcp/test_mime_utils.py
+++ b/tests/unit/fast_agent/mcp/test_mime_utils.py
@@ -16,6 +16,25 @@ def test_guess_mime_type(self):
assert mime_utils.guess_mime_type("file.png") == "image/png"
assert mime_utils.guess_mime_type("file.jpg") == "image/jpeg"
assert mime_utils.guess_mime_type("file.jpeg") == "image/jpeg"
+ assert (
+ mime_utils.guess_mime_type("file.docx")
+ == "application/vnd.openxmlformats-officedocument.wordprocessingml.document"
+ )
+ assert (
+ mime_utils.guess_mime_type("file.xlsx")
+ == "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet"
+ )
+ assert (
+ mime_utils.guess_mime_type("file.pptx")
+ == "application/vnd.openxmlformats-officedocument.presentationml.presentation"
+ )
# TODO: decide if this should default to text or not...
assert mime_utils.guess_mime_type("file.unknown") == "application/octet-stream"
+
+ def test_is_document_mime_type(self):
+ assert mime_utils.is_document_mime_type("application/pdf")
+ assert mime_utils.is_document_mime_type(
+ "application/vnd.openxmlformats-officedocument.wordprocessingml.document"
+ )
+ assert not mime_utils.is_document_mime_type("text/plain")
diff --git a/tests/unit/fast_agent/ui/test_agent_completer.py b/tests/unit/fast_agent/ui/test_agent_completer.py
index 5a84b66ec..0d3fb3058 100644
--- a/tests/unit/fast_agent/ui/test_agent_completer.py
+++ b/tests/unit/fast_agent/ui/test_agent_completer.py
@@ -10,7 +10,7 @@
import pytest
from mcp.types import Completion as MCPCompletion
from mcp.types import ResourceTemplate, TextContent
-from prompt_toolkit.completion import CompleteEvent
+from prompt_toolkit.completion import CompleteEvent, Completion
from prompt_toolkit.document import Document
import fast_agent.config as config_module
@@ -1436,10 +1436,27 @@ def test_resource_mention_server_completion_filters_connected_resource_servers()
assert "demo:" in names
assert "file:" in names
+ assert "url:" in names
assert "offline:" not in names
assert "nores:" not in names
+def test_resource_mention_builtin_attachment_server_completion_meta() -> None:
+ completer = AgentCompleter(
+ agents=["agent1"],
+ current_agent="agent1",
+ agent_provider=cast("AgentApp", _ProviderStub(_MentionFilteredAgentStub())),
+ )
+
+ doc = Document("^", cursor_position=1)
+ completions = list(completer.get_completions(doc, None))
+ meta_by_text = {completion.text: completion.display_meta_text for completion in completions}
+
+ assert meta_by_text["file:"] == "local file attachment"
+ assert meta_by_text["url:"] == "remote URL attachment"
+ assert meta_by_text["demo:"] == "connected mcp server (resources)"
+
+
def test_resource_mention_resource_and_template_completion() -> None:
completer = AgentCompleter(
agents=["agent1"],
@@ -1472,6 +1489,17 @@ def test_resource_mention_local_file_completion_encodes_spaces() -> None:
assert any(completion.text == "./two%20words.txt" for completion in completions)
+def test_resource_mention_url_completion_offers_http_schemes() -> None:
+ completer = AgentCompleter(agents=["agent1"])
+
+ doc = Document("^url:h", cursor_position=len("^url:h"))
+ completions = list(completer.get_completions(doc, None))
+
+ names = [completion.text for completion in completions]
+ assert "https://" in names
+ assert "http://" in names
+
+
def test_attach_command_completion_offers_clear_and_paths() -> None:
with tempfile.TemporaryDirectory() as tmpdir:
base = Path(tmpdir)
@@ -1491,6 +1519,40 @@ def test_attach_command_completion_offers_clear_and_paths() -> None:
assert "'two words.pdf'" in names
+def test_attach_command_completion_offers_https_hint() -> None:
+ completer = AgentCompleter(agents=["agent1"])
+
+ doc = Document("/attach h", cursor_position=len("/attach h"))
+ completions = list(completer.get_completions(doc, None))
+
+ names = [completion.text for completion in completions]
+ assert "https://" in names
+
+
+def test_attach_command_completion_quotes_windows_paths(monkeypatch: pytest.MonkeyPatch) -> None:
+ monkeypatch.setattr("fast_agent.utils.commandline.os.name", "nt")
+
+ completer = AgentCompleter(agents=["agent1"])
+ completion = Completion(
+ r"C:\Program Files\Tool\tool.exe",
+ start_position=0,
+ display=r"C:\Program Files\Tool\tool.exe",
+ display_meta="path",
+ )
+
+ def _complete_shell_paths(partial: str, delete_len: int, max_results: int = 100) -> list[Completion]:
+ del partial, delete_len, max_results
+ return [completion]
+
+ monkeypatch.setattr(completer, "_complete_shell_paths", _complete_shell_paths)
+
+ doc = Document("/attach C:\\Pro", cursor_position=len("/attach C:\\Pro"))
+ completions = list(completer.get_completions(doc, None))
+
+ names = [item.text for item in completions]
+ assert '"C:\\Program Files\\Tool\\tool.exe"' in names
+
+
def test_resource_mention_argument_value_completion() -> None:
completer = AgentCompleter(
agents=["agent1"],
diff --git a/tests/unit/fast_agent/ui/test_attachment_indicator.py b/tests/unit/fast_agent/ui/test_attachment_indicator.py
index 912e1efb7..a9badb55f 100644
--- a/tests/unit/fast_agent/ui/test_attachment_indicator.py
+++ b/tests/unit/fast_agent/ui/test_attachment_indicator.py
@@ -1,5 +1,6 @@
from __future__ import annotations
+from fast_agent.llm.provider_types import Provider
from fast_agent.ui.attachment_indicator import (
ATTACHMENT_GLYPH,
ATTACHMENT_IDLE_COLOR,
@@ -37,6 +38,57 @@ def test_summarize_draft_attachments_marks_missing_file_questionable() -> None:
assert summary.any_questionable is True
+def test_summarize_draft_attachments_includes_remote_url() -> None:
+ summary = summarize_draft_attachments(
+ "describe ^url:https://example.com/image.png",
+ model_name="gpt-4.1",
+ )
+
+ assert summary is not None
+ assert summary.count == 1
+ assert summary.any_questionable is False
+ assert summary.mime_types == ("image/png",)
+
+
+def test_summarize_draft_attachments_infers_remote_query_image_type() -> None:
+ summary = summarize_draft_attachments(
+ "describe ^url:https://pbs.twimg.com/media/HCaWzdDWYAArgCf?format=jpg&name=4096x4096",
+ model_name="gpt-4.1",
+ )
+
+ assert summary is not None
+ assert summary.count == 1
+ assert summary.any_questionable is False
+ assert summary.mime_types == ("image/jpeg",)
+
+
+def test_summarize_draft_attachments_marks_remote_office_doc_questionable_for_anthropic() -> None:
+ summary = summarize_draft_attachments(
+ "describe ^url:https://example.com/report.docx",
+ model_name="claude-sonnet-4-5",
+ provider=Provider.ANTHROPIC,
+ )
+
+ assert summary is not None
+ assert summary.count == 1
+ assert summary.any_questionable is True
+ assert summary.mime_types == (
+ "application/vnd.openxmlformats-officedocument.wordprocessingml.document",
+ )
+
+
+def test_summarize_draft_attachments_marks_unknown_remote_url_questionable() -> None:
+ summary = summarize_draft_attachments(
+ "describe ^url:https://example.com/download",
+ model_name="gpt-4.1",
+ )
+
+ assert summary is not None
+ assert summary.count == 1
+ assert summary.any_questionable is True
+ assert summary.mime_types == ("application/octet-stream",)
+
+
def test_render_attachment_indicator_uses_red_count_for_questionable_summary() -> None:
indicator = render_attachment_indicator(
DraftAttachmentSummary(count=2, mime_types=("image/png",), any_questionable=True)
diff --git a/tests/unit/fast_agent/ui/test_attachment_tokens.py b/tests/unit/fast_agent/ui/test_attachment_tokens.py
index 9f939916a..9dd2ef11c 100644
--- a/tests/unit/fast_agent/ui/test_attachment_tokens.py
+++ b/tests/unit/fast_agent/ui/test_attachment_tokens.py
@@ -1,6 +1,12 @@
from __future__ import annotations
-from fast_agent.ui.prompt.attachment_tokens import strip_local_attachment_tokens
+import pytest
+
+from fast_agent.ui.prompt.attachment_tokens import (
+ build_remote_attachment_token,
+ normalize_remote_attachment_reference,
+ strip_local_attachment_tokens,
+)
def test_strip_local_attachment_tokens_preserves_multiline_whitespace() -> None:
@@ -17,3 +23,22 @@ def test_strip_local_attachment_tokens_collapses_only_attachment_gap_between_wor
stripped = strip_local_attachment_tokens(text)
assert stripped == "compare with this"
+
+
+def test_strip_local_attachment_tokens_removes_remote_url_tokens() -> None:
+ text = "compare ^url:https://example.com/cat.png with this"
+
+ stripped = strip_local_attachment_tokens(text)
+
+ assert stripped == "compare with this"
+
+
+def test_build_remote_attachment_token_preserves_query_delimiters() -> None:
+ token = build_remote_attachment_token("https://example.com/cat.png?size=full&v=1")
+
+ assert token == "^url:https://example.com/cat.png?size=full&v=1"
+
+
+def test_normalize_remote_attachment_reference_rejects_non_http_scheme() -> None:
+ with pytest.raises(ValueError, match="Unsupported attachment URI scheme"):
+ normalize_remote_attachment_reference("ftp://example.com/cat.png")
diff --git a/tests/unit/fast_agent/ui/test_command_intent_contract.py b/tests/unit/fast_agent/ui/test_command_intent_contract.py
index c9193b23f..9692af2dc 100644
--- a/tests/unit/fast_agent/ui/test_command_intent_contract.py
+++ b/tests/unit/fast_agent/ui/test_command_intent_contract.py
@@ -169,3 +169,25 @@ def test_parse_special_input_intent_contract(
assert actual.error == expected["error"]
return
assert actual == expected
+
+
+def test_parse_attach_uses_windows_aware_tokenization(monkeypatch: pytest.MonkeyPatch) -> None:
+ monkeypatch.setattr("fast_agent.utils.commandline.os.name", "nt")
+
+ actual = parse_special_input(r'/attach C:\tmp\foo.txt "C:\Program Files\bar.txt"')
+
+ assert actual == AttachCommand(
+ paths=(r"C:\tmp\foo.txt", r"C:\Program Files\bar.txt"),
+ clear=False,
+ error=None,
+ )
+
+
+def test_parse_hash_agent_command_ignores_leading_whitespace() -> None:
+ actual = parse_special_input(" ##review please check this")
+
+ assert actual == HashAgentCommand(
+ agent_name="review",
+ message="please check this",
+ quiet=True,
+ )
diff --git a/tests/unit/fast_agent/ui/test_model_display.py b/tests/unit/fast_agent/ui/test_model_display.py
index 677e5b8ea..1cabc93b5 100644
--- a/tests/unit/fast_agent/ui/test_model_display.py
+++ b/tests/unit/fast_agent/ui/test_model_display.py
@@ -52,15 +52,14 @@ def test_resolve_llm_display_name_uses_wire_model_name_for_anthropic_presets() -
def test_resolve_llm_display_name_marks_anthropic_vertex_route() -> None:
resolved_model = ResolvedModelSpec(
- raw_input="sonnet?via=vertex",
- selected_model_name="sonnet?via=vertex",
+ raw_input="anthropic-vertex.claude-sonnet-4-6",
+ selected_model_name="anthropic-vertex.claude-sonnet-4-6",
source="preset",
model_config=ModelConfig(
- provider=Provider.ANTHROPIC,
+ provider=Provider.ANTHROPIC_VERTEX,
model_name="claude-sonnet-4-6",
- via="vertex",
),
- provider=Provider.ANTHROPIC,
+ provider=Provider.ANTHROPIC_VERTEX,
wire_model_name="claude-sonnet-4-6",
)
@@ -109,7 +108,7 @@ def test_resolve_model_display_name_formats_raw_model_strings() -> None:
)
assert resolve_model_display_name("zai-org/GLM-5:novita") == "GLM-5"
assert (
- resolve_model_display_name("claude-sonnet-4-6?via=vertex")
+ resolve_model_display_name("anthropic-vertex.claude-sonnet-4-6")
== "claude-sonnet-4-6 · Vertex"
)
diff --git a/tests/unit/fast_agent/ui/test_model_picker.py b/tests/unit/fast_agent/ui/test_model_picker.py
index c59e9d0b8..18cae65b6 100644
--- a/tests/unit/fast_agent/ui/test_model_picker.py
+++ b/tests/unit/fast_agent/ui/test_model_picker.py
@@ -324,7 +324,7 @@ def test_snapshot_adds_anthropic_vertex_group_when_ready(monkeypatch) -> None:
assert option.active is True
assert option.option_display_name == "Anthropic (Vertex)"
- assert all("?via=vertex" in entry.model for entry in option.curated_entries)
+ assert all(entry.model.startswith("anthropic-vertex.") for entry in option.curated_entries)
def test_snapshot_disables_anthropic_vertex_group_when_adc_missing(monkeypatch) -> None:
diff --git a/tests/unit/fast_agent/ui/test_model_picker_common.py b/tests/unit/fast_agent/ui/test_model_picker_common.py
index ea6544871..40cf9090b 100644
--- a/tests/unit/fast_agent/ui/test_model_picker_common.py
+++ b/tests/unit/fast_agent/ui/test_model_picker_common.py
@@ -8,6 +8,7 @@
from fast_agent.ui.model_picker_common import (
ModelOption,
build_snapshot,
+ infer_initial_picker_provider,
model_capabilities,
model_options_for_provider,
)
@@ -45,6 +46,12 @@ def test_46_models_do_not_report_optional_long_context() -> None:
assert capabilities.long_context_window is None
+def test_infer_initial_picker_provider_uses_vertex_group_for_anthropic_vertex() -> None:
+ assert (
+ infer_initial_picker_provider("anthropic-vertex.claude-sonnet-4-6") == "anthropic-vertex"
+ )
+
+
def test_build_snapshot_surfaces_overlays_as_a_separate_group(tmp_path: Path) -> None:
env_dir = tmp_path / ".fast-agent"
overlays_dir = env_dir / "model-overlays"
diff --git a/tests/unit/fast_agent/ui/test_prompt_lexer.py b/tests/unit/fast_agent/ui/test_prompt_lexer.py
new file mode 100644
index 000000000..11f589513
--- /dev/null
+++ b/tests/unit/fast_agent/ui/test_prompt_lexer.py
@@ -0,0 +1,33 @@
+from prompt_toolkit.document import Document
+
+from fast_agent.ui.prompt.keybindings import ShellPrefixLexer
+
+
+def test_hash_highlighting_only_applies_to_first_line() -> None:
+ lexer = ShellPrefixLexer()
+ document = Document("#agent hello\n# not a command here")
+
+ tokens = lexer.lex_document(document)
+
+ assert tokens(0) == [("class:comment-command", "#agent hello")]
+ assert tokens(1) == [("", "# not a command here")]
+
+
+def test_later_hash_lines_do_not_trigger_command_highlighting() -> None:
+ lexer = ShellPrefixLexer()
+ document = Document("plain text\n# heading")
+
+ tokens = lexer.lex_document(document)
+
+ assert tokens(0) == [("", "plain text")]
+ assert tokens(1) == [("", "# heading")]
+
+
+def test_shell_highlighting_only_applies_to_first_line() -> None:
+ lexer = ShellPrefixLexer()
+ document = Document("!echo hi\n!not-a-new-command")
+
+ tokens = lexer.lex_document(document)
+
+ assert tokens(0) == [("class:shell-command", "!echo hi")]
+ assert tokens(1) == [("", "!not-a-new-command")]
diff --git a/tests/unit/fast_agent/ui/test_resource_mentions.py b/tests/unit/fast_agent/ui/test_resource_mentions.py
index aa1b71c1f..f809d1bb1 100644
--- a/tests/unit/fast_agent/ui/test_resource_mentions.py
+++ b/tests/unit/fast_agent/ui/test_resource_mentions.py
@@ -3,7 +3,13 @@
import base64
import pytest
-from mcp.types import EmbeddedResource, ImageContent, ReadResourceResult, TextResourceContents
+from mcp.types import (
+ EmbeddedResource,
+ ImageContent,
+ ReadResourceResult,
+ ResourceLink,
+ TextResourceContents,
+)
from pydantic import AnyUrl
from fast_agent.ui.prompt.resource_mentions import (
@@ -92,6 +98,14 @@ def test_parse_mentions_normalizes_local_file_paths(
assert parsed.mentions[0].resource_uri == str(report.resolve())
+def test_parse_mentions_normalizes_remote_urls() -> None:
+ parsed = parse_mentions("Describe ^url:https://example.com/image.png?size=full")
+
+ assert len(parsed.mentions) == 1
+ assert parsed.mentions[0].server_name == "url"
+ assert parsed.mentions[0].resource_uri == "https://example.com/image.png?size=full"
+
+
@pytest.mark.asyncio
async def test_resolve_mentions_builds_embedded_resources() -> None:
parsed = parse_mentions("Read ^demo:file:///tmp/notes.txt")
@@ -132,6 +146,42 @@ async def test_resolve_mentions_builds_local_image_content(tmp_path) -> None:
assert isinstance(prompt.content[1], ImageContent)
+@pytest.mark.asyncio
+async def test_resolve_mentions_builds_remote_url_resource_link_without_agent_support() -> None:
+ parsed = parse_mentions("Describe ^url:https://example.com/image.png")
+
+ resolved = await resolve_mentions(object(), parsed)
+ prompt = build_prompt_with_resources(parsed.text, resolved)
+
+ assert isinstance(prompt.content[1], ResourceLink)
+ assert str(prompt.content[1].uri) == "https://example.com/image.png"
+ assert prompt.content[1].mimeType == "image/png"
+
+
+@pytest.mark.asyncio
+async def test_resolve_mentions_infers_image_type_from_query_and_defaults_to_image() -> None:
+ parsed = parse_mentions(
+ "Describe ^url:https://pbs.twimg.com/media/HCaWzdDWYAArgCf?format=jpg&name=4096x4096"
+ )
+
+ resolved = await resolve_mentions(object(), parsed)
+ prompt = build_prompt_with_resources(parsed.text, resolved)
+
+ assert isinstance(prompt.content[1], ResourceLink)
+ assert prompt.content[1].mimeType == "image/jpeg"
+
+
+@pytest.mark.asyncio
+async def test_resolve_mentions_keeps_unknown_remote_type_questionable() -> None:
+ parsed = parse_mentions("Describe ^url:https://example.com/download")
+
+ resolved = await resolve_mentions(object(), parsed)
+ prompt = build_prompt_with_resources(parsed.text, resolved)
+
+ assert isinstance(prompt.content[1], ResourceLink)
+ assert prompt.content[1].mimeType == "application/octet-stream"
+
+
@pytest.mark.asyncio
async def test_resolve_mentions_raises_on_resource_errors() -> None:
class _FailingAgent:
From 4c9379bfda0e79a020a20911f20cb63b46649387 Mon Sep 17 00:00:00 2001
From: evalstate <1936278+evalstate@users.noreply.github.com>
Date: Sat, 28 Mar 2026 08:04:39 -0400
Subject: [PATCH 7/9] Support agent-client-protocol 0.9.0
---
pyproject.toml | 2 +-
src/fast_agent/acp/__init__.py | 15 ++++++++++++-
src/fast_agent/acp/server/agent_acp_server.py | 22 +++++++++++++------
tests/integration/acp/conftest.py | 11 +++++++---
.../acp/test_acp_auth_integration.py | 4 ++--
tests/integration/acp/test_acp_filesystem.py | 10 ++++-----
tests/integration/acp/test_acp_reload.py | 4 ++--
tests/integration/acp/test_acp_sessions.py | 4 ++--
.../acp/test_acp_slash_commands.py | 20 +++++++++++------
tests/integration/acp/test_acp_watch.py | 4 ++--
tests/integration/acp/test_client.py | 6 ++---
uv.lock | 8 +++----
12 files changed, 71 insertions(+), 39 deletions(-)
diff --git a/pyproject.toml b/pyproject.toml
index e39ba3836..fe3ca5005 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -40,7 +40,7 @@ dependencies = [
"keyring>=24.3.1",
"python-frontmatter>=1.1.0",
"watchfiles>=1.1.0",
- "agent-client-protocol>=0.8.1",
+ "agent-client-protocol==0.9.0",
"tiktoken>=0.12.0",
"uvloop>=0.22.1; platform_system != 'Windows'",
"multilspy>=0.0.15",
diff --git a/src/fast_agent/acp/__init__.py b/src/fast_agent/acp/__init__.py
index 7be2c3490..ad46a194d 100644
--- a/src/fast_agent/acp/__init__.py
+++ b/src/fast_agent/acp/__init__.py
@@ -1,11 +1,15 @@
"""Agent Client Protocol (ACP) support for fast-agent."""
+from typing import TYPE_CHECKING, Any
+
from fast_agent.acp.acp_aware_mixin import ACPAwareMixin, ACPCommand, ACPModeInfo
from fast_agent.acp.acp_context import ACPContext, ClientCapabilities, ClientInfo
from fast_agent.acp.filesystem_runtime import ACPFilesystemRuntime
-from fast_agent.acp.server.agent_acp_server import AgentACPServer
from fast_agent.acp.terminal_runtime import ACPTerminalRuntime
+if TYPE_CHECKING: # pragma: no cover - type checking only
+ from fast_agent.acp.server.agent_acp_server import AgentACPServer as AgentACPServer
+
__all__ = [
"ACPCommand",
"ACPModeInfo",
@@ -17,3 +21,12 @@
"ACPFilesystemRuntime",
"ACPTerminalRuntime",
]
+
+
+def __getattr__(name: str) -> Any:
+ if name == "AgentACPServer":
+ from fast_agent.acp.server.agent_acp_server import AgentACPServer
+
+ return AgentACPServer
+
+ raise AttributeError(f"module '{__name__}' has no attribute '{name}'")
diff --git a/src/fast_agent/acp/server/agent_acp_server.py b/src/fast_agent/acp/server/agent_acp_server.py
index 41dc6d3c0..60257952c 100644
--- a/src/fast_agent/acp/server/agent_acp_server.py
+++ b/src/fast_agent/acp/server/agent_acp_server.py
@@ -36,9 +36,10 @@
AgentCapabilities,
AgentMessageChunk,
AuthenticateResponse,
- AuthMethod,
+ AuthMethodAgent,
AvailableCommandsUpdate,
ClientCapabilities,
+ EnvVarAuthMethod,
HttpMcpServer,
Implementation,
ListSessionsResponse,
@@ -54,6 +55,7 @@
SessionResumeCapabilities,
SseMcpServer,
StopReason,
+ TerminalAuthMethod,
UserMessageChunk,
)
from acp.schema import (
@@ -569,11 +571,10 @@ async def initialize(
# Minimal "agent auth" hint for ACP clients.
#
- # Per ACP RFD auth-methods, the default type is "agent" when no type is provided.
- # We keep this strictly within the current AuthMethod schema (id/name/description)
- # to avoid requiring client/SDK support for typed auth metadata yet.
- auth_methods = [
- AuthMethod(
+ # In ACP 0.9.x this uses the explicit agent auth schema, but we still
+ # keep it to the minimal id/name/description shape.
+ auth_methods: list[EnvVarAuthMethod | TerminalAuthMethod | AuthMethodAgent] = [
+ AuthMethodAgent(
id=ACP_AUTH_METHOD_ID,
name="Configure fast-agent",
description=(
@@ -2099,6 +2100,7 @@ async def prompt(
self,
prompt: list[ACPContentBlock],
session_id: str,
+ message_id: str | None = None,
**kwargs: Any,
) -> PromptResponse:
"""Handle prompt request.
@@ -2111,7 +2113,12 @@ async def prompt(
"""
prompt_lock = await self._get_prompt_lock(session_id)
async with prompt_lock:
- return await self._prompt_locked(prompt=prompt, session_id=session_id, **kwargs)
+ return await self._prompt_locked(
+ prompt=prompt,
+ session_id=session_id,
+ message_id=message_id,
+ **kwargs,
+ )
async def _get_prompt_lock(self, session_id: str) -> asyncio.Lock:
"""Get/create the lock used to serialize prompts for a session."""
@@ -2126,6 +2133,7 @@ async def _prompt_locked(
self,
prompt: list[ACPContentBlock],
session_id: str,
+ message_id: str | None = None,
**kwargs: Any,
) -> PromptResponse:
"""
diff --git a/tests/integration/acp/conftest.py b/tests/integration/acp/conftest.py
index cea3b384e..fb2e9be3b 100644
--- a/tests/integration/acp/conftest.py
+++ b/tests/integration/acp/conftest.py
@@ -7,7 +7,12 @@
from typing import TYPE_CHECKING, AsyncIterator
import pytest_asyncio
-from acp.schema import ClientCapabilities, FileSystemCapability, Implementation, InitializeResponse
+from acp.schema import (
+ ClientCapabilities,
+ FileSystemCapabilities,
+ Implementation,
+ InitializeResponse,
+)
from acp.stdio import spawn_agent_process
TEST_DIR = Path(__file__).parent
@@ -69,7 +74,7 @@ async def _spawn_initialized_agent(
_process,
protocol_version=1,
client_capabilities=ClientCapabilities(
- fs=FileSystemCapability(read_text_file=fs_read, write_text_file=fs_write),
+ fs=FileSystemCapabilities(read_text_file=fs_read, write_text_file=fs_write),
terminal=terminal,
),
client_info=Implementation(name=client_name, version=client_version),
@@ -94,7 +99,7 @@ async def _initialize_agent(
protocol_version: int,
client_capabilities: ClientCapabilities,
client_info: Implementation,
- timeout: float = 5.0,
+ timeout: float = 10.0,
) -> InitializeResponse:
try:
return await asyncio.wait_for(
diff --git a/tests/integration/acp/test_acp_auth_integration.py b/tests/integration/acp/test_acp_auth_integration.py
index e74a7e7ad..30745ee12 100644
--- a/tests/integration/acp/test_acp_auth_integration.py
+++ b/tests/integration/acp/test_acp_auth_integration.py
@@ -7,7 +7,7 @@
import pytest
from acp.exceptions import RequestError
from acp.helpers import text_block
-from acp.schema import ClientCapabilities, FileSystemCapability, Implementation
+from acp.schema import ClientCapabilities, FileSystemCapabilities, Implementation
from acp.stdio import spawn_agent_process
TEST_DIR = Path(__file__).parent
@@ -69,7 +69,7 @@ async def test_acp_initialize_survives_missing_provider_keys_and_prompts_fail_la
process,
protocol_version=1,
client_capabilities=ClientCapabilities(
- fs=FileSystemCapability(read_text_file=True, write_text_file=True),
+ fs=FileSystemCapabilities(read_text_file=True, write_text_file=True),
terminal=False,
),
client_info=Implementation(name="pytest-auth-client", version="0.0.1"),
diff --git a/tests/integration/acp/test_acp_filesystem.py b/tests/integration/acp/test_acp_filesystem.py
index 5ccb2f12b..c396a34f9 100644
--- a/tests/integration/acp/test_acp_filesystem.py
+++ b/tests/integration/acp/test_acp_filesystem.py
@@ -8,7 +8,7 @@
import pytest
from acp.helpers import text_block
-from acp.schema import ClientCapabilities, FileSystemCapability, Implementation, StopReason
+from acp.schema import ClientCapabilities, FileSystemCapabilities, Implementation, StopReason
TEST_DIR = Path(__file__).parent
if str(TEST_DIR) not in sys.path:
@@ -63,7 +63,7 @@ async def test_acp_filesystem_support_enabled() -> None:
init_response = await connection.initialize(
protocol_version=1,
client_capabilities=ClientCapabilities(
- fs=FileSystemCapability(read_text_file=True, write_text_file=True),
+ fs=FileSystemCapabilities(read_text_file=True, write_text_file=True),
terminal=False,
),
client_info=Implementation(name="pytest-filesystem-client", version="0.0.1"),
@@ -111,7 +111,7 @@ async def test_acp_filesystem_read_only() -> None:
await connection.initialize(
protocol_version=1,
client_capabilities=ClientCapabilities(
- fs=FileSystemCapability(read_text_file=True, write_text_file=False),
+ fs=FileSystemCapabilities(read_text_file=True, write_text_file=False),
terminal=False,
),
client_info=Implementation(name="pytest-filesystem-client", version="0.0.1"),
@@ -141,7 +141,7 @@ async def test_acp_filesystem_write_only() -> None:
await connection.initialize(
protocol_version=1,
client_capabilities=ClientCapabilities(
- fs=FileSystemCapability(read_text_file=False, write_text_file=True),
+ fs=FileSystemCapabilities(read_text_file=False, write_text_file=True),
terminal=False,
),
client_info=Implementation(name="pytest-filesystem-client", version="0.0.1"),
@@ -171,7 +171,7 @@ async def test_acp_filesystem_disabled_when_client_unsupported() -> None:
await connection.initialize(
protocol_version=1,
client_capabilities=ClientCapabilities(
- fs=FileSystemCapability(read_text_file=False, write_text_file=False),
+ fs=FileSystemCapabilities(read_text_file=False, write_text_file=False),
terminal=False,
),
client_info=Implementation(name="pytest-filesystem-client", version="0.0.1"),
diff --git a/tests/integration/acp/test_acp_reload.py b/tests/integration/acp/test_acp_reload.py
index 26eeb8f0c..7f3aea35c 100644
--- a/tests/integration/acp/test_acp_reload.py
+++ b/tests/integration/acp/test_acp_reload.py
@@ -7,7 +7,7 @@
import pytest
from acp.helpers import text_block
-from acp.schema import ClientCapabilities, FileSystemCapability, Implementation
+from acp.schema import ClientCapabilities, FileSystemCapabilities, Implementation
from acp.stdio import spawn_agent_process
TEST_DIR = Path(__file__).parent
@@ -104,7 +104,7 @@ async def test_acp_reload_agent_cards(tmp_path: Path) -> None:
await connection.initialize(
protocol_version=1,
client_capabilities=ClientCapabilities(
- fs=FileSystemCapability(read_text_file=True, write_text_file=True),
+ fs=FileSystemCapabilities(read_text_file=True, write_text_file=True),
terminal=False,
),
client_info=Implementation(name="pytest-client", version="0.0.1"),
diff --git a/tests/integration/acp/test_acp_sessions.py b/tests/integration/acp/test_acp_sessions.py
index 678f10100..dec9fb2ce 100644
--- a/tests/integration/acp/test_acp_sessions.py
+++ b/tests/integration/acp/test_acp_sessions.py
@@ -10,7 +10,7 @@
import pytest
from acp.exceptions import RequestError
from acp.helpers import text_block
-from acp.schema import ClientCapabilities, FileSystemCapability, Implementation
+from acp.schema import ClientCapabilities, FileSystemCapabilities, Implementation
from acp.stdio import spawn_agent_process
from mcp.types import TextContent
@@ -512,7 +512,7 @@ async def _initialize_connection(connection: "ClientSideConnection") -> None:
await connection.initialize(
protocol_version=1,
client_capabilities=ClientCapabilities(
- fs=FileSystemCapability(read_text_file=True, write_text_file=True),
+ fs=FileSystemCapabilities(read_text_file=True, write_text_file=True),
terminal=False,
),
client_info=Implementation(name="pytest-client", version="0.0.1"),
diff --git a/tests/integration/acp/test_acp_slash_commands.py b/tests/integration/acp/test_acp_slash_commands.py
index f24bec163..bfa46a84e 100644
--- a/tests/integration/acp/test_acp_slash_commands.py
+++ b/tests/integration/acp/test_acp_slash_commands.py
@@ -808,15 +808,21 @@ async def test_slash_command_history_detail_turn() -> None:
async def test_slash_command_session_list_no_sessions(tmp_path, monkeypatch) -> None:
"""Test /session list output when no sessions exist."""
monkeypatch.chdir(tmp_path)
+ old_settings = get_settings()
+ env_dir = tmp_path / "env"
+ monkeypatch.setenv("ENVIRONMENT_DIR", str(env_dir))
+ override = old_settings.model_copy(update={"environment_dir": str(env_dir)})
+ update_global_settings(override)
+ reset_session_manager()
- import fast_agent.session.session_manager as session_module
-
- monkeypatch.setattr(session_module, "_session_manager", None)
-
- handler = _handler(StubAgentInstance())
- response = await handler.execute_command("session", "list")
+ try:
+ handler = _handler(StubAgentInstance())
+ response = await handler.execute_command("session", "list")
- assert "no sessions" in response.lower()
+ assert "no sessions" in response.lower()
+ finally:
+ update_global_settings(old_settings)
+ reset_session_manager()
@pytest.mark.integration
diff --git a/tests/integration/acp/test_acp_watch.py b/tests/integration/acp/test_acp_watch.py
index b58a67d70..6164df9c8 100644
--- a/tests/integration/acp/test_acp_watch.py
+++ b/tests/integration/acp/test_acp_watch.py
@@ -7,7 +7,7 @@
import pytest
from acp.helpers import text_block
-from acp.schema import ClientCapabilities, FileSystemCapability, Implementation, StopReason
+from acp.schema import ClientCapabilities, FileSystemCapabilities, Implementation, StopReason
from acp.stdio import spawn_agent_process
TEST_DIR = Path(__file__).parent
@@ -61,7 +61,7 @@ async def test_acp_watch_allows_prompt_reload(tmp_path: Path) -> None:
init_response: InitializeResponse = await connection.initialize(
protocol_version=1,
client_capabilities=ClientCapabilities(
- fs=FileSystemCapability(read_text_file=True, write_text_file=True),
+ fs=FileSystemCapabilities(read_text_file=True, write_text_file=True),
terminal=False,
),
client_info=Implementation(name="pytest-client", version="0.0.1"),
diff --git a/tests/integration/acp/test_client.py b/tests/integration/acp/test_client.py
index bfa08b67b..480bfaf84 100644
--- a/tests/integration/acp/test_client.py
+++ b/tests/integration/acp/test_client.py
@@ -9,7 +9,7 @@
CreateTerminalResponse,
DeniedOutcome,
EnvVariable,
- KillTerminalCommandResponse,
+ KillTerminalResponse,
PermissionOption,
ReadTextFileResponse,
ReleaseTerminalResponse,
@@ -215,12 +215,12 @@ async def kill_terminal(
session_id: str,
terminal_id: str,
**kwargs: Any,
- ) -> KillTerminalCommandResponse | None:
+ ) -> KillTerminalResponse | None:
"""Kill a running terminal."""
if terminal_id in self.terminals:
self.terminals[terminal_id]["exit_code"] = -1
self.terminals[terminal_id]["completed"] = True
- return KillTerminalCommandResponse()
+ return KillTerminalResponse()
async def ext_method(self, method: str, params: dict[str, Any]) -> dict[str, Any]:
self.ext_calls.append((method, params))
diff --git a/uv.lock b/uv.lock
index a3b6324db..f027a473f 100644
--- a/uv.lock
+++ b/uv.lock
@@ -27,14 +27,14 @@ wheels = [
[[package]]
name = "agent-client-protocol"
-version = "0.8.1"
+version = "0.9.0"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "pydantic" },
]
-sdist = { url = "https://files.pythonhosted.org/packages/1b/7b/7cdac86db388809d9e3bc58cac88cc7dfa49b7615b98fab304a828cd7f8a/agent_client_protocol-0.8.1.tar.gz", hash = "sha256:1bbf15663bf51f64942597f638e32a6284c5da918055d9672d3510e965143dbd", size = 68866, upload-time = "2026-02-13T15:34:54.567Z" }
+sdist = { url = "https://files.pythonhosted.org/packages/eb/13/3b893421369767e7043cc115d6ef0df417c298b84563be3a12df0416158d/agent_client_protocol-0.9.0.tar.gz", hash = "sha256:f744c48ab9af0f0b4452e5ab5498d61bcab97c26dbe7d6feec5fd36de49be30b", size = 71853, upload-time = "2026-03-26T01:21:00.379Z" }
wheels = [
- { url = "https://files.pythonhosted.org/packages/4b/f3/219eeca0ad4a20843d4b9eaac5532f87018b9d25730a62a16f54f6c52d1a/agent_client_protocol-0.8.1-py3-none-any.whl", hash = "sha256:9421a11fd435b4831660272d169c3812d553bb7247049c138c3ca127e4b8af8e", size = 54529, upload-time = "2026-02-13T15:34:53.344Z" },
+ { url = "https://files.pythonhosted.org/packages/8f/ed/c284543c08aa443a4ef2c8bd120be51da8433dd174c01749b5d87c333f22/agent_client_protocol-0.9.0-py3-none-any.whl", hash = "sha256:06911500b51d8cb69112544e2be01fc5e7db39ef88fecbc3848c5c6f194798ee", size = 56850, upload-time = "2026-03-26T01:20:59.252Z" },
]
[[package]]
@@ -788,7 +788,7 @@ dev = [
[package.metadata]
requires-dist = [
{ name = "a2a-sdk", specifier = ">=0.3.16" },
- { name = "agent-client-protocol", specifier = ">=0.8.1" },
+ { name = "agent-client-protocol", specifier = "==0.9.0" },
{ name = "aiohttp", specifier = ">=3.13.2" },
{ name = "anthropic", extras = ["vertex"], specifier = ">=0.86.0" },
{ name = "azure-identity", marker = "extra == 'all-providers'", specifier = ">=1.14.0" },
From 0e4d3ace8f0f91317e0d362cf85a794db7be90c8 Mon Sep 17 00:00:00 2001
From: evalstate <1936278+evalstate@users.noreply.github.com>
Date: Sat, 28 Mar 2026 08:19:06 -0400
Subject: [PATCH 8/9] acp tweak, input path resolution for non CWD attach
---
src/fast_agent/acp/server/agent_acp_server.py | 42 ++++++-
.../llm/provider/anthropic/vertex_config.py | 31 +++--
.../llm/provider/openai/llm_openai.py | 114 ++++++++++++++++--
.../openai/multipart_converter_openai.py | 46 ++++++-
src/fast_agent/ui/attachment_indicator.py | 14 +--
.../ui/interactive/command_dispatch.py | 10 +-
src/fast_agent/ui/interactive_prompt.py | 10 +-
src/fast_agent/ui/prompt/completer.py | 9 +-
src/fast_agent/ui/prompt/input.py | 13 ++
src/fast_agent/ui/prompt/input_toolbar.py | 1 +
src/fast_agent/ui/prompt/resource_mentions.py | 18 ++-
tests/unit/acp/test_prompt_sequencing.py | 66 ++++++++++
.../test_multipart_converter_openai.py | 87 ++++++++++++-
.../llm/test_model_selection_catalog.py | 15 +++
.../fast_agent/ui/test_agent_completer.py | 42 +++++++
.../ui/test_attachment_indicator.py | 15 +++
...st_interactive_prompt_resource_mentions.py | 78 +++++++++++-
tests/unit/fast_agent/ui/test_model_picker.py | 23 ++++
.../fast_agent/ui/test_resource_mentions.py | 13 ++
19 files changed, 603 insertions(+), 44 deletions(-)
diff --git a/src/fast_agent/acp/server/agent_acp_server.py b/src/fast_agent/acp/server/agent_acp_server.py
index 60257952c..11003db9f 100644
--- a/src/fast_agent/acp/server/agent_acp_server.py
+++ b/src/fast_agent/acp/server/agent_acp_server.py
@@ -2129,6 +2129,23 @@ async def _get_prompt_lock(self, session_id: str) -> asyncio.Lock:
self._prompt_locks[session_id] = lock
return lock
+ async def _send_prompt_user_updates(
+ self,
+ *,
+ session_id: str,
+ prompt: Sequence[ACPContentBlock],
+ message_id: str | None,
+ ) -> None:
+ """Acknowledge the accepted user turn through session updates."""
+ if not self._connection:
+ return
+
+ for block in prompt:
+ update = update_user_message(block)
+ if message_id:
+ update.message_id = message_id
+ await self._connection.session_update(session_id=session_id, update=update)
+
async def _prompt_locked(
self,
prompt: list[ACPContentBlock],
@@ -2184,6 +2201,20 @@ async def _prompt_locked(
# Inline resource URIs for slash commands (e.g., /card @file.txt)
processed_prompt = inline_resources_for_slash_command(prompt)
+ if self._connection and processed_prompt:
+ try:
+ await self._send_prompt_user_updates(
+ session_id=session_id,
+ prompt=processed_prompt,
+ message_id=message_id,
+ )
+ except Exception as e:
+ logger.error(
+ f"Error sending prompt acknowledgement update: {e}",
+ name="acp_prompt_ack_update_error",
+ exc_info=True,
+ )
+
# Convert ACP content blocks to MCP format
mcp_content_blocks = convert_acp_prompt_to_mcp_content_blocks(processed_prompt)
@@ -2252,7 +2283,10 @@ async def _prompt_locked(
)
# Return success
- return PromptResponse(stop_reason=END_TURN)
+ return PromptResponse(
+ stop_reason=END_TURN,
+ user_message_id=message_id,
+ )
logger.info(
"Sending prompt to fast-agent",
@@ -2512,6 +2546,7 @@ async def after_llm_call(_runner, message):
return PromptResponse(
stop_reason=acp_stop_reason,
field_meta=status_line_meta,
+ user_message_id=message_id,
)
except asyncio.CancelledError:
# Task was cancelled - return appropriate response
@@ -2522,7 +2557,10 @@ async def after_llm_call(_runner, message):
name="acp_prompt_cancelled",
session_id=session_id,
)
- return PromptResponse(stop_reason="cancelled")
+ return PromptResponse(
+ stop_reason="cancelled",
+ user_message_id=message_id,
+ )
finally:
# Always remove session from active prompts and cleanup task
write_interactive_trace("acp.prompt.finally", session_id=session_id)
diff --git a/src/fast_agent/llm/provider/anthropic/vertex_config.py b/src/fast_agent/llm/provider/anthropic/vertex_config.py
index 3db03e17e..ff93d1800 100644
--- a/src/fast_agent/llm/provider/anthropic/vertex_config.py
+++ b/src/fast_agent/llm/provider/anthropic/vertex_config.py
@@ -53,6 +53,14 @@ def _clean_str(value: Any) -> str | None:
return stripped or None
+def _env_value(env_vars: tuple[str, ...]) -> str | None:
+ for env_var in env_vars:
+ value = _clean_str(os.getenv(env_var))
+ if value is not None:
+ return value
+ return None
+
+
def anthropic_vertex_source(config: Any) -> Any:
anthropic_cfg = _get_value(config, "anthropic")
return _get_value(anthropic_cfg, "vertex_ai")
@@ -73,7 +81,14 @@ def anthropic_vertex_config(config: Any) -> AnthropicVertexConfig:
def anthropic_vertex_intent(config: Any) -> bool:
cfg = anthropic_vertex_config(config)
- return bool(cfg.enabled or cfg.project_id or cfg.location or cfg.base_url)
+ return bool(
+ cfg.enabled
+ or cfg.project_id
+ or cfg.location
+ or cfg.base_url
+ or _env_value(_VERTEX_PROJECT_ENV_VARS)
+ or _env_value(_VERTEX_LOCATION_ENV_VARS)
+ )
def anthropic_vertex_enabled(config: Any) -> bool:
@@ -104,10 +119,9 @@ def resolve_anthropic_vertex_project_id(
if cfg.project_id is not None:
return cfg.project_id
- for env_var in _VERTEX_PROJECT_ENV_VARS:
- value = _clean_str(os.getenv(env_var))
- if value is not None:
- return value
+ env_project = _env_value(_VERTEX_PROJECT_ENV_VARS)
+ if env_project is not None:
+ return env_project
if adc_status is None:
adc_status = detect_google_adc()
@@ -119,10 +133,9 @@ def resolve_anthropic_vertex_location(config: Any) -> str | None:
if cfg.location is not None:
return cfg.location
- for env_var in _VERTEX_LOCATION_ENV_VARS:
- value = _clean_str(os.getenv(env_var))
- if value is not None:
- return value
+ env_location = _env_value(_VERTEX_LOCATION_ENV_VARS)
+ if env_location is not None:
+ return env_location
return "global"
def anthropic_vertex_ready(
diff --git a/src/fast_agent/llm/provider/openai/llm_openai.py b/src/fast_agent/llm/provider/openai/llm_openai.py
index 8478b329c..2ce2d96de 100644
--- a/src/fast_agent/llm/provider/openai/llm_openai.py
+++ b/src/fast_agent/llm/provider/openai/llm_openai.py
@@ -2,6 +2,7 @@
from pathlib import Path
from typing import Any, cast
+import httpx
from mcp import Tool
from mcp.types import (
CallToolRequest,
@@ -40,6 +41,7 @@
stream_capture_filename as _stream_capture_filename,
)
from fast_agent.llm.provider.openai.multipart_converter_openai import OpenAIConverter
+from fast_agent.llm.provider.openai.responses_files import ResponsesFileMixin
from fast_agent.llm.provider.openai.schema_sanitizer import (
sanitize_tool_input_schema,
should_strip_tool_schema_defaults,
@@ -51,6 +53,7 @@
from fast_agent.llm.stream_types import StreamChunk
from fast_agent.llm.usage_tracking import TurnUsage
from fast_agent.mcp.helpers.content_helpers import get_text
+from fast_agent.mcp.mime_utils import guess_mime_type
from fast_agent.types import LlmStopReason, PromptMessageExtended
_logger = get_logger(__name__)
@@ -59,11 +62,15 @@
class EmptyStreamError(RuntimeError):
"""Raised when a streaming response yields no chunks."""
+
DEFAULT_OPENAI_MODEL = "gpt-5-mini"
DEFAULT_REASONING_EFFORT = "low"
+
class OpenAILLM(
- OpenAIToolNotificationMixin, FastAgentLLM[ChatCompletionMessageParam, ChatCompletionMessage]
+ OpenAIToolNotificationMixin,
+ ResponsesFileMixin,
+ FastAgentLLM[ChatCompletionMessageParam, ChatCompletionMessage],
):
# Config section name override (falls back to provider value)
config_section: str | None = None
@@ -87,6 +94,7 @@ def __init__(self, provider: Provider = Provider.OPENAI, **kwargs) -> None:
# Initialize logger with name if available
self.logger = get_logger(f"{__name__}.{self.name}" if self.name else __name__)
+ self._file_id_cache: dict[str, str] = {}
# Set up reasoning-related attributes
raw_setting = kwargs.get("reasoning_effort", None)
@@ -123,6 +131,88 @@ def __init__(self, provider: Provider = Provider.OPENAI, **kwargs) -> None:
f"'{format_reasoning_setting(self.reasoning_effort)}' reasoning effort"
)
+    async def _download_remote_file(
+        self,
+        file_url: str,
+    ) -> tuple[bytes | None, str | None]:
+        """Fetch *file_url* over HTTP(S), returning ``(bytes, mime_type)``.
+
+        Follows redirects with a 30-second timeout. On any failure the
+        error is logged and ``(None, None)`` is returned so attachment
+        handling stays best-effort instead of failing the completion.
+        """
+        try:
+            async with httpx.AsyncClient(follow_redirects=True, timeout=30.0) as client:
+                response = await client.get(file_url)
+                response.raise_for_status()
+        except Exception as exc:
+            self.logger.warning(
+                "Failed to download remote attachment for OpenAI chat completions",
+                data={"url": file_url, "error": str(exc)},
+            )
+            return None, None
+
+        # Strip any "; charset=..." parameters from the Content-Type header.
+        content_type = response.headers.get("content-type", "").split(";", 1)[0].strip() or None
+        return response.content, content_type
+
+    async def _normalize_chat_completion_files(
+        self,
+        client: AsyncOpenAI,
+        messages: list[ChatCompletionMessageParam],
+    ) -> list[ChatCompletionMessageParam]:
+        """Replace ``file_url`` file parts with uploaded ``file_id`` parts.
+
+        Chat Completions cannot fetch file URLs itself, so each ``file``
+        content part carrying a ``file_url`` (data:, file://, or http(s))
+        is materialized to bytes and uploaded via ``_upload_file_bytes``;
+        the part is rewritten to reference the resulting ``file_id``.
+        Parts that cannot be resolved are passed through unchanged, and
+        untouched messages are returned as-is.
+        """
+        normalized: list[ChatCompletionMessageParam] = []
+        for message in messages:
+            content = message.get("content")
+            # String content (or absent content) has no file parts to rewrite.
+            if not isinstance(content, list):
+                normalized.append(message)
+                continue
+
+            updated_content: list[Any] = []
+            changed = False
+            for part in content:
+                if not isinstance(part, dict) or part.get("type") != "file":
+                    updated_content.append(part)
+                    continue
+
+                file_obj = part.get("file")
+                if not isinstance(file_obj, dict):
+                    updated_content.append(part)
+                    continue
+
+                file_url = file_obj.get("file_url")
+                if not isinstance(file_url, str) or not file_url:
+                    # Already a file_id/file_data part (or malformed): keep as-is.
+                    updated_content.append(part)
+                    continue
+
+                filename = file_obj.get("filename")
+                if not isinstance(filename, str) or not filename:
+                    filename = None
+
+                data_bytes: bytes | None = None
+                mime_type: str | None = None
+                if file_url.startswith("data:"):
+                    data_bytes, mime_type = self._decode_file_data(file_url)
+                elif file_url.startswith("file://"):
+                    # NOTE(review): naive prefix strip leaves a leading slash on
+                    # Windows drive URIs (file:///C:/...) — confirm whether
+                    # urllib.request.url2pathname is needed here.
+                    local_path = Path(file_url[len("file://") :])
+                    if local_path.exists():
+                        data_bytes = local_path.read_bytes()
+                        filename = filename or local_path.name
+                        mime_type = guess_mime_type(local_path.name)
+                elif file_url.startswith(("http://", "https://")):
+                    data_bytes, mime_type = await self._download_remote_file(file_url)
+
+                if data_bytes is None:
+                    # Resolution failed; leave the original part untouched.
+                    updated_content.append(part)
+                    continue
+
+                mime_type = mime_type or guess_mime_type(filename or file_url)
+                file_id = await self._upload_file_bytes(client, data_bytes, filename, mime_type)
+                updated_content.append({"type": "file", "file": {"file_id": file_id}})
+                changed = True
+
+            if changed:
+                # Shallow-copy the message so the caller's input is not mutated.
+                message = cast(
+                    "ChatCompletionMessageParam",
+                    {**message, "content": updated_content},
+                )
+            normalized.append(message)
+
+        return normalized
+
def _resolve_reasoning_effort(self) -> str | None:
setting = self.reasoning_effort
if setting is None:
@@ -883,17 +973,23 @@ async def _openai_completion(
if not self._reasoning and request_params.stopSequences:
arguments["stop"] = request_params.stopSequences
- self.logger.debug(f"OpenAI completion requested for: {arguments}")
-
- self._log_chat_progress(self.chat_turn(), model=model_name)
-
- # Generate stream capture filename once (before streaming starts)
- capture_filename = _stream_capture_filename(self.chat_turn())
- _save_stream_request(capture_filename, arguments)
-
# Use basic streaming API with context manager to properly close aiohttp session
try:
async with self._openai_client() as client:
+ messages_arg = arguments.get("messages")
+ if isinstance(messages_arg, list):
+ arguments = dict(arguments)
+ arguments["messages"] = await self._normalize_chat_completion_files(
+ client, messages_arg
+ )
+
+ self.logger.debug(f"OpenAI completion requested for: {arguments}")
+ self._log_chat_progress(self.chat_turn(), model=model_name)
+
+ # Generate stream capture filename once (before streaming starts)
+ capture_filename = _stream_capture_filename(self.chat_turn())
+ _save_stream_request(capture_filename, arguments)
+
stream = await client.chat.completions.create(**arguments)
timeout = request_params.streaming_timeout
timed_stream = with_stream_idle_timeout(
diff --git a/src/fast_agent/llm/provider/openai/multipart_converter_openai.py b/src/fast_agent/llm/provider/openai/multipart_converter_openai.py
index 05ef8d33d..98609dba2 100644
--- a/src/fast_agent/llm/provider/openai/multipart_converter_openai.py
+++ b/src/fast_agent/llm/provider/openai/multipart_converter_openai.py
@@ -32,6 +32,7 @@
)
from fast_agent.mcp.mime_utils import (
guess_mime_type,
+ is_document_mime_type,
is_image_mime_type,
is_text_mime_type,
)
@@ -201,6 +202,14 @@ def _convert_content_to_message(
content_blocks.append(
{"type": "image_url", "image_url": {"url": str(uri)}}
)
+ elif (
+ uri
+ and mime_type
+ and is_document_mime_type(mime_type)
+ ):
+ content_blocks.append(
+ OpenAIConverter._convert_resource_link_document(item, str(uri))
+ )
else:
text = get_text(item)
if text:
@@ -372,11 +381,7 @@ def _convert_embedded_resource(
# Handle PDFs
elif mime_type == "application/pdf":
if is_url and uri_str:
- # OpenAI doesn't directly support PDF URLs, explain this limitation
- return {
- "type": "text",
- "text": f"[PDF URL: {uri_str}]\nOpenAI requires PDF files to be uploaded or provided as base64 data.",
- }
+ return OpenAIConverter._build_file_part(title or "document.pdf", file_url=uri_str)
elif hasattr(resource_content, "blob"):
return {
"type": "file",
@@ -426,6 +431,37 @@ def _convert_embedded_resource(
"text": f"[Unsupported resource: {title} ({mime_type})]",
}
+    @staticmethod
+    def _build_file_part(
+        filename: str,
+        *,
+        file_data: str | None = None,
+        file_url: str | None = None,
+    ) -> ContentBlock:
+        """Build an OpenAI ``file`` content part for *filename*.
+
+        Includes ``file_data`` and/or ``file_url`` keys only when the
+        corresponding argument is non-empty.
+        """
+        file_block: dict[str, str] = {"filename": filename}
+        if file_data:
+            file_block["file_data"] = file_data
+        if file_url:
+            file_block["file_url"] = file_url
+        return {"type": "file", "file": file_block}
+
+    @staticmethod
+    def _convert_resource_link_document(
+        resource,
+        uri_str: str,
+    ) -> ContentBlock:
+        """Convert a document ResourceLink into an OpenAI ``file`` part.
+
+        The filename falls back from the resource's name to a title
+        derived from its URI, then to the literal ``"document"``.
+        """
+        # Local import avoids a module-level import cycle.
+        from fast_agent.mcp.resource_utils import extract_title_from_uri
+
+        filename = (
+            resource.name
+            or extract_title_from_uri(resource.uri)
+            or "document"
+        )
+        return OpenAIConverter._build_file_part(
+            filename,
+            file_url=uri_str,
+        )
+
@staticmethod
def _extract_text_from_content_blocks(
content: OpenAITextExtractableContent,
diff --git a/src/fast_agent/ui/attachment_indicator.py b/src/fast_agent/ui/attachment_indicator.py
index fabf4324d..d36926188 100644
--- a/src/fast_agent/ui/attachment_indicator.py
+++ b/src/fast_agent/ui/attachment_indicator.py
@@ -6,7 +6,7 @@
from pathlib import Path
from typing import TYPE_CHECKING
-from fast_agent.llm.model_database import ModelDatabase
+from fast_agent.llm.model_info import ModelInfo
from fast_agent.mcp.helpers.content_helpers import resource_link
from fast_agent.mcp.mime_utils import guess_mime_type
@@ -31,11 +31,12 @@ def summarize_draft_attachments(
*,
model_name: str | None,
provider: Provider | None = None,
+ cwd: Path | None = None,
) -> DraftAttachmentSummary | None:
from fast_agent.ui.prompt.attachment_tokens import FILE_MENTION_SERVER, URL_MENTION_SERVER
from fast_agent.ui.prompt.resource_mentions import parse_mentions
- parsed = parse_mentions(text)
+ parsed = parse_mentions(text, cwd=cwd)
attachment_mentions = [
mention
for mention in parsed.mentions
@@ -44,6 +45,7 @@ def summarize_draft_attachments(
if not attachment_mentions:
return None
+ model_info = ModelInfo.from_name(model_name, provider=provider) if model_name else None
mime_types: list[str] = []
any_questionable = False
for mention in attachment_mentions:
@@ -53,10 +55,8 @@ def summarize_draft_attachments(
if mime_type == "application/octet-stream":
any_questionable = True
continue
- if model_name and not ModelDatabase.supports_mime(
- model_name,
+ if model_info and not model_info.supports_mime(
mime_type,
- provider=provider,
resource_source="link",
):
any_questionable = True
@@ -77,10 +77,8 @@ def summarize_draft_attachments(
if mime_type == "application/octet-stream":
any_questionable = True
continue
- if model_name and not ModelDatabase.supports_mime(
- model_name,
+ if model_info and not model_info.supports_mime(
mime_type,
- provider=provider,
resource_source="embedded",
):
any_questionable = True
diff --git a/src/fast_agent/ui/interactive/command_dispatch.py b/src/fast_agent/ui/interactive/command_dispatch.py
index 2ab9e252e..4166c8e86 100644
--- a/src/fast_agent/ui/interactive/command_dispatch.py
+++ b/src/fast_agent/ui/interactive/command_dispatch.py
@@ -83,6 +83,8 @@
from .mcp_connect_flow import handle_mcp_connect
if TYPE_CHECKING:
+ from pathlib import Path
+
from fast_agent.core.agent_app import AgentApp
from fast_agent.ui.interactive_prompt import InteractivePrompt
@@ -142,6 +144,7 @@ async def _dispatch_local_ui_payload(
available_agents_set: set[str],
agent_name: str,
buffer_prefill: str,
+ shell_working_dir: Path | None = None,
) -> DispatchResult | None:
result = DispatchResult(handled=True)
match payload:
@@ -199,7 +202,10 @@ async def _dispatch_local_ui_payload(
normalize_remote_attachment_reference(raw_path)
)
else:
- attachment_path = normalize_local_attachment_reference(raw_path)
+ attachment_path = normalize_local_attachment_reference(
+ raw_path,
+ cwd=shell_working_dir,
+ )
if not attachment_path.exists():
raise FileNotFoundError(raw_path)
if not attachment_path.is_file():
@@ -782,6 +788,7 @@ async def dispatch_command_payload(
available_agents_set: set[str],
merge_pinned_agents: Callable[[list[str]], list[str]],
buffer_prefill: str = "",
+ shell_working_dir: Path | None = None,
) -> DispatchResult:
del available_agents
@@ -791,6 +798,7 @@ async def dispatch_command_payload(
available_agents_set=available_agents_set,
agent_name=agent,
buffer_prefill=buffer_prefill,
+ shell_working_dir=shell_working_dir,
)
if local_result is not None:
return local_result
diff --git a/src/fast_agent/ui/interactive_prompt.py b/src/fast_agent/ui/interactive_prompt.py
index fb7f6b8a0..7ea6a4528 100644
--- a/src/fast_agent/ui/interactive_prompt.py
+++ b/src/fast_agent/ui/interactive_prompt.py
@@ -61,6 +61,7 @@
from fast_agent.ui.interactive_diagnostics import write_interactive_trace
from fast_agent.ui.interactive_shell import ShellExecutionResult, run_interactive_shell_command
from fast_agent.ui.progress_display import progress_display
+from fast_agent.ui.prompt.input import resolve_shell_working_dir
from fast_agent.ui.prompt.resource_mentions import (
build_prompt_with_resources,
parse_mentions,
@@ -737,6 +738,10 @@ async def _process_turn_command_phase(
pinned_agent=pinned_agent,
),
buffer_prefill=buffer_prefill,
+ shell_working_dir=resolve_shell_working_dir(
+ agent_name=agent_state.current_agent,
+ agent_provider=prompt_provider,
+ ),
)
except KeyboardInterrupt:
self._handle_ctrl_c_interrupt(
@@ -871,7 +876,10 @@ async def _resolve_prompt_payload(
user_input: str,
) -> str | PromptMessageExtended | None:
prompt_payload: str | PromptMessageExtended = user_input
- parsed_mentions = parse_mentions(user_input)
+ parsed_mentions = parse_mentions(
+ user_input,
+ cwd=resolve_shell_working_dir(agent_name=agent_name, agent_provider=prompt_provider),
+ )
for warning in parsed_mentions.warnings:
rich_print(f"[yellow]{warning}[/yellow]")
diff --git a/src/fast_agent/ui/prompt/completer.py b/src/fast_agent/ui/prompt/completer.py
index 676fccab1..018fef4f7 100644
--- a/src/fast_agent/ui/prompt/completer.py
+++ b/src/fast_agent/ui/prompt/completer.py
@@ -68,11 +68,13 @@ def __init__(
current_agent: str | None = None,
agent_provider: "AgentApp | None" = None,
noenv_mode: bool = False,
+ cwd: Path | None = None,
) -> None:
self.agents = agents
self.current_agent = current_agent
self.agent_provider = agent_provider
self.noenv_mode = noenv_mode
+ self.cwd = cwd
# Map commands to their descriptions for better completion hints
self.commands = {
"mcp": "Manage MCP runtime servers (/mcp list|connect|disconnect|reconnect|session)",
@@ -157,6 +159,9 @@ def _resolve_completion_search(self, partial: str) -> _CompletionSearch | None:
raw_dir = raw_dir or "."
expanded_dir = Path(os.path.expandvars(os.path.expanduser(raw_dir)))
+ if not expanded_dir.is_absolute():
+ expanded_dir = (self.cwd or Path.cwd()) / expanded_dir
+ expanded_dir = expanded_dir.resolve(strict=False)
if not expanded_dir.exists() or not expanded_dir.is_dir():
return None
@@ -666,7 +671,9 @@ def _complete_local_attachment_paths(self, partial: str) -> list[Completion]:
from fast_agent.ui.prompt.attachment_tokens import normalize_local_attachment_reference
try:
- decoded_partial = str(normalize_local_attachment_reference(decoded_partial))
+ decoded_partial = str(
+ normalize_local_attachment_reference(decoded_partial, cwd=self.cwd)
+ )
except ValueError:
return []
diff --git a/src/fast_agent/ui/prompt/input.py b/src/fast_agent/ui/prompt/input.py
index 486ab6af9..3ab4388da 100644
--- a/src/fast_agent/ui/prompt/input.py
+++ b/src/fast_agent/ui/prompt/input.py
@@ -642,6 +642,15 @@ def _resolve_shell_context(
return shell_context, shell_agent
+def resolve_shell_working_dir(
+    *,
+    agent_name: str,
+    agent_provider: "AgentApp | None",
+) -> Path | None:
+    """Return the shell working directory for *agent_name*, if any.
+
+    Thin wrapper over ``_resolve_shell_context`` that exposes only the
+    ``working_dir`` so callers (completer, mention parsing, attach flow)
+    can resolve relative paths against the shell cwd.
+    """
+    shell_context, _ = _resolve_shell_context(agent_name=agent_name, agent_provider=agent_provider)
+    return shell_context.working_dir
+
def _build_prompt_text_resolver(
*,
session_factory: "Callable[[], PromptSession]",
@@ -936,6 +945,10 @@ def session_factory() -> PromptSession:
current_agent=agent_name,
agent_provider=agent_provider,
noenv_mode=noenv_mode,
+ cwd=resolve_shell_working_dir(
+ agent_name=agent_name,
+ agent_provider=agent_provider,
+ ),
),
lexer=ShellPrefixLexer(),
multiline_filter=Condition(lambda: in_multiline_mode),
diff --git a/src/fast_agent/ui/prompt/input_toolbar.py b/src/fast_agent/ui/prompt/input_toolbar.py
index 66af4ab23..049d0e492 100644
--- a/src/fast_agent/ui/prompt/input_toolbar.py
+++ b/src/fast_agent/ui/prompt/input_toolbar.py
@@ -119,6 +119,7 @@ def render_input_toolbar(
current_input_text,
model_name=agent_state.model_name,
provider=getattr(active_llm, "provider", None),
+ cwd=shell_state.working_dir,
)
middle = _build_middle_segment(agent_state, shortcut_text, attachment_summary=attachment_summary)
notification_segment = _build_notification_segment()
diff --git a/src/fast_agent/ui/prompt/resource_mentions.py b/src/fast_agent/ui/prompt/resource_mentions.py
index 6702e5210..6bb7ada19 100644
--- a/src/fast_agent/ui/prompt/resource_mentions.py
+++ b/src/fast_agent/ui/prompt/resource_mentions.py
@@ -230,7 +230,13 @@ def _replace(match: re.Match[str]) -> str:
return _PLACEHOLDER_RE.sub(_replace, template_uri)
-def _parse_token(token: str, *, start: int, end: int) -> ParsedMention | None:
+def _parse_token(
+ token: str,
+ *,
+ start: int,
+ end: int,
+ cwd: Path | None = None,
+) -> ParsedMention | None:
if not token.startswith("^"):
return None
@@ -245,7 +251,7 @@ def _parse_token(token: str, *, start: int, end: int) -> ParsedMention | None:
return None
if server_name == FILE_MENTION_SERVER:
- resource_uri = str(normalize_local_attachment_reference(resource_expr))
+ resource_uri = str(normalize_local_attachment_reference(resource_expr, cwd=cwd))
elif server_name == URL_MENTION_SERVER:
resource_uri = normalize_remote_attachment_reference(resource_expr)
else:
@@ -261,7 +267,7 @@ def _parse_token(token: str, *, start: int, end: int) -> ParsedMention | None:
)
-def parse_mentions(text: str) -> ParsedMentions:
+def parse_mentions(text: str, *, cwd: Path | None = None) -> ParsedMentions:
"""Parse supported resource mentions from text and strip them from the sent message body."""
mentions: list[ParsedMention] = []
warnings: list[str] = []
@@ -277,7 +283,7 @@ def parse_mentions(text: str) -> ParsedMentions:
parsed: ParsedMention | None
try:
- parsed = _parse_token(token, start=token_start, end=token_end)
+ parsed = _parse_token(token, start=token_start, end=token_end, cwd=cwd)
except (ResourceMentionError, ValueError) as exc:
parsed = None
warnings.append(f"Malformed resource mention '{token}': {exc}")
@@ -377,9 +383,9 @@ def build_prompt_with_resources(
return PromptMessageExtended(role="user", content=content)
-def mentions_in_text(text: str) -> Sequence[ParsedMention]:
+def mentions_in_text(text: str, *, cwd: Path | None = None) -> Sequence[ParsedMention]:
"""Convenience helper primarily for tests."""
- return parse_mentions(text).mentions
+ return parse_mentions(text, cwd=cwd).mentions
def _resolve_local_content_block(path_text: str) -> ContentBlock:
diff --git a/tests/unit/acp/test_prompt_sequencing.py b/tests/unit/acp/test_prompt_sequencing.py
index 5f2e64994..b1496b3ac 100644
--- a/tests/unit/acp/test_prompt_sequencing.py
+++ b/tests/unit/acp/test_prompt_sequencing.py
@@ -51,6 +51,20 @@ class DummyApp:
"""Placeholder for AgentInstance.app (unused by AgentACPServer.prompt path)."""
+class CapturingConnection:
+    """Test double that records every ``session_update`` call it receives."""
+
+    def __init__(self) -> None:
+        # Each entry captures the session id, update object, and extra kwargs.
+        self.notifications: list[dict[str, Any]] = []
+
+    async def session_update(self, session_id: str, update: Any, **kwargs: Any) -> None:
+        self.notifications.append(
+            {
+                "session_id": session_id,
+                "update": update,
+                "kwargs": kwargs,
+            }
+        )
+
+
@pytest.mark.asyncio
async def test_overlapping_prompts_are_serialized() -> None:
started1 = asyncio.Event()
@@ -170,3 +184,55 @@ async def dispose_instance(_instance: AgentInstance) -> None:
proceed2.set()
next_response = await asyncio.wait_for(next_task, timeout=1.0)
assert next_response.stop_reason == "end_turn"
+
+
+@pytest.mark.asyncio
+async def test_prompt_message_id_is_acknowledged_in_response_and_updates() -> None:
+    """The caller's message_id is echoed in both the PromptResponse and the
+    user_message_chunk session update."""
+    started = asyncio.Event()
+    proceed = asyncio.Event()
+    proceed.set()
+
+    agent = DummyAgent(started_evt=started, proceed_evt=proceed, text="first")
+    agents: dict[str, "AgentProtocol"] = {"default": cast("AgentProtocol", agent)}
+    instance = AgentInstance(app=AgentApp(agents), agents=agents, registry_version=0)
+
+    async def create_instance() -> AgentInstance:
+        return instance
+
+    async def dispose_instance(_instance: AgentInstance) -> None:
+        return None
+
+    server = AgentACPServer(
+        primary_instance=instance,
+        create_instance=create_instance,
+        dispose_instance=dispose_instance,
+        instance_scope="shared",
+        server_name="test",
+        permissions_enabled=False,
+    )
+    connection = CapturingConnection()
+    server.on_connect(cast("Any", connection))
+
+    session_id = "s-3"
+    message_id = "0f7c7a2c-7db0-4b16-a4df-b8f4a98055a8"
+    server.sessions[session_id] = instance
+    server._session_state[session_id] = ACPSessionState(session_id=session_id, instance=instance)
+
+    response = await server.prompt(
+        prompt=[TextContentBlock(type="text", text="p1")],
+        session_id=session_id,
+        message_id=message_id,
+    )
+
+    assert response.stop_reason == "end_turn"
+    assert response.user_message_id == message_id
+
+    # One user acknowledgement chunk followed by one agent reply chunk.
+    assert len(connection.notifications) == 2
+    user_update = connection.notifications[0]["update"]
+    assert user_update.session_update == "user_message_chunk"
+    assert user_update.message_id == message_id
+    assert user_update.content.text == "p1"
+
+    agent_update = connection.notifications[1]["update"]
+    assert agent_update.session_update == "agent_message_chunk"
+    assert agent_update.content.text == "first"
diff --git a/tests/unit/fast_agent/llm/providers/test_multipart_converter_openai.py b/tests/unit/fast_agent/llm/providers/test_multipart_converter_openai.py
index 596474d0c..826c45289 100644
--- a/tests/unit/fast_agent/llm/providers/test_multipart_converter_openai.py
+++ b/tests/unit/fast_agent/llm/providers/test_multipart_converter_openai.py
@@ -3,6 +3,7 @@
from collections.abc import Iterable, Mapping
from typing import TYPE_CHECKING, cast
+import pytest
from mcp.types import (
BlobResourceContents,
CallToolResult,
@@ -13,6 +14,7 @@
TextContent,
TextResourceContents,
)
+from openai import AsyncOpenAI
from pydantic import AnyUrl
from fast_agent.llm.provider.openai import llm_openai
@@ -23,7 +25,11 @@
from fast_agent.mcp.prompt_message_extended import PromptMessageExtended
if TYPE_CHECKING:
- from openai.types.chat import ChatCompletionToolMessageParam, ChatCompletionUserMessageParam
+ from openai.types.chat import (
+ ChatCompletionMessageParam,
+ ChatCompletionToolMessageParam,
+ ChatCompletionUserMessageParam,
+ )
def content_parts(message: Mapping[str, object]) -> list[dict[str, object]]:
@@ -225,6 +231,26 @@ def test_image_resource_link_conversion(self):
self.assertEqual(content_parts(openai_msg)[0]["type"], "image_url")
self.assertEqual(image_url_part(openai_msg)["url"], "https://example.com/image.jpg")
+    def test_document_resource_link_conversion(self):
+        """Test conversion of document ResourceLink to OpenAI file content."""
+        resource_link = ResourceLink(
+            uri=AnyUrl("https://example.com/report.pdf"),
+            type="resource_link",
+            mimeType="application/pdf",
+            name="report.pdf",
+        )
+        multipart = PromptMessageExtended(role="user", content=[resource_link])
+
+        openai_msgs = OpenAIConverter.convert_to_openai(multipart)
+        self.assertEqual(len(openai_msgs), 1)
+        openai_msg = openai_msgs[0]
+
+        # PDF links become a "file" part carrying the URL instead of an
+        # explanatory text block.
+        self.assertEqual(openai_msg["role"], "user")
+        self.assertEqual(len(content_parts(openai_msg)), 1)
+        self.assertEqual(content_parts(openai_msg)[0]["type"], "file")
+        self.assertEqual(file_part(openai_msg)["filename"], "report.pdf")
+        self.assertEqual(file_part(openai_msg)["file_url"], "https://example.com/report.pdf")
+
def test_multiple_content_blocks(self):
"""Test conversion of messages with multiple content blocks."""
# Create multiple content blocks
@@ -660,3 +686,62 @@ def test_convert_unsupported_binary_format(self):
self.assertEqual(content_parts(openai_msg)[0]["type"], "text")
self.assertIn("Binary resource", text_part(openai_msg))
self.assertIn("data.bin", text_part(openai_msg))
+
+
+@pytest.mark.asyncio
+async def test_normalize_chat_completion_files_uploads_remote_document(monkeypatch):
+    """A file part with an http(s) file_url is downloaded, uploaded, and
+    rewritten to reference the returned file_id."""
+    llm = llm_openai.OpenAILLM(Provider.OPENAI, model="gpt-4.1")
+    client = AsyncOpenAI(api_key="test")
+    messages = [
+        {
+            "role": "user",
+            "content": [
+                {
+                    "type": "file",
+                    "file": {
+                        "filename": "report.pdf",
+                        "file_url": "https://example.com/report.pdf",
+                    },
+                }
+            ],
+        }
+    ]
+
+    async def fake_download_remote_file(
+        file_url: str,
+    ) -> tuple[bytes | None, str | None]:
+        assert file_url == "https://example.com/report.pdf"
+        return b"%PDF-1.4 remote", "application/pdf"
+
+    async def fake_upload_file_bytes(
+        _client,
+        data: bytes,
+        filename: str | None,
+        mime_type: str | None,
+    ) -> str:
+        assert data == b"%PDF-1.4 remote"
+        assert filename == "report.pdf"
+        assert mime_type == "application/pdf"
+        return "file_remote_pdf"
+
+    monkeypatch.setattr(llm, "_download_remote_file", fake_download_remote_file)
+    monkeypatch.setattr(llm, "_upload_file_bytes", fake_upload_file_bytes)
+
+    normalized = await llm._normalize_chat_completion_files(
+        client,
+        cast("list[ChatCompletionMessageParam]", messages),
+    )
+
+    # filename/file_url are replaced entirely by the uploaded file_id.
+    assert normalized == [
+        {
+            "role": "user",
+            "content": [
+                {
+                    "type": "file",
+                    "file": {
+                        "file_id": "file_remote_pdf",
+                    },
+                }
+            ],
+        }
+    ]
diff --git a/tests/unit/fast_agent/llm/test_model_selection_catalog.py b/tests/unit/fast_agent/llm/test_model_selection_catalog.py
index 86ac7e2a3..ec2114cd1 100644
--- a/tests/unit/fast_agent/llm/test_model_selection_catalog.py
+++ b/tests/unit/fast_agent/llm/test_model_selection_catalog.py
@@ -136,6 +136,7 @@ def test_configured_providers_reads_config_keys() -> None:
def test_configured_providers_does_not_treat_anthropic_vertex_as_base_provider(
monkeypatch,
) -> None:
+ monkeypatch.delenv("ANTHROPIC_API_KEY", raising=False)
monkeypatch.setattr(
"fast_agent.llm.provider.anthropic.vertex_config.detect_google_adc",
lambda: GoogleAdcStatus(available=True, project_id="proj", credentials=object()),
@@ -156,6 +157,20 @@ def test_configured_providers_does_not_treat_anthropic_vertex_as_base_provider(
assert Provider.ANTHROPIC not in providers
+def test_configured_providers_reads_anthropic_vertex_env_only_setup(
+    monkeypatch,
+) -> None:
+    """Vertex is detected when configured solely via environment variables
+    (no config-file entries)."""
+    monkeypatch.setattr(
+        "fast_agent.llm.provider.anthropic.vertex_config.detect_google_adc",
+        lambda: GoogleAdcStatus(available=True, project_id="proj", credentials=object()),
+    )
+    monkeypatch.setenv("ANTHROPIC_VERTEX_PROJECT_ID", "proj")
+
+    providers = ModelSelectionCatalog.configured_providers({})
+
+    assert Provider.ANTHROPIC_VERTEX in providers
+
+
def test_configured_providers_reads_environment_keys() -> None:
original = os.environ.get("OPENAI_API_KEY")
diff --git a/tests/unit/fast_agent/ui/test_agent_completer.py b/tests/unit/fast_agent/ui/test_agent_completer.py
index 0d3fb3058..ecd2f71bb 100644
--- a/tests/unit/fast_agent/ui/test_agent_completer.py
+++ b/tests/unit/fast_agent/ui/test_agent_completer.py
@@ -1489,6 +1489,27 @@ def test_resource_mention_local_file_completion_encodes_spaces() -> None:
assert any(completion.text == "./two%20words.txt" for completion in completions)
+def test_resource_mention_local_file_completion_uses_completer_cwd() -> None:
+    """^file: completions resolve against the completer's cwd, not the
+    process working directory."""
+    with tempfile.TemporaryDirectory() as shell_dir, tempfile.TemporaryDirectory() as process_dir:
+        shell_base = Path(shell_dir)
+        process_base = Path(process_dir)
+        (shell_base / "shell note.txt").write_text("shell", encoding="utf-8")
+        (process_base / "process note.txt").write_text("process", encoding="utf-8")
+
+        completer = AgentCompleter(agents=["agent1"], cwd=shell_base)
+        original_cwd = os.getcwd()
+        try:
+            # Point the process cwd somewhere else to prove cwd= wins.
+            os.chdir(process_base)
+            doc = Document("^file:./shell", cursor_position=len("^file:./shell"))
+            completions = list(completer.get_completions(doc, None))
+        finally:
+            os.chdir(original_cwd)
+
+        names = [completion.text for completion in completions]
+        assert "./shell%20note.txt" in names
+        assert "./process%20note.txt" not in names
+
+
def test_resource_mention_url_completion_offers_http_schemes() -> None:
completer = AgentCompleter(agents=["agent1"])
@@ -1519,6 +1540,27 @@ def test_attach_command_completion_offers_clear_and_paths() -> None:
assert "'two words.pdf'" in names
+def test_attach_command_completion_uses_completer_cwd() -> None:
+    """/attach path completions resolve against the completer's cwd, not
+    the process working directory."""
+    with tempfile.TemporaryDirectory() as shell_dir, tempfile.TemporaryDirectory() as process_dir:
+        shell_base = Path(shell_dir)
+        process_base = Path(process_dir)
+        (shell_base / "two words.pdf").write_bytes(b"%PDF-1.4")
+        (process_base / "temp.pdf").write_bytes(b"%PDF-1.4")
+
+        completer = AgentCompleter(agents=["agent1"], cwd=shell_base)
+        original_cwd = os.getcwd()
+        try:
+            # Point the process cwd somewhere else to prove cwd= wins.
+            os.chdir(process_base)
+            doc = Document("/attach t", cursor_position=len("/attach t"))
+            completions = list(completer.get_completions(doc, None))
+        finally:
+            os.chdir(original_cwd)
+
+        names = [completion.text for completion in completions]
+        assert "'two words.pdf'" in names
+        assert "temp.pdf" not in names
+
+
def test_attach_command_completion_offers_https_hint() -> None:
completer = AgentCompleter(agents=["agent1"])
diff --git a/tests/unit/fast_agent/ui/test_attachment_indicator.py b/tests/unit/fast_agent/ui/test_attachment_indicator.py
index a9badb55f..c2241760e 100644
--- a/tests/unit/fast_agent/ui/test_attachment_indicator.py
+++ b/tests/unit/fast_agent/ui/test_attachment_indicator.py
@@ -77,6 +77,21 @@ def test_summarize_draft_attachments_marks_remote_office_doc_questionable_for_an
)
+def test_summarize_draft_attachments_marks_remote_office_doc_questionable_for_openai_chat() -> None:
+    """A remote .docx link is flagged questionable for an OpenAI chat model."""
+    summary = summarize_draft_attachments(
+        "describe ^url:https://example.com/report.docx",
+        model_name="gpt-4.1",
+        provider=Provider.OPENAI,
+    )
+
+    assert summary is not None
+    assert summary.count == 1
+    assert summary.any_questionable is True
+    assert summary.mime_types == (
+        "application/vnd.openxmlformats-officedocument.wordprocessingml.document",
+    )
+
+
def test_summarize_draft_attachments_marks_unknown_remote_url_questionable() -> None:
summary = summarize_draft_attachments(
"describe ^url:https://example.com/download",
diff --git a/tests/unit/fast_agent/ui/test_interactive_prompt_resource_mentions.py b/tests/unit/fast_agent/ui/test_interactive_prompt_resource_mentions.py
index c5761ea6e..cd97d8730 100644
--- a/tests/unit/fast_agent/ui/test_interactive_prompt_resource_mentions.py
+++ b/tests/unit/fast_agent/ui/test_interactive_prompt_resource_mentions.py
@@ -1,7 +1,7 @@
from __future__ import annotations
import base64
-from typing import Any, cast
+from typing import TYPE_CHECKING, Any, cast
import pytest
from mcp.types import EmbeddedResource, ReadResourceResult, TextResourceContents
@@ -12,6 +12,9 @@
from fast_agent.ui import interactive_prompt
from fast_agent.ui.interactive_prompt import InteractivePrompt
+if TYPE_CHECKING:
+ from pathlib import Path
+
class _MentionAgent:
def __init__(self) -> None:
@@ -137,6 +140,52 @@ async def fake_send(payload, _agent_name: str) -> str:
assert any(getattr(item, "type", None) == "image" for item in payload.content)
+@pytest.mark.asyncio
+async def test_prompt_loop_resolves_attach_paths_from_shell_working_dir(
+ monkeypatch: pytest.MonkeyPatch,
+ tmp_path: Path,
+) -> None:
+ shell_dir = tmp_path / "shell-cwd"
+ shell_dir.mkdir()
+ notes = shell_dir / "notes.txt"
+ notes.write_text("hello", encoding="utf-8")
+
+ inputs = iter(["/attach ./notes.txt", "__USE_PREFILL__", "STOP"])
+
+ async def fake_get_enhanced_input(*_args: Any, **kwargs: Any) -> str:
+ next_input = next(inputs)
+ if next_input == "__USE_PREFILL__":
+ prefill = kwargs.get("pre_populate_buffer", "")
+ assert str(notes.resolve()) in prefill
+ return prefill
+ return next_input
+
+ monkeypatch.setattr(interactive_prompt, "get_enhanced_input", fake_get_enhanced_input)
+ monkeypatch.setattr(interactive_prompt, "resolve_shell_working_dir", lambda **_kwargs: shell_dir)
+
+ sent_payloads: list[str | PromptMessageExtended] = []
+
+ async def fake_send(payload, _agent_name: str) -> str:
+ sent_payloads.append(payload)
+ return "ok"
+
+ prompt_ui = InteractivePrompt()
+ app = _MentionAgentApp()
+ app._agent_obj = _LocalMentionAgent()
+
+ await prompt_ui.prompt_loop(
+ send_func=fake_send,
+ default_agent="agent1",
+ available_agents=["agent1"],
+ prompt_provider=cast("Any", app),
+ )
+
+ assert len(sent_payloads) == 1
+ payload = sent_payloads[0]
+ assert isinstance(payload, PromptMessageExtended)
+ assert any(isinstance(item, EmbeddedResource) for item in payload.content)
+
+
@pytest.mark.asyncio
async def test_resolve_prompt_payload_falls_back_to_plain_text_on_resolution_error(
tmp_path,
@@ -153,3 +202,30 @@ async def test_resolve_prompt_payload_falls_back_to_plain_text_on_resolution_err
)
assert payload == f"can you see ^file:{missing}"
+
+
+@pytest.mark.asyncio
+async def test_resolve_prompt_payload_uses_shell_working_dir_for_local_file_mentions(
+ monkeypatch: pytest.MonkeyPatch,
+ tmp_path: Path,
+) -> None:
+ shell_dir = tmp_path / "shell-cwd"
+ shell_dir.mkdir()
+ notes = shell_dir / "notes.txt"
+ notes.write_text("hello", encoding="utf-8")
+
+ monkeypatch.setattr(interactive_prompt, "resolve_shell_working_dir", lambda **_kwargs: shell_dir)
+
+ prompt_ui = InteractivePrompt()
+ app = _MentionAgentApp()
+ app._agent_obj = _LocalMentionAgent()
+
+ payload = await prompt_ui._resolve_prompt_payload(
+ prompt_provider=cast("Any", app),
+ agent_name="agent1",
+ user_input="Read ^file:./notes.txt",
+ )
+
+ assert isinstance(payload, PromptMessageExtended)
+ assert payload.first_text() == "Read"
+ assert any(isinstance(item, EmbeddedResource) for item in payload.content)
diff --git a/tests/unit/fast_agent/ui/test_model_picker.py b/tests/unit/fast_agent/ui/test_model_picker.py
index 18cae65b6..60babbff8 100644
--- a/tests/unit/fast_agent/ui/test_model_picker.py
+++ b/tests/unit/fast_agent/ui/test_model_picker.py
@@ -358,3 +358,26 @@ def test_snapshot_disables_anthropic_vertex_group_when_adc_missing(monkeypatch)
assert option.active is False
assert option.disabled_reason == "Google ADC not found"
+
+
+def test_snapshot_adds_anthropic_vertex_group_for_env_only_setup(monkeypatch) -> None:
+ monkeypatch.setattr(
+ "fast_agent.llm.provider.anthropic.vertex_config.detect_google_adc",
+ lambda: types.SimpleNamespace(
+ available=True,
+ project_id="proj",
+ credentials=object(),
+ ),
+ )
+ monkeypatch.setenv("ANTHROPIC_VERTEX_PROJECT_ID", "proj")
+
+ snapshot = build_snapshot(config_payload={})
+
+ option = next(
+ provider
+ for provider in snapshot.providers
+ if provider.option_key == ANTHROPIC_VERTEX_PROVIDER_KEY
+ )
+
+ assert option.active is True
+ assert all(entry.model.startswith("anthropic-vertex.") for entry in option.curated_entries)
diff --git a/tests/unit/fast_agent/ui/test_resource_mentions.py b/tests/unit/fast_agent/ui/test_resource_mentions.py
index f809d1bb1..c7981a767 100644
--- a/tests/unit/fast_agent/ui/test_resource_mentions.py
+++ b/tests/unit/fast_agent/ui/test_resource_mentions.py
@@ -98,6 +98,19 @@ def test_parse_mentions_normalizes_local_file_paths(
assert parsed.mentions[0].resource_uri == str(report.resolve())
+def test_parse_mentions_normalizes_local_file_paths_from_explicit_cwd(tmp_path) -> None:
+ working_dir = tmp_path / "shell-cwd"
+ working_dir.mkdir()
+ report = working_dir / "report.pdf"
+ report.write_bytes(b"%PDF-1.4")
+
+ parsed = parse_mentions("Summarize ^file:./report.pdf", cwd=working_dir)
+
+ assert len(parsed.mentions) == 1
+ assert parsed.mentions[0].server_name == "file"
+ assert parsed.mentions[0].resource_uri == str(report.resolve())
+
+
def test_parse_mentions_normalizes_remote_urls() -> None:
parsed = parse_mentions("Describe ^url:https://example.com/image.png?size=full")
From bdf2984072597300b80d172cab7ed18897752b77 Mon Sep 17 00:00:00 2001
From: evalstate <1936278+evalstate@users.noreply.github.com>
Date: Sat, 28 Mar 2026 08:52:07 -0400
Subject: [PATCH 9/9] fix acp adapters new version/compat
---
examples/hf-toad-cards/hooks/save_history.py | 4 +--
.../scripts/extract_pr_reviews.py | 17 ++++++++--
publish/fast-agent-acp/pyproject.toml | 4 +--
publish/hf-inference-acp/pyproject.toml | 4 +--
.../src/hf_inference_acp/cli.py | 23 +++++++++++--
.../unit/hf_inference_acp/test_cli_import.py | 32 +++++++++++++++++++
uv.lock | 4 +--
7 files changed, 76 insertions(+), 12 deletions(-)
create mode 100644 tests/unit/hf_inference_acp/test_cli_import.py
diff --git a/examples/hf-toad-cards/hooks/save_history.py b/examples/hf-toad-cards/hooks/save_history.py
index 53c865fb0..d51aaf497 100644
--- a/examples/hf-toad-cards/hooks/save_history.py
+++ b/examples/hf-toad-cards/hooks/save_history.py
@@ -2,7 +2,7 @@
from collections.abc import Iterable
from datetime import datetime
-from typing import TYPE_CHECKING
+from typing import TYPE_CHECKING, cast
from fast_agent.hooks import HookContext
from fast_agent.mcp.prompt_serialization import save_messages
@@ -30,7 +30,7 @@ async def save_history_to_file(ctx: HookContext) -> None:
# Fall back to runner's turn messages + final response
runner_messages = getattr(ctx.runner, "delta_messages", None)
if isinstance(runner_messages, Iterable):
- messages = list(runner_messages)
+ messages = [cast("PromptMessageExtended", message) for message in runner_messages]
else:
messages = []
if ctx.message and ctx.message not in messages:
diff --git a/examples/hf-toad-cards/skills/pr-writing-review/scripts/extract_pr_reviews.py b/examples/hf-toad-cards/skills/pr-writing-review/scripts/extract_pr_reviews.py
index 2a6c7efb4..75ce35445 100644
--- a/examples/hf-toad-cards/skills/pr-writing-review/scripts/extract_pr_reviews.py
+++ b/examples/hf-toad-cards/skills/pr-writing-review/scripts/extract_pr_reviews.py
@@ -25,7 +25,7 @@
import subprocess
import sys
from dataclasses import asdict, dataclass, field
-from typing import Optional
+from typing import Optional, cast
from urllib.parse import quote, urlparse
JsonValue = str | int | float | bool | None | list["JsonValue"] | dict[str, "JsonValue"]
@@ -120,12 +120,25 @@ def run_gh_jsonlines(args: list[str], check: bool = True) -> list[dict]:
return out
+def _ensure_json_value(value: object, label: str) -> JsonValue:
+ if value is None or isinstance(value, str | int | float | bool):
+ return value
+ if isinstance(value, list):
+ return [_ensure_json_value(item, f"{label}[]") for item in value]
+ if isinstance(value, dict):
+ return _ensure_json_dict(value, label)
+ raise ValueError(f"Expected JSON-compatible value for {label}")
+
+
def _ensure_json_dict(value: object, label: str) -> JsonDict:
if not isinstance(value, dict):
raise ValueError(f"Expected object for {label}")
if not all(isinstance(key, str) for key in value.keys()):
raise ValueError(f"Expected string keys for {label}")
- return {str(key): val for key, val in value.items()}
+ return cast(
+ "JsonDict",
+ {str(key): _ensure_json_value(val, f"{label}.{key}") for key, val in value.items()},
+ )
def _get_required_str(data: JsonDict, key: str) -> str:
diff --git a/publish/fast-agent-acp/pyproject.toml b/publish/fast-agent-acp/pyproject.toml
index 29110fd69..8145a1c28 100644
--- a/publish/fast-agent-acp/pyproject.toml
+++ b/publish/fast-agent-acp/pyproject.toml
@@ -4,7 +4,7 @@ build-backend = "hatchling.build"
[project]
name = "fast-agent-acp"
-version = "0.4.7"
+version = "0.6.9"
description = "Convenience launcher that pulls in fast-agent-mcp and exposes the ACP CLI entrypoint."
readme = "README.md"
license = { text = "Apache-2.0" }
@@ -18,7 +18,7 @@ classifiers = [
]
requires-python = ">=3.13.5,<3.15"
dependencies = [
- "fast-agent-mcp==0.4.7",
+ "fast-agent-mcp==0.6.9",
]
[project.urls]
diff --git a/publish/hf-inference-acp/pyproject.toml b/publish/hf-inference-acp/pyproject.toml
index 59ec9b94b..b5ea4a647 100644
--- a/publish/hf-inference-acp/pyproject.toml
+++ b/publish/hf-inference-acp/pyproject.toml
@@ -4,7 +4,7 @@ build-backend = "hatchling.build"
[project]
name = "hf-inference-acp"
-version = "0.4.31"
+version = "0.6.9"
description = "Hugging Face inference agent with ACP support, powered by fast-agent-mcp"
readme = "README.md"
license = { text = "Apache-2.0" }
@@ -18,7 +18,7 @@ classifiers = [
]
requires-python = ">=3.13.5,<3.15"
dependencies = [
- "fast-agent-mcp==0.5.7",
+ "fast-agent-mcp==0.6.9",
"huggingface_hub>=1.3.4",
]
diff --git a/publish/hf-inference-acp/src/hf_inference_acp/cli.py b/publish/hf-inference-acp/src/hf_inference_acp/cli.py
index aafe93e34..353cf7391 100644
--- a/publish/hf-inference-acp/src/hf_inference_acp/cli.py
+++ b/publish/hf-inference-acp/src/hf_inference_acp/cli.py
@@ -28,6 +28,7 @@
from fast_agent.cli.commands.server_helpers import add_servers_to_config, generate_server_name
from fast_agent.cli.commands.url_parser import generate_server_configs, parse_server_urls
from fast_agent.core.agent_card_validation import collect_agent_card_names, find_loaded_agent_issues
+from fast_agent.llm.model_database import ModelDatabase
from fast_agent.llm.model_factory import ModelFactory
from fast_agent.llm.provider_key_manager import ProviderKeyManager
from fast_agent.llm.provider_types import Provider
@@ -43,8 +44,26 @@
)
from hf_inference_acp.wizard import WizardSetupLLM
-# Register wizard-setup model locally
-ModelFactory.register_runtime_model("wizard-setup", provider=Provider.FAST_AGENT, llm_class=WizardSetupLLM)
+
+def _register_wizard_setup_model() -> None:
+ """Register the local wizard model across fast-agent versions."""
+ register_runtime_model = getattr(ModelFactory, "register_runtime_model", None)
+ if callable(register_runtime_model):
+ register_runtime_model(
+ "wizard-setup",
+ provider=Provider.FAST_AGENT,
+ llm_class=WizardSetupLLM,
+ )
+ return
+
+ ModelFactory.MODEL_SPECIFIC_CLASSES["wizard-setup"] = WizardSetupLLM
+ ModelDatabase.register_runtime_model_params(
+ "wizard-setup",
+ ModelDatabase.FAST_AGENT_STANDARD.model_copy(),
+ )
+
+
+_register_wizard_setup_model()
app = typer.Typer(
help="Run the Hugging Face Inference ACP agent over stdio.",
diff --git a/tests/unit/hf_inference_acp/test_cli_import.py b/tests/unit/hf_inference_acp/test_cli_import.py
new file mode 100644
index 000000000..6116981b1
--- /dev/null
+++ b/tests/unit/hf_inference_acp/test_cli_import.py
@@ -0,0 +1,32 @@
+from __future__ import annotations
+
+import importlib
+import sys
+from pathlib import Path
+
+import pytest
+
+from fast_agent.llm.model_database import ModelDatabase
+from fast_agent.llm.model_factory import ModelFactory
+from fast_agent.llm.provider_types import Provider
+
+
+def _ensure_hf_inference_acp_on_path() -> None:
+ repo_root = Path(__file__).resolve().parents[3]
+ package_root = repo_root / "publish" / "hf-inference-acp" / "src"
+ sys.path.insert(0, str(package_root))
+
+
+def test_cli_import_registers_wizard_setup_model() -> None:
+ pytest.importorskip("ruamel.yaml")
+ _ensure_hf_inference_acp_on_path()
+
+ ModelDatabase.unregister_runtime_model_params("wizard-setup")
+ ModelFactory.MODEL_SPECIFIC_CLASSES.pop("wizard-setup", None)
+ sys.modules.pop("hf_inference_acp.cli", None)
+
+ cli = importlib.import_module("hf_inference_acp.cli")
+
+ assert cli is not None
+ assert ModelDatabase.get_default_provider("wizard-setup") == Provider.FAST_AGENT
+ assert ModelFactory.MODEL_SPECIFIC_CLASSES["wizard-setup"].__name__ == "WizardSetupLLM"
diff --git a/uv.lock b/uv.lock
index f027a473f..08ce836f8 100644
--- a/uv.lock
+++ b/uv.lock
@@ -702,7 +702,7 @@ wheels = [
[[package]]
name = "fast-agent-acp"
-version = "0.4.7"
+version = "0.6.9"
source = { editable = "publish/fast-agent-acp" }
dependencies = [
{ name = "fast-agent-mcp" },
@@ -1061,7 +1061,7 @@ wheels = [
[[package]]
name = "hf-inference-acp"
-version = "0.4.31"
+version = "0.6.9"
source = { editable = "publish/hf-inference-acp" }
dependencies = [
{ name = "fast-agent-mcp" },