12 changes: 8 additions & 4 deletions examples/llmagent_with_thinking/agent/agent.py
@@ -11,7 +11,7 @@
from trpc_agent_sdk.tools import FunctionTool
from trpc_agent_sdk.types import GenerateContentConfig
from trpc_agent_sdk.types import ThinkingConfig

from trpc_agent_sdk.types import HttpOptions
from .config import get_model_config
from .prompts import INSTRUCTION
from .tools import get_weather_forecast
@@ -32,8 +32,9 @@ def _create_model() -> LLMModel:
# if the LLM model service fails to return the JSON format of tool calls, you can also enable ToolPrompt.
# This will prompt the LLM model to output the special text for tool calling in the main content,
# thereby increasing the probability of successful tool invocation.
# You can uncomment the code below to use ToolPrompt.
# add_tools_to_prompt=True,
# Thinking models may emit tool calls as text. ToolPrompt lets the
# framework parse those text calls back into executable FunctionCalls.
add_tools_to_prompt=model_name.lower() == "hy3-preview", # Enable ToolPrompt for Hy3-preview model
)
return model
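For readers unfamiliar with ToolPrompt, a minimal sketch of the idea follows. This is illustrative only: the <tool_call> tag and payload shape are assumptions for the sketch, not the trpc_agent_sdk wire format. The point is that a tool call emitted as plain text in the main content can be pattern-matched and converted back into a structured call.

import json
import re

# Hypothetical markup: <tool_call>{"name": "get_weather_report", "args": {...}}</tool_call>
_TOOL_CALL_RE = re.compile(r"<tool_call>(\{.*?\})</tool_call>", re.DOTALL)

def parse_text_tool_calls(content: str) -> list[dict]:
    """Recover structured tool calls from text the model emitted as content."""
    calls = []
    for payload in _TOOL_CALL_RE.findall(content):
        try:
            data = json.loads(payload)
        except json.JSONDecodeError:
            continue  # malformed call text; treat it as ordinary content
        if "name" in data:
            calls.append({"name": data["name"], "args": data.get("args", {})})
    return calls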

@@ -45,6 +46,9 @@ def create_agent():
weather_tool = FunctionTool(get_weather_report)
forecast_tool = FunctionTool(get_weather_forecast)

# Set reasoning effort to high for Hy3-preview model
http_options = HttpOptions(extra_body={"chat_template_kwargs": {"reasoning_effort": "high"}})

return LlmAgent(
name="weather_agent",
description=
@@ -54,7 +58,7 @@ def create_agent():
instruction=INSTRUCTION,
tools=[weather_tool, forecast_tool],
# Note: thinking_budget must be less than max_output_tokens
generate_content_config=GenerateContentConfig(max_output_tokens=10240, ),
generate_content_config=GenerateContentConfig(max_output_tokens=10240, http_options=http_options),
# The model must be a thinking model to use this Planner; this configuration will not take effect for non-thinking models.
planner=BuiltInPlanner(thinking_config=ThinkingConfig(
include_thoughts=True,
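A note on the HttpOptions change above: assuming the model service is OpenAI-compatible and extra_body is merged verbatim into the outgoing request (plausible for this setup, but not confirmed by the diff), the effective payload would look roughly like this:

# Hedged sketch of the request body after extra_body is merged in.
# Field placement is an assumption, not confirmed by this diff.
request_body = {
    "model": "hy3-preview",
    "messages": [{"role": "user", "content": "What's the current weather in Guangzhou?"}],
    "max_tokens": 10240,
    # merged from HttpOptions(extra_body=...):
    "chat_template_kwargs": {"reasoning_effort": "high"},
}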
12 changes: 10 additions & 2 deletions examples/llmagent_with_thinking/agent/prompts.py
@@ -18,13 +18,21 @@
- Provide clear, useful weather information and suggestions

**Available tools:**
1. `get_weather`: Get current weather information
1. `get_weather_report`: Get current weather information
2. `get_weather_forecast`: Get multi-day weather forecast

**Tool usage guide:**
- When the user asks about the current weather, use `get_weather`
- When the user asks about the current weather, use `get_weather_report`
- When the user asks about the weather for the next few days, use `get_weather_forecast`
- If the query is not clear, you can use both tools at the same time
- Do not answer with weather data before the required tool result is available
- Do not guess, simulate, or invent tool results
- If a tool is needed, call the tool first and wait for the tool result before giving the final answer

**Thinking guidance:**
- Keep reasoning concise and focused on choosing the right tool and city
- Do not repeat the tool usage rules or tool schema in your reasoning
- Do not draft the final answer in reasoning; use reasoning only to decide the next action

**Reply format:**
- Provide accurate weather information
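The real get_weather_report and get_weather_forecast implementations live in the example's agent/tools.py and are not part of this diff; minimal stand-ins consistent with the prompt might look like the following (signatures and return shapes are assumptions):

def get_weather_report(city: str) -> dict:
    """Current weather for a city (stub; values are placeholders)."""
    return {"city": city, "condition": "sunny", "temperature_c": 28}

def get_weather_forecast(city: str, days: int = 3) -> dict:
    """Multi-day forecast for a city (stub; values are placeholders)."""
    return {"city": city, "days": days, "forecast": ["sunny", "cloudy", "light rain"]}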
67 changes: 61 additions & 6 deletions examples/llmagent_with_thinking/run_agent.py
@@ -30,7 +30,7 @@ async def run_weather_agent():
demo_queries = [
"What's the weather like today?",
"What's the current weather in Guangzhou?",
"What will the weather be like in Shanghai for the next three days?",
"Please check both the current weather in Guangzhou and the three-day weather forecast for Shanghai.",
]

for query in demo_queries:
@@ -51,23 +51,78 @@

user_content = Content(parts=[Part.from_text(text=query)])

print("🤖 Assistant: ", end="", flush=True)
printed_thinking = False
printed_assistant = False
in_thinking = False
thinking_line_start = False
assistant_text_started = False

def print_assistant_header() -> None:
nonlocal printed_assistant
if printed_assistant:
return
if printed_thinking:
print("\n")
print("🤖 Assistant: ", end="", flush=True)
printed_assistant = True

def print_thinking_header() -> None:
nonlocal in_thinking, printed_thinking, thinking_line_start
if in_thinking:
return
print("\n 💭 Thinking: ", end="", flush=True)
in_thinking = True
printed_thinking = True
thinking_line_start = False

def print_thinking_text(text: str) -> None:
nonlocal thinking_line_start
for line in text.splitlines(keepends=True):
if thinking_line_start:
print(" ", end="", flush=True)
print(line, end="", flush=True)
thinking_line_start = line.endswith("\n")

def close_thinking_section() -> None:
nonlocal in_thinking, thinking_line_start
if in_thinking:
if not thinking_line_start:
print()
print(" 💭 End Thinking")
in_thinking = False
thinking_line_start = False

async for event in runner.run_async(user_id=user_id, session_id=current_session_id, new_message=user_content):
if not event.content or not event.content.parts:
continue

if event.partial:
for part in event.content.parts:
if part.text:
print(part.text, end="", flush=True)
if part.thought:
if assistant_text_started:
continue
print_thinking_header()
print_thinking_text(part.text)
else:
close_thinking_section()
print_assistant_header()
assistant_text_started = True
print(part.text, end="", flush=True)
continue

for part in event.content.parts:
if part.thought:
continue
if part.function_call:
if part.thought and part.text and not printed_thinking and not assistant_text_started:
print_thinking_header()
print_thinking_text(part.text)
elif part.function_call:
close_thinking_section()
print_assistant_header()
print(f"\n🔧 [Invoke Tool:: {part.function_call.name}({part.function_call.args})]")
elif part.function_response:
close_thinking_section()
printed_thinking = False
print_assistant_header()
print(f"📊 [Tool Result: {part.function_response.response}]")
# elif part.text:
# print(f"\n✅ {part.text}")
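To see the printing state machine above in isolation, here is a stripped-down, self-contained model of it driven by stub parts. Part is a hypothetical stand-in for the SDK's Part type, modeling only the two fields the loop reads; the header/footer strings mirror the ones in the diff.

from dataclasses import dataclass

@dataclass
class Part:
    text: str = ""
    thought: bool = False

def render(parts: list[Part]) -> None:
    """Simplified model of the thinking/assistant print state machine."""
    in_thinking = False
    printed_assistant = False
    for part in parts:
        if part.thought:
            if not in_thinking:
                print("\n 💭 Thinking: ", end="")
                in_thinking = True
            print(part.text, end="")
        else:
            if in_thinking:
                print("\n 💭 End Thinking")
                in_thinking = False
            if not printed_assistant:
                print("🤖 Assistant: ", end="")
                printed_assistant = True
            print(part.text, end="")
    print()

render([Part("Pick the right tool and city.", thought=True),
        Part("It is sunny in Guangzhou today.")])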