2 changes: 1 addition & 1 deletion examples/mcp_tools/agent/tools.py
@@ -74,7 +74,7 @@ def __init__(self):
super().__init__()
self._connection_params = StreamableHTTPConnectionParams(
url="http://localhost:8000/mcp",
headers={"Authorization": "Bearer token"},
headers={"Authorization": "Bearer <token>"},
timeout=5,
sse_read_timeout=60 * 5,
terminate_on_close=True, # send termination signal when the toolset is closed
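The `<token>` placeholder keeps a real bearer token out of the example source. A minimal, self-contained sketch of one way to fill it in at runtime, assuming the token is exposed through an environment variable (the `MCP_TOKEN` name is illustrative, not part of this change):

```python
import os

# Illustrative sketch only: build the Authorization header from an environment
# variable instead of committing a real token. MCP_TOKEN is a hypothetical name.
token = os.environ.get("MCP_TOKEN", "")
headers = {"Authorization": f"Bearer {token}"}
print(headers)  # e.g. {'Authorization': 'Bearer abc123'}
```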
39 changes: 15 additions & 24 deletions examples/session_summarizer/run_agent.py
@@ -127,7 +127,7 @@ async def summarize_session(session_service: InMemorySessionService, app_name: s
print(f" - Compression ratio: {summary.get_compression_ratio():.1f}%")


SUMMARIZER_COUNT = 3 # Run summarization every SUMMARIZER_COUNT turns (e.g. 3 => every 3 turns)
SUMMARIZER_COUNT = 2 # Keep the example short: summarize after a couple of turns.


def create_summarizer_manager(model: OpenAIModel) -> SummarizerSessionManager:
@@ -154,8 +154,8 @@ def create_summarizer_manager(model: OpenAIModel) -> SummarizerSessionManager:
# set_summarizer_time_interval_threshold(10),
# )
],
max_summary_length=600, # Max summary length kept; default 1000; beyond shows ...
keep_recent_count=4, # How many recent turns to keep; default 10
max_summary_length=300, # Max summary length to keep (default 1000); longer summaries are truncated with "..."
keep_recent_count=2, # Keep only the latest turns so compression is easy to observe.
)
# Create SummarizerSessionManager
summarizer_manager = SummarizerSessionManager(
@@ -169,7 +169,7 @@ def create_summarizer_manager(model: OpenAIModel) -> SummarizerSessionManager:
async def llm_agent_summarizer():
"""Demo LlmAgent integrated with SummarizerSessionManager."""
print("=" * 60)
print("Example 2: LlmAgent + SummarizerSessionManager demo")
print("Example: LlmAgent + SummarizerSessionManager demo")
print("=" * 60)
app_name = "llm_summarizer_manager_demo"

@@ -183,22 +183,13 @@ async def llm_agent_summarizer():
current_session_id = str(uuid.uuid4())
print(f"📊 Session: {app_name}/{user_id}/{current_session_id}")

# Demo conversation turns
# Short demo conversation. Four turns are enough to trigger automatic
# summarization while keeping the example quick to run.
conversations = [
"Hello! I want to learn Python programming. Can you help me?",
"What is a variable? Can you give an example?",
"Got it! What data types are there?",
"What does control flow mean?",
"I understand those ideas. I'd like a small project to practice.",
"OK! How do I build this calculator?",
"The calculator looks good—I ran it successfully. I'd like to learn more advanced Python.",
"I'd like to start with functions—I think they're central to programming.",
"I see—functions make code modular and reusable. I'd like to learn OOP next.",
"I get OOP now. I'd like to learn exception handling.",
"I've learned these advanced topics. I'd like a bigger project that ties them together.",
"Yes! How do I implement this library system?",
"The structure looks good. How do I persist data to files?",
"Great! I've covered basics and advanced topics including files. I'd like a recap of what I learned.",
"Please give me a tiny calculator example.",
"Can you recap what I learned so far?",
]

print(f"\n💬 Multi-turn dialogue ({len(conversations)} turns)...")
@@ -230,18 +221,18 @@ async def llm_agent_summarizer():
# elif part.text:
# print(f"\n✅ {part.text}")

# After every SUMMARIZER_COUNT turns, inspect session state
if index % SUMMARIZER_COUNT == 0: # summarizer should fire around this cadence
if session:
print(f"\n📊 Session state after turn {index + 1}:")
summary = await session_service.summarizer_manager.get_session_summary(session)
# Inspect the summary after the threshold cadence.
if (index + 1) % SUMMARIZER_COUNT == 0 and session:
print(f"\n📊 Session state after turn {index + 1}:")
summary = await session_service.summarizer_manager.get_session_summary(session)
if summary:
print(f" - Summary text: {summary.summary_text[:100]}...")
print(f" - Original event count: {summary.original_event_count}")
print(f" - Compressed event count: {summary.compressed_event_count}")
print(f" - Compression ratio: {summary.get_compression_ratio()}")
else:
print(" - Summary not created yet.")
print("\n" + "-" * 40)
# Manual forced summary test
await summarize_session(session_service, app_name, user_id, current_session_id)


if __name__ == "__main__":
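For reference, a standalone sketch of the cadence check the example now uses. With SUMMARIZER_COUNT = 2 and the four demo turns, `(index + 1) % SUMMARIZER_COUNT == 0` fires after the 2nd and 4th turns (`index` is 0-based, so `index + 1` is the turn number). The snippet only mimics the loop; the agent, session, and summarizer calls from the example are omitted:

```python
SUMMARIZER_COUNT = 2  # same cadence as the example above

conversations = [
    "Hello! I want to learn Python programming. Can you help me?",
    "What is a variable? Can you give an example?",
    "Please give me a tiny calculator example.",
    "Can you recap what I learned so far?",
]

for index, _message in enumerate(conversations):
    # index is 0-based, so index + 1 is the human-readable turn number.
    triggered = (index + 1) % SUMMARIZER_COUNT == 0
    print(f"turn {index + 1}: inspect summary -> {triggered}")

# turn 1: inspect summary -> False
# turn 2: inspect summary -> True
# turn 3: inspect summary -> False
# turn 4: inspect summary -> True
```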
10 changes: 5 additions & 5 deletions pyproject.toml
@@ -76,12 +76,12 @@ knowledge = [
]

a2a = [
"a2a-sdk>=0.2.0",
"a2a-sdk<1.0.0,>=0.3.22",
"protobuf>=5.29.5",
]

agent-claude = [
"claude-agent-sdk>=0.1.3",
"claude-agent-sdk>=0.1.3,<0.1.64",
"cloudpickle>=2.0.0",
]

@@ -115,20 +115,19 @@ dev = [
"langchain_community>=0.3.27",
"langchain_huggingface>=0.1.0",
"ag-ui-protocol>=0.1.8",
"claude-agent-sdk>=0.1.3",
"claude-agent-sdk>=0.1.3,<0.1.64",
"cloudpickle>=2.0.0",
"typer>=0.9.0",
]

all = [
"a2a-sdk>=0.2.0",
"protobuf>=5.29.5",
"numpy>=2.2.5",
"langchain_community>=0.3.27",
"langchain_huggingface>=0.1.0",
"langchain_tavily",
"ag-ui-protocol>=0.1.8",
"claude-agent-sdk>=0.1.3",
"claude-agent-sdk>=0.1.3,<0.1.64",
"pytest",
"pytest-asyncio",
"rouge-score",
@@ -140,6 +139,7 @@ all = [
"nanobot-ai>=0.1.4.post6",
"aiofiles",
"wecom-aibot-sdk-python>=0.1.5",
"a2a-sdk<1.0.0,>=0.3.22",
]

[project.scripts]
8 changes: 4 additions & 4 deletions tests/tools/mcp_tool/test_mcp_tool.py
@@ -358,7 +358,7 @@ def test_resource_content_with_blob(self):
)
assert tool._parse_mcp_call_tool_result_to_str(result) == "blob_data"

def test_multiple_contents_returns_first_text(self):
def test_multiple_contents_returns_list(self):
tool = self._tool()
result = CallToolResult(
isError=False,
@@ -367,18 +367,18 @@ def test_multiple_contents_returns_first_text(self):
TextContent(type="text", text="second"),
],
)
assert tool._parse_mcp_call_tool_result_to_str(result) == "first"
assert tool._parse_mcp_call_tool_result_to_str(result) == ["first", "second"]

def test_fallback_returns_raw_content(self):
"""When no content type matches, raw content list is returned."""
"""When no content type matches, stringified raw content is returned."""
tool = self._tool()
result = MagicMock()
result.isError = False
mock_content = MagicMock()
mock_content.type = "unknown"
result.content = [mock_content]
ret = tool._parse_mcp_call_tool_result_to_str(result)
assert ret == result.content
assert ret == str(result.content)


# ---------------------------------------------------------------------------
44 changes: 31 additions & 13 deletions trpc_agent_sdk/tools/mcp_tool/_mcp_tool.py
@@ -30,6 +30,7 @@
from __future__ import annotations

from typing import Optional
from typing import Union
from typing_extensions import override

from mcp.types import CallToolResult
@@ -180,28 +181,45 @@ def _get_declaration(self) -> FunctionDeclaration:
)
return function_decl

def _parse_mcp_call_tool_result_to_str(self, result: CallToolResult) -> str:
def _parse_mcp_call_tool_result_to_str(self, result: CallToolResult) -> Union[str, list[str]]:
"""Converts MCP call result into standardized string format.

Args:
result: Raw result from MCP tool call

Returns:
str: Parsed result in string format
Union[str, list[str]]: Parsed tool result.
- Single parsed content returns ``str``.
- Multiple parsed contents return ``list[str]``.
"""
if result.isError:
return f"Error: {result.content[0].text}" # type: ignore
parsed_items: list[str] = []
for data in result.content:
if data.type == "text":
return data.text
if data.type == "image":
return data.data
if data.type == "resource":
text = getattr(data.resource, 'text', '')
if not text:
text = getattr(data.resource, 'blob', '')
return text
return result.content # type: ignore
text = getattr(data, "text", "")
if text:
parsed_items.append(text)
elif data.type == "image":
image_data = getattr(data, "data", "")
if image_data:
parsed_items.append(image_data)
elif data.type == "resource":
resource = getattr(data, "resource", None)
if resource is not None:
text = getattr(resource, "text", "") or getattr(resource, "blob", "")
if text:
parsed_items.append(text)

if not parsed_items:
fallback = str(result.content)
return f"Error: {fallback}" if result.isError else fallback

if len(parsed_items) == 1:
payload = parsed_items[0]
return f"Error: {payload}" if result.isError else payload

if result.isError:
return [f"Error: {item}" for item in parsed_items]
return parsed_items

@retry_on_closed_resource
@override
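To make the new return contract concrete, here is a usage sketch that mirrors the updated tests: a single parsed content still comes back as a plain string, while multiple contents now come back as a list of strings. `CallToolResult` and `TextContent` are the real `mcp.types` models used in the tests; constructing the tool instance itself is not shown in this diff, so the method calls are left as comments with their expected results:

```python
from mcp.types import CallToolResult, TextContent

# Inputs mirroring the updated unit tests.
single = CallToolResult(
    isError=False,
    content=[TextContent(type="text", text="only")],
)
multiple = CallToolResult(
    isError=False,
    content=[
        TextContent(type="text", text="first"),
        TextContent(type="text", text="second"),
    ],
)

# Assuming `tool` is an MCPTool built the same way as in the tests:
# tool._parse_mcp_call_tool_result_to_str(single)    -> "only"
# tool._parse_mcp_call_tool_result_to_str(multiple)  -> ["first", "second"]
# Error results keep the same shapes, with each item prefixed "Error: ";
# if nothing can be parsed, the stringified raw content is returned.
```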