Fix duplicate JSON response output when agent invokes tools #2154
base: main
Changes from all commits
787e641
51116dd
5679d61
a7912b9
921b9c1
.gitignore:

```diff
@@ -188,3 +188,6 @@ pyrightconfig.json
 mypy.ini
 tox.ini
 *.difypkg
+
+.claude
+dify-plugin.exe
```
manifest.yaml:

```diff
@@ -1,4 +1,4 @@
-version: 0.0.25
+version: 0.0.26
 type: plugin
 author: "langgenius"
 name: "agent"
```
ReAct agent strategy:

```diff
@@ -28,8 +28,10 @@
 from prompt.template import REACT_PROMPT_TEMPLATES
 from pydantic import BaseModel, Field
+
+
 class LogMetadata:
     """Metadata keys for logging"""

     STARTED_AT = "started_at"
     PROVIDER = "provider"
     FINISHED_AT = "finished_at"
```
```diff
@@ -38,6 +40,7 @@ class LogMetadata:
     CURRENCY = "currency"
     TOTAL_TOKENS = "total_tokens"

+
 ignore_observation_providers = ["wenxin"]
```
```diff
@@ -250,15 +253,15 @@ def _invoke(self, parameters: dict[str, Any]) -> Generator[AgentInvokeMessage]:
                     LogMetadata.FINISHED_AT: time.perf_counter(),
                     LogMetadata.ELAPSED_TIME: time.perf_counter() - model_started_at,
                     LogMetadata.PROVIDER: model.provider,
-                    LogMetadata.TOTAL_PRICE: usage_dict["usage"].total_price
-                    if usage_dict["usage"]
-                    else 0,
-                    LogMetadata.CURRENCY: usage_dict["usage"].currency
-                    if usage_dict["usage"]
-                    else "",
-                    LogMetadata.TOTAL_TOKENS: usage_dict["usage"].total_tokens
-                    if usage_dict["usage"]
-                    else 0,
+                    LogMetadata.TOTAL_PRICE: (
+                        usage_dict["usage"].total_price if usage_dict["usage"] else 0
+                    ),
+                    LogMetadata.CURRENCY: (
+                        usage_dict["usage"].currency if usage_dict["usage"] else ""
+                    ),
+                    LogMetadata.TOTAL_TOKENS: (
+                        usage_dict["usage"].total_tokens if usage_dict["usage"] else 0
+                    ),
                 },
             )
             if not scratchpad.action:
```
```diff
@@ -285,11 +288,11 @@ def _invoke(self, parameters: dict[str, Any]) -> Generator[AgentInvokeMessage]:
                     data={},
                     metadata={
                         LogMetadata.STARTED_AT: time.perf_counter(),
-                        LogMetadata.PROVIDER: tool_instances[
-                            tool_name
-                        ].identity.provider
-                        if tool_instances.get(tool_name)
-                        else "",
+                        LogMetadata.PROVIDER: (
+                            tool_instances[tool_name].identity.provider
+                            if tool_instances.get(tool_name)
+                            else ""
+                        ),
                     },
                     parent=round_log,
                     status=ToolInvokeMessage.LogMessage.LogStatus.START,
```
```diff
@@ -318,11 +321,11 @@ def _invoke(self, parameters: dict[str, Any]) -> Generator[AgentInvokeMessage]:
                     },
                     metadata={
                         LogMetadata.STARTED_AT: tool_call_started_at,
-                        LogMetadata.PROVIDER: tool_instances[
-                            tool_name
-                        ].identity.provider
-                        if tool_instances.get(tool_name)
-                        else "",
+                        LogMetadata.PROVIDER: (
+                            tool_instances[tool_name].identity.provider
+                            if tool_instances.get(tool_name)
+                            else ""
+                        ),
                         LogMetadata.FINISHED_AT: time.perf_counter(),
                         LogMetadata.ELAPSED_TIME: time.perf_counter()
                         - tool_call_started_at,
```
```diff
@@ -337,28 +340,28 @@ def _invoke(self, parameters: dict[str, Any]) -> Generator[AgentInvokeMessage]:
             yield self.finish_log_message(
                 log=round_log,
                 data={
-                    "action_name": scratchpad.action.action_name
-                    if scratchpad.action
-                    else "",
-                    "action_input": scratchpad.action.action_input
-                    if scratchpad.action
-                    else "",
+                    "action_name": (
+                        scratchpad.action.action_name if scratchpad.action else ""
+                    ),
+                    "action_input": (
+                        scratchpad.action.action_input if scratchpad.action else ""
+                    ),
                     "thought": scratchpad.thought,
                     "observation": scratchpad.observation,
                 },
                 metadata={
                     LogMetadata.STARTED_AT: round_started_at,
                     LogMetadata.FINISHED_AT: time.perf_counter(),
                     LogMetadata.ELAPSED_TIME: time.perf_counter() - round_started_at,
-                    LogMetadata.TOTAL_PRICE: usage_dict["usage"].total_price
-                    if usage_dict["usage"]
-                    else 0,
-                    LogMetadata.CURRENCY: usage_dict["usage"].currency
-                    if usage_dict["usage"]
-                    else "",
-                    LogMetadata.TOTAL_TOKENS: usage_dict["usage"].total_tokens
-                    if usage_dict["usage"]
-                    else 0,
+                    LogMetadata.TOTAL_PRICE: (
+                        usage_dict["usage"].total_price if usage_dict["usage"] else 0
+                    ),
+                    LogMetadata.CURRENCY: (
+                        usage_dict["usage"].currency if usage_dict["usage"] else ""
+                    ),
+                    LogMetadata.TOTAL_TOKENS: (
+                        usage_dict["usage"].total_tokens if usage_dict["usage"] else 0
+                    ),
                 },
             )
             iteration_step += 1
```
```diff
@@ -395,15 +398,21 @@ def _invoke(self, parameters: dict[str, Any]) -> Generator[AgentInvokeMessage]:
         yield self.create_json_message(
             {
                 "execution_metadata": {
-                    LogMetadata.TOTAL_PRICE: llm_usage["usage"].total_price
-                    if llm_usage["usage"] is not None
-                    else 0,
-                    LogMetadata.CURRENCY: llm_usage["usage"].currency
-                    if llm_usage["usage"] is not None
-                    else "",
-                    LogMetadata.TOTAL_TOKENS: llm_usage["usage"].total_tokens
-                    if llm_usage["usage"] is not None
-                    else 0,
+                    LogMetadata.TOTAL_PRICE: (
+                        llm_usage["usage"].total_price
+                        if llm_usage["usage"] is not None
+                        else 0
+                    ),
+                    LogMetadata.CURRENCY: (
+                        llm_usage["usage"].currency
+                        if llm_usage["usage"] is not None
+                        else ""
+                    ),
+                    LogMetadata.TOTAL_TOKENS: (
+                        llm_usage["usage"].total_tokens
+                        if llm_usage["usage"] is not None
+                        else 0
+                    ),
                 }
             }
         )
```
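For reference, a sketch of the final message this branch yields — the key names follow the `LogMetadata` constants above, and the values are illustrative only, not taken from the PR:

```python
# Illustrative shape of the run-level summary emitted via create_json_message.
# total_price/total_tokens fall back to 0 and currency to "" when usage is None.
example_final_message = {
    "execution_metadata": {
        "total_price": 0.0021,
        "currency": "USD",
        "total_tokens": 1042,
    }
}
```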
```diff
@@ -516,9 +525,30 @@ def _handle_invoke_action(
         )
         result = ""
         additional_messages = []  # Collect messages that need to be yielded
-        for response in tool_invoke_responses:
+        # Collect all responses first to detect duplicates
+        responses_list = list(tool_invoke_responses)
+        # Check if there's a JSON response
+        has_json_response = any(
+            r.type == ToolInvokeMessage.MessageType.JSON for r in responses_list
+        )
+        json_content = None
+        if has_json_response:
+            # Get the JSON content for comparison
+            for r in responses_list:
+                if r.type == ToolInvokeMessage.MessageType.JSON:
+                    json_content = json.dumps(
+                        cast(
+                            ToolInvokeMessage.JsonMessage,
+                            r.message,
+                        ).json_object,
+                        ensure_ascii=False,
+                    )
+                    break
+
+        for response in responses_list:
             if response.type == ToolInvokeMessage.MessageType.TEXT:
-                result += cast(ToolInvokeMessage.TextMessage, response.message).text
+                # Skip TEXT response as workflow always returns both TEXT and JSON
+                continue
             elif response.type == ToolInvokeMessage.MessageType.LINK:
                 result += (
                     f"result link: {cast(ToolInvokeMessage.TextMessage, response.message).text}."
```

Review comment on lines 549 to +551 (Contributor): Unconditionally skipping all TEXT responses discards output from tools that return only text; the skip should apply only when a JSON response is actually present. Suggested change:

```python
if response.type == ToolInvokeMessage.MessageType.TEXT:
    if has_json_response:
        # Skip TEXT response as a JSON response exists (likely duplicate content)
        continue
    result += cast(ToolInvokeMessage.TextMessage, response.message).text
```

```diff
@@ -546,7 +576,7 @@ def _handle_invoke_action(
                         ).json_object,
                         ensure_ascii=False,
                     )
-                    result += f"tool response: {text}."
+                    result += text
                 elif response.type == ToolInvokeMessage.MessageType.BLOB:
                     result += "Generated file with ... "
                     additional_messages.append(response)
```
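To make the reviewer's point concrete, here is a minimal, self-contained sketch of how the response loop behaves under the unconditional skip versus the suggested conditional skip. `Msg` and the string message types are hypothetical stand-ins for `ToolInvokeMessage`, not the plugin's actual classes:

```python
from dataclasses import dataclass


@dataclass
class Msg:
    """Hypothetical stand-in for ToolInvokeMessage (not the plugin's class)."""
    type: str     # "TEXT" or "JSON"
    payload: str  # rendered message content


def collect(responses: list[Msg], skip_text_unconditionally: bool) -> str:
    """Mimics the tool-response loop: JSON is appended, TEXT may be skipped."""
    has_json_response = any(r.type == "JSON" for r in responses)
    result = ""
    for r in responses:
        if r.type == "TEXT":
            # PR behavior: always skip. Suggested fix: skip only when JSON exists.
            if skip_text_unconditionally or has_json_response:
                continue
            result += r.payload
        elif r.type == "JSON":
            result += r.payload
    return result


text_only = [Msg("TEXT", "plain answer")]
text_and_json = [Msg("TEXT", "answer"), Msg("JSON", '{"answer": "answer"}')]

assert collect(text_only, skip_text_unconditionally=True) == ""  # output silently lost
assert collect(text_only, skip_text_unconditionally=False) == "plain answer"
assert collect(text_and_json, skip_text_unconditionally=False) == '{"answer": "answer"}'
```

Under the unconditional skip, a TEXT-only tool yields an empty `result` — the regression the review flags — while gating the skip on `has_json_response` removes the duplicate only when a JSON copy actually exists.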
Function-calling agent strategy:

```diff
@@ -29,8 +29,10 @@
 )
 from pydantic import BaseModel
+
+
 class LogMetadata:
     """Metadata keys for logging"""

     STARTED_AT = "started_at"
     PROVIDER = "provider"
     FINISHED_AT = "finished_at"
```
```diff
@@ -39,8 +41,10 @@ class LogMetadata:
     CURRENCY = "currency"
     TOTAL_TOKENS = "total_tokens"

+
 class ExecutionMetadata(BaseModel):
     """Execution metadata with default values"""

     total_price: float = 0.0
     currency: str = ""
     total_tokens: int = 0
```
```diff
@@ -53,13 +57,13 @@ class ExecutionMetadata(BaseModel):
     completion_price_unit: float = 0.0
     completion_price: float = 0.0
     latency: float = 0.0

     @classmethod
     def from_llm_usage(cls, usage: Optional[LLMUsage]) -> "ExecutionMetadata":
         """Create ExecutionMetadata from LLMUsage, handling None case"""
         if usage is None:
             return cls()

         return cls(
             total_price=float(usage.total_price),
             currency=usage.currency,
```
```diff
@@ -72,9 +76,10 @@ def from_llm_usage(cls, usage: Optional[LLMUsage]) -> "ExecutionMetadata":
             completion_unit_price=float(usage.completion_unit_price),
             completion_price_unit=float(usage.completion_price_unit),
             completion_price=float(usage.completion_price),
-            latency=usage.latency
+            latency=usage.latency,
         )

+
 class ContextItem(BaseModel):
     content: str
     title: str
```
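A short sketch of the boundary behavior `from_llm_usage` is built for, with a hypothetical `LLMUsage` stub standing in for the real dify_plugin class (only fields visible in this diff are included):

```python
from dataclasses import dataclass
from typing import Optional

from pydantic import BaseModel


@dataclass
class LLMUsage:  # hypothetical stub; the real class ships with dify_plugin
    total_price: float
    currency: str
    total_tokens: int
    latency: float


class ExecutionMetadata(BaseModel):
    total_price: float = 0.0
    currency: str = ""
    total_tokens: int = 0
    latency: float = 0.0

    @classmethod
    def from_llm_usage(cls, usage: Optional[LLMUsage]) -> "ExecutionMetadata":
        # None collapses to safe defaults, replacing per-field ternaries at call sites
        if usage is None:
            return cls()
        return cls(
            total_price=float(usage.total_price),
            currency=usage.currency,
            total_tokens=usage.total_tokens,
            latency=usage.latency,
        )


assert ExecutionMetadata.from_llm_usage(None).model_dump() == {
    "total_price": 0.0,
    "currency": "",
    "total_tokens": 0,
    "latency": 0.0,
}
```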
```diff
@@ -284,15 +289,15 @@ def _invoke(
                     LogMetadata.FINISHED_AT: time.perf_counter(),
                     LogMetadata.ELAPSED_TIME: time.perf_counter() - model_started_at,
                     LogMetadata.PROVIDER: model.provider,
-                    LogMetadata.TOTAL_PRICE: current_llm_usage.total_price
-                    if current_llm_usage
-                    else 0,
-                    LogMetadata.CURRENCY: current_llm_usage.currency
-                    if current_llm_usage
-                    else "",
-                    LogMetadata.TOTAL_TOKENS: current_llm_usage.total_tokens
-                    if current_llm_usage
-                    else 0,
+                    LogMetadata.TOTAL_PRICE: (
+                        current_llm_usage.total_price if current_llm_usage else 0
+                    ),
+                    LogMetadata.CURRENCY: (
+                        current_llm_usage.currency if current_llm_usage else ""
+                    ),
+                    LogMetadata.TOTAL_TOKENS: (
+                        current_llm_usage.total_tokens if current_llm_usage else 0
+                    ),
                 },
             )
```
```diff
@@ -359,15 +364,34 @@ def _invoke(
                         },
                     )
                     tool_result = ""
-                    for tool_invoke_response in tool_invoke_responses:
+                    # Collect all responses first to detect duplicates
+                    responses_list = list(tool_invoke_responses)
+                    # Check if there's a JSON response
+                    has_json_response = any(
+                        r.type == ToolInvokeMessage.MessageType.JSON
+                        for r in responses_list
+                    )
+                    json_content = None
+                    if has_json_response:
+                        # Get the JSON content for comparison
+                        for r in responses_list:
+                            if r.type == ToolInvokeMessage.MessageType.JSON:
+                                json_content = json.dumps(
+                                    cast(
+                                        ToolInvokeMessage.JsonMessage,
+                                        r.message,
+                                    ).json_object,
+                                    ensure_ascii=False,
+                                )
+                                break
+
+                    for tool_invoke_response in responses_list:
                         if (
                             tool_invoke_response.type
                             == ToolInvokeMessage.MessageType.TEXT
                         ):
-                            tool_result += cast(
-                                ToolInvokeMessage.TextMessage,
-                                tool_invoke_response.message,
-                            ).text
+                            # Skip TEXT response as workflow always returns both TEXT and JSON
+                            continue
                         elif (
                             tool_invoke_response.type
                             == ToolInvokeMessage.MessageType.LINK
```

Review comment on lines +374 to +386 (Contributor): The variable `json_content` is assigned here but it's never used later in the code. This block appears to be dead code and can be removed to simplify the logic. The `has_json_response` check is sufficient for the deduplication logic.

Review comment on lines 390 to +394 (Contributor): Unconditionally skipping all TEXT responses discards output from tools that return only text, mirroring the issue flagged in the ReAct strategy above. Suggested change:

```python
if (
    tool_invoke_response.type
    == ToolInvokeMessage.MessageType.TEXT
):
    if has_json_response:
        # Skip TEXT response as a JSON response exists (likely duplicate content)
        continue
    tool_result += cast(
        ToolInvokeMessage.TextMessage,
        tool_invoke_response.message,
    ).text
```
```diff
@@ -432,7 +456,7 @@ def _invoke(
                                 ).json_object,
                                 ensure_ascii=False,
                             )
-                            tool_result += f"tool response: {text}."
+                            tool_result += text
                         elif (
                             tool_invoke_response.type
                             == ToolInvokeMessage.MessageType.BLOB
```
```diff
@@ -500,15 +524,15 @@ def _invoke(
                     LogMetadata.STARTED_AT: round_started_at,
                     LogMetadata.FINISHED_AT: time.perf_counter(),
                     LogMetadata.ELAPSED_TIME: time.perf_counter() - round_started_at,
-                    LogMetadata.TOTAL_PRICE: current_llm_usage.total_price
-                    if current_llm_usage
-                    else 0,
-                    LogMetadata.CURRENCY: current_llm_usage.currency
-                    if current_llm_usage
-                    else "",
-                    LogMetadata.TOTAL_TOKENS: current_llm_usage.total_tokens
-                    if current_llm_usage
-                    else 0,
+                    LogMetadata.TOTAL_PRICE: (
+                        current_llm_usage.total_price if current_llm_usage else 0
+                    ),
+                    LogMetadata.CURRENCY: (
+                        current_llm_usage.currency if current_llm_usage else ""
+                    ),
+                    LogMetadata.TOTAL_TOKENS: (
+                        current_llm_usage.total_tokens if current_llm_usage else 0
+                    ),
                 },
             )
             # If max_iteration_steps=1, need to return tool responses
```
```diff
@@ -545,11 +569,7 @@ def _invoke(
         )

         metadata = ExecutionMetadata.from_llm_usage(llm_usage["usage"])
-        yield self.create_json_message(
-            {
-                "execution_metadata": metadata.model_dump()
-            }
-        )
+        yield self.create_json_message({"execution_metadata": metadata.model_dump()})

     def check_tool_calls(self, llm_result_chunk: LLMResultChunk) -> bool:
         """
```
```diff
@@ -648,11 +668,15 @@ def _clear_user_prompt_image_messages(
         ):
             prompt_message.content = "\n".join(
                 [
-                    content.data
-                    if content.type == PromptMessageContentType.TEXT
-                    else "[image]"
-                    if content.type == PromptMessageContentType.IMAGE
-                    else "[file]"
+                    (
+                        content.data
+                        if content.type == PromptMessageContentType.TEXT
+                        else (
+                            "[image]"
+                            if content.type == PromptMessageContentType.IMAGE
+                            else "[file]"
+                        )
+                    )
                     for content in prompt_message.content
                 ]
             )
```
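The added parentheses here are purely cosmetic: Python's conditional expression is right-associative, so the old and new forms select the same branch. A standalone check, using plain strings in place of `PromptMessageContentType`:

```python
# a if c1 else b if c2 else c  parses as  a if c1 else (b if c2 else c)
def render(content_type: str, data: str = "hello") -> str:
    return (
        data
        if content_type == "TEXT"
        else ("[image]" if content_type == "IMAGE" else "[file]")
    )


assert render("TEXT") == "hello"
assert render("IMAGE") == "[image]"
assert render("DOCUMENT") == "[file]"
```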