From 4e8973a98107247a928d5d8a512ced7090d8e2ff Mon Sep 17 00:00:00 2001
From: Alexander Schenk <5020307+alschenk@users.noreply.github.com>
Date: Thu, 20 Nov 2025 19:00:22 +0100
Subject: [PATCH] fix: renamed usage to usage_details to make it v3 compatible

---
 examples/filters/langfuse_v3_filter_pipeline.py | 17 +++++++++--------
 1 file changed, 9 insertions(+), 8 deletions(-)

diff --git a/examples/filters/langfuse_v3_filter_pipeline.py b/examples/filters/langfuse_v3_filter_pipeline.py
index a046eeea..80c593b8 100644
--- a/examples/filters/langfuse_v3_filter_pipeline.py
+++ b/examples/filters/langfuse_v3_filter_pipeline.py
@@ -313,19 +313,20 @@ async def outlet(self, body: dict, user: Optional[dict] = None) -> dict:
         assistant_message = get_last_assistant_message(body["messages"])
         assistant_message_obj = get_last_assistant_message_obj(body["messages"])
 
-        usage = None
+        usage_details = None
         if assistant_message_obj:
             info = assistant_message_obj.get("usage", {})
             if isinstance(info, dict):
                 input_tokens = info.get("prompt_eval_count") or info.get("prompt_tokens")
                 output_tokens = info.get("eval_count") or info.get("completion_tokens")
                 if input_tokens is not None and output_tokens is not None:
-                    usage = {
-                        "input": input_tokens,
-                        "output": output_tokens,
-                        "unit": "TOKENS",
+                    # Langfuse v3 token aggregation expects usage_details.
+                    usage_details = {
+                        "input": int(input_tokens),
+                        "output": int(output_tokens),
+                        "total": int(input_tokens) + int(output_tokens),
                     }
-        self.log(f"Usage data extracted: {usage}")
+        self.log(f"Usage data extracted: {usage_details}")
 
         # Update the trace with complete output information
         trace = self.chat_traces[chat_id]
@@ -387,8 +388,8 @@ async def outlet(self, body: dict, user: Optional[dict] = None) -> dict:
             )
 
             # Update with usage if available
-            if usage:
-                generation.update(usage=usage)
+            if usage_details:
+                generation.update(usage_details=usage_details)
             generation.end()
             self.log(f"LLM generation completed for chat_id: {chat_id}")