From 7239f7fca10c2c4a9b1151bf5276f157adaed2fe Mon Sep 17 00:00:00 2001
From: majiayu000 <1835304752@qq.com>
Date: Mon, 29 Dec 2025 00:58:08 +0800
Subject: [PATCH] Add reasoning summaries support

- Add reasoning_summary option (auto, concise, detailed) for reasoning models
- Pass both reasoning_effort and reasoning_summary to the API in the reasoning parameter
- This also fixes reasoning_effort not being passed to the API

Fixes #16

Signed-off-by: majiayu000 <1835304752@qq.com>
---
 llm_openai.py        | 25 ++++++++++++++++++++++++
 tests/test_openai.py | 46 ++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 71 insertions(+)

diff --git a/llm_openai.py b/llm_openai.py
index 4013c31..175ee6d 100644
--- a/llm_openai.py
+++ b/llm_openai.py
@@ -82,6 +82,12 @@ class ReasoningEffortEnum(str, Enum):
     high = "high"
 
 
+class ReasoningSummaryEnum(str, Enum):
+    auto = "auto"
+    concise = "concise"
+    detailed = "detailed"
+
+
 class BaseOptions(Options):
     max_output_tokens: Optional[int] = Field(
         description=(
@@ -149,6 +155,14 @@ class ReasoningOptions(Options):
         ),
         default=None,
     )
+    reasoning_summary: Optional[ReasoningSummaryEnum] = Field(
+        description=(
+            "A summary of the reasoning performed by the model. This can be useful for "
+            "debugging and understanding the model's reasoning process. One of 'auto', "
+            "'concise', or 'detailed'."
+        ),
+        default=None,
+    )
 
 
 class _SharedResponses:
@@ -286,6 +300,17 @@ def _build_kwargs(self, prompt, conversation):
             if value is not None:
                 kwargs[option] = value
 
+        # Build reasoning options if either effort or summary is specified
+        reasoning_effort = getattr(prompt.options, "reasoning_effort", None)
+        reasoning_summary = getattr(prompt.options, "reasoning_summary", None)
+        if reasoning_effort is not None or reasoning_summary is not None:
+            reasoning = {}
+            if reasoning_effort is not None:
+                reasoning["effort"] = reasoning_effort.value
+            if reasoning_summary is not None:
+                reasoning["summary"] = reasoning_summary.value
+            kwargs["reasoning"] = reasoning
+
         if prompt.tools:
             tool_defs = []
             for tool in prompt.tools:
diff --git a/tests/test_openai.py b/tests/test_openai.py
index bb83185..6269b73 100644
--- a/tests/test_openai.py
+++ b/tests/test_openai.py
@@ -76,3 +76,49 @@ def simple_tool(number):
     )
     output = chain_response.text()
     assert output == snapshot
+
+
+def test_reasoning_options_in_kwargs():
+    """Test that reasoning options are correctly passed to the API."""
+    model = llm.get_model("openai/gpt-5-mini")
+    prompt = llm.Prompt(
+        model=model,
+        prompt="What is 2+2?",
+        options=model.Options(reasoning_effort="high", reasoning_summary="auto"),
+    )
+    kwargs = model._build_kwargs(prompt, conversation=None)
+
+    # Verify reasoning parameter is set correctly
+    assert "reasoning" in kwargs
+    assert kwargs["reasoning"]["effort"] == "high"
+    assert kwargs["reasoning"]["summary"] == "auto"
+
+
+def test_reasoning_effort_only_in_kwargs():
+    """Test that reasoning_effort works without reasoning_summary."""
+    model = llm.get_model("openai/gpt-5-mini")
+    prompt = llm.Prompt(
+        model=model,
+        prompt="What is 2+2?",
+        options=model.Options(reasoning_effort="medium"),
+    )
+    kwargs = model._build_kwargs(prompt, conversation=None)
+
+    assert "reasoning" in kwargs
+    assert kwargs["reasoning"]["effort"] == "medium"
+    assert "summary" not in kwargs["reasoning"]
+
+
+def test_reasoning_summary_only_in_kwargs():
+    """Test that reasoning_summary works without reasoning_effort."""
+    model = llm.get_model("openai/gpt-5-mini")
+    prompt = llm.Prompt(
+        model=model,
+        prompt="What is 2+2?",
+        options=model.Options(reasoning_summary="detailed"),
+    )
+    kwargs = model._build_kwargs(prompt, conversation=None)
+
+    assert "reasoning" in kwargs
+    assert kwargs["reasoning"]["summary"] == "detailed"
+    assert "effort" not in kwargs["reasoning"]
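
---

Usage note for reviewers: a minimal sketch of how the new options are exercised end to end
through the llm Python API. It assumes this plugin is installed and registers the
openai/gpt-5-mini model id used in the tests above; the specific option values are
illustrative, not mandated by the patch.

    import llm

    model = llm.get_model("openai/gpt-5-mini")
    # Options are passed as keyword arguments; either option on its own is
    # enough for _build_kwargs() to attach the "reasoning" parameter.
    response = model.prompt(
        "What is 2+2?",
        reasoning_effort="high",      # "high" and "medium" appear in the tests above
        reasoning_summary="concise",  # one of "auto", "concise", "detailed"
    )
    print(response.text())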