diff --git a/README.md b/README.md index 6bd6ae9..bf31510 100644 --- a/README.md +++ b/README.md @@ -51,9 +51,17 @@ OpenAI: openai/gpt-4o-mini OpenAI: openai/gpt-4.5-preview OpenAI: openai/gpt-4.5-preview-2025-02-27 OpenAI: openai/o3-mini +OpenAI: openai/o3-mini-high +OpenAI: openai/o3-mini-low OpenAI: openai/o1-mini +OpenAI: openai/o1-mini-high +OpenAI: openai/o1-mini-low OpenAI: openai/o1 +OpenAI: openai/o1-high +OpenAI: openai/o1-low OpenAI: openai/o1-pro +OpenAI: openai/o1-pro-high +OpenAI: openai/o1-pro-low OpenAI: openai/gpt-4.1 OpenAI: openai/gpt-4.1-2025-04-14 OpenAI: openai/gpt-4.1-mini @@ -61,12 +69,26 @@ OpenAI: openai/gpt-4.1-mini-2025-04-14 OpenAI: openai/gpt-4.1-nano OpenAI: openai/gpt-4.1-nano-2025-04-14 OpenAI: openai/o3 +OpenAI: openai/o3-high +OpenAI: openai/o3-low OpenAI: openai/o3-2025-04-16 +OpenAI: openai/o3-2025-04-16-high +OpenAI: openai/o3-2025-04-16-low OpenAI: openai/o3-streaming +OpenAI: openai/o3-streaming-high +OpenAI: openai/o3-streaming-low OpenAI: openai/o3-2025-04-16-streaming +OpenAI: openai/o3-2025-04-16-streaming-high +OpenAI: openai/o3-2025-04-16-streaming-low OpenAI: openai/o4-mini +OpenAI: openai/o4-mini-high +OpenAI: openai/o4-mini-low OpenAI: openai/o4-mini-2025-04-16 +OpenAI: openai/o4-mini-2025-04-16-high +OpenAI: openai/o4-mini-2025-04-16-low OpenAI: openai/codex-mini-latest +OpenAI: openai/codex-mini-latest-high +OpenAI: openai/codex-mini-latest-low ``` Add `--options` to see a full list of options that can be provided to each model. 
diff --git a/llm_openai.py b/llm_openai.py index 522f610..4cf8c07 100644 --- a/llm_openai.py +++ b/llm_openai.py @@ -47,6 +47,20 @@ def register_models(register): ResponsesModel(model_id, **options), AsyncResponsesModel(model_id, **options), ) + if options.get("reasoning"): + for effort in (ReasoningEffortEnum.high, ReasoningEffortEnum.low): + register( + ResponsesModel( + f"{model_id}-{effort.value}", + default_options={"reasoning_effort": effort}, + **options, + ), + AsyncResponsesModel( + f"{model_id}-{effort.value}", + default_options={"reasoning_effort": effort}, + **options, + ), + ) class TruncationEnum(str, Enum): @@ -140,7 +154,13 @@ class _SharedResponses: key_env_var = "OPENAI_API_KEY" def __init__( - self, model_name, vision=False, streaming=True, schemas=True, reasoning=False + self, + model_name, + vision=False, + streaming=True, + schemas=True, + reasoning=False, + default_options=None, ): self.model_id = "openai/" + model_name streaming_suffix = "-streaming" @@ -162,6 +182,10 @@ def __init__( options.append(VisionOptions) if reasoning: options.append(ReasoningOptions) + self.option_defaults = default_options or {} + if self.option_defaults: + default_mixin = _option_defaults_mixin(options, self.option_defaults) + options.append(default_mixin) self.Options = combine_options(*options) def __str__(self): @@ -233,8 +257,11 @@ def _build_kwargs(self, prompt, conversation): "top_p", "store", "truncation", + "reasoning_effort", ): value = getattr(prompt.options, option, None) + if value is None: + value = self.option_defaults.get(option) if value is not None: kwargs[option] = value if self.supports_schema and prompt.schema: @@ -333,6 +360,19 @@ def _attachment(attachment, image_detail): } +def _option_defaults_mixin(mixins, defaults): + fields = {} + for name, value in defaults.items(): + for mixin in mixins: + if hasattr(mixin, "model_fields") and name in mixin.model_fields: + field_type = mixin.model_fields[name].annotation + fields[name] = (field_type, 
Field(default=value))
+                break
+    if not fields:
+        return Options
+    return create_model("OptionDefaults", __base__=Options, **fields)
+
+
 def combine_options(*mixins):
     # reversed() here makes --options display order correct
     return create_model("CombinedOptions", __base__=tuple(reversed(mixins)))