diff --git a/llm_openai.py b/llm_openai.py
index 4013c31..80ee5fb 100644
--- a/llm_openai.py
+++ b/llm_openai.py
@@ -249,8 +249,6 @@ def _build_messages(self, prompt, conversation):
                         "arguments": json.dumps(tool_call.arguments),
                     }
                 )
-        if prompt.system and prompt.system != current_system:
-            messages.append({"role": "system", "content": prompt.system})
         if not prompt.attachments:
             messages.append({"role": "user", "content": prompt.prompt or ""})
         else:
@@ -275,6 +273,8 @@ def _build_messages(self, prompt, conversation):
     def _build_kwargs(self, prompt, conversation):
         messages = self._build_messages(prompt, conversation)
         kwargs = {"model": self.model_name, "input": messages}
+        if prompt.system:
+            kwargs["instructions"] = prompt.system
         for option in (
             "max_output_tokens",
             "temperature",
diff --git a/tests/test_openai.py b/tests/test_openai.py
index bb83185..265eb88 100644
--- a/tests/test_openai.py
+++ b/tests/test_openai.py
@@ -76,3 +76,23 @@ def simple_tool(number):
     )
     output = chain_response.text()
     assert output == snapshot
+
+
+def test_instructions_in_kwargs():
+    """Test that system prompt is sent as instructions parameter, not as a system message."""
+    model = llm.get_model("openai/gpt-4o-mini")
+    prompt = llm.Prompt(
+        model=model,
+        prompt="say hi",
+        system="You are a friendly assistant who speaks like a pirate",
+        options=model.Options()
+    )
+    kwargs = model._build_kwargs(prompt, conversation=None)
+
+    # Verify instructions parameter is set
+    assert kwargs["instructions"] == "You are a friendly assistant who speaks like a pirate"
+
+    # Verify no system message in input
+    for item in kwargs["input"]:
+        if isinstance(item, dict):
+            assert item.get("role") != "system"