From 9a12918e5ec4d25749b0dfe60774251f2f44007c Mon Sep 17 00:00:00 2001 From: Sergio Barrio Date: Thu, 5 Mar 2026 17:00:06 +0100 Subject: [PATCH] Make Issue Notification action read OPENAI_MODEL env var and update default value --- .github/workflows/issue_notification.yml | 3 +++ tools/issue_handler/README.md | 8 ++++---- tools/issue_handler/setup_env.sh | 4 ++-- tools/issue_handler/src/openai_handler.py | 8 ++++---- tools/issue_handler/tests/test_openai_handler.py | 2 +- 5 files changed, 14 insertions(+), 11 deletions(-) diff --git a/.github/workflows/issue_notification.yml b/.github/workflows/issue_notification.yml index 41731149b..55e3f92dc 100644 --- a/.github/workflows/issue_notification.yml +++ b/.github/workflows/issue_notification.yml @@ -47,6 +47,9 @@ jobs: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} OPENAI_TOKEN: ${{ secrets.OPENAI_TOKEN }} OPENAI_SYSTEM_PROMPT: ${{ vars.OPENAI_SYSTEM_PROMPT }} + OPENAI_MODEL: ${{ vars.OPENAI_MODEL }} + OPENAI_TEMPERATURE: ${{ vars.OPENAI_TEMPERATURE }} + OPENAI_MAX_COMPLETION_TOKENS: ${{ vars.OPENAI_MAX_COMPLETION_TOKENS }} SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }} SLACK_CHANNEL_ID: ${{ secrets.SLACK_CHANNEL_ID }} GITHUB_REPOSITORY: ${{ github.repository }} diff --git a/tools/issue_handler/README.md b/tools/issue_handler/README.md index 174ddb1b6..0c01133f9 100644 --- a/tools/issue_handler/README.md +++ b/tools/issue_handler/README.md @@ -47,9 +47,9 @@ This creates a `.env` file that you will need to fill with the required tokens a - `GITHUB_REPOSITORY` - Repository in format `owner/repo` **Optional variables** (override defaults): -- `OPENAI_MODEL` - Model to use (default: `chatgpt-4o-latest`) +- `OPENAI_MODEL` - Model to use (default: `gpt-5.2-chat-latest`) - `OPENAI_TEMPERATURE` - Response creativity 0.0-1.0 (default: `0.4`) -- `OPENAI_MAX_RESPONSE_TOKENS` - Max response length (default: `500`) +- `OPENAI_MAX_COMPLETION_TOKENS` - Max response length (default: `500`) ## Usage

The tool runs: - 
`OPENAI_SYSTEM_PROMPT` - OpenAI analysis prompt (stored as variable for easier updates) **Optional GitHub Variables** (override defaults if needed): -- `OPENAI_MODEL` - Model to use (default: `chatgpt-4o-latest`) +- `OPENAI_MODEL` - Model to use (default: `gpt-5.2-chat-latest`) - `OPENAI_TEMPERATURE` - Response creativity 0.0-1.0 (default: `0.4`) -- `OPENAI_MAX_RESPONSE_TOKENS` - Max response length (default: `500`) +- `OPENAI_MAX_COMPLETION_TOKENS` - Max response length (default: `500`) **Automatically Provided**: - `GITHUB_TOKEN` - Provided by GitHub Actions diff --git a/tools/issue_handler/setup_env.sh b/tools/issue_handler/setup_env.sh index 3744a0351..bfb265207 100755 --- a/tools/issue_handler/setup_env.sh +++ b/tools/issue_handler/setup_env.sh @@ -30,9 +30,9 @@ SLACK_CHANNEL_ID= GITHUB_REPOSITORY=DataDog/dd-sdk-ios # Optional: Override OpenAI defaults -# OPENAI_MODEL=chatgpt-4o-latest +# OPENAI_MODEL=gpt-5.2-chat-latest # OPENAI_TEMPERATURE=0.4 -# OPENAI_MAX_RESPONSE_TOKENS=500 +# OPENAI_MAX_COMPLETION_TOKENS=500 EOL echo "✨ Created .env file" diff --git a/tools/issue_handler/src/openai_handler.py b/tools/issue_handler/src/openai_handler.py index be3bd267a..5a659bd3b 100644 --- a/tools/issue_handler/src/openai_handler.py +++ b/tools/issue_handler/src/openai_handler.py @@ -54,7 +54,7 @@ class OpenAIHandler: # Content limits to prevent abuse MAX_CONTENT_LENGTH = 4000 - MAX_RESPONSE_TOKENS = int(os.environ.get("OPENAI_MAX_RESPONSE_TOKENS", "500")) + MAX_COMPLETION_TOKENS = int(os.environ.get("OPENAI_MAX_COMPLETION_TOKENS", "500")) def __init__(self, api_key: str): """ @@ -71,7 +71,7 @@ def __init__(self, api_key: str): raise EnvironmentError("OPENAI_SYSTEM_PROMPT environment variable must be set") # Model can be overridden via env - self.model = os.environ.get("OPENAI_MODEL", "chatgpt-4o-latest") + self.model = os.environ.get("OPENAI_MODEL", "gpt-5.2-chat-latest") def analyze_issue(self, issue: GithubIssue) -> AnalysisResult: """ @@ -117,8 +117,8 @@ def 
analyze_issue(self, issue: GithubIssue) -> AnalysisResult: response = self.client.chat.completions.create( model=self.model, messages=messages, - temperature=float(os.environ.get("OPENAI_TEMPERATURE", "0.4")), - max_tokens=self.MAX_RESPONSE_TOKENS, + # temperature=float(os.environ.get("OPENAI_TEMPERATURE", "0.4")), + max_completion_tokens=self.MAX_COMPLETION_TOKENS, response_format={"type": "json_object"} ) diff --git a/tools/issue_handler/tests/test_openai_handler.py b/tools/issue_handler/tests/test_openai_handler.py index 6a8d0c001..5f92dd825 100644 --- a/tools/issue_handler/tests/test_openai_handler.py +++ b/tools/issue_handler/tests/test_openai_handler.py @@ -138,7 +138,7 @@ def test_analyze_issue_success(self, mock_openai): # Verify OpenAI call mock_client.chat.completions.create.assert_called_once() call_args = mock_client.chat.completions.create.call_args - assert call_args[1]["max_tokens"] == 500 + assert call_args[1]["max_completion_tokens"] == 500 assert call_args[1]["response_format"] == {"type": "json_object"} @patch('src.openai_handler.openai.OpenAI')