Skip to content

Commit fa1457f

Browse files
jwesley and claude committed
Bump version to 1.3.2
- Add harmony.enabled config override (auto/yes/no)
- Fix model detection for Strands agents (check model.config dict)
- Update config wizard with harmony enabled option
- Improve harmony processor initialization with config priority

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude <noreply@anthropic.com>
1 parent 87ef86a commit fa1457f

8 files changed

Lines changed: 151 additions & 14 deletions

File tree

CHANGELOG.md

Lines changed: 50 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -7,6 +7,56 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
77

88
## [Unreleased]
99

10+
## [1.3.2] - 2024-12-24
11+
12+
### Added
13+
- **Harmony Config Override** - Manual control for harmony processing
14+
- New `harmony.enabled` config option with tri-state values (auto/yes/no)
15+
- `auto` (default) - Auto-detect harmony agents
16+
- `yes` - Force enable harmony processing for all agents
17+
- `no` - Disable harmony processing entirely
18+
- Added to config wizard with validation
19+
- Fixes issue where harmony wasn't activating despite being configured
20+
21+
### Fixed
22+
- **Model Detection for Strands Agents** - Correctly extract model metadata
23+
- Fixed "Unknown Model" display for Strands-based agents
24+
- Now checks `model.config` dict for Strands-style configuration
25+
- Properly extracts `model_id`, `max_tokens`, and `temperature` from config
26+
- Agent metadata now displays correctly for all Strands agents
27+
28+
### Changed
29+
- Harmony processor initialization now respects config override priority
30+
- Improved logging for harmony enablement (shows whether forced or auto-detected)
31+
32+
## [1.3.1] - 2024-12-24
33+
34+
### Fixed
35+
- **Package Metadata** - Removed setuptools deprecation warnings
36+
- Removed deprecated `license = "MIT"` table format
37+
- Removed deprecated "License :: OSI Approved :: MIT License" classifier
38+
- Added modern `license-files = ["LICENSE"]` reference
39+
- Added `maintainers` field
40+
- Fixes "invalid distribution" warning on Windows
41+
42+
## [1.3.0] - 2024-12-24
43+
44+
### Added
45+
- **OpenAI Harmony Support** - Full integration for gpt-oss models
46+
- Automatic detection of harmony-formatted agents
47+
- Specialized `HarmonyProcessor` for parsing structured responses
48+
- Multi-channel output support (reasoning, analysis, commentary, final)
49+
- Configurable detailed thinking mode with labeled prefixes
50+
- New `harmony.show_detailed_thinking` config option
51+
- Added to config wizard
52+
- Now a core dependency (openai-harmony>=0.0.8)
53+
54+
### Changed
55+
- **Python 3.9+ Required** - Upgraded from Python 3.8
56+
- Required by openai-harmony dependency (pydantic>=2.11.7)
57+
- Updated all documentation and classifiers
58+
- Modernized type annotations (Dict→dict, List→list, Tuple→tuple)
59+
1060
## [0.3.7] - 2025-10-20
1161

1262
### Fixed

pyproject.toml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"
44

55
[project]
66
name = "basic-agent-chat-loop"
7-
version = "1.3.1"
7+
version = "1.3.2"
88
description = "Feature-rich interactive CLI for AI agents with token tracking, prompt templates, aliases, and configuration"
99
readme = "README.md"
1010
requires-python = ">=3.9"

src/basic_agent_chat_loop/__init__.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -4,7 +4,7 @@
44
agent aliases, and extensive configuration options.
55
"""
66

7-
__version__ = "1.3.1"
7+
__version__ = "1.3.2"
88

99
from .chat_config import ChatConfig
1010
from .chat_loop import ChatLoop

src/basic_agent_chat_loop/chat_config.py

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -65,6 +65,8 @@ class ChatConfig:
6565
"notification_sound": None, # Uses bundled notification.wav if None
6666
},
6767
"harmony": {
68+
# Enable harmony processing (auto-detects by default, set to True to force)
69+
"enabled": None, # None = auto-detect, True = force enable, False = disable
6870
# Show reasoning/analysis/commentary channels
6971
"show_detailed_thinking": False,
7072
},

src/basic_agent_chat_loop/chat_loop.py

Lines changed: 29 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -596,7 +596,23 @@ def __init__(
596596

597597
# Setup Harmony processor if agent uses Harmony format
598598
self.harmony_processor = None
599-
if self.agent_metadata.get("uses_harmony", False):
599+
600+
# Check if harmony should be enabled
601+
# Priority: config override > auto-detection
602+
harmony_enabled_config = (
603+
self.config.get("harmony.enabled", None) if self.config else None
604+
)
605+
uses_harmony = self.agent_metadata.get("uses_harmony", False)
606+
607+
# Determine if harmony should be enabled
608+
# None = auto-detect, True = force enable, False = force disable
609+
should_enable_harmony = (
610+
harmony_enabled_config
611+
if harmony_enabled_config is not None
612+
else uses_harmony
613+
)
614+
615+
if should_enable_harmony:
600616
# Get detailed thinking config option
601617
show_detailed = (
602618
self.config.get("harmony.show_detailed_thinking", False)
@@ -606,9 +622,18 @@ def __init__(
606622
self.harmony_processor = HarmonyProcessor(
607623
show_detailed_thinking=show_detailed
608624
)
609-
logger.info(
610-
f"Harmony processor enabled (detailed_thinking={show_detailed})"
611-
)
625+
626+
# Log how harmony was enabled
627+
if harmony_enabled_config is True:
628+
logger.info(
629+
f"Harmony processor enabled via config override (detailed_thinking={show_detailed})"
630+
)
631+
elif harmony_enabled_config is False:
632+
logger.info("Harmony processor disabled via config override")
633+
else:
634+
logger.info(
635+
f"Harmony processor auto-detected (detailed_thinking={show_detailed})"
636+
)
612637

613638
def _extract_token_usage(self, response_obj) -> Optional[dict[str, int]]:
614639
"""

src/basic_agent_chat_loop/components/agent_loader.py

Lines changed: 23 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -214,11 +214,18 @@ def extract_agent_metadata(agent: Any) -> dict[str, Any]:
214214

215215
# Try multiple attribute names for model ID
216216
model_id = None
217-
for attr in ["model_id", "model", "model_name", "_model_id", "name"]:
218-
if hasattr(model, attr):
219-
model_id = getattr(model, attr)
220-
if model_id and model_id != "Unknown":
221-
break
217+
218+
# Check for Strands-style config dict first
219+
if hasattr(model, "config") and isinstance(model.config, dict):
220+
model_id = model.config.get("model_id")
221+
222+
# Fall back to checking various attributes
223+
if not model_id:
224+
for attr in ["model_id", "model", "model_name", "_model_id", "name"]:
225+
if hasattr(model, attr):
226+
model_id = getattr(model, attr)
227+
if model_id and model_id != "Unknown":
228+
break
222229

223230
# Clean up model_id if it's a long AWS model string
224231
if model_id and isinstance(model_id, str):
@@ -238,8 +245,17 @@ def extract_agent_metadata(agent: Any) -> dict[str, Any]:
238245
model_id = "Claude Haiku"
239246

240247
metadata["model_id"] = model_id or "Unknown Model"
241-
metadata["max_tokens"] = getattr(model, "max_tokens", "Unknown")
242-
metadata["temperature"] = getattr(model, "temperature", "Unknown")
248+
249+
# Try to get max_tokens and temperature
250+
# Check config dict first (Strands-style), then attributes
251+
if hasattr(model, "config") and isinstance(model.config, dict):
252+
metadata["max_tokens"] = model.config.get("max_tokens", "Unknown")
253+
# Temperature might be in params dict within config
254+
params = model.config.get("params", {})
255+
metadata["temperature"] = params.get("temperature", "Unknown")
256+
else:
257+
metadata["max_tokens"] = getattr(model, "max_tokens", "Unknown")
258+
metadata["temperature"] = getattr(model, "temperature", "Unknown")
243259

244260
# Try to extract tools - check multiple attributes
245261
tools = None

src/basic_agent_chat_loop/components/config_wizard.py

Lines changed: 43 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -518,6 +518,49 @@ def _configure_harmony(self):
518518

519519
self.config["harmony"] = {}
520520

521+
# enabled (tri-state: None/auto, True/force, False/disable)
522+
current_enabled = (
523+
self.current_config.get("harmony.enabled", None)
524+
if self.current_config
525+
else None
526+
)
527+
528+
# Convert tri-state to user-friendly options
529+
if current_enabled is None:
530+
default_enabled_str = "auto"
531+
elif current_enabled:
532+
default_enabled_str = "yes"
533+
else:
534+
default_enabled_str = "no"
535+
536+
# Prompt for choice with validation
537+
print(
538+
"Enable Harmony processing? (auto/yes/no)\n"
539+
" auto = Auto-detect harmony agents (default)\n"
540+
" yes = Force enable for all agents\n"
541+
" no = Disable harmony processing"
542+
)
543+
544+
enabled_response = None
545+
while enabled_response is None:
546+
response = self._prompt_string(
547+
f"Enter choice [auto/yes/no] (default: {default_enabled_str})",
548+
default=default_enabled_str,
549+
).lower()
550+
551+
if response in ["auto", "yes", "no"]:
552+
enabled_response = response
553+
else:
554+
print("Invalid choice. Please enter 'auto', 'yes', or 'no'.")
555+
556+
# Convert back to tri-state
557+
if enabled_response == "auto":
558+
self.config["harmony"]["enabled"] = None
559+
elif enabled_response == "yes":
560+
self.config["harmony"]["enabled"] = True
561+
else:
562+
self.config["harmony"]["enabled"] = False
563+
521564
# show_detailed_thinking
522565
current_show_detailed = (
523566
self.current_config.get("harmony.show_detailed_thinking", False)

tests/unit/test_config_wizard.py

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -730,7 +730,8 @@ def test_run_complete_flow(self, mock_write, mock_input, wizard):
730730
"y",
731731
"",
732732
# Harmony
733-
"",
733+
"", # enabled (auto/yes/no)
734+
"", # show_detailed_thinking
734735
# Behavior
735736
"",
736737
"",

0 commit comments

Comments (0)