Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
76 changes: 76 additions & 0 deletions autobot-backend/chat_workflow/llm_handler.py
Original file line number Diff line number Diff line change
Expand Up @@ -17,6 +17,9 @@
from autobot_shared.http_client import get_http_client
from constants.model_constants import ModelConstants
from dependencies import global_config_manager
from extensions.base import HookContext
from extensions.hooks import HookPoint
from extensions.manager import get_extension_manager
from prompt_manager import get_language_instruction, get_prompt, resolve_language

from .models import WorkflowSession
Expand All @@ -27,6 +30,73 @@
_VALID_URL_SCHEMES = ("http://", "https://")


async def _emit_system_prompt_ready(system_prompt: str, session: Any) -> str:
    """Run the ON_SYSTEM_PROMPT_READY hook and return the (possibly new) prompt.

    Issue #3405: invoked right after _get_system_prompt() so registered
    extensions can inspect or rewrite the system prompt before prompt
    assembly. When no extension is registered for this hook the call
    degrades to a no-op and the caller gets the original string back.

    Args:
        system_prompt: The assembled system prompt string.
        session: WorkflowSession instance, exposed to hooks as data["session"].

    Returns:
        The transformed system prompt, or the original when no extension
        produced a string replacement.
    """
    hook_ctx = HookContext(
        session_id=getattr(session, "session_id", ""),
        data={"system_prompt": system_prompt, "session": session},
    )
    manager = get_extension_manager()
    transformed = await manager.invoke_with_transform(
        HookPoint.SYSTEM_PROMPT_READY, hook_ctx, "system_prompt"
    )
    # Anything other than a genuinely changed string keeps the original.
    if not isinstance(transformed, str) or transformed == system_prompt:
        return system_prompt
    logger.debug(
        "[#3405] SYSTEM_PROMPT_READY modified system prompt (%d -> %d chars)",
        len(system_prompt),
        len(transformed),
    )
    return transformed


async def _emit_full_prompt_ready(
    prompt: str, llm_params: Dict[str, Any], context: Dict[str, Any]
) -> str:
    """Run the ON_FULL_PROMPT_READY hook and return the (possibly new) prompt.

    Issue #3405: invoked right after _build_full_prompt() so registered
    extensions can append dynamic content (e.g. infrastructure telemetry
    hints) before the prompt is sent to the LLM. When no extension is
    registered for this hook the call degrades to a no-op and the caller
    gets the original string back.

    Args:
        prompt: The fully assembled prompt string.
        llm_params: Dict containing model/endpoint selection.
        context: Arbitrary request-level context dict.

    Returns:
        The transformed full prompt, or the original when no extension
        produced a string replacement.
    """
    hook_ctx = HookContext(
        session_id=context.get("session_id", ""),
        data={"prompt": prompt, "llm_params": llm_params, "context": context},
    )
    manager = get_extension_manager()
    transformed = await manager.invoke_with_transform(
        HookPoint.FULL_PROMPT_READY, hook_ctx, "prompt"
    )
    # Anything other than a genuinely changed string keeps the original.
    if not isinstance(transformed, str) or transformed == prompt:
        return prompt
    logger.debug(
        "[#3405] FULL_PROMPT_READY modified full prompt (%d -> %d chars)",
        len(prompt),
        len(transformed),
    )
    return transformed


class LLMHandlerMixin:
"""Mixin for LLM interaction handling."""

Expand Down Expand Up @@ -310,6 +380,7 @@ async def _prepare_llm_request_params(
else:
ollama_endpoint = self._get_ollama_endpoint_for_model(selected_model)
system_prompt = self._get_system_prompt(language=language)
system_prompt = await _emit_system_prompt_ready(system_prompt, session)
conversation_context = self._build_conversation_context(session)

# Knowledge retrieval for RAG
Expand All @@ -324,6 +395,11 @@ async def _prepare_llm_request_params(
full_prompt = self._build_full_prompt(
system_prompt, knowledge_context, conversation_context, message
)
full_prompt = await _emit_full_prompt_ready(
full_prompt,
{"endpoint": ollama_endpoint, "model": selected_model},
{"session_id": session.session_id, "message": message},
)

logger.info(
"[ChatWorkflowManager] Making Ollama request to: %s", ollama_endpoint
Expand Down
195 changes: 195 additions & 0 deletions autobot-backend/chat_workflow/prompt_hooks_test.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,195 @@
# AutoBot - AI-Powered Automation Platform
# Copyright (c) 2025 mrveiss
# Author: mrveiss
"""
Unit tests for Issue #3405 — ON_SYSTEM_PROMPT_READY and ON_FULL_PROMPT_READY
plugin hooks in the chat pipeline.

Tests verify:
1. New HookPoint enum members exist
2. ON_SYSTEM_PROMPT_READY fires with correct args and return value replaces prompt
3. ON_FULL_PROMPT_READY fires with correct args and return value replaces prompt
4. No-op when no extensions are registered for a hook
5. Extension errors do not crash the pipeline
"""

from typing import Optional
from unittest.mock import AsyncMock, patch

import pytest

from chat_workflow.llm_handler import _emit_full_prompt_ready, _emit_system_prompt_ready
from extensions.base import Extension, HookContext
from extensions.hooks import HookPoint
from extensions.manager import (
    ExtensionManager,
    get_extension_manager,
    reset_extension_manager,
)


class _SystemPromptWatcher(Extension):
    """Test double that captures the hook payload and optionally rewrites it."""

    name = "test_system_prompt_watcher"

    def __init__(self, return_value: Optional[str] = None) -> None:
        # What the hook hands back; None means "leave the prompt alone".
        self._return_value = return_value
        # Populated once on_system_prompt_ready() has fired.
        self.captured_system_prompt: Optional[str] = None

    async def on_system_prompt_ready(self, ctx: HookContext) -> Optional[str]:
        """Record the incoming system prompt, then return the configured override."""
        self.captured_system_prompt = ctx.get("system_prompt")
        return self._return_value


class _FullPromptWatcher(Extension):
    """Test double that captures the full-prompt hook payload and optionally rewrites it."""

    name = "test_full_prompt_watcher"

    def __init__(self, return_value: Optional[str] = None) -> None:
        # What the hook hands back; None means "leave the prompt alone".
        self._return_value = return_value
        # Each of these is populated once on_full_prompt_ready() has fired.
        self.captured_prompt: Optional[str] = None
        self.captured_llm_params: Optional[dict] = None
        self.captured_context: Optional[dict] = None

    async def on_full_prompt_ready(self, ctx: HookContext) -> Optional[str]:
        """Record prompt, llm_params and context, then return the configured override."""
        self.captured_prompt = ctx.get("prompt")
        self.captured_llm_params = ctx.get("llm_params")
        self.captured_context = ctx.get("context")
        return self._return_value


class _ErrorExtension(Extension):
    """Test double whose hook implementation unconditionally fails."""

    name = "test_error_extension"

    async def on_full_prompt_ready(self, ctx: HookContext) -> Optional[str]:
        # The pipeline is expected to swallow hook errors; this proves it.
        raise RuntimeError("simulated extension failure")


class _FakeSession:
session_id = "sess-test-001"
metadata: dict = {}


@pytest.fixture(autouse=True)
def reset_manager():
    """Give every test a pristine global ExtensionManager (setup and teardown)."""
    reset_extension_manager()
    try:
        yield
    finally:
        reset_extension_manager()


class TestNewHookPoints:
    """Verify the new HookPoint members are present."""

    def test_on_system_prompt_ready_exists(self):
        assert getattr(HookPoint, "SYSTEM_PROMPT_READY") is not None

    def test_on_full_prompt_ready_exists(self):
        assert getattr(HookPoint, "FULL_PROMPT_READY") is not None

    def test_total_hook_count_increased(self):
        # Original 22 hooks + 2 new ones = 24
        assert len(list(HookPoint)) == 24


class TestEmitSystemPromptReady:
    """Tests for _emit_system_prompt_ready helper.

    Note: `get_extension_manager` is imported once at module level; the
    original repeated the same function-scope import in every test, which
    PEP 8 reserves for genuine circular-import workarounds.
    """

    @pytest.mark.asyncio
    async def test_noop_when_no_extension_registered(self):
        """Returns original prompt unchanged when no extension is registered."""
        original = "You are AutoBot."
        result = await _emit_system_prompt_ready(original, _FakeSession())
        assert result == original

    @pytest.mark.asyncio
    async def test_extension_receives_correct_args(self):
        """Extension receives system_prompt and session via HookContext."""
        watcher = _SystemPromptWatcher(return_value=None)
        get_extension_manager().register(watcher)

        original = "You are AutoBot."
        session = _FakeSession()
        await _emit_system_prompt_ready(original, session)

        assert watcher.captured_system_prompt == original

    @pytest.mark.asyncio
    async def test_return_value_replaces_prompt(self):
        """A non-None str returned by extension replaces the system prompt."""
        modified = "You are AutoBot [modified by extension]."
        watcher = _SystemPromptWatcher(return_value=modified)
        get_extension_manager().register(watcher)

        result = await _emit_system_prompt_ready("You are AutoBot.", _FakeSession())
        assert result == modified

    @pytest.mark.asyncio
    async def test_none_return_keeps_original(self):
        """Returning None from extension keeps the original prompt."""
        watcher = _SystemPromptWatcher(return_value=None)
        get_extension_manager().register(watcher)

        original = "You are AutoBot."
        result = await _emit_system_prompt_ready(original, _FakeSession())
        assert result == original


class TestEmitFullPromptReady:
    """Tests for _emit_full_prompt_ready helper.

    Note: `get_extension_manager` is imported once at module level; the
    original repeated the same function-scope import in every test, which
    PEP 8 reserves for genuine circular-import workarounds.
    """

    @pytest.mark.asyncio
    async def test_noop_when_no_extension_registered(self):
        """Returns original prompt unchanged when no extension is registered."""
        original = "System prompt\n\nUser: hello\n\nAssistant:"
        result = await _emit_full_prompt_ready(original, {}, {})
        assert result == original

    @pytest.mark.asyncio
    async def test_extension_receives_correct_args(self):
        """Extension receives prompt, llm_params and context via HookContext."""
        watcher = _FullPromptWatcher(return_value=None)
        get_extension_manager().register(watcher)

        original = "System prompt\n\nUser: hello\n\nAssistant:"
        llm_params = {"model": "llama3", "endpoint": "http://localhost:11434/api/generate"}
        context = {"session_id": "sess-abc", "message": "hello"}

        await _emit_full_prompt_ready(original, llm_params, context)

        assert watcher.captured_prompt == original
        assert watcher.captured_llm_params == llm_params
        assert watcher.captured_context == context

    @pytest.mark.asyncio
    async def test_return_value_replaces_prompt(self):
        """A non-None str returned by extension replaces the full prompt."""
        modified = "System prompt\n\nUser: hello\n\nAssistant:\n\n[hint: be concise]"
        watcher = _FullPromptWatcher(return_value=modified)
        get_extension_manager().register(watcher)

        result = await _emit_full_prompt_ready(
            "System prompt\n\nUser: hello\n\nAssistant:", {}, {}
        )
        assert result == modified

    @pytest.mark.asyncio
    async def test_extension_error_does_not_crash_pipeline(self):
        """An exception inside an extension is swallowed; original prompt is returned."""
        get_extension_manager().register(_ErrorExtension())

        original = "System prompt\n\nUser: hello\n\nAssistant:"
        result = await _emit_full_prompt_ready(original, {}, {})
        assert result == original
34 changes: 34 additions & 0 deletions autobot-backend/extensions/base.py
Original file line number Diff line number Diff line change
Expand Up @@ -488,6 +488,40 @@ async def on_approval_received(self, ctx: HookContext) -> Optional[None]:
None (logging only)
"""

# ========== Prompt Pipeline Hooks (Issue #3405) ==========

async def on_system_prompt_ready(self, ctx: HookContext) -> Optional[str]:
    """
    Hook fired once the system prompt has been built (Issue #3405).

    The prompt text arrives in ctx.data["system_prompt"] and the owning
    session in ctx.data["session"]. Implementations may return a
    replacement string; returning None leaves the prompt untouched.

    Args:
        ctx: Hook context with data["system_prompt"] and data["session"]

    Returns:
        Replacement system prompt str, or None to keep the original
    """

async def on_full_prompt_ready(self, ctx: HookContext) -> Optional[str]:
    """
    Hook fired once the full prompt (system + knowledge + conversation)
    has been assembled (Issue #3405).

    The assembled text arrives in ctx.data["prompt"], LLM parameters in
    ctx.data["llm_params"], and request-level context in
    ctx.data["context"]. Implementations may return a replacement string;
    returning None leaves the prompt untouched.

    Args:
        ctx: Hook context with data["prompt"], data["llm_params"],
             data["context"]

    Returns:
        Replacement full prompt str, or None to keep the original
    """

# ========== Utility Methods ==========

def __repr__(self) -> str:
Expand Down
9 changes: 7 additions & 2 deletions autobot-backend/extensions/extension_hooks_test.py
Original file line number Diff line number Diff line change
Expand Up @@ -30,8 +30,8 @@ class TestHookPoint:
"""Test HookPoint enum definitions."""

def test_hook_count(self):
"""Should have exactly 22 hook points."""
assert len(HookPoint) == 22
"""Should have exactly 24 hook points (22 original + 2 added in #3405)."""
assert len(HookPoint) == 24

def test_message_preparation_hooks(self):
"""Should have message preparation hooks."""
Expand Down Expand Up @@ -82,6 +82,11 @@ def test_approval_hooks(self):
assert HookPoint.APPROVAL_REQUIRED is not None
assert HookPoint.APPROVAL_RECEIVED is not None

def test_prompt_pipeline_hooks(self):
    """Should have prompt pipeline hooks added in Issue #3405."""
    for new_hook in (HookPoint.SYSTEM_PROMPT_READY, HookPoint.FULL_PROMPT_READY):
        assert new_hook is not None

def test_hook_metadata_exists(self):
"""Every hook should have metadata."""
for hook in HookPoint:
Expand Down
14 changes: 14 additions & 0 deletions autobot-backend/extensions/hooks.py
Original file line number Diff line number Diff line change
Expand Up @@ -76,6 +76,10 @@ class HookPoint(Enum):
APPROVAL_REQUIRED = auto() # Method: on_approval_required
APPROVAL_RECEIVED = auto() # Method: on_approval_received

# Prompt pipeline — Issue #3405
SYSTEM_PROMPT_READY = auto() # Method: on_system_prompt_ready
FULL_PROMPT_READY = auto() # Method: on_full_prompt_ready


# Hook metadata for documentation and validation
HOOK_METADATA = {
Expand Down Expand Up @@ -189,6 +193,16 @@ class HookPoint(Enum):
"can_modify": [],
"return_type": "None (logging only)",
},
HookPoint.SYSTEM_PROMPT_READY: {
"description": "Called after the system prompt is built; return a str to replace it",
"can_modify": ["system_prompt"],
"return_type": "Modified system prompt str or None",
},
HookPoint.FULL_PROMPT_READY: {
"description": "Called after the full prompt is assembled; return a str to replace it",
"can_modify": ["prompt"],
"return_type": "Modified full prompt str or None",
},
}


Expand Down
Loading
Loading