Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 2 additions & 2 deletions .env.example
Original file line number Diff line number Diff line change
Expand Up @@ -56,9 +56,9 @@ SLACK_CLIENT_SECRET=...
# Maximum reasoning iterations before responding
MAX_REASONING_ITERATIONS=5

# Confidence threshold for auto-reply (0.0 - 1.0)
# Confidence threshold for auto-reply (0.0 - 1.0)
# Below this, agent will escalate or ask for clarification
CONFIDENCE_THRESHOLD_FOR_AUTO_REPLY=0.75
CONFIDENCE_THRESHOLD_FOR_AUTO_REPLY=0.75

# Enable multi-iteration deep reasoning
ENABLE_DEEP_REASONING=true
Expand Down
44 changes: 44 additions & 0 deletions .github/workflows/claude-code-review.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,44 @@
# Automated PR review via the Claude Code GitHub Action.
# NOTE(review): indentation was lost in the pasted diff; reconstructed to
# conventional GitHub Actions layout — verify against the committed file.
name: Claude Code Review

on:
  pull_request:
    types: [opened, synchronize, ready_for_review, reopened]
    # Optional: Only run on specific file changes
    # paths:
    #   - "src/**/*.ts"
    #   - "src/**/*.tsx"
    #   - "src/**/*.js"
    #   - "src/**/*.jsx"

jobs:
  claude-review:
    # Optional: Filter by PR author
    # if: |
    #   github.event.pull_request.user.login == 'external-contributor' ||
    #   github.event.pull_request.user.login == 'new-developer' ||
    #   github.event.pull_request.author_association == 'FIRST_TIME_CONTRIBUTOR'

    runs-on: ubuntu-latest
    permissions:
      contents: read
      pull-requests: read
      issues: read
      id-token: write

    steps:
      - name: Checkout repository
        uses: actions/checkout@v4
        with:
          # Shallow clone is enough — the action fetches diff context itself.
          fetch-depth: 1

      - name: Run Claude Code Review
        id: claude-review
        uses: anthropics/claude-code-action@v1
        with:
          anthropic_api_key: ${{ secrets.ANTHROPIC_API_KEY }}
          plugin_marketplaces: 'https://github.com/anthropics/claude-code.git'
          plugins: 'code-review@claude-code-plugins'
          prompt: '/code-review:code-review ${{ github.repository }}/pull/${{ github.event.pull_request.number }}'
          # See https://github.com/anthropics/claude-code-action/blob/main/docs/usage.md
          # or https://code.claude.com/docs/en/cli-reference for available options

36 changes: 36 additions & 0 deletions .github/workflows/claude.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,36 @@
# Interactive Claude Code trigger: runs when someone mentions @claude in an
# issue, issue/PR comment, or PR review.
# NOTE(review): indentation was lost in the pasted diff; reconstructed to
# conventional GitHub Actions layout — verify against the committed file.
name: Claude Code
on:
  issue_comment:
    types: [created]
  pull_request_review_comment:
    types: [created]
  issues:
    types: [opened, assigned]
  pull_request_review:
    types: [submitted]

jobs:
  claude:
    # Only run when the triggering text actually contains an @claude mention.
    if: |
      (github.event_name == 'issue_comment' && contains(github.event.comment.body, '@claude')) ||
      (github.event_name == 'pull_request_review_comment' && contains(github.event.comment.body, '@claude')) ||
      (github.event_name == 'pull_request_review' && contains(github.event.review.body, '@claude')) ||
      (github.event_name == 'issues' && (contains(github.event.issue.body, '@claude') || contains(github.event.issue.title, '@claude')))
    runs-on: ubuntu-latest
    permissions:
      contents: read
      pull-requests: write
      issues: write
      id-token: write
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4
        with:
          fetch-depth: 1
      - name: Run Claude Code
        uses: anthropics/claude-code-action@v1
        with:
          anthropic_api_key: ${{ secrets.ANTHROPIC_API_KEY }}
          # NOTE(review): non-Anthropic model slug routed through a custom
          # ANTHROPIC_BASE_URL — confirm the proxy accepts this model id.
          model: stepfun/step-3.5-flash:free
        env:
          ANTHROPIC_BASE_URL: ${{ secrets.ANTHROPIC_BASE_URL }}
Binary file modified agent/__pycache__/__init__.cpython-311.pyc
Binary file not shown.
Binary file modified agent/__pycache__/config.cpython-311.pyc
Binary file not shown.
28 changes: 10 additions & 18 deletions agent/cli_chat.py
Original file line number Diff line number Diff line change
Expand Up @@ -41,7 +41,6 @@
from agent.state import create_initial_state
from agent.cli_graph import get_cli_agent_graph
from agent.github_repo_tracker import GitHubRepoTracker
from agent.codebase_search import CodebaseSearchEngine

# Configure Rich console
console = Console(width=120)
Expand Down Expand Up @@ -337,19 +336,17 @@ def print_files(self, files: List[Any]):

for i, file in enumerate(files[:10], 1): # Show top 10
self.console.print(f"\n[bold cyan]{i}.[/bold cyan] [yellow]{file.path}[/yellow]")
self.console.print(f" [dim]Source: {file.source} | Language: {file.language} | Score: {len(file.matches)}[/dim]")
self.console.print(f" [dim]Matches: {len(file.matches)}[/dim]")
self.console.print(f" [dim]Why: {file.retrieval_reason}[/dim]")

if file.matches:
self.console.print(f" [dim]Matches:[/dim]")
# Show more matches for directory structure files
max_matches = 20 if file.language == "directory" else 3
for match in file.matches[:max_matches]:
for match in file.matches[:3]:
display_match = match[:100] if len(match) > 100 else match
self.console.print(f" [gray]• {display_match}[/gray]")

# Show content preview for small files
if file.content and file.language != "directory" and len(file.content) < 2000:
if file.content and len(file.content) < 2000:
self.console.print(f" [dim]Content preview:[/dim]")
self.console.print(f" [gray]{file.content[:500]}[/gray]")

Expand All @@ -360,17 +357,15 @@ def print_research_summary(self, state: Dict[str, Any]):
"""Print research summary after agent processing."""
thinking_log = state.get("thinking_log", [])
research_files = state.get("research_files", [])
confidence = state.get("research_confidence", 0)


# Print thinking log
if thinking_log:
self.console.print("\n")
for entry in thinking_log:
# Extract the thinking part after "Iteration X:"
if ": " in entry:
thinking = entry.split(": ", 1)[1]
self.print_thinking(thinking)

# Print files
if research_files:
self.console.print("\n")
Expand All @@ -379,9 +374,8 @@ def print_research_summary(self, state: Dict[str, Any]):
# Print summary
self.console.print(f"\n[bold green]✓ Research Complete[/bold green]")
self.console.print(
f" [dim]Iterations: {state.get('research_iterations', 0)} | "
f"Files: {len(research_files)} | "
f"Confidence: {confidence:.1%}[/dim]\n"
f" [dim]Iterations: {len(state.get('search_history') or [])} | "
f"Files: {len(research_files)}[/dim]\n"
)

def process_message(self, message: str) -> Optional[str]:
Expand Down Expand Up @@ -454,12 +448,10 @@ def make_display() -> Any:

# Short summary; full thinking/files available via 'thinking' and 'files' commands
research_files = result.get("research_files", [])
confidence = result.get("research_confidence", 0)
self.console.print(f"\n[bold green]✓ Research complete[/bold green]")
self.console.print(
f" [dim]Iterations: {result.get('research_iterations', 0)} | "
f"Files: {len(research_files)} | "
f"Confidence: {confidence:.1%}[/dim]"
f" [dim]Iterations: {len(result.get('search_history') or [])} | "
f"Files: {len(research_files)}[/dim]"
)
self.console.print(
"[dim]Type [cyan]thinking[/cyan] or [cyan]files[/cyan] for full details.[/dim]\n"
Expand Down
58 changes: 23 additions & 35 deletions agent/cli_graph.py
Original file line number Diff line number Diff line change
@@ -1,74 +1,62 @@
"""
LangGraph workflow for CLI mode.

Simplified topology without Slack dependencies:
cli_context_builder → olake_context_summariser → (needs_codebase_search? deep_researcher → relevance_filter : direct) → solution → END
Topology:
cli_context_builder → gate_filter → deep_researcher → solution → END

Note: gate_filter's Slack reply is silently skipped in CLI mode (no Slack credentials).
Blocked/non-actionable messages still reach END without a solution node call.
"""

from langgraph.graph import StateGraph, END
from typing import Literal

from agent.state import ConversationState
from agent.nodes.olake_context_summariser import summarise_olake_context
from agent.nodes.gate_filter import gate_filter
from agent.nodes.deep_researcher import deep_researcher
from agent.nodes.cli import (
build_cli_context,
cli_solution_provider,
cli_relevance_filter,
)
from agent.nodes.cli import build_cli_context, cli_solution_provider
from agent.logger import get_logger


def route_after_gate(
    state: ConversationState,
) -> Literal["deep_researcher", "__end__"]:
    """Route after the gate filter.

    Returns "__end__" (stop without producing a solution) when the message
    is harmful, not relevant, or not actionable; otherwise continues to the
    "deep_researcher" node. Missing flags default to the permissive value,
    so an unpopulated state proceeds to research.
    """
    # Harmful or off-topic input is dropped without a reply in CLI mode.
    if state.get("is_harmful") or not state.get("is_relevant", True):
        return "__end__"
    # Relevant but non-actionable messages also stop here.
    if not state.get("is_actionable", True):
        return "__end__"
    return "deep_researcher"


# ---------------------------------------------------------------------------
# Graph factory
# ---------------------------------------------------------------------------

def create_cli_agent_graph() -> StateGraph:
    """
    Build and compile the LangGraph agent workflow for CLI mode.

    Node sequence:
        cli_context_builder → gate_filter → deep_researcher → solution → END

    Blocked / non-actionable messages are routed straight from gate_filter
    to END without invoking the solution node (see route_after_gate).

    Returns:
        The compiled LangGraph workflow, ready to ``invoke``.
    """
    logger = get_logger()
    logger.logger.info("Creating CLI agent graph...")

    workflow = StateGraph(ConversationState)

    # ── Nodes ────────────────────────────────────────────────────────────
    workflow.add_node("cli_context_builder", build_cli_context)
    workflow.add_node("gate_filter", gate_filter)
    workflow.add_node("deep_researcher", deep_researcher)
    workflow.add_node("solution", cli_solution_provider)

    # ── Entry ────────────────────────────────────────────────────────────
    workflow.set_entry_point("cli_context_builder")

    workflow.add_edge("cli_context_builder", "gate_filter")
    workflow.add_conditional_edges(
        "gate_filter",
        route_after_gate,
        {
            "deep_researcher": "deep_researcher",
            "__end__": END,
        },
    )

    # After research, go straight to the solution node.
    workflow.add_edge("deep_researcher", "solution")
    workflow.add_edge("solution", END)

    compiled = workflow.compile()
    logger.logger.info("CLI agent graph created successfully")
    return compiled
Expand Down
Loading