-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathtest_coordinator.py
More file actions
121 lines (101 loc) · 3.6 KB
/
test_coordinator.py
File metadata and controls
121 lines (101 loc) · 3.6 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
#!/usr/bin/env python3
"""Test the Coordinator with multiple agents."""
import asyncio
from codecourt.config import settings
from codecourt.tools import parse_diff
from codecourt.agents import Coordinator
# Sample diff with both style and security issues.
# Fixture consumed by test_coordinator(): the added hunk contains a hardcoded
# secret (API_KEY) and an f-string-built SQL query, plus an inconsistent
# if/else return — so both the security and the style agent have something
# to flag.
SAMPLE_DIFF = """diff --git a/src/api.py b/src/api.py
--- a/src/api.py
+++ b/src/api.py
@@ -10,6 +10,20 @@ from flask import Flask, request
app = Flask(__name__)
+API_KEY = "sk-secret-key-12345" # Security: hardcoded secret
+
+@app.route("/user/<user_id>")
+def get_user(user_id):
+ query = f"SELECT * FROM users WHERE id = {user_id}"
+ result = db.execute(query)
+ # Style: inconsistent return
+ if result:
+ return result
+ else:
+ return None
"""
# Intro banner: says what this script exercises before any work starts.
_RULE = "=" * 60
print(_RULE)
print("COORDINATOR TEST - Multi-Agent Review")
print(_RULE)
print()
print(
    "This runs BOTH CodeReviewer AND SecurityAgent in parallel,\n"
    "then combines their findings."
)
print()
async def test_coordinator():
    """Run the Coordinator (all default agents) against SAMPLE_DIFF and print a report.

    Parses the fixture diff, builds a Coordinator for whichever LLM provider is
    configured (OpenAI when a key is present in .env, otherwise local Ollama),
    awaits the coordinated review, and prints the aggregate result, a per-agent
    breakdown, and the combined deduplicated findings.
    """
    heavy_rule = "=" * 60
    thin_rule = "-" * 60

    diff = parse_diff(SAMPLE_DIFF)
    print(f"Files: {diff.changed_files}")
    print(f"Lines added: {diff.total_additions}")
    print()

    # Provider selection: prefer OpenAI when a key was loaded from .env,
    # otherwise fall back to a local Ollama instance.
    if settings.openai_api_key:
        print("Using OpenAI (gpt-4o-mini) - key loaded from .env")
        crew = Coordinator.with_default_agents(
            provider_name="openai",
            model="gpt-4o-mini",
        )
    else:
        print("Using Ollama (local)...")
        print("(Add OPENAI_API_KEY to .env for better results)")
        crew = Coordinator.with_default_agents(provider_name="ollama")

    print(f"Agents: {crew.agent_names}")
    print(f"Parallel mode: {crew.parallel}")
    print()
    print("🔍 Running coordinated review...")
    print()

    outcome = await crew.review(diff)

    # Aggregate result across all agents.
    print(heavy_rule)
    print("COORDINATED REVIEW RESULTS")
    print(heavy_rule)
    print()
    print(f"Total agents: {outcome.total_agents}")
    print(f"Agents approving: {outcome.agents_approving}")
    print(f"Agents requesting changes: {outcome.agents_requesting_changes}")
    print()
    print(f"Overall approval: {outcome.approval.value}")
    print(f"Average confidence: {outcome.confidence:.2f}")
    print()
    print(f"Summary: {outcome.summary}")
    print()

    # Per-agent breakdown.
    print(thin_rule)
    print("Per-Agent Results:")
    print(thin_rule)
    for name, per_agent in outcome.agent_results.items():
        print(f"\n📋 {name}:")
        print(f" Approval: {per_agent.approval.value}")
        print(f" Findings: {len(per_agent.findings)}")
        print(f" Summary: {per_agent.summary[:100]}...")

    # Combined findings, deduplicated across agents.
    print()
    print(thin_rule)
    print(f"Combined Findings ({len(outcome.findings)} total, deduplicated):")
    print(thin_rule)

    # Severity → emoji marker for the per-finding header line.
    icons = {
        "critical": "🔴",
        "error": "🟠",
        "warning": "🟡",
        "info": "🔵",
        "praise": "🟢",
    }
    if not outcome.findings:
        print("\nNo findings (agents may not have returned valid JSON)")
    else:
        for idx, item in enumerate(outcome.findings, 1):
            icon = icons.get(item.severity.value, "⚪")
            print(f"\n{icon} {idx}. [{item.severity.value.upper()}] {item.category.value}")
            print(f" File: {item.file}:{item.line}")
            print(f" Issue: {item.message}")
            if item.suggestion:
                print(f" Fix: {item.suggestion}")

    print()
    print(heavy_rule)
    print("✓ Coordinated review complete!")
    print(heavy_rule)
asyncio.run(test_coordinator())