-
Notifications
You must be signed in to change notification settings - Fork 2
Expand file tree
/
Copy pathpermission_agent.py
More file actions
106 lines (87 loc) · 3.58 KB
/
permission_agent.py
File metadata and controls
106 lines (87 loc) · 3.58 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
"""Example demonstrating PermissionComponent and PermissionSystem with dual-mode LLM model.
This script shows how to restrict an agent's access to specific tools using
a whitelist/blacklist policy. Supports both FakeModel (no API key) and
OpenAIModel (with LLM_API_KEY) for flexible testing.
"""
import asyncio
import os
from ecs_agent.core import World, Runner
from ecs_agent.components import (
LLMComponent,
ConversationComponent,
PermissionComponent,
PendingToolCallsComponent,
ToolRegistryComponent,
)
from ecs_agent.providers import FakeModel, Model
from ecs_agent.providers.config import ApiFormat
from ecs_agent.systems.permission import PermissionSystem
from ecs_agent.systems.tool_execution import ToolExecutionSystem
from ecs_agent.types import CompletionResult, Message, ToolCall, ToolSchema
async def main() -> None:
    """Demonstrate tool-permission enforcement on a single agent.

    Builds a world with one agent whose PermissionComponent blacklists
    'dangerous_tool', queues calls to both a safe and a dangerous tool,
    advances the world one tick, and prints which calls were allowed.
    Uses a real Model when LLM_API_KEY is set, otherwise a FakeModel.
    """
    # --- Resolve LLM configuration from the environment ---
    api_key = os.environ.get("LLM_API_KEY", "")
    base_url = os.environ.get(
        "LLM_BASE_URL", "https://dashscope.aliyuncs.com/compatible-mode/v1"
    )
    model_name = os.environ.get("LLM_MODEL", "qwen3.5-flash")

    # --- Pick a real provider when a key is available, FakeModel otherwise ---
    if api_key:
        print(f"Using model: {model_name}")
        print(f"Base URL: {base_url}")
        llm = Model(
            model_name,
            base_url=base_url,
            api_key=api_key,
            api_format=ApiFormat.OPENAI_CHAT_COMPLETIONS,
        )
    else:
        print("No LLM_API_KEY provided. Using FakeModel for demonstration.")
        print("To use a real API, set LLM_API_KEY, LLM_BASE_URL, and LLM_MODEL.")
        print()
        llm = FakeModel([])

    world = World()

    # 1. Tool handlers: one benign, one that the permission policy will block.
    async def safe_tool(**kwargs) -> str:
        return "Safe operation successful"

    async def dangerous_tool(**kwargs) -> str:
        return "Dangerous operation successful"

    tools = {
        name: ToolSchema(name=name, description=desc, parameters={})
        for name, desc in [
            ("safe_tool", "A safe tool"),
            ("dangerous_tool", "A dangerous tool"),
        ]
    }
    handlers = {"safe_tool": safe_tool, "dangerous_tool": dangerous_tool}

    # 2. Assemble the agent: tool registry, permission policy, LLM, conversation.
    agent = world.create_entity()
    world.add_component(agent, ToolRegistryComponent(tools=tools, handlers=handlers))
    # 'dangerous_tool' is explicitly blacklisted for this agent.
    world.add_component(agent, PermissionComponent(denied_tools=["dangerous_tool"]))
    world.add_component(agent, LLMComponent(model=llm))
    world.add_component(agent, ConversationComponent(messages=[]))

    # 3. PermissionSystem (priority -10) must run before ToolExecutionSystem (5).
    world.register_system(PermissionSystem(priority=-10), priority=-10)
    world.register_system(ToolExecutionSystem(priority=5), priority=5)

    # 4. Queue one allowed and one denied tool call on the agent.
    print("Attempting to call 'safe_tool' and 'dangerous_tool'...")
    pending = PendingToolCallsComponent(
        tool_calls=[
            ToolCall(id="c1", name="safe_tool", arguments={}),
            ToolCall(id="c2", name="dangerous_tool", arguments={}),
        ]
    )
    world.add_component(agent, pending)

    # Advance the world exactly one tick so both systems process the queue.
    await Runner().run(world, max_ticks=1)

    # 5. Report outcomes: a message containing 'denied' means the call was blocked.
    conv = world.get_component(agent, ConversationComponent)
    if conv:
        print("\nConversation History:")
        for msg in conv.messages:
            status = "DENIED" if "denied" in msg.content else "ALLOWED"
            print(f"[{msg.role}] {status}: {msg.content}")
# Entry point: run the async demo under a fresh event loop when executed as a script.
if __name__ == "__main__":
    asyncio.run(main())