-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathecho_api_server_sdk.py
More file actions
265 lines (221 loc) · 8.41 KB
/
echo_api_server_sdk.py
File metadata and controls
265 lines (221 loc) · 8.41 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
#!/usr/bin/env python3
"""
Echo API Server - SDK Version
OpenAI-compatible API server for Echo CLM System using the clean SDK
"""
import asyncio
import hashlib
import uuid
import time
from typing import Dict, List, Optional, Any
from datetime import datetime
import logging
from fastapi import FastAPI, HTTPException
from fastapi.middleware.cors import CORSMiddleware
from pydantic import BaseModel
# Import the clean CLM SDK
from clm_sdk import create_echo, CLMSystem
# Configure logging: INFO level, mirrored to both a local file
# (api_server.log, created in the working directory) and stderr so the
# server can be tailed live or inspected after the fact.
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
    handlers=[
        logging.FileHandler('api_server.log'),
        logging.StreamHandler()
    ]
)
# Module-level logger used by all handlers below.
logger = logging.getLogger(__name__)
# API Models
class ChatMessage(BaseModel):
    """One chat message in the OpenAI chat format.

    ``role`` is a free-form string (conventionally "system", "user", or
    "assistant" — not validated here); ``content`` is the message text.
    """
    role: str
    content: str
class ChatCompletionRequest(BaseModel):
    """Request body for POST /v1/chat/completions (OpenAI-compatible).

    ``temperature``, ``max_tokens`` and ``stream`` are accepted for API
    compatibility; the handler in this file does not read them.
    """
    model: str
    messages: List[ChatMessage]
    temperature: Optional[float] = 0.7
    max_tokens: Optional[int] = 1000
    stream: Optional[bool] = False
class ChatCompletionResponse(BaseModel):
    """Response body mirroring OpenAI's chat.completion object.

    ``choices`` holds the assistant message(s); ``usage`` holds token
    counts (in this server, whitespace word counts — see the handler).
    """
    id: str
    object: str = "chat.completion"
    created: int
    model: str
    choices: List[Dict[str, Any]]
    usage: Dict[str, int]
# FastAPI app
app = FastAPI(title="Echo CLM API", version="1.0.0")
# Add CORS middleware.
# NOTE(review): allow_origins=["*"] together with allow_credentials=True is
# wide open — confirm this is intended outside local development.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)
# Session management for multi-turn conversations.
# In-process registry mapping session IDs to live CLM systems; entries
# persist until explicitly deleted (see DELETE /sessions/{session_id}).
active_sessions: Dict[str, CLMSystem] = {}
def generate_session_id(messages: List[ChatMessage]) -> str:
    """Derive a deterministic session ID from the first user message.

    The same opening user message always maps to the same ID, which lets a
    stateless OpenAI-style client resume its server-side session across
    requests.  If the request contains no user message at all, a random
    (non-resumable) ID is produced instead.
    """
    for msg in messages:
        if msg.role != "user":
            continue
        # Key on at most the first 200 characters of the opening message;
        # md5 here is a cheap fingerprint, not a security boundary.
        fingerprint = hashlib.md5(msg.content[:200].encode()).hexdigest()
        return f"session_{fingerprint[:16]}"
    return f"session_{uuid.uuid4().hex[:16]}"
def get_or_create_session(session_id: str) -> CLMSystem:
    """Return the CLM system bound to *session_id*, creating it on first use."""
    session = active_sessions.get(session_id)
    if session is None:
        logger.info(f"Creating new Echo CLM session: {session_id}")
        session = create_echo()  # Use SDK
        active_sessions[session_id] = session
    else:
        logger.info(f"Using existing session: {session_id}")
    return session
def cleanup_old_sessions():
    """Placeholder for future session cleanup logic.

    Currently a no-op: entries in ``active_sessions`` live for the whole
    process lifetime unless removed via DELETE /sessions/{session_id}.
    """
    # TODO: implement timeout-based eviction (e.g. track last-activity
    # timestamps per session) so long-running servers don't grow unboundedly.
    pass
# API Endpoints
@app.get("/health")
async def health_check():
    """Liveness probe: always reports healthy with the current timestamp."""
    return {
        "status": "healthy",
        "service": "Echo CLM API",
        "timestamp": datetime.now().isoformat(),
    }
@app.get("/status")
async def status():
    """Report service metadata and the number of live CLM sessions."""
    payload = {
        "service": "Echo CLM API",
        "version": "1.0.0",
        "active_sessions": len(active_sessions),
    }
    payload["timestamp"] = datetime.now().isoformat()
    return payload
@app.post("/v1/chat/completions")
async def create_chat_completion(request: ChatCompletionRequest) -> ChatCompletionResponse:
    """Create chat completion using Echo CLM system.

    OpenAI-compatible endpoint: the last user message is fed to the CLM
    system bound to this conversation's session (derived from the first
    user message), and the reply is wrapped in a chat.completion payload.
    "Token" counts in ``usage`` are whitespace word counts, not real
    tokenizer counts.

    Raises:
        HTTPException 400: the request contains no user messages.
        HTTPException 500: any unexpected failure during CLM processing.
    """
    try:
        # Generate session ID for conversation continuity
        session_id = generate_session_id(request.messages)
        session_clm = get_or_create_session(session_id)

        # Extract user input from the last message
        user_messages = [msg for msg in request.messages if msg.role == "user"]
        if not user_messages:
            raise HTTPException(status_code=400, detail="No user messages found")
        user_input = user_messages[-1].content

        # Conversation metrics: turn count is based on user messages only.
        conversation_length = len(user_messages)
        if conversation_length == 1:
            logger.info(f"Starting new conversation in session {session_id}")
        else:
            # The CLMSystem's internal conversation_history handles context
            logger.info(f"Continuing conversation in session {session_id} (turn {conversation_length})")

        # Process input through Echo CLM system
        logger.info(f"Processing user input (session {session_id}, turn {conversation_length}): {user_input[:100]}...")
        start_time = time.time()
        result = await session_clm.process_input(user_input)
        processing_time = time.time() - start_time
        logger.info(f"CLM processing completed in {processing_time:.2f}s")

        # Create OpenAI-compatible response
        reply = result["consciousness_response"]
        prompt_tokens = len(user_input.split())
        completion_tokens = len(reply.split())
        response = ChatCompletionResponse(
            id=f"chatcmpl-{uuid.uuid4().hex[:12]}",
            created=int(time.time()),
            model=request.model,
            choices=[
                {
                    "index": 0,
                    "message": {
                        "role": "assistant",
                        "content": reply
                    },
                    "finish_reason": "stop"
                }
            ],
            usage={
                "prompt_tokens": prompt_tokens,
                "completion_tokens": completion_tokens,
                "total_tokens": prompt_tokens + completion_tokens
            }
        )

        # BUG FIX: the old f-string applied :.2f to result.get(...) even when
        # the quality was missing, and formatting the 'N/A' fallback string
        # with a float spec raises ValueError — crashing the success path.
        quality = result.get("response_quality")
        quality_str = f"{quality:.2f}" if isinstance(quality, (int, float)) else "N/A"
        logger.info(f"Response sent (session {session_id}, quality: {quality_str})")
        return response
    except HTTPException:
        # BUG FIX: let deliberate HTTP errors (e.g. the 400 above) propagate
        # unchanged instead of being swallowed and re-wrapped as a 500 below.
        raise
    except Exception as e:
        # logger.exception also records the traceback for post-mortems.
        logger.exception(f"Error in chat completion: {e}")
        raise HTTPException(status_code=500, detail=f"Internal server error: {str(e)}")
@app.get("/v1/models")
async def list_models():
    """List available models (OpenAI compatibility)."""
    echo_model = {
        "id": "echo",
        "object": "model",
        "created": int(time.time()),
        "owned_by": "echo-clm",
        "permission": [],
        "root": "echo",
        "parent": None,
    }
    return {"object": "list", "data": [echo_model]}
# Session management endpoints (for debugging/monitoring)
@app.get("/sessions")
async def list_sessions():
    """List active sessions (debugging endpoint)."""
    # Summarize each live CLM system.  The local is deliberately not named
    # `status`, which would shadow the /status endpoint function.
    summaries = {}
    for sid, clm in active_sessions.items():
        state = clm.get_system_status()
        summaries[sid] = {
            "turns": state["conversation_turns"],
            "avg_quality": state["avg_response_quality"],
            "memory_count": state["brain_memory_count"],
            "evolution_metrics": state["evolution_metrics"],
        }
    return {
        "active_sessions": len(active_sessions),
        "sessions": summaries,
    }
@app.get("/sessions/{session_id}")
async def get_session_status(session_id: str):
    """Get specific session status."""
    try:
        clm_system = active_sessions[session_id]
    except KeyError:
        raise HTTPException(status_code=404, detail="Session not found") from None
    return clm_system.get_system_status()
@app.delete("/sessions/{session_id}")
async def delete_session(session_id: str):
    """Delete a specific session."""
    if session_id in active_sessions:
        del active_sessions[session_id]
        logger.info(f"Deleted session: {session_id}")
        return {"message": f"Session {session_id} deleted"}
    raise HTTPException(status_code=404, detail="Session not found")
@app.post("/sessions/{session_id}/introspect")
async def session_introspect(session_id: str):
    """Trigger introspection for a specific session."""
    clm_system = active_sessions.get(session_id)
    if clm_system is None:
        raise HTTPException(status_code=404, detail="Session not found")
    reflection = await clm_system.introspect()
    return {
        "session_id": session_id,
        "introspection": reflection,
        "timestamp": datetime.now().isoformat(),
    }
# Script entry point: start the server directly with uvicorn.
if __name__ == "__main__":
    import uvicorn
    print("🚀 Starting Echo CLM API Server (SDK Version)")
    print("=" * 50)
    print("API Documentation: http://localhost:8000/docs")
    print("Health Check: http://localhost:8000/health")
    print("Status: http://localhost:8000/status")
    print("Sessions: http://localhost:8000/sessions")
    print("=" * 50)
    # NOTE(review): 0.0.0.0 binds on all interfaces, exposing the API to the
    # whole network with no auth — confirm that is intended for deployment.
    uvicorn.run(
        app,
        host="0.0.0.0",
        port=8000,
        log_level="info"
    )