-
Notifications
You must be signed in to change notification settings - Fork 32
Expand file tree
/
Copy pathprompt_flow.py
More file actions
123 lines (101 loc) · 4.46 KB
/
prompt_flow.py
File metadata and controls
123 lines (101 loc) · 4.46 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
import logging
from typing import Optional as _Optional
from openai import OpenAI
from coderag.config import OPENAI_API_KEY, OPENAI_CHAT_MODEL
from coderag.search import search_code
logger = logging.getLogger(__name__)
# Build the shared, module-level OpenAI client. ``client`` is ``None`` when
# the AI service is unavailable (missing API key or construction failure);
# callers are expected to check for that before use.
def _init_client() -> _Optional[OpenAI]:
    """Construct the OpenAI client, returning None on any failure."""
    try:
        if not OPENAI_API_KEY:
            raise ValueError("OpenAI API key not found")
        new_client = OpenAI(api_key=OPENAI_API_KEY)
    except Exception as err:
        logger.error(f"Failed to initialize OpenAI client: {err}")
        return None
    logger.info(f"OpenAI client initialized with chat model: {OPENAI_CHAT_MODEL}")
    return new_client


client: _Optional[OpenAI] = _init_client()
# System message sent with every chat completion: frames the assistant's
# role and tells it to lean on — but not be limited by — retrieved code.
SYSTEM_PROMPT = (
    "You are an expert coding assistant. Your task is to help users with their "
    "question. Use the retrieved code context to inform your responses, but feel "
    "free to suggest better solutions if appropriate."
)
# User-message template, filled via str.format with `query` (the raw user
# question) and `code_context` (formatted search hits) in execute_rag_flow.
PRE_PROMPT = (
    "Based on the user's query and the following code context, provide a helpful "
    "response. If improvements can be made, suggest them with explanations.\n\n"
    "User Query: {query}\n\n"
    "Retrieved Code Context:\n{code_context}\n\nYour response:"
)
def execute_rag_flow(user_query: str) -> str:
"""Execute the RAG flow for answering user queries.
Args:
user_query: The user's question or request
Returns:
AI-generated response based on code context
"""
try:
if not client:
logger.error("OpenAI client not initialized")
return (
"Error: AI service is not available. Please check your "
"OpenAI API key."
)
if not user_query or not user_query.strip():
logger.warning("Empty query received")
return "Please provide a question or request."
logger.info(f"Processing query: '{user_query[:50]}...'")
# Perform code search
search_results = search_code(user_query)
if not search_results:
logger.info("No relevant code found for query")
return (
"No relevant code found for your query. The codebase might not be "
"indexed yet or your query might be too specific."
)
logger.debug(f"Found {len(search_results)} search results")
# Prepare code context with error handling
try:
code_context = "\n\n".join(
[
(
f"File: {result['filename']}\n"
f"Path: {result['filepath']}\n"
# Cosine similarity (IndexFlatIP returns inner product)
f"Similarity: {max(0.0, min(1.0, result['distance'])):.3f}\n"
f"{result['content']}"
)
for result in search_results[:3] # Limit to top 3 results
]
)
except (KeyError, TypeError) as e:
logger.error(f"Error preparing code context: {e}")
return "Error processing search results. Please try again."
# Construct the full prompt
full_prompt = PRE_PROMPT.format(query=user_query, code_context=code_context)
# Generate response using OpenAI with error handling
try:
logger.debug("Sending request to OpenAI")
# Rough heuristic: keep total under ~7000 tokens
est_prompt_tokens = max(1, len(full_prompt) // 4)
max_completion = max(256, min(2000, 7000 - est_prompt_tokens))
response = client.chat.completions.create(
model=OPENAI_CHAT_MODEL,
messages=[
{"role": "system", "content": SYSTEM_PROMPT},
{"role": "user", "content": full_prompt},
],
temperature=0.3,
max_tokens=max_completion,
timeout=60,
)
if not response.choices or not response.choices[0].message.content:
logger.error("Empty response from OpenAI")
return "Error: Received empty response from AI service."
result = response.choices[0].message.content.strip()
logger.info("Successfully generated response")
return result
except Exception as e:
logger.error(f"OpenAI API error: {str(e)}")
return "Error communicating with AI service. Please try again later."
except Exception as e:
logger.error(f"Unexpected error in RAG flow: {str(e)}")
return "An unexpected error occurred. Please try again."