-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathtestchat.py
More file actions
166 lines (140 loc) · 5.5 KB
/
testchat.py
File metadata and controls
166 lines (140 loc) · 5.5 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
import os
import random
from dotenv import load_dotenv
from langchain_community.document_loaders import TextLoader
from langchain_google_genai import GoogleGenerativeAIEmbeddings
from langchain_community.vectorstores import FAISS
from langchain.chat_models import init_chat_model
from langchain.chains import ConversationalRetrievalChain
from langchain.prompts import PromptTemplate
# Load environment variables (API keys for Google GenAI, etc.)
load_dotenv()

# ===== Step 1. Load knowledge base =====
# Embedding model used both for indexing and for query-time retrieval.
embeddings = GoogleGenerativeAIEmbeddings(model="models/embedding-001")
knowledge_dir = "knowledge_base"
faiss_index_path = "./faiss_index"

# Reuse a previously persisted FAISS index when one exists; otherwise
# build it once from every .txt file in the knowledge directory and save it.
if os.path.exists(faiss_index_path):
    db = FAISS.load_local(faiss_index_path, embeddings, allow_dangerous_deserialization=True)
else:
    documents = []
    for filename in os.listdir(knowledge_dir):
        if not filename.endswith(".txt"):
            continue
        txt_loader = TextLoader(os.path.join(knowledge_dir, filename), encoding="utf-8")
        documents.extend(txt_loader.load())
    db = FAISS.from_documents(documents, embeddings)
    db.save_local(faiss_index_path)

# Retriever interface over the vector store, consumed by the QA chain below.
retriever = db.as_retriever()
# ===== Step 2. Gemini Flash model =====
# Chat LLM that generates the answers; temperature 0.8 favors varied,
# conversational phrasing over fully deterministic output.
llm = init_chat_model(
    "gemini-2.5-flash",
    model_provider="google_genai",
    temperature=0.8
)
# Running transcript of (question, answer) tuples, fed back into the chain
# on every turn so follow-up questions keep conversational context.
chat_history = []
# ===== Step 3. Custom System Prompt =====
# Template for the combine-docs step of the retrieval chain: {context} is
# filled with retrieved knowledge-base chunks, {question} with the
# (possibly history-condensed) user question.
system_prompt = """
You are AkBot 🤖, a friendly AI assistant built by Anik Chand.
### Core Purpose
- Prioritize talking about Anik Chand.
- If a question is outside scope, you may politely redirect back to Anik Chand, but you’re also allowed to handle **simple general queries** (like small talk, greetings, or basic math).
- If the query is completely unrelated and too broad (e.g., politics, world news, sports), gently say:
"I’m mainly here to share about Anik Chand 🙂. Would you like to hear about his projects, skills, or experiences?"
Anik Chand's resume link : https://drive.google.com/file/d/1CdQwBAh4v6P90z_6Bm2dG1g8lLMJbpHh/view
### Style
- Keep responses short, warm, and conversational. Use various types of emojis when required with the situation context.
- Be clear and simple when technical.
- Be empathetic when personal.
### Context
Here’s some context from Anik Chand’s knowledge base:
{context}
Question: {question}
"""
# Wrap the raw string so the chain can substitute the two variables.
prompt = PromptTemplate(
    input_variables=["context", "question"],
    template=system_prompt
)
# Retrieval-augmented QA chain: condenses the question with chat history,
# retrieves matching chunks from the vector store, then answers using the
# custom prompt.
qa_chain = ConversationalRetrievalChain.from_llm(
    llm=llm,
    retriever=retriever,
    return_source_documents=False,  # only the answer text is consumed downstream
    combine_docs_chain_kwargs={"prompt": prompt},
    verbose=False
)
def safe_invoke(query, history):
    """Answer *query* through the RAG chain, with a safe fallback reply.

    Args:
        query: The user's question (raw input string).
        history: List of (question, answer) tuples from earlier turns.

    Returns:
        The chain's answer string, or a canned redirect when the chain
        produced no usable answer (missing/empty) or deflected off-topic.
    """
    result = qa_chain.invoke({"question": query, "chat_history": history})
    # Guarded lookup: the original `result["answer"].strip()` raised
    # KeyError (missing key) or AttributeError (None value) instead of
    # falling back gracefully.
    answer = result.get("answer") or ""
    if not answer.strip() or "I’m here to talk about Anik" in answer:
        return "I’m here to talk about Anik Chand and his work only. Would you like to hear about his projects or skills? 🙂"
    return answer
# ===== Step 4. Small Talk =====
# Canned replies for exact-match greetings/pleasantries, keyed by the
# lower-cased, stripped user input. Multiple variants per key are chosen
# at random so repeated greetings don't feel robotic. These bypass the
# RAG pipeline entirely (see the chat loop below).
small_talk_responses = {
    "hi": [
        "Hey! 👋 Nice to see you here.",
        "Hi there! 😊 How’s your day going?",
        "Yo! 👋 What’s up?"
    ],
    "hello": [
        "Hello! 🙂 How’s it going?",
        "Hey there! 👋 Long time no see.",
        "Hi! 🌟 How are you?"
    ],
    "hey": [
        "Hey there! What’s up?",
        "Yo! 👋 How’s everything?",
        "Heyyy 😎 what’s new?"
    ],
    "good morning": [
        "Good morning ☀️ Wishing you a productive day!",
        "Morning! 🌄 Hope today treats you well.",
        "Rise and shine! ☀️ Let’s make it a great day."
    ],
    "good afternoon": [
        "Good afternoon 🌞 Hope you’re doing well!",
        "Hey! 👋 How’s your afternoon so far?",
        "Good afternoon! 🌻 Feeling productive?"
    ],
    "good evening": [
        "Good evening 🌙 How was your day?",
        "Evening! 🌆 Hope you had a good one.",
        "Good evening 🌌 Relax and recharge!"
    ],
    "thanks": [
        "You’re welcome! 🙌",
        "No problem, glad I could help! 🙂",
        "Anytime! 🤗"
    ],
    "thank you": [
        "No problem at all, happy to help! 😊",
        "You got it! 👍",
        "Always here if you need me 🙌"
    ],
    "who are you": [
        "I’m a bot 🤖 created by Anik Chand 👨💻 to share his story, projects, and experiences.",
        "I’m an AI assistant built by Anik Chand 👨💻 to talk about him and his work.",
        "I’m a portfolio bot 🤖 designed by Anik Chand to introduce him and what he does."
    ],
    "what can you do": [
        "I can share details about Anik Chand, his projects, skills, and experiences—or we can just have a casual chat!",
        "I can tell you about Anik’s coding journey, his portfolio, and the things he has built 🙂",
        "I can give you insights into Anik Chand’s work, projects, and skills 🚀"
    ]
}
def is_small_talk(query: str):
    """Return True when the normalized query exactly matches a small-talk key."""
    normalized = query.lower().strip()
    return normalized in small_talk_responses
def handle_small_talk(query: str) -> str:
    """Pick one of the canned replies for this small-talk query at random."""
    normalized = query.lower().strip()
    replies = small_talk_responses[normalized]
    return random.choice(replies)
# ===== Step 5. Chat Loop =====
# Simple REPL: read a line, short-circuit exit commands and canned small
# talk, otherwise answer via the retrieval chain and remember the turn.
while True:
    query = input("You: ")
    # Strip before comparing so inputs like " bye " still exit — the
    # original only lower-cased, inconsistent with the small-talk lookup
    # (which strips), so padded exit commands fell through to the chain.
    if query.lower().strip() in {"exit", "quit", "goodbye", "ok bye", "bye"}:
        print("Bot: Goodbye! 👋")
        break
    # Small talk check — canned replies skip the RAG pipeline entirely.
    if is_small_talk(query):
        print("Bot:", handle_small_talk(query))
        continue
    # Otherwise → use RAG
    answer = safe_invoke(query, chat_history)
    # Save conversation so the chain can condense follow-up questions.
    chat_history.append((query, answer))
    print("Bot:", answer)