-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathbasic_chain.py
More file actions
184 lines (140 loc) · 5.36 KB
/
basic_chain.py
File metadata and controls
184 lines (140 loc) · 5.36 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
"""Basic chain example: ChatOpenAI + SulcusMemory.
Demonstrates a simple conversational loop where each turn is persisted to
Sulcus and relevant context is retrieved before each response.
Requirements:
pip install sulcus-langchain langchain-openai
Usage:
SULCUS_API_KEY=sk-... OPENAI_API_KEY=sk-... python examples/basic_chain.py
"""
from __future__ import annotations
import os
from langchain_core.prompts import PromptTemplate
try:
from langchain_openai import ChatOpenAI
except ImportError:
raise ImportError(
"This example requires langchain-openai.\n"
"Install with: pip install langchain-openai"
)
from sulcus import Sulcus
from sulcus_langchain import SulcusMemory, SulcusChatMessageHistory, SulcusRetriever
# ---------------------------------------------------------------------------
# Configuration
# ---------------------------------------------------------------------------
# All settings come from the environment so the example runs unmodified
# anywhere; only the two API keys are mandatory (validated just below).
SULCUS_API_KEY: str = os.environ.get("SULCUS_API_KEY", "")
SULCUS_SERVER: str = os.environ.get("SULCUS_SERVER", "https://api.sulcus.ca")
OPENAI_API_KEY: str = os.environ.get("OPENAI_API_KEY", "")
NAMESPACE: str = os.environ.get("SULCUS_NAMESPACE", "example-chat")
SESSION_ID: str = os.environ.get("SULCUS_SESSION_ID", "demo-session-001")

# Fail fast with a clear message instead of erroring deep inside a request.
if not SULCUS_API_KEY:
    raise EnvironmentError("Set the SULCUS_API_KEY environment variable.")
if not OPENAI_API_KEY:
    raise EnvironmentError("Set the OPENAI_API_KEY environment variable.")

# ---------------------------------------------------------------------------
# Initialise Sulcus client and LangChain components
# ---------------------------------------------------------------------------
# Single shared client; every component below is scoped to NAMESPACE.
sulcus = Sulcus(
    api_key=SULCUS_API_KEY,
    base_url=SULCUS_SERVER,
    namespace=NAMESPACE,
)

# Conversational memory — fetches and stores full turn context
memory = SulcusMemory(
    client=sulcus,
    memory_type="conversation",  # maps to "episodic" inside Sulcus
    memory_key="history",        # prompt variable this memory fills
    search_limit=8,              # max memories retrieved per turn
    heat=0.85,                   # NOTE(review): presumably a storage priority/relevance weight — confirm against Sulcus docs
    return_messages=False,  # inject as formatted string, not message list
)

# Per-session structured chat log (optional — shows raw message history)
chat_history = SulcusChatMessageHistory(
    client=sulcus,
    session_id=SESSION_ID,
    heat=0.75,
)

# Document retriever for standalone RAG queries
retriever = SulcusRetriever(
    client=sulcus,
    search_limit=5,  # max documents returned per query
    min_heat=0.2,    # NOTE(review): presumably filters out low-scoring memories — confirm
)
# ---------------------------------------------------------------------------
# Prompt + LLM
# ---------------------------------------------------------------------------
# Chat model for every turn; the key is passed explicitly so the example
# does not rely on langchain-openai's implicit environment lookup.
llm = ChatOpenAI(
    model="gpt-4o-mini",
    temperature=0.7,
    openai_api_key=OPENAI_API_KEY,
)

# Single-turn template: memory context is injected as {history}, the user's
# message as {input}.
PROMPT_TEMPLATE = (
    "You are a helpful assistant with persistent memory powered by Sulcus.\n\n"
    "Relevant memory context:\n"
    "{history}\n\n"
    "Human: {input}\n"
    "AI:"
)

# from_template infers the {history}/{input} input variables from the string,
# so they do not need to be listed by hand.
prompt = PromptTemplate.from_template(PROMPT_TEMPLATE)
def chat(user_input: str) -> str:
    """Run one conversational turn with Sulcus-backed memory.

    The turn flows through five steps:

    1. Load relevant memories from Sulcus matching the user input.
    2. Format the prompt with history + input.
    3. Call the LLM for a response.
    4. Save both the human input and AI response back to Sulcus.
    5. Append the turn to the structured chat history.

    Args:
        user_input: The user's message text.

    Returns:
        The AI assistant's response string.
    """
    # Step 1 — fetch memory variables keyed on the incoming message.
    context_vars = memory.load_memory_variables({"input": user_input})

    # Steps 2–3 — render the prompt and query the model.
    rendered_prompt = prompt.format(input=user_input, **context_vars)
    model_output = llm.invoke(rendered_prompt)

    # Chat models return a message object exposing .content; anything else is
    # stringified as a fallback.
    if hasattr(model_output, "content"):
        reply: str = model_output.content
    else:
        reply = str(model_output)

    # Step 4 — persist the full turn so future calls can retrieve it.
    memory.save_context({"input": user_input}, {"output": reply})

    # Step 5 — mirror the turn into the per-session structured log.
    chat_history.add_user_message(user_input)
    chat_history.add_ai_message(reply)

    return reply
def demo_retriever(query: str) -> None:
    """Show how SulcusRetriever works as a standalone RAG component."""
    print(f"\n[Retriever] Searching: {query!r}")

    matches = retriever.invoke(query)
    if not matches:
        print(" No documents found.")
        return

    # One line per hit: memory type, heat score, and a short content preview.
    for match in matches:
        match_heat = match.metadata.get("heat", 0.0)
        match_type = match.metadata.get("memory_type", "?")
        preview = match.page_content[:100]
        print(f" [{match_type}] heat={match_heat:.2f} {preview}")
# ---------------------------------------------------------------------------
# Main interactive loop
# ---------------------------------------------------------------------------
def main() -> None:
    """Run an interactive chat session with Sulcus-backed memory."""
    print("Sulcus + LangChain demo — type 'quit' to exit, '!search <query>' to use retriever.\n")
    search_prefix = "!search "

    while True:
        # Read one line; Ctrl-D / Ctrl-C ends the session cleanly.
        try:
            user_line = input("You: ").strip()
        except (EOFError, KeyboardInterrupt):
            print("\nGoodbye.")
            break

        if not user_line:
            continue  # ignore blank input

        if user_line.lower() in ("quit", "exit"):
            print("Goodbye.")
            break

        # "!search <query>" bypasses the chat chain and hits the retriever.
        if user_line.startswith(search_prefix):
            demo_retriever(user_line[len(search_prefix):])
            continue

        print(f"AI: {chat(user_line)}\n")


if __name__ == "__main__":
    main()