-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathdataset_generator_langchain.py
More file actions
54 lines (44 loc) · 1.75 KB
/
dataset_generator_langchain.py
File metadata and controls
54 lines (44 loc) · 1.75 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
from langchain_openai import ChatOpenAI, OpenAI
# NOTE(review): appears to be an unused leftover from the LangChain quickstart
# example — nothing visible in this module references it; confirm no external
# caller imports it before removing.
prompt_template = "What is a good name for a company that makes {product}?"
def get_chunk_question(context):
    """Ask an OpenAI chat model to generate one question about *context*.

    Args:
        context: The text chunk the generated question should relate to.

    Returns:
        str: The raw model reply, which the prompt instructs to be in the
        form ``#Question#: [Text of the question]``.
    """
    system_message = """
    Generate a question related to the context.
    The input is provided in the following format:
    Context: [The context that for the generated question]
    The output is in the following format:
    #Question#: [Text of the question]
    The context is: {context}
    """
    # Bug fix: the original built `OpenAI()` (the langchain_openai *completion*
    # LLM wrapper) and then called `client.chat.completions.create(...)`, an
    # attribute that only exists on the bare `openai` SDK client — this raised
    # AttributeError at runtime. gpt-4-turbo-preview is a chat model, so use
    # LangChain's ChatOpenAI wrapper and its `.invoke()` API instead.
    llm = ChatOpenAI(model="gpt-4-turbo-preview")
    prompt = system_message.format(
        context=context,
    )
    response = llm.invoke(prompt)
    # `.invoke()` on a chat model returns an AIMessage; its text is `.content`.
    return response.content
def generate_answer(context, question):
    """Stream an answer to *question* using *context* as grounding material.

    Formats the context and question into a single completion prompt and
    streams it through the ``gpt-3.5-turbo-instruct`` completion model.

    Side effects:
        Each streamed chunk is printed to stdout as it arrives (flushed
        immediately so the caller sees incremental output).

    Args:
        context: The supporting passages for the answer (the prompt suggests
            a list-of-strings rendering, but any str()-able value works).
        question: The question text to answer.

    Yields:
        str: Successive text chunks of the model's answer, in order.
    """
    system_message_output_answer = """
    Generate an answer to the question provided in the input using the context provided. They will have the following format:
    Context: ["Context1", "Context2", "Context3", "Context4"]
    Question: [Text of the question]
    The context is: {context}
    The question is: {question}
    """
    # temperature=0 keeps the answer deterministic; max_tokens caps cost.
    llm = OpenAI(model="gpt-3.5-turbo-instruct", temperature=0, max_tokens=512)
    formatted_message = system_message_output_answer.format(
        context=context, question=question
    )
    for chunk in llm.stream(formatted_message):
        # Mirror the stream to stdout while also handing chunks to the caller.
        print(chunk, end="", flush=True)
        yield chunk