-
Notifications
You must be signed in to change notification settings - Fork 37
/
Copy pathllm.py
132 lines (101 loc) · 4.1 KB
/
llm.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
from operator import itemgetter
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from langchain.memory import ConversationBufferMemory
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables import RunnableLambda, RunnablePassthrough
from langchain_core.messages import get_buffer_string
from langchain_core.prompts import format_document
from langchain.prompts.prompt import PromptTemplate
# Prompt used to rewrite a follow-up question into a self-contained question,
# using the prior conversation turns as context ("question condensing").
condense_question = """Given the following conversation and a follow-up question, rephrase the follow-up question to be a standalone question.
Chat History:
{chat_history}
Follow Up Input: {question}
Standalone question:"""
CONDENSE_QUESTION_PROMPT = PromptTemplate.from_template(condense_question)

# Answer-generation prompt: fills {context} with the rendered retrieved
# documents and {question} with the standalone question; instructs the model
# to decline when the research is missing or irrelevant.
answer = """
### Instruction:
You're a helpful research assistant, who answers questions based on provided research in a clear way and easy-to-understand way.
If there is no research, or the research is irrelevant to answering the question, simply reply that you can't answer.
Please reply with just the detailed answer and your sources. If you're unable to answer the question, do not list sources
## Research:
{context}
## Question:
{question}
"""
ANSWER_PROMPT = ChatPromptTemplate.from_template(answer)

# How each retrieved document is rendered before being joined into {context}.
# NOTE(review): assumes every document's metadata carries `source` and `page`
# keys — confirm against the ingestion pipeline.
DEFAULT_DOCUMENT_PROMPT = PromptTemplate.from_template(
    template="Source Document: {source}, Page {page}:\n{page_content}"
)
def _combine_documents(
    docs, document_prompt=DEFAULT_DOCUMENT_PROMPT, document_separator="\n\n"
):
    """Render each retrieved document with *document_prompt* and join the
    results with *document_separator* into a single context string."""
    return document_separator.join(
        format_document(doc, document_prompt) for doc in docs
    )
# Module-level conversation memory shared by getChatChain.  The input/output
# keys match the chain's "question" input and "answer" output so that
# memory.save_context can record both sides of each turn.
memory = ConversationBufferMemory(
    return_messages=True, output_key="answer", input_key="question"
)
def getStreamingChain(question: str, memory, llm, db):
    """Build the condense → retrieve → answer chain and stream the answer.

    ``memory`` is expected to be a list of ``{"role": ..., "content": ...}``
    dicts (UI-style history), not the module-level ConversationBufferMemory.
    Returns the stream iterator produced by the chain.
    """
    doc_retriever = db.as_retriever(search_kwargs={"k": 10})

    # Flatten the UI-style message dicts into one newline-separated string.
    with_history = RunnablePassthrough.assign(
        chat_history=RunnableLambda(
            lambda inputs: "\n".join(
                f"{item['role']}: {item['content']}" for item in inputs["memory"]
            )
        ),
    )

    # Rewrite the follow-up question + history into a standalone question.
    condense_step = {
        "standalone_question": {
            "question": itemgetter("question"),
            "chat_history": itemgetter("chat_history"),
        }
        | CONDENSE_QUESTION_PROMPT
        | llm
    }

    # Fetch documents relevant to the standalone question.
    retrieval_step = {
        "docs": itemgetter("standalone_question") | doc_retriever,
        "question": itemgetter("standalone_question"),
    }

    # Render the documents into {context} and generate the final answer.
    answer_step = (
        {
            "context": lambda inputs: _combine_documents(inputs["docs"]),
            "question": itemgetter("question"),
        }
        | ANSWER_PROMPT
        | llm
    )

    pipeline = with_history | condense_step | retrieval_step | answer_step
    return pipeline.stream({"question": question, "memory": memory})
def getChatChain(llm, db):
    """Build a console-oriented RAG chat bound to the module-level ``memory``.

    Returns a ``chat(question)`` callable that streams the generated answer to
    stdout (via StreamingStdOutCallbackHandler), records the turn in the
    shared ConversationBufferMemory, and returns the answer.
    """
    retriever = db.as_retriever(search_kwargs={"k": 10})

    # Pull prior turns out of the shared ConversationBufferMemory; it returns
    # {"history": [...]} so we project out the "history" key.
    loaded_memory = RunnablePassthrough.assign(
        chat_history=RunnableLambda(memory.load_memory_variables)
        | itemgetter("history"),
    )

    # Rephrase the follow-up into a standalone question using the history.
    standalone_question = {
        "standalone_question": {
            "question": lambda x: x["question"],
            "chat_history": lambda x: get_buffer_string(x["chat_history"]),
        }
        | CONDENSE_QUESTION_PROMPT
        | llm
    }

    # Now we retrieve the documents
    retrieved_documents = {
        "docs": itemgetter("standalone_question") | retriever,
        "question": lambda x: x["standalone_question"],
    }

    # Now we construct the inputs for the final prompt
    final_inputs = {
        "context": lambda x: _combine_documents(x["docs"]),
        "question": itemgetter("question"),
    }

    # And finally, the step that produces the answer.  Named `answer_step` so
    # it no longer shadows the module-level `answer` template string.
    answer_step = {
        "answer": final_inputs
        | ANSWER_PROMPT
        | llm.with_config(callbacks=[StreamingStdOutCallbackHandler()]),
        "docs": itemgetter("docs"),
    }

    final_chain = loaded_memory | standalone_question | retrieved_documents | answer_step

    def chat(question: str):
        """Run one chat turn: invoke the chain, persist the exchange, and
        return the answer (previously it was computed but discarded)."""
        inputs = {"question": question}
        result = final_chain.invoke(inputs)
        memory.save_context(inputs, {"answer": result["answer"]})
        # Fix: return the answer instead of implicitly returning None, so
        # callers get the text as well as the streamed stdout output.
        return result["answer"]

    return chat