Skip to content

Commit 3fce5f2

Browse files
committed
🎨 apply linting
1 parent b47acdb commit 3fce5f2

File tree

3 files changed

+32
-22
lines changed

demo/README.md

-1
Original file line numberDiff line numberDiff line change
@@ -41,7 +41,6 @@ http://localhost:8000/chat
4141

4242
![demo](../assets/demo.gif)
4343

44-
4544
### Sample cURL request for retrieval qa with sources
4645

4746
```bash

demo/app.py

+27-18
Original file line numberDiff line numberDiff line change
@@ -8,7 +8,10 @@
88
from langchain.chat_models import ChatOpenAI
99
from pydantic import BaseModel
1010

11-
from fastapi_async_langchain.responses import LLMChainStreamingResponse, RetrievalQAStreamingResponse
11+
from fastapi_async_langchain.responses import (
12+
LLMChainStreamingResponse,
13+
RetrievalQAStreamingResponse,
14+
)
1215

1316
load_dotenv()
1417

@@ -46,32 +49,38 @@ async def chat(
4649
chain, request.query, media_type="text/event-stream"
4750
)
4851

52+
4953
def retrieval_qa_chain():
5054
from langchain.chains import RetrievalQAWithSourcesChain
5155
from langchain.chains.qa_with_sources import load_qa_with_sources_chain
5256
from langchain.chains.qa_with_sources.stuff_prompt import PROMPT as QA_PROMPT
53-
from langchain.vectorstores import FAISS
5457
from langchain.embeddings import OpenAIEmbeddings
58+
from langchain.vectorstores import FAISS
5559

5660
callback_manager = AsyncCallbackManager([])
57-
vectorstore = FAISS.load_local(index_name="langchain-python", embeddings=OpenAIEmbeddings(), folder_path="demo/")
61+
vectorstore = FAISS.load_local(
62+
index_name="langchain-python",
63+
embeddings=OpenAIEmbeddings(),
64+
folder_path="demo/",
65+
)
5866
retriever = vectorstore.as_retriever()
59-
streaming_llm = ChatOpenAI(streaming=True, callback_manager=callback_manager, verbose=True, temperature=0)
60-
doc_chain = load_qa_with_sources_chain(llm=streaming_llm,
61-
chain_type="stuff",
62-
prompt=QA_PROMPT)
63-
return RetrievalQAWithSourcesChain(combine_documents_chain=doc_chain,
64-
retriever=retriever,
65-
callback_manager=callback_manager,
66-
return_source_documents=True,
67-
verbose=True)
67+
streaming_llm = ChatOpenAI(
68+
streaming=True, callback_manager=callback_manager, verbose=True, temperature=0
69+
)
70+
doc_chain = load_qa_with_sources_chain(
71+
llm=streaming_llm, chain_type="stuff", prompt=QA_PROMPT
72+
)
73+
return RetrievalQAWithSourcesChain(
74+
combine_documents_chain=doc_chain,
75+
retriever=retriever,
76+
callback_manager=callback_manager,
77+
return_source_documents=True,
78+
verbose=True,
79+
)
80+
6881

6982
@app.post("/retrieval-qa-with-sources")
70-
async def retrieval_qa_with_sources(
71-
request: Request
72-
) -> RetrievalQAStreamingResponse:
83+
async def retrieval_qa_with_sources(request: Request) -> RetrievalQAStreamingResponse:
7384
return RetrievalQAStreamingResponse(
74-
chain=retrieval_qa_chain(),
75-
inputs=request.query,
76-
media_type="text/event-stream"
85+
chain=retrieval_qa_chain(), inputs=request.query, media_type="text/event-stream"
7786
)
Original file line numberDiff line numberDiff line change
@@ -1,14 +1,16 @@
11
from typing import Any, Dict
2+
23
from .base import AsyncFastApiStreamingCallback
34

5+
46
class RetrievalQAFastApiStreamingCallback(AsyncFastApiStreamingCallback):
57
"""Async Callback handler for FastAPI StreamingResponse to RetrievalQA."""
68

79
async def on_chain_end(self, outputs: Dict[str, Any], **kwargs: Any) -> None:
810
"""Run when chain ends running."""
9-
if not outputs['source_documents'] is None:
11+
if outputs["source_documents"] is not None:
1012
await self.send("\n\nSOURCE DOCUMENTS: \n")
11-
if not outputs['source_documents'] is None:
12-
for doc in outputs['source_documents']:
13+
if outputs["source_documents"] is not None:
14+
for doc in outputs["source_documents"]:
1315
await self.send(f"page content: {doc.page_content} \n")
1416
await self.send(f"source: {doc.metadata['source']} \n\n")

0 commit comments

Comments
 (0)