
Commit 72e018f

fixed retrieval
Royal-lobster committed Nov 25, 2023
1 parent d621fa4 commit 72e018f
Showing 3 changed files with 44 additions and 38 deletions.
Binary file removed artifacts/chrome.zip
1 change: 0 additions & 1 deletion src/components/Sidebar/chat/ChatInput.tsx
@@ -50,7 +50,6 @@ export function SidebarInput({
       })
       context = (await pageContent) as string
     }
-    console.log('context', context)
     submitMessage(text, isWebpageContextOn ? context : undefined)
     setText('')
   }
81 changes: 44 additions & 37 deletions src/hooks/useChatCompletion.ts
@@ -1,13 +1,12 @@
-import { AvailableModels, Mode } from '../config/settings'
+import endent from 'endent'
 import { ChatOpenAI } from 'langchain/chat_models/openai'
-import { useCurrentChat, ChatRole } from './useCurrentChat'
-import { useMemo } from 'react'
+import { OpenAIEmbeddings } from 'langchain/embeddings/openai'
 import { AIMessage, HumanMessage, SystemMessage } from 'langchain/schema'
-import { useState } from 'react'
 import { RecursiveCharacterTextSplitter } from 'langchain/text_splitter'
-import { HNSWLib } from 'langchain/vectorstores/hnswlib'
-import { OpenAIEmbeddings } from 'langchain/embeddings/openai'
-import { loadQAMapReduceChain } from 'langchain/chains'
+import { MemoryVectorStore } from 'langchain/vectorstores/memory'
+import { useMemo, useState } from 'react'
+import { AvailableModels, Mode } from '../config/settings'
+import { ChatRole, useCurrentChat } from './useCurrentChat'
 
 interface UseChatCompletionProps {
   model: AvailableModels
@@ -67,46 +66,54 @@ export const useChatCompletion = ({
 
   const submitQuery = async (query: string, context?: string) => {
     await addNewMessage(ChatRole.USER, query)
-    const messages = [
-      new SystemMessage(systemPrompt),
-      ...previousMessages,
-      new HumanMessage(query),
-    ]
     const options = {
       signal: controller.signal,
       callbacks: [{ handleLLMNewToken: updateAssistantMessage }],
     }
 
     setGenerating(true)
 
-    // if there is no web page context, run with a simple llm call
-    if (!context) {
-      llm.call(messages, options).then(() => {
-        commitToStoredMessages()
-        setGenerating(false)
-      })
-      return
-    }
+    /**
+     * If context is provided, retrieve the documents most relevant to the
+     * query from an in-memory vector store and prepend them to the prompt
+     * before running the LLM.
+     */
+    let matchedContext
+    if (context) {
+      const textSplitter = new RecursiveCharacterTextSplitter({
+        chunkSize: 1000,
+      })
+      const docs = await textSplitter.createDocuments([context])
+      const vectorStore = await MemoryVectorStore.fromDocuments(
+        docs,
+        new OpenAIEmbeddings({
+          openAIApiKey: apiKey,
+        }),
+      )
+      const retriever = vectorStore.asRetriever()
+      const relevantDocs = await retriever.getRelevantDocuments(query)
+      console.log(relevantDocs)
+      matchedContext = relevantDocs.map((doc) => doc.pageContent).join('\n')
+    }
 
-    // if there is a web page context, run with a map reduce chain
-    const textSplitter = new RecursiveCharacterTextSplitter({
-      chunkSize: 1000,
-    })
-    const docs = await textSplitter.createDocuments([context])
-    const vectorStore = await HNSWLib.fromDocuments(
-      docs,
-      new OpenAIEmbeddings(),
-    )
-    const retriever = vectorStore.asRetriever()
-    const relevantDocs = await retriever.getRelevantDocuments(query)
-    const mapReduceChain = loadQAMapReduceChain(llm)
-    await mapReduceChain.invoke(
-      {
-        messages: messages,
-        input_documents: relevantDocs,
-      },
-      options,
-    )
+    const expandedQuery = matchedContext
+      ? endent`
+        ### Context
+        ${matchedContext}
+        ### Question:
+        ${query}
+      `
+      : query
+
+    const messages = [
+      new SystemMessage(systemPrompt),
+      ...previousMessages,
+      new HumanMessage(expandedQuery),
+    ]
+
+    await llm.call(messages, options)
+    commitToStoredMessages()
+    setGenerating(false)
   }
 
   const cancelRequest = () => {
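Taken together, the useChatCompletion.ts change drops the HNSWLib vector store and the map-reduce QA chain in favor of a pure-JavaScript MemoryVectorStore and plain prompt stuffing; HNSWLib depends on native bindings that a browser extension cannot load, which is presumably the broken retrieval the commit title refers to. The sketch below restates the new flow as a standalone function, using only the langchain calls already visible in the diff; expandQueryWithContext, pageText, and apiKey are hypothetical names introduced here for illustration.

import endent from 'endent'
import { OpenAIEmbeddings } from 'langchain/embeddings/openai'
import { RecursiveCharacterTextSplitter } from 'langchain/text_splitter'
import { MemoryVectorStore } from 'langchain/vectorstores/memory'

// Expand a user query with the page content most relevant to it.
async function expandQueryWithContext(
  query: string,
  pageText: string, // captured web page text (placeholder input)
  apiKey: string, // the user's OpenAI key (placeholder input)
): Promise<string> {
  // Split the page into ~1000-character chunks, matching the commit.
  const splitter = new RecursiveCharacterTextSplitter({ chunkSize: 1000 })
  const docs = await splitter.createDocuments([pageText])

  // Embed the chunks into an in-memory vector store; nothing is persisted,
  // so no native HNSW bindings are needed.
  const vectorStore = await MemoryVectorStore.fromDocuments(
    docs,
    new OpenAIEmbeddings({ openAIApiKey: apiKey }),
  )

  // Retrieve the chunks most similar to the query and join them.
  const relevantDocs = await vectorStore
    .asRetriever()
    .getRelevantDocuments(query)
  const matchedContext = relevantDocs.map((doc) => doc.pageContent).join('\n')

  // Stuff the matched context ahead of the question, as the new code does.
  return endent`
    ### Context
    ${matchedContext}
    ### Question:
    ${query}
  `
}

The expanded query then travels the same llm.call(messages, options) path as a question asked without page context, so the handleLLMNewToken streaming callback behaves identically in both cases.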
