# rag.py
# Paste-site metadata (commented out so this file parses as Python):
# unknown | plain_text | a year ago | 1.1 kB | 11 | Indexable
from langchain.chains import ConversationalRetrievalChain
from langchain.memory import ConversationBufferMemory
from langchain.prompts import PromptTemplate
from langchain.chains.question_answering import load_qa_chain
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores import FAISS
from langchain.llms import OpenAI
# Function to chat with text files
def chat_with_text_files(user_input, texts):
    """Answer *user_input* with a conversational RAG chain built over *texts*.

    Parameters
    ----------
    user_input : str
        The user's question for this turn.
    texts : list
        LangChain ``Document`` objects to split, embed, and index.

    Returns
    -------
    str
        The chain's answer text.

    Notes
    -----
    Rebuilds the embeddings and FAISS index on every call, so each call pays
    the full indexing cost (and the per-call memory starts empty). For true
    multi-turn chat, hoist the vectorstore/memory/chain out of this function.
    """
    # Split documents into overlapping chunks so retrieval matches passages,
    # not whole documents.
    text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=200)
    split_texts = text_splitter.split_documents(texts)

    # Embed the chunks and build an in-memory FAISS index over them.
    embeddings = OpenAIEmbeddings()
    vectorstore = FAISS.from_documents(split_texts, embeddings)

    # One memory object, owned only by the top-level chain. The original code
    # passed the same memory to BOTH load_qa_chain and
    # ConversationalRetrievalChain, so each turn was recorded twice.
    memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)

    # BUG FIX: ConversationalRetrievalChain(...) instantiated directly requires
    # a `question_generator` chain and fails validation without one. The
    # `from_llm` classmethod builds both the question-condensing chain and the
    # "stuff" combine-docs chain correctly. Also, gpt-3.5-turbo is a
    # chat-completions model, so it must go through ChatOpenAI rather than the
    # legacy completions `OpenAI` wrapper.
    from langchain.chat_models import ChatOpenAI

    llm = ChatOpenAI(model_name="gpt-3.5-turbo")
    qa = ConversationalRetrievalChain.from_llm(
        llm,
        retriever=vectorstore.as_retriever(),
        memory=memory,
        chain_type="stuff",
    )

    response = qa({"question": user_input})
    return response["answer"]
# Paste-site footer (commented out so this file parses as Python):
# Editor is loading... | Leave a Comment