from fastapi import FastAPI
from dotenv import load_dotenv
from langchain.vectorstores import Qdrant
import qdrant_client
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.llms import OpenAI
from langchain.chains import ConversationalRetrievalChain
from langchain.memory import ConversationBufferMemory, ConversationBufferWindowMemory
from langchain.chat_models import ChatOpenAI
import os
# Import things that are needed generically
from langchain import LLMMathChain, SerpAPIWrapper
from langchain.agents import AgentType, initialize_agent
from langchain.chat_models import ChatOpenAI
from langchain.tools import BaseTool, StructuredTool, Tool, tool
# Load .env first so every later os.getenv() call sees the configured values.
load_dotenv()

app = FastAPI(title="Liberate Labs Chat API")
def get_vectorstore():
    """Build a Qdrant-backed LangChain vector store from environment config.

    Reads QDRANT_HOST, QDRANT_API_KEY and QDRANT_COLLECTION from the
    environment and wraps the collection with OpenAI embeddings.

    Returns:
        Qdrant: a LangChain vector store over the configured collection.
    """
    # NOTE(review): if any env var is unset, os.getenv returns None and the
    # failure surfaces later inside the client — confirm that is acceptable.
    qdrant = qdrant_client.QdrantClient(
        os.getenv("QDRANT_HOST"),
        api_key=os.getenv("QDRANT_API_KEY"),
    )
    return Qdrant(
        client=qdrant,
        collection_name=os.getenv("QDRANT_COLLECTION"),
        embeddings=OpenAIEmbeddings(),
    )
# Module-level singleton: built once at import time and shared by all requests.
vectorstore = get_vectorstore()
@app.post("/conversational_agent")
async def conv_reply(input_query: str):
    """Answer a query with a conversational agent backed by the Qdrant retriever.

    Args:
        input_query: the user's question.

    Returns:
        dict: {'status': "200 Ok", 'data': {'Answer': <agent reply>}}.
    """
    # NOTE(review): llm, memory and agent are rebuilt on every request, so the
    # window memory never actually spans more than one call — confirm intent.
    llm = ChatOpenAI(model_name='gpt-3.5-turbo', temperature=0.5)
    # Fix: ConversationBufferWindowMemory takes no `llm` argument (that kwarg
    # belongs to token/summary memories and can raise a ValidationError here).
    memory = ConversationBufferWindowMemory(
        k=5,
        return_messages=True,
        memory_key="chat_history",
    )
    # Fix: reuse the single `llm` instead of constructing a second ChatOpenAI.
    qa = ConversationalRetrievalChain.from_llm(llm, vectorstore.as_retriever())
    tools = [
        Tool.from_function(
            func=qa.run,
            name="qa",
            description="This is a helpful ai assistance",
        ),
    ]
    # Fix: `memory` was created but never passed anywhere, so it had no effect.
    # CHAT_CONVERSATIONAL_REACT_DESCRIPTION pairs a chat LLM with a
    # return_messages=True memory keyed on "chat_history".
    agent = initialize_agent(
        tools,
        llm,
        agent=AgentType.CHAT_CONVERSATIONAL_REACT_DESCRIPTION,
        memory=memory,
        verbose=False,
    )
    # NOTE(review): agent.run is a blocking call inside an async endpoint;
    # consider `await agent.arun(...)` or a thread executor to avoid stalling
    # the event loop.
    result = agent.run(input_query)
    return {
        'status': "200 Ok",
        'data': {
            "Answer": result,
        }
    }