Untitled
unknown
plain_text
2 years ago
3.8 kB
10
Indexable
"""Set up a wedding/marriage Q&A agent: a GPT-Pinecone index queried through a
langchain ConversationalAgent tool, answered by gpt-3.5-turbo."""

import os  # FIX: os.environ is used below but `os` was never imported

import pinecone
from langchain import LLMChain
from langchain.agents import AgentExecutor, ConversationalAgent, Tool
from langchain.chat_models import ChatOpenAI
from langchain.prompts.chat import (
    AIMessagePromptTemplate,
    ChatPromptTemplate,
    HumanMessagePromptTemplate,
    SystemMessagePromptTemplate,
)
from llama_index import (
    GPTPineconeIndex,
    LLMPredictor,
    QuestionAnswerPrompt,
    SimpleDirectoryReader,
)

# NOTE(review): `settings` is referenced but never imported in this file —
# presumably a Django-style settings module provided by the enclosing project;
# confirm and add the import.
os.environ['OPENAI_API_KEY'] = settings.OPENAI_API_KEY
pinecone_api_key = settings.PINECONE_API_KEY

## Open Pinecone Index
pinecone.init(api_key=pinecone_api_key, environment="us-east1-gcp")
# pinecone.create_index("name", dimension=1536, metric="euclidean", pod_type="p1")
index = pinecone.Index("name")

# # Create Index
# documents = SimpleDirectoryReader('api\data').load_data()
INDEX = GPTPineconeIndex('', pinecone_index=index, chunk_size_limit=512)

## Define custom Question/Answer prompt for GPT 3.5 Turbo (ChatGPT)
# System message carries the retrieved context ({context_str}); a one-shot
# human/AI exchange demonstrates the expected answer style; the final human
# message carries the actual user query ({query_str}).
QA_TURBO_TEMPLATE_MSG = [
    SystemMessagePromptTemplate.from_template(
        """
RULES
Provided context information below.\n
---------------------\n
{context_str}
\n---------------------\n
Given this information, please answer the question and spell check everything especially the tradition or custom names in context and replace them with correct one.
"""
    ),
    HumanMessagePromptTemplate.from_template(
        "What is the average age difference between bride and groom?"
    ),
    AIMessagePromptTemplate.from_template(
        "Approximately two years. According to the 2016 Real Weddings Study, "
        "the average age of to-be-weds in the US was 29 for the bride and 31 "
        "for the groom. This indicates that couples are taking their time "
        "entering life stages, such as moving in with their partner "
        "premarriage or finishing up a master's degree before getting "
        "married. \n"
        "This is also reflected in the birth rates, which increased in women "
        "aged 30 to 34, 35 to 39 and 40 to 44, and decreased slightly in the "
        "age groups 20 to 24 and 25 to 29."
    ),
    HumanMessagePromptTemplate.from_template("{query_str}"),
]
QA_TURBO_TEMPLATE_LC = ChatPromptTemplate.from_messages(QA_TURBO_TEMPLATE_MSG)
QA_TURBO_TEMPLATE = QuestionAnswerPrompt.from_langchain_prompt(QA_TURBO_TEMPLATE_LC)
QA_PROMPT = QA_TURBO_TEMPLATE

## Set number of output tokens
NUM_OUTPUT = 600

## Define model and its parameters
LLM_PREDICTOR = LLMPredictor(
    llm=ChatOpenAI(temperature=0, model_name="gpt-3.5-turbo", max_tokens=NUM_OUTPUT)
)

## Set how many matches are found
SIMILARITY_TOP_K = 3

## Define the langchain tool to use
# return_direct=True: the tool's answer is returned to the user as-is,
# without a second LLM pass by the agent.
TOOLS = [
    Tool(
        name="GPT Index",
        func=lambda q: str(
            INDEX.query(
                q,
                llm_predictor=LLM_PREDICTOR,
                text_qa_template=QA_PROMPT,
                similarity_top_k=SIMILARITY_TOP_K,
                response_mode="compact",
            )
        ),
        description="useful for when you need to answer questions about weddings or marriage.",
        return_direct=True,
    ),
]

## Can be changed with OpenAI for davinci model usage
LLM = ChatOpenAI(temperature=0, model_name="gpt-3.5-turbo")

prefix = """
RULES
TOOLS:
------
Assistant has access to the following tools.
"""
suffix = """
EXAMPLES
User Input: {input}
{agent_scratchpad}
"""

prompt = ConversationalAgent.create_prompt(
    TOOLS,
    prefix=prefix,
    suffix=suffix,
    input_variables=["input", "agent_scratchpad"],
)
llm_chain = LLMChain(llm=LLM, prompt=prompt)

## Create langchain agent
agent = ConversationalAgent(llm_chain=llm_chain)  # , allowed_tools=tool_names)
agent_executor = AgentExecutor.from_agent_and_tools(agent=agent, tools=TOOLS, verbose=True)

# NOTE(review): `user_message` is not defined anywhere in this file —
# presumably supplied by the calling view/handler; confirm before running.
response = agent_executor.run(user_message)
Editor is loading...