Python and GPTCache
unknown
python
a year ago
2.7 kB
8
Indexable
# Classify new business ideas into categories with an Azure OpenAI chat model;
# GPTCache helpers below provide a per-model similarity cache for LLM calls.
import os
from langchain.prompts import ChatPromptTemplate
from langchain.pydantic_v1 import BaseModel, Field
from langchain.utils.openai_functions import convert_pydantic_to_openai_function
# from langchain.output_parsers.openai_functions import JsonKeyOutputFunctionsParser
from langchain.output_parsers.openai_functions import JsonOutputFunctionsParser
from langchain.tools import StructuredTool
# from langchain_openai import ChatOpenAI
from langchain_openai import AzureChatOpenAI
from typing import List
from dotenv import load_dotenv
from categorize_definition import category_definition
import hashlib
from gptcache import Cache
from gptcache.manager.factory import manager_factory
from gptcache.processor.pre import get_prompt
from langchain.cache import GPTCache
from langchain.globals import set_llm_cache
from gptcache.adapter.api import init_similar_cache
# from model_setup import chatmodel_gpt4omini_turbo, chatmodel_gpt4_turbo
####################
# LLM
####################
def get_hashed_name(name):
    """Return the SHA-256 hex digest of *name*.

    Used to derive a filesystem-safe, per-model key for cache directories.
    """
    digest = hashlib.sha256(name.encode())
    return digest.hexdigest()
def init_gptcache(cache_obj: Cache, llm: str):
    """Initialize a similarity-based GPTCache store for one model.

    The on-disk data directory is keyed by a hash of the model name, so
    each model gets an isolated cache.
    """
    cache_dir = f"similar_cache_{get_hashed_name(llm)}"
    init_similar_cache(cache_obj=cache_obj, data_dir=cache_dir)
# Load Azure OpenAI credentials from the environment (.env supported via
# python-dotenv).
# NOTE(review): the env-var names use an OPEN_API_* prefix rather than the
# conventional AZURE_OPENAI_* / OPENAI_API_* names — confirm they match the
# actual .env file before deploying.
load_dotenv()
API_KEY = os.getenv('OPEN_API_KEY')
OPENAI_API_VERSION = os.getenv('OPEN_API_VERSION')
AZURE_OPENAI_ENDPOINT = os.getenv('OPEN_API_BASE')
# Module-level chat model shared by eval_func below: Azure "gpt-4" deployment
# with temperature 0.0 for deterministic classification output.
llm = AzureChatOpenAI(
    deployment_name="gpt-4",
    temperature=0.0,
    api_key=API_KEY,
    api_version=OPENAI_API_VERSION,
    azure_endpoint=AZURE_OPENAI_ENDPOINT
)
def eval_func(content: dict, criterion: str) -> dict:
    """Classify a new business idea against *criterion* using the shared LLM.

    Builds a system/user prompt pair, binds the ``EvalOutput`` schema as an
    OpenAI function (forced via ``function_call``) so the model must return
    structured arguments, and parses those arguments to a dict.

    Args:
        content: The business-idea record; formatted into the user prompt.
        criterion: Classification-criteria text; formatted into the system
            prompt.

    Returns:
        The parsed function-call arguments as a dict — expected to contain
        the chosen category and a brief reason (shape defined by
        ``EvalOutput``).

    NOTE(review): ``EvalOutput`` is not defined or imported in this file —
    confirm it exists elsewhere before running.
    """
    template_sys = '''
## Classification Criteria:
"""
{criterion}
"""
## Output Format:
Output in JSON
'''
    template_usr = """
## Input Data: New Business Idea Information
```json
{content}
```
## Task:
- Please read [Input Data: New Business Idea Information] thoroughly.
- Based on [Classification Criteria], select the category that best represents the new business idea.
- Output the category of the classification result and a brief explanation of the classification reason (about 20 words).
"""
    prompt = ChatPromptTemplate.from_messages(
        [("system", template_sys), ("user", template_usr)]
    )
    # Convert the pydantic model to an OpenAI function spec and force the
    # model to call exactly that function, guaranteeing parseable output.
    output_func = convert_pydantic_to_openai_function(EvalOutput)
    llm_func = llm.bind(
        functions=[output_func], function_call={"name": output_func["name"]}
    )
    parser = JsonOutputFunctionsParser()
    chain = prompt | llm_func | parser
    result = chain.invoke({"content": content, "criterion": criterion})
    # Fixed: the original line had paste-site junk ("Editor is loading...")
    # fused onto this statement, making the file a syntax error.
    return result
Leave a Comment