from transformers import AutoTokenizer, AutoModelForQuestionAnswering
import torch

# Load the tokenizer and model for Mistral 7B (the hosted checkpoint id is
# "mistralai/Mistral-7B-v0.1", not "mistral-7b").
# Note: this base checkpoint has no trained span-extraction head, so the QA
# head attached by AutoModelForQuestionAnswering starts out randomly
# initialized; for meaningful answers, use a checkpoint fine-tuned for
# extractive QA (see the pipeline sketch at the end).
tokenizer = AutoTokenizer.from_pretrained("mistralai/Mistral-7B-v0.1")
model = AutoModelForQuestionAnswering.from_pretrained("mistralai/Mistral-7B-v0.1")
model.eval()

# Example question and context
question = "Who wrote the Declaration of Independence?"
context = "The Declaration of Independence was written by Thomas Jefferson."

# Tokenize the question/context pair (calling the tokenizer directly replaces
# the deprecated encode_plus API)
inputs = tokenizer(question, context, return_tensors="pt")
input_ids = inputs["input_ids"][0]

# Run the model without tracking gradients
with torch.no_grad():
    outputs = model(**inputs)
answer_start_scores = outputs.start_logits
answer_end_scores = outputs.end_logits

# Find the most likely start and end token positions (end index is exclusive)
answer_start = torch.argmax(answer_start_scores).item()
answer_end = torch.argmax(answer_end_scores).item() + 1

# Decode the predicted answer span back into text
answer = tokenizer.decode(input_ids[answer_start:answer_end], skip_special_tokens=True)

print(f"Question: {question}")
print(f"Answer: {answer}")