Untitled
unknown
plain_text
a year ago
1.9 kB
12
Indexable
# Install required libraries
!pip install transformers torch datasets
from transformers import pipeline
def load_model():
    """Build and return a Hugging Face question-answering pipeline.

    The pipeline wraps DistilBERT fine-tuned on SQuAD
    (``distilbert-base-uncased-distilled-squad``), downloading the
    weights on first use.

    Returns:
        A ``transformers`` pipeline object callable as
        ``pipe(question=..., context=...)``.
    """
    print("Loading model...")
    qa = pipeline("question-answering", model="distilbert-base-uncased-distilled-squad")
    print("Model loaded successfully!")
    return qa
def answer_question(qa_pipeline, context, question):
    """Extract an answer to *question* from *context* using the pipeline.

    Args:
        qa_pipeline: Hugging Face question-answering pipeline (any
            callable accepting ``question=`` and ``context=`` keywords
            and returning a dict with an ``'answer'`` key).
        context: Text passage to search for the answer.
        question: The question to answer.

    Returns:
        The answer string produced by the pipeline.
    """
    return qa_pipeline(question=question, context=context)["answer"]
def main():
    """Run an interactive console loop answering questions about a fixed context."""
    qa_pipeline = load_model()

    # Sample passage the model answers questions about.
    context = """
OpenAI is an AI research and deployment company. Our mission is to ensure that artificial general intelligence
benefits all of humanity. OpenAI conducts research in a variety of fields, including natural language processing,
machine learning, and artificial intelligence safety.
"""
    print("Welcome to the Question-Answering Model!")
    print("You can ask questions based on the provided context.")
    print("\nContext:")
    print(context)

    # Keep prompting until the user types 'exit' (case-insensitive).
    while (question := input("\nEnter your question (or type 'exit' to quit): ")).lower() != 'exit':
        print(f"Answer: {answer_question(qa_pipeline, context, question)}")
    print("Goodbye!")


if __name__ == "__main__":
    main()
Editor is loading...
Leave a Comment