import nltk
from nltk import pos_tag, RegexpParser
from nltk.tokenize import word_tokenize
from nltk.chunk import ne_chunk

# Download the resources needed for tokenization, POS tagging, and NER
nltk.download('punkt')
nltk.download('averaged_perceptron_tagger')
nltk.download('maxent_ne_chunker')
nltk.download('words')
# Get user input for the sentence
sentence = input("Enter a sentence: ")
# Tokenize the input sentence
words = word_tokenize(sentence)
# Perform POS tagging
pos_tags = pos_tag(words)
print("POS Tagging:")
print(pos_tags)
# Define a regular-expression grammar for noun phrase and prepositional phrase chunks
grammar = r"""
    NP: {<DT|JJ|NN.*>+}  # Noun Phrase: one or more DT, JJ, or NN* tags
    PP: {<IN><NP>}       # Prepositional Phrase: IN followed by an NP chunk
"""
# Create a chunk parser with the defined grammar
chunk_parser = RegexpParser(grammar)
# Perform chunking
chunks = chunk_parser.parse(pos_tags)
print("\nChunking:")
chunks.pretty_print()
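
# Optionally, list the text of each NP chunk by walking the parse tree's subtrees
for subtree in chunks.subtrees(filter=lambda t: t.label() == 'NP'):
    print("NP chunk:", " ".join(word for word, tag in subtree.leaves()))

# Perform named entity recognition with the maxent chunker downloaded above;
# ne_chunk marks entities such as PERSON, ORGANIZATION, and GPE in the tree
ner_tree = ne_chunk(pos_tags)
print("\nNamed Entity Recognition:")
ner_tree.pretty_print()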