0% found this document useful (0 votes)
5 views10 pages

Natural Language Processing Lab 7

The document outlines a lab assignment on Natural Language Processing, focusing on implementing a regex parser, dependency parsing using spaCy, and an arc-based shift-reduce parser. It includes code examples for parsing sentences and analyzing their grammatical structures. The assignment covers various sentences to demonstrate the parsing techniques and their outputs.

Uploaded by

ragebhanukiran
Copyright
© © All Rights Reserved
We take content rights seriously. If you suspect this is your content, claim it here.
Available Formats
Download as DOCX, PDF, TXT or read online on Scribd
0% found this document useful (0 votes)
5 views10 pages

Natural Language Processing Lab 7

The document outlines a lab assignment on Natural Language Processing, focusing on implementing a regex parser, dependency parsing using spaCy, and an arc-based shift-reduce parser. It includes code examples for parsing sentences and analyzing their grammatical structures. The assignment covers various sentences to demonstrate the parsing techniques and their outputs.

Uploaded by

ragebhanukiran
Copyright
© © All Rights Reserved
We take content rights seriously. If you suspect this is your content, claim it here.
Available Formats
Download as DOCX, PDF, TXT or read online on Scribd
You are on page 1/ 10

Natural Language Processing

Lab Assignment

R.BhanuKiran
22BCE9560
L45+L46
1. Implement a regex parser for the given sentence:
The quick brown fox jumps over the lazy dog
NP-> DT ADJ NN
VP -> V NP PP
P-> IN
V->V
PP->IN NP

CODE
import re

# --- Toy grammar and lexicon for the demo sentence --------------------------
# Fix: the original 'NP' pattern ('DT ADJ ADJ NN') required exactly two
# adjectives, so "the lazy dog" (DT ADJ NN) was never detected, and the
# 'VP'/'PP' patterns contained the literal symbol "NP", which cannot occur in
# a POS-tag sequence, so those rules could never match.  The patterns below
# are written purely in POS tags, with an NP allowing any number of adjectives.
_NP = r'DT(?: ADJ)* NN'  # determiner, zero or more adjectives, noun

grammar_rules = {
    'NP': _NP,
    'VP': r'V(?: IN)? ' + _NP,  # verb, optional preposition, then an NP
    'P': r'IN',
    'V': r'V',
    'PP': r'IN ' + _NP,         # preposition followed by an NP
}

sentence = "The quick brown fox jumps over the lazy dog"
words = sentence.lower().split()

# POS tag for every word of the demo sentence (lower-cased lookup keys).
part_of_speech_tags = {
    'the': 'DT',
    'quick': 'ADJ',
    'brown': 'ADJ',
    'fox': 'NN',
    'jumps': 'V',
    'over': 'IN',
    'lazy': 'ADJ',
    'dog': 'NN'
}

# (word, tag) pairs for the words the lexicon knows about.
parsed_words = [(word, part_of_speech_tags.get(word))
                for word in words if word in part_of_speech_tags]

# Flat POS-tag string that the regex grammar rules are matched against.
pos_sequence = ' '.join(tag for _, tag in parsed_words)

def matches_rule(phrase, rule_pattern):
    """Return True when *phrase* contains a match for *rule_pattern*.

    The pattern is wrapped in word boundaries so tags only match whole
    tokens (e.g. 'NN' cannot match inside a longer tag).
    """
    bounded_pattern = r'\b' + rule_pattern + r'\b'
    return re.search(bounded_pattern, phrase) is not None

def analyze_sentence(parsed_words):
    """Report which grammar rules match the POS sequence of *parsed_words*."""
    print("Parsed words: ", parsed_words)

    # Rebuild the POS-tag string from the (word, tag) pairs.
    pos_sequence = ' '.join(tag for _, tag in parsed_words)
    print("POS sequence:", pos_sequence)

    # Check each rule in the same fixed order as the original if-chain.
    phrase_labels = [
        ('NP', 'Noun Phrase'),
        ('VP', 'Verb Phrase'),
        ('P', 'Preposition'),
        ('V', 'Verb'),
        ('PP', 'Prepositional Phrase'),
    ]
    for symbol, label in phrase_labels:
        if matches_rule(pos_sequence, grammar_rules[symbol]):
            print(f"Detected a {label} ({symbol})")


analyze_sentence(parsed_words)

output

2. implement dependency parsing for:


1) i prefer the morning flight through Denver
2) the dog barked loudly at the stranger
3) the quick brown fox jumps over the lazy dog
4) i booked a flight from Vijayawada to Delhi
5) john saw a dog yesterday which was a Yorkshire
terrier
CODE
import spacy

# Small English pipeline; must be installed via `python -m spacy download`.
nlp = spacy.load("en_core_web_sm")

# Sentences from the assignment statement.  Fix: the original transcription
# garbled several words ("fight" for "flight", "jumped" for "jumps",
# "yokshire" for "Yorkshire", and a dropped "the"), which changes the
# dependency parses spaCy produces.
sentences = [
    "I prefer the morning flight through Denver",
    "The dog barked loudly at the stranger",
    "The quick brown fox jumps over the lazy dog",
    "I booked a flight from Vijayawada to Delhi",
    "John saw a dog yesterday which was a Yorkshire terrier"
]

# Print one "token --> dependency-relation (head)" line per token.
for sentence in sentences:
    doc = nlp(sentence)
    print(f"Sentence: {sentence}")
    for token in doc:
        print(f"{token.text} --> {token.dep_} ({token.head.text})")
    print("-" * 20)
Output
3. Implement arc-based dependency-parsing transitions using
shift-reduce (S-R) operations, and display step-wise results
for each transition.
1) I wanted to try someplace new
2) I booked a flight from Delhi to Vijayawada.
class SimpleShiftReduceParser:
    """Toy arc-based shift-reduce dependency parser.

    Builds (head, dependent) arcs using two transitions:

    * SHIFT  -- move the next buffer word onto the stack.
    * REDUCE -- pop the stack top and attach it as a dependent of the
      word now on top.

    Fix: the extracted source had an f-string and an arc tuple broken
    across lines (a syntax error); this reconstruction restores valid
    code with identical intent.
    """

    # Words that trigger an eager reduce as soon as they reach the stack top.
    # NOTE(review): hand-picked for the two demo sentences in this lab --
    # this is not a general parsing oracle.  Hoisted to a class constant so
    # the list is not rebuilt on every loop iteration.
    REDUCE_TRIGGER_WORDS = ("try", "booked", "flight", "someplace", "new")

    def __init__(self, sentence):
        # Lower-cased, whitespace-tokenised input, consumed left to right.
        self.input_buffer = sentence.lower().split()
        self.stack = []             # partially processed words
        self.dependency_arcs = []   # (head_word, dependent_word) pairs
        self.transition_log = []    # human-readable record of each step

    def shift_word(self):
        """SHIFT: move the next buffer word onto the stack (no-op if empty)."""
        if self.input_buffer:
            word = self.input_buffer.pop(0)
            self.stack.append(word)
            self.transition_log.append(f"Shift: {word}")

    def reduce_stack(self):
        """REDUCE: attach the stack top as a dependent of the word below it."""
        if len(self.stack) >= 2:
            dependent_word = self.stack.pop()
            head_word = self.stack[-1]
            # head_word -> dependent_word (dependency arc)
            self.dependency_arcs.append((head_word, dependent_word))
            self.transition_log.append(f"Reduce: {head_word} -> {dependent_word}")

    def parse_sentence(self):
        """Run shift/reduce to completion, printing the state after each step."""
        while self.input_buffer:
            self.shift_word()
            self.display_current_state()
            # Eagerly reduce when a known dependent word reaches the top.
            if len(self.stack) > 1 and self.stack[-1] in self.REDUCE_TRIGGER_WORDS:
                self.reduce_stack()
                self.display_current_state()
        # Final clean-up: reduce whatever remains down to a single root.
        while len(self.stack) > 1:
            self.reduce_stack()
            self.display_current_state()

    def display_current_state(self):
        """Print the stack, buffer, arcs, and transition log."""
        print(f"Stack: {self.stack}")
        print(f"Input Buffer: {' '.join(self.input_buffer)}")
        print(f"Dependency Arcs: {self.dependency_arcs}")
        print(f"Transitions Log: {self.transition_log}")
        print("-" * 50)
# Demo driver: run the shift-reduce parser on both assignment sentences,
# showing every transition step-wise.
sentence1, sentence2 = (
    "I wanted to try someplace new",
    "I booked a flight from delhi to vijayawada",
)

print("Parsing sentence 1:")
parser1 = SimpleShiftReduceParser(sentence1)
parser1.parse_sentence()

print("\nParsing sentence 2:")
parser2 = SimpleShiftReduceParser(sentence2)
parser2.parse_sentence()

You might also like