0% found this document useful (0 votes)
12 views13 pages

Arduino Dll

Uploaded by

Khairul Srx
Copyright
© All Rights Reserved
We take content rights seriously. If you suspect this is your content, claim it here.
Available Formats
Download as PDF, TXT or read online on Scribd
0% found this document useful (0 votes)
12 views13 pages

Arduino Dll

Uploaded by

Khairul Srx
Copyright
© All Rights Reserved
We take content rights seriously. If you suspect this is your content, claim it here.
Available Formats
Download as PDF, TXT or read online on Scribd
You are on page 1/ 13

Write a lex program to recognize telephone operators in

Bangladesh.

/* Classify 11-digit Bangladeshi mobile numbers by operator prefix.
   Fixes: removed zero-width characters from the PDF extraction, and the
   catch-all rule ".|\n" now has whitespace before its action (the original
   ran the ';' into the pattern, which breaks flex). */
%{
#include<stdio.h>
%}

%%
"015"[0-9]{8}   {printf("Teletalk: %s\n",yytext);}
"016"[0-9]{8}   {printf("Airtel: %s\n",yytext);}
"017"[0-9]{8}   {printf("GP: %s\n",yytext);}
"018"[0-9]{8}   {printf("Robi: %s\n",yytext);}
"019"[0-9]{8}   {printf("Banglalink: %s\n",yytext);}
.|\n            ;
%%

int yywrap()
{
    return 1;
}

int main()
{
    /* Read stdin until EOF; every recognised number is printed by its rule. */
    yylex();
    return 0;
}
Write a lex program to count the characters, words and lines in a
given C program.

/* Count characters, words and lines on stdin.
   A "word" is a run of [a-zA-Z0-9_]; its length is added to char_count via
   yyleng.  Fixes: zero-width characters removed, and the word rule is back
   on a single line (the extraction wrapped its action, which flex rejects).
   NOTE: line_count counts newline characters, so a final line without a
   trailing '\n' is not counted — preserved from the original. */
%{
#include <stdio.h>
int char_count = 0, word_count = 0, line_count = 0;
%}

%%

[a-zA-Z0-9_]+   { word_count++; char_count += yyleng; }
\n              { char_count++; line_count++; }
.               { char_count++; }

%%

int yywrap() {
    return 1;
}

int main() {
    yylex();
    printf("Characters: %d\nWords: %d\nLines: %d\n", char_count, word_count, line_count);
    return 0;
}
Write a lex program to identify integer, floating point, exponential
and complex numbers.

/* Classify numeric tokens as Integer / Floating-Point / Exponential / Complex.
   Flex picks the longest match, so "12e3" is Exponential (not Integer "12")
   and "1.5+2.5i" is Complex (not Floating-Point "1.5").
   Fixes: the extraction had split three rules so the action sat on the line
   after its pattern, which flex rejects; rules are rejoined on one line each.
   NOTE(review): the Complex pattern requires decimal fractions in both parts
   and makes the middle sign optional, so "3+4i" is NOT matched — preserved
   from the original rather than silently changed. */
%{
#include<stdio.h>
%}

%%
[+-]?[0-9]+                                                   {printf("Integer: %s\n",yytext);}
[+-]?[0-9]+\.[0-9]+([eE][+-]?[0-9]+)?                         {printf("Floating-Point: %s\n",yytext);}
[+-]?[0-9]+([eE][+-]?[0-9]+)                                  {printf("Exponential: %s\n",yytext);}
[+-]?[0-9]+\.[0-9]+([eE][+-]?[0-9]+)?[-+]?[0-9]+\.[0-9]+[i]   {printf("Complex: %s\n",yytext);}
.|\n ;
%%

int yywrap()
{
    return 1;
}

int main()
{
    yylex();
    return 0;
}
Write a lex program to identify and count the spaces and comments
in a given C program.

/* Count space characters and comments in C source read from stdin.
   "//" comments count one per line; the block-comment pattern is the
   standard flex regex for a complete C comment, counted once per comment.
   Fixes: zero-width characters from the PDF extraction removed.
   NOTE: only the ASCII space is counted — tabs are ignored, matching the
   original program's behaviour. */
%{
#include<stdio.h>
int comment= 0;
int space= 0;
%}

%%
[ ]+                        {space+=yyleng;}
"//".*                      {comment++;}
"/*"([^*]|\*+[^*/])*"*"+"/" {comment++;}
.|\n                        ;
%%

int yywrap()
{
    return 1;
}

int main()
{
    yylex();
    printf("No of Comment: %d\n",comment);
    printf("No of Space: %d\n",space);
    return 0;
}
Write a lex program to recognize and count all of the identifiers in a
C program

/* Count identifier occurrences in C source, skipping reserved keywords.
   Longest-match means "integer" matches IDENTIFIER rather than the keyword
   "int" followed by "eger", so keyword filtering is safe.
   Fixes: the KEYWORD definition is back on a single line (a flex definition
   may not span lines) and the "//" comments were moved out of the rules
   section; zero-width characters removed.
   NOTE: every occurrence is counted, not distinct names. */
%{
#include <stdio.h>
#include <string.h>
int count = 0;
%}

IDENTIFIER [a-zA-Z_][a-zA-Z0-9_]*
KEYWORD int|float|char|double|long|short|void|return|if|else|while|for|do|switch|case|break|continue|struct|union|typedef

%%
{KEYWORD}       ;
{IDENTIFIER}    {
                    count++;
                    printf("Identifier: %s\n", yytext);
                }
.|\n            ;
%%

int yywrap()
{
    return 1;
}

int main()
{
    yylex();
    printf("\nTotal Identifiers (excluding keywords): %d\n", count);
    return 0;
}
Write a lex program to recognize “to be” verbs from a given
paragraph.

/* Count "to be" verbs in a paragraph read from stdin.
   BUG FIX: the original rule ".*({TO_BE}).*" swallowed an entire line per
   match, so it counted at most one verb per line AND fired on substrings
   (the "is" inside "this").  Matching the verb list against whole words
   fixes both: a longer alphabetic word prefers the [a-zA-Z]+ catch-all
   (longest match), so only stand-alone verbs are counted, one by one.
   NOTE(review): "have"/"has" are not forms of "to be", but the original
   word list is preserved unchanged. */
%{
#include<stdio.h>
#include<string.h>
int count= 0;
%}

TO_BE am|is|are|was|were|been|have|has|be

%%

{TO_BE}     { count++; printf("To-Be Verb Found: %s\n", yytext); }
[a-zA-Z]+   ;
.|\n        ;

%%

int yywrap() {
    return 1;
}

int main() {
    yylex();
    printf("Total No of To-Be Verbs: %d\n", count);
    return 0;
}
Write a lex program to identify whether a given sentence is simple/
complex/ compound.

/* Classify each input line as Complex / Compound / Simple.
   A line containing a subordinating keyword (that/since/because) is Complex;
   otherwise a coordinating keyword (and/or/but) makes it Compound; any other
   line is Simple.  When a line matches several rules at equal length, flex
   takes the earliest rule, so Complex wins over Compound wins over Simple.
   Fixes: the extraction had split the patterns and printf string literals
   across blank lines (invalid flex and invalid C); rules rejoined.
   NOTE(review): keywords are matched as substrings — "sand" contains "and"
   and would mark a line Compound; preserved from the original design. */
%{
#include <stdio.h>
#include <string.h>
%}

COMPLEX_KEYWORDS that|since|because
COMPOUND_KEYWORDS and|or|but

%%

.*({COMPLEX_KEYWORDS}).*    { printf("Complex Sentence: %s\n", yytext); }
.*({COMPOUND_KEYWORDS}).*   { printf("Compound Sentence: %s\n", yytext); }
.*                          { printf("Simple Sentence: %s\n", yytext); }
.|\n                        ;

%%

int yywrap() {
    return 1;
}

int main() {
    yylex();
    return 0;
}
Calculate FIRST() of the Non-Terminals.

# Compute FIRST sets for the classic expression grammar.
# 'e' denotes epsilon; any symbol that is not a key of `grammar` is a terminal.
# Fixes: the extracted copy contained zero-width characters and had lost all
# indentation, so it was a SyntaxError; reconstructed with identical logic.
grammar = {
    'E': ['T E1'],
    'E1': ['+ T E1', 'e'],
    'T': ['F T1'],
    'T1': ['* F T1', 'e'],
    'F': ['( E )', 'i'],
}

# FIRST set accumulator, one (initially empty) set per non-terminal.
first = {}

for symbol in grammar:
    first[symbol] = set()


def get_first(symbol):
    """Return FIRST(symbol); a terminal's FIRST set is the symbol itself."""
    if symbol not in grammar:
        return {symbol}
    if first[symbol]:  # already computed — non-empty means finished for this grammar
        return first[symbol]
    result = set()
    for rule in grammar[symbol]:
        parts = rule.split()
        for part in parts:
            part_first = get_first(part)
            result.update(part_first - {'e'})
            if 'e' not in part_first:
                break
        else:
            # every part of this production can derive epsilon
            result.add('e')
    first[symbol] = result
    return result


for symbol in grammar:
    get_first(symbol)

print("FIRST SETS:\n")
for symbol in sorted(first):
    if symbol in grammar or symbol == 'e':
        print(f"{symbol} : {first[symbol]}")
Ambiguity

from nltk import CFG, ChartParser​


import re​

def get_input():
    """Read grammar rules from stdin, one per line, until a line reading "END".

    Returns the list of stripped rule strings (the END sentinel excluded).
    Reconstructed: the extracted copy had lost its indentation and carried
    zero-width characters, making it a SyntaxError.
    """
    rules = []
    while True:
        line = input().strip()
        if line.upper() == "END":
            break
        rules.append(line)
    return rules

def is_ambiguous(cfg_text, test_string):
    """Report whether `test_string` has more than one parse under the CFG.

    `cfg_text` is a list of NLTK-style rule lines; `test_string` is split on
    whitespace into tokens.  Prints the verdict and returns True when more
    than one parse tree exists.  Reconstructed: the extracted copy had lost
    indentation and carried zero-width characters (SyntaxError).
    """
    cfg = CFG.fromstring("\n".join(cfg_text))
    parser = ChartParser(cfg)

    trees = list(parser.parse(test_string.split()))

    if len(trees) > 1:
        print("CFG is ambiguous")
        return True
    else:
        print("CFG is unambiguous")
        return False

def remove_left_recursion(rules):
    """Eliminate immediate left recursion from a list of rule strings.

    Each rule has the form "A->alpha1|alpha2|..." with symbols separated by
    spaces inside a production.  For A -> A a | b the standard transformation
    produces A -> b A' and A' -> a A' | E, where "E" is this program's
    epsilon marker (preserved from the original).

    BUG FIX: the original tested only the first *character* of a production
    (`production[0] == non_ter`) and then sliced a fixed `production[3:]`,
    which drops tokens for spaced productions ("E + T"[3:] -> "T", losing
    the '+') and breaks entirely for multi-character non-terminals.
    Productions are now tokenised on whitespace instead.
    """
    grammar_dict = {}
    for rule in rules:
        head, productions = rule.split("->")
        head = head.strip()
        grammar_dict[head] = [p.strip() for p in productions.split("|")]

    for non_ter in list(grammar_dict.keys()):
        alpha = []  # tails of left-recursive productions (A -> A tail)
        beta = []   # non-left-recursive productions
        for production in grammar_dict[non_ter]:
            tokens = production.split()
            if tokens and tokens[0] == non_ter:
                alpha.append(" ".join(tokens[1:]))
            else:
                beta.append(production)

        if alpha:
            # Invent a fresh primed name that does not clash with existing ones.
            new_non_term = non_ter + "'"
            while new_non_term in grammar_dict:
                new_non_term += "'"

            grammar_dict[non_ter] = [b + " " + new_non_term for b in beta]
            grammar_dict[new_non_term] = [a + " " + new_non_term for a in alpha] + ["E"]

    return [nt + "->" + "|".join(grammar_dict[nt]) for nt in grammar_dict]
# Driver: read a grammar and a test string, check for ambiguity, and if the
# grammar is ambiguous, apply left-recursion removal and print the new rules.
# Reconstructed: zero-width characters and lost indentation made the
# extracted copy a SyntaxError.
user_cfg = get_input()
test_string = input("\nEnter test string: ").strip()
if is_ambiguous(user_cfg, test_string):
    print("Remove Left Recursion")
    fixed_cfg = remove_left_recursion(user_cfg)
    print("\nNew Rules:")
    for rule in fixed_cfg:
        print(rule)
FIRST AND FOLLOW:
def compute_first_sets(grammar):
    """Compute FIRST sets for every non-terminal of `grammar`.

    `grammar` maps non-terminal -> list of productions, each production a
    list of symbols.  Symbols absent from `grammar` are terminals; 'ε' marks
    the empty production.  Returns a dict non-terminal -> FIRST set.
    Reconstructed: the extracted copy carried zero-width characters and lost
    indentation (SyntaxError); logic unchanged.
    """
    first = {nt: set() for nt in grammar}

    def first_of(symbol):
        if symbol not in grammar:
            return {symbol}  # terminal: FIRST is the symbol itself
        if first[symbol]:  # already computed (non-empty means finished here)
            return first[symbol]

        for rule in grammar[symbol]:
            for part in rule:
                part_first = first_of(part)
                first[symbol].update(part_first - {'ε'})
                if 'ε' not in part_first:
                    break
            else:
                first[symbol].add('ε')  # every part of the rule can vanish
        return first[symbol]

    for nt in grammar:
        first_of(nt)

    return first


def compute_follow_sets(grammar, first_sets, start_symbol):
    """Compute FOLLOW sets by fixed-point iteration.

    `grammar` maps non-terminal -> list of productions (lists of symbols);
    `first_sets` maps each non-terminal to its FIRST set ('ε' marks epsilon).
    Returns a dict non-terminal -> FOLLOW set, with '$' in FOLLOW(start).

    BUG FIX: the original did `trailer = first_sets[symbol]`, binding a
    *reference* to the FIRST set; a later `trailer.update(...)` (taken when
    the next symbol scanned leftwards is nullable) silently corrupted
    `first_sets`.  The trailer is now always a fresh copy.
    """
    follow = {nt: set() for nt in grammar}
    follow[start_symbol].add('$')  # Rule 1: $ follows the start symbol

    while True:
        updated = False
        for head in grammar:
            for rule in grammar[head]:
                # trailer = set of terminals that can follow the symbol
                # currently being scanned (we walk the rule right-to-left).
                trailer = follow[head].copy()
                for symbol in reversed(rule):
                    if symbol in grammar:  # non-terminal
                        before = len(follow[symbol])
                        follow[symbol].update(trailer)
                        if 'ε' in first_sets[symbol]:
                            trailer.update(first_sets[symbol] - {'ε'})
                        else:
                            trailer = set(first_sets[symbol])  # copy, not alias
                        if len(follow[symbol]) > before:
                            updated = True
                    else:  # terminal: only the terminal itself can precede
                        trailer = {symbol}
        if not updated:
            break

    return follow


# === Example Grammar ===
# Classic LL(1) expression grammar; 'ε' marks the empty production.
# Reconstructed: zero-width characters and lost indentation made the
# extracted copy a SyntaxError.
grammar = {
    'E': [['T', "E'"]],
    "E'": [['+', 'T', "E'"], ['ε']],
    'T': [['F', "T'"]],
    "T'": [['*', 'F', "T'"], ['ε']],
    'F': [['(', 'E', ')'], ['id']],
}

# === Compute Sets ===
first_sets = compute_first_sets(grammar)
follow_sets = compute_follow_sets(grammar, first_sets, 'E')

# === Print FIRST sets ===
print("FIRST Sets:")
for nt in first_sets:
    print(f"FIRST({nt}) = {{ {', '.join(sorted(first_sets[nt]))} }}")

# === Print FOLLOW sets ===
print("\nFOLLOW Sets:")
for nt in follow_sets:
    print(f"FOLLOW({nt}) = {{ {', '.join(sorted(follow_sets[nt]))} }}")
Left Recursion:
grammar = {
    'E': ['E + T', 'T'],
    'T': ['T * F', 'F'],
    'F': ['( E )', 'id']
}

# Report direct (immediate) left recursion: a production whose first
# symbol equals its own head.  Indirect left recursion is not detected.
for nt in grammar:
    offender = next((prod for prod in grammar[nt] if prod.split()[0] == nt), None)
    if offender is not None:
        print(f"Left recursion in {nt} -> {offender}")
    else:
        print(f"No left recursion in {nt}")

Ambiguity:
import nltk
from nltk import CFG
from nltk.parse.chart import ChartParser

# Classic ambiguous expression grammar: "id + id * id" has two parse trees.
# Reconstructed: the extraction left a stray "⚠️" on its own line (a
# SyntaxError) and split print strings across blank lines; the emoji is
# reattached to the message it belonged to.
grammar = CFG.fromstring("""
S -> S '+' S
   | S '*' S
   | 'id'
""")

user_input = input("Enter a space-separated sentence using 'id', '+', '*':\n").strip()
tokens = user_input.split()

parser = ChartParser(grammar)
trees = list(parser.parse(tokens))

# Show results
if trees:
    print(f"\nNumber of parse trees: {len(trees)}")
    if len(trees) > 1:
        print("⚠️ Ambiguity Detected!\n")
    else:
        print("No ambiguity detected.\n")

You might also like