0% found this document useful (0 votes)
16 views23 pages

Compiler Design Final

Compiler design lab manual

Uploaded by

Aanchal Shukla
Copyright
© All Rights Reserved
We take content rights seriously. If you suspect this is your content, claim it here.
Available Formats
Download as PDF, TXT or read online on Scribd
0% found this document useful (0 votes)
16 views23 pages

Compiler Design Final

Compiler design lab manual

Uploaded by

Aanchal Shukla
Copyright
© All Rights Reserved
We take content rights seriously. If you suspect this is your content, claim it here.
Available Formats
Download as PDF, TXT or read online on Scribd
You are on page 1/ 23

EXPERIMENT-1(A)

AIM: Practice of the LEX tool for compiler writing.

THEORY:
LEX program to count the number of words:

Code:

%{
/* LEX program to count the number of words on each input line.
 * The count is printed and reset every time a newline is read. */
#include<stdio.h>
#include<string.h>

int i = 0;  /* running word count for the current line */

%}

/* Rules Section*/

%%

([a-zA-Z0-9])+ {i++;} /* Rule for counting number of words*/
               /* NOTE: '+' instead of the original '*' — a pattern that can
                * match the empty string makes lex loop without consuming input. */

"\n" {printf("%d\n", i); i = 0;} /* print the count, reset for the next line */

%%

/* Tell lex there is no further input after EOF. */
int yywrap(void){ return 1; }

/* The function that starts the analysis. */
int main()
{
    yylex();
    return 0;
}

OUTPUT:
LEX program to count the number of vowels and consonants in a string.

Code:

%{
/* LEX program to count the number of vowels and consonants in a string. */
int vow_count=0;    /* vowels seen so far */
int const_count =0; /* consonants seen so far */

%}

%%

[aeiouAEIOU] {vow_count++;}

[a-zA-Z] {const_count++;} /* runs only for letters not already matched as vowels:
                           * both rules match one character, so the earlier rule wins */

%%

/* Return 1: no more input after EOF. */
int yywrap(){ return 1; }

int main()
{
    printf("Enter the string of vowels and consonants:");

    yylex();

    printf("Number of vowels are: %d\n", vow_count);

    printf("Number of consonants are: %d\n", const_count);

    return 0;
}
EXPERIMENT-1(B)

AIM: Practice of the YACC tool for compiler writing.

THEORY:
YACC program to implement a Calculator and recognize a valid Arithmetic
expression.

Code:

Lexical Analyzer Source Code:

%{

/* Definition section: token scanner for the YACC calculator. */

#include<stdio.h>

#include "y.tab.h"

extern int yylval; /* semantic value shared with the parser */

%}

/* Rule Section */

%%

[0-9]+ {
    yylval=atoi(yytext);
    return NUMBER;
}            /* closing brace was lost in the original listing */

[ \t] ;      /* skip blanks and tabs (the space was dropped by the PDF scrape) */

[\n] return 0;       /* end of expression: report EOF to the parser */

. return yytext[0];  /* operators and parentheses pass through as themselves */

%%

/* No further input after EOF. */
int yywrap()
{
    return 1;
}
Parser Source Code:

%{

/* Definition section: grammar for a valid arithmetic expression. */

#include<stdio.h>

int flag=0; /* set to 1 by yyerror when the expression is invalid */

%}

%token NUMBER

%left '+' '-'

%left '*' '/' '%'

%left '(' ')'

/* Rule Section */

%%

ArithmeticExpression: E{

    /* $1 holds E's value; the original printed $$, which is not yet set here. */
    printf("\nResult=%d\n", $1);

    return 0;

};

E:E'+'E {$$=$1+$3;}

 |E'-'E {$$=$1-$3;}

 |E'*'E {$$=$1*$3;}

 |E'/'E {$$=$1/$3;}

 |E'%'E {$$=$1%$3;}

 |'('E')' {$$=$2;}

 | NUMBER {$$=$1;}

 ;

%%

//driver code

int main()
{
    printf("\nEnter Any Arithmetic Expression :\n");
    yyparse();
    if(flag==0)
        printf("\nEntered arithmetic expression is Valid\n\n");
    return 0;
}

/* Called by the parser on any syntax error. */
void yyerror()
{
    printf("\nEntered arithmetic expression is Invalid\n\n");
    flag=1;
}

OUTPUT:
EXPERIMENT-2

AIM: Write a program to check whether a string belongs to the grammar or not.

THEORY:
CODE:
import ply.lex as lex
import ply.yacc as yacc

# Define tokens
# Names of the two terminal symbols used by the grammar below.
tokens = (
    'A',
    'B',
)

# Regular expression rules for tokens


t_A = r'a'  # terminal 'a'
t_B = r'b'  # terminal 'b'

# Ignored characters
t_ignore = ' \t\n'  # whitespace carries no meaning in this language

# Error handling rule for invalid characters


def t_error(t):
    """Report a character no token rule matches, then skip past it."""
    bad_char = t.value[0]
    print(f"Illegal character '{bad_char}'")
    t.lexer.skip(1)

# Define the lexer


lexer = lex.lex()  # build the PLY lexer from the module-level t_* rules above

# Define the CFG grammar


def p_start(p):
    # Start symbol: one of the two string forms, or the empty string.
    # The docstring below is the PLY grammar specification — not documentation.
    '''
    start : equal_ab
          | non_equal_ab
          | empty
    '''
    # Recogniser only: acceptance is the result, no semantic value is built.
    pass
def p_equal_ab(p):
    # PLY grammar rule (docstring is the spec): grows an AB-rooted form by
    # appending 'a b' or 'b a' pairs.
    '''
    equal_ab : equal_ab A B
             | equal_ab B A
             | AB
    '''
    # Recogniser only — no semantic action required.
    pass

def p_non_equal_ab(p):
    # PLY grammar rule (docstring is the spec): a BA-rooted form followed by
    # any run of single 'a' or 'b' tokens.
    '''
    non_equal_ab : non_equal_ab A
                 | non_equal_ab B
                 | BA
    '''
    # Recogniser only — no semantic action required.
    pass

def p_AB(p):
    # Helper non-terminal matching the pair 'a' then 'b'.
    '''
    AB : A B
    '''
    pass

def p_BA(p):
    # Helper non-terminal matching the pair 'b' then 'a'.
    '''
    BA : B A
    '''
    pass

def p_empty(p):
    # The empty production: matches zero tokens.
    'empty :'
    pass
# Error handling rule for syntax errors
def p_error(p):
    """Abort parsing on the first syntax error; check_string catches this."""
    message = "Syntax error in input!"
    raise Exception(message)

# Create the parser


parser = yacc.yacc()  # builds LALR tables from the p_* rule docstrings above

# Function to check if a string belongs to the grammar


def check_string(input_string):
    """Feed *input_string* through the module lexer/parser pair.

    Returns a human-readable verdict string; p_error raises on any syntax
    error, which is translated into the "does not belong" message here.
    """
    lexer.input(input_string)
    try:
        parser.parse(lexer=lexer)
    except Exception as err:
        return f"'{input_string}' does not belong to the grammar: {err}"
    return f"'{input_string}' belongs to the grammar."

# Test the program


# Test the program: read strings until the user types 'exit'.
while True:
    input_string = input("Enter a string (or 'exit' to quit): ")
    if input_string.lower() == 'exit':
        break
    print(check_string(input_string))

OUTPUT:
EXPERIMENT-3

AIM: Write a program to check whether a string includes a keyword or not.

THEORY:
CODE:
import ply.lex as lex
import ply.yacc as yacc

# Define the list of keywords you want to check for


keywords = {'if', 'else', 'while', 'for', 'break', 'continue', 'return'}

# Define the list of tokens


# 'ID' plus one token name per keyword, uppercased (IF, ELSE, WHILE, ...).
tokens = ['ID'] + [keyword.upper() for keyword in keywords]

# Regular expression rules for tokens


# NOTE(review): no t_ID rule is defined, so ordinary identifiers are never
# tokenised as ID — non-keyword words hit t_error character by character.
# Confirm whether a t_ID rule was intended.
t_ignore = ' \t\n'

# Define a rule to match keywords


def t_KEYWORD(t):
    # The docstring below is the token's regex — it is read by PLY, not docs.
    r'(if|else|while|for|break|continue|return)'
    # Retag the token with its own terminal name (e.g. 'if' -> type 'IF') so
    # the parser can reference each keyword individually.
    t.type = t.value.upper()  # Convert keyword to uppercase
    return t

# Error handling rule for invalid characters


def t_error(t):
    """Print a diagnostic for an unmatched character and resume scanning."""
    print(f"Illegal character '{t.value[0]}'")
    skip_one = t.lexer.skip
    skip_one(1)

# Build the lexer


lexer = lex.lex()  # assembles the lexer from the module-level t_* definitions

# Define the grammar for an input containing keywords


def p_input(p):
    # Entry rule: a single statement, or empty input.
    # (Docstring below is the PLY grammar specification.)
    '''
    input : statement
          | empty
    '''
    pass

def p_statement(p):
    # One token per statement; each keyword is its own terminal (see t_KEYWORD).
    '''
    statement : ID
              | IF
              | ELSE
              | WHILE
              | FOR
              | BREAK
              | CONTINUE
              | RETURN
    '''
    # NOTE(review): this action also fires for the plain ID alternative,
    # labelling non-keywords as keywords — confirm whether ID should be
    # reported differently.
    print(f"'{p[1]}' is a keyword")

def p_empty(p):
    # The empty production: matches zero tokens.
    'empty :'
    pass

# Error handling rule for syntax errors


def p_error(p):
    """Report a syntax error to stdout.

    PLY calls this with the offending token, or with None when the error is
    at end of input; the original crashed with AttributeError in the None
    case. This matches the p_error style used in Experiment 4.
    """
    if p:
        print(f"Syntax error at '{p.value}'")
    else:
        print("Syntax error at EOF")

# Build the parser


parser = yacc.yacc()  # builds the parser from the p_* grammar rules above

# Function to check for keywords in an input string


def check_keywords(input_string):
    # Tokenise and parse input_string; p_statement prints a line for each
    # recognised keyword. Returns None — all output goes through print.
    parser.parse(input_string, lexer=lexer)

# Test the program


# Test the program: keep reading lines until the user types 'exit'.
while True:
    line = input("Enter an input string (or 'exit' to quit): ")
    if line.lower() == 'exit':
        break
    check_keywords(line)

OUTPUT:
EXPERIMENT-4

AIM: Write a program to remove left recursion from a Grammar.

THEORY:
CODE:
import ply.lex as lex
import ply.yacc as yacc

# Lexer tokens
tokens = (
    'ID',     # Non-terminals (single uppercase letters, see t_ID)
    'ARROW',  # Arrow (->)
    'OR',     # Alternation bar '|'
)

# Token regex rules


t_ARROW = r'->'
t_OR = r'\|'

# Ignore spaces and tabs


t_ignore = " \t"

# Rule for non-terminal IDs (single uppercase letter)


def t_ID(t):
    # Docstring below is the token regex (read by PLY): one uppercase letter.
    r'[A-Z]'
    return t

# Rule for new lines


def t_newline(t):
    # Docstring below is the token regex (read by PLY).
    r'\n+'
    # Track line numbers for error reporting; the token itself is discarded.
    t.lexer.lineno += len(t.value)

# Error handling
def t_error(t):
    """Announce an unrecognised character and step the lexer past it."""
    offending = t.value[0]
    print(f"Illegal character '{offending}'")
    t.lexer.skip(1)
# Build the lexer
lexer = lex.lex()  # assembles the lexer from the t_* rules above

# Dictionary to store grammar rules


grammar = {}  # non-terminal -> list of right-hand sides; filled by p_production
def p_grammar(p):
    # One or more productions; each is recorded by p_production as a side
    # effect, so nothing is done here. (Docstring below is the PLY spec.)
    """grammar : grammar production
               | production"""
    pass

def p_production(p):
    """production : ID ARROW rules"""
    # Record the production in the module-level grammar dict:
    # left-hand non-terminal -> list of alternatives built by p_rules.
    non_terminal = p[1]
    grammar[non_terminal] = p[3]

def p_rules(p):
    # Collect the '|'-separated alternatives into a Python list.
    """rules : rules OR rule
             | rule"""
    if len(p) == 4:
        p[0] = p[1] + [p[3]]
    else:
        p[0] = [p[1]]

def p_rule(p):
    """rule : ID"""
    # A rule's right-hand side is a single non-terminal (one uppercase letter).
    # NOTE(review): multi-symbol alternatives like "A a" cannot be parsed by
    # this grammar — confirm against the intended input format.
    p[0] = p[1]

def p_epsilon(p):
    """rule : epsilon"""
    # NOTE(review): no production defines the 'epsilon' non-terminal, so PLY
    # will flag an undefined symbol for this rule — confirm intent.
    p[0] = []

def p_error(p):
    """Report a syntax error; PLY passes None when the error is at EOF."""
    if p is None:
        print("Syntax error at EOF")
    else:
        print(f"Syntax error at '{p.value}'")

# Build the parser


parser = yacc.yacc()  # builds the parser from the p_* grammar rules above

# Function to remove left recursion from the grammar


def remove_left_recursion(grammar):
    """Remove immediate left recursion from *grammar*.

    For each non-terminal A with productions A -> A a1 | ... | b1 | ...,
    apply the standard transformation:

        A  -> b1 A' | b2 A' | ...
        A' -> a1 A' | a2 A' | ... | ε

    Non-terminals without left recursion are copied through unchanged.

    BUG FIX: the original appended A' to the beta productions twice — once
    while collecting them and again while building A's new rule list —
    yielding e.g. "BA'A'" instead of "BA'".

    :param grammar: dict mapping non-terminal -> list of production strings
    :return: new dict with immediate left recursion eliminated
    """
    new_grammar = {}

    for non_terminal, productions in grammar.items():
        new_non_terminal = non_terminal + "'"
        alpha_productions = []  # tails of A -> A alpha, already suffixed with A'
        beta_productions = []   # raw non-recursive alternatives (no suffix yet)

        for production in productions:
            # NOTE(review): assumes every production is a string (p_epsilon
            # upstream can yield [] — confirm inputs before relying on this).
            if production.startswith(non_terminal):
                alpha_productions.append(production[len(non_terminal):] + new_non_terminal)
            else:
                beta_productions.append(production)

        if alpha_productions:
            # A -> beta A' for each non-recursive alternative (suffix added once).
            new_grammar[non_terminal] = [beta + new_non_terminal for beta in beta_productions]
            # A' -> alpha A' | ε
            new_grammar[new_non_terminal] = alpha_productions + ['ε']
        else:
            new_grammar[non_terminal] = productions

    return new_grammar

# Function to print the grammar


def print_grammar(grammar):
    """Print each production on its own line as "NT -> alt1 | alt2 | ..."."""
    for lhs, alternatives in grammar.items():
        joined = ' | '.join(alternatives)
        print(f"{lhs} -> {joined}")

if __name__ == "__main__":
    # Demo input: A is directly left-recursive (A -> A a).
    input_data = """
    A -> A a | B
    B -> b
    """

    lexer.input(input_data)

    # Parsing the grammar
    # NOTE(review): t_ID only matches single uppercase letters, so the
    # lowercase 'a' and 'b' above are reported as illegal characters by
    # t_error, and multi-symbol alternatives like "A a" cannot be reduced by
    # the 'rule : ID' production — confirm the expected input format.


    parser.parse(input_data)

    print("Original Grammar:")
    print_grammar(grammar)

    new_grammar = remove_left_recursion(grammar)

    print("\nGrammar after removing left recursion:")


    print_grammar(new_grammar)

OUTPUT:

You might also like