AI & Soft Computing
Artificial Intelligence
& Soft Computing Laboratory
(PGCA-1929)
Practical-1
Use logic programming in Python to check for prime numbers.
Prime numbers:
A natural number greater than 1 that has no positive divisors other than 1 and itself is called a prime number.
For example: 3, 7 and 11 are prime numbers.
CODE:
# A default function for prime checking conditions
def PrimeChecker(a):
    # Checking that the given number is more than 1
    if a > 1:
        # Iterating over the given number with a for loop
        for j in range(2, int(a/2) + 1):
            # If the given number is divisible
            if (a % j) == 0:
                print(a, "is not a prime number")
                break
        # Else it is a prime number
        else:
            print(a, "is a prime number")
    # If the given number is 1 or less
    else:
        print(a, "is not a prime number")
# Taking an input number from the user
a = int(input("Enter an input number:"))
# Printing result
PrimeChecker(a)
OUTPUT:
Enter an input number: 17
17 is a prime number
Practical-2
Logic programming in Python: parse a family tree and infer the relationships between family members.
import json
from logpy import Relation, facts, run, conde, var, eq

# Check if 'x' is the parent of 'y'
def parent(x, y):
    return conde([father(x, y)], [mother(x, y)])

# Check if 'x' is the grandparent of 'y'
def grandparent(x, y):
    temp = var()
    return conde((parent(x, temp), parent(temp, y)))

# Check for sibling relationship between 'a' and 'b'
def sibling(x, y):
    temp = var()
    return conde((parent(temp, x), parent(temp, y)))

# Check if x is y's uncle
def uncle(x, y):
    temp = var()
    return conde((father(temp, x), grandparent(temp, y)))

if __name__ == '__main__':
    father = Relation()
    mother = Relation()
    with open('relationships.json') as f:
        d = json.loads(f.read())
    for item in d['father']:
        facts(father, (list(item.keys())[0], list(item.values())[0]))
    for item in d['mother']:
        facts(mother, (list(item.keys())[0], list(item.values())[0]))
    x = var()
    # John's children
    name = 'John'
    output = run(0, x, father(name, x))
    print("\nList of " + name + "'s children:")
    for item in output:
        print(item)
    # William's mother
    name = 'William'
    output = run(0, x, mother(x, name))[0]
    print("\n" + name + "'s mother:\n" + output)
    # Adam's parents
    name = 'Adam'
    output = run(0, x, parent(x, name))
    print("\nList of " + name + "'s parents:")
    for item in output:
        print(item)
    # Wayne's grandparents
    name = 'Wayne'
    output = run(0, x, grandparent(x, name))
    print("\nList of " + name + "'s grandparents:")
    for item in output:
        print(item)
    # Megan's grandchildren
    name = 'Megan'
    output = run(0, x, grandparent(name, x))
    print("\nList of " + name + "'s grandchildren:")
    for item in output:
        print(item)
    # David's siblings
    name = 'David'
    output = run(0, x, sibling(x, name))
    siblings = [x for x in output if x != name]
    print("\nList of " + name + "'s siblings:")
    for item in siblings:
        print(item)
    # Tiffany's uncles
    name = 'Tiffany'
    name_father = run(0, x, father(x, name))[0]
    output = run(0, x, uncle(x, name))
    output = [x for x in output if x != name_father]
    print("\nList of " + name + "'s uncles:")
    for item in output:
        print(item)
    # All spouses
    a, b, c = var(), var(), var()
    output = run(0, (a, b), (father, a, c), (mother, b, c))
    print("\nList of all spouses:")
    for item in output:
        print('Husband:', item[0], '<==> Wife:', item[1])
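The script reads its facts from relationships.json, which is not reproduced in this manual. A minimal illustrative file, consistent with how the script parses it (lists of single-pair {parent: child} objects under 'father' and 'mother') and with the names queried above, might look like the following; the mothers other than Megan are placeholder names:
{
    "father": [
        {"John": "William"},
        {"John": "David"},
        {"John": "Adam"},
        {"David": "Wayne"},
        {"David": "Tiffany"}
    ],
    "mother": [
        {"Megan": "William"},
        {"Megan": "David"},
        {"Megan": "Adam"},
        {"Lily": "Wayne"},
        {"Lily": "Tiffany"}
    ]
}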
Practical-3
Python script for building a puzzle solver.
CODE:
import pygame, sys, random
from pygame.locals import *

# Create the constants (go ahead and experiment with different values)
BOARDWIDTH = 4  # number of columns in the board
BOARDHEIGHT = 4 # number of rows in the board
TILESIZE = 80
WINDOWWIDTH = 640
WINDOWHEIGHT = 480
FPS = 30
BLANK = None
# R G B
BLACK = ( 0, 0, 0)
WHITE = (255, 255, 255)
BRIGHTBLUE = ( 0, 50, 255)
DARKTURQUOISE = ( 3, 54, 73)
BLUE = ( 0, 50, 255)
GREEN = ( 0, 128, 0)
RED = (255, 0, 0)
BGCOLOR = DARKTURQUOISE
TILECOLOR = BLUE
TEXTCOLOR = WHITE
BORDERCOLOR = RED
BASICFONTSIZE = 20
TEXT = GREEN
BUTTONCOLOR = WHITE
BUTTONTEXTCOLOR = BLACK
MESSAGECOLOR = WHITE
XMARGIN = int((WINDOWWIDTH - (TILESIZE * BOARDWIDTH + (BOARDWIDTH - 1))) / 2)
YMARGIN = int((WINDOWHEIGHT - (TILESIZE * BOARDHEIGHT + (BOARDHEIGHT - 1))) / 2)
UP = 'up'
DOWN = 'down'
LEFT = 'left'
RIGHT = 'right'
def main():
    global FPSCLOCK, DISPLAYSURF, BASICFONT, RESET_SURF, RESET_RECT, NEW_SURF, NEW_RECT, SOLVE_SURF, SOLVE_RECT

    pygame.init()
    FPSCLOCK = pygame.time.Clock()
    DISPLAYSURF = pygame.display.set_mode((WINDOWWIDTH, WINDOWHEIGHT))
    pygame.display.set_caption('Slide Puzzle')
    BASICFONT = pygame.font.Font('freesansbold.ttf', BASICFONTSIZE)

    # Store the option buttons and their rectangles in OPTIONS.
    RESET_SURF, RESET_RECT = makeText('Reset', TEXT, BGCOLOR, WINDOWWIDTH - 120, WINDOWHEIGHT - 310)
    NEW_SURF, NEW_RECT = makeText('New Game', TEXT, BGCOLOR, WINDOWWIDTH - 120, WINDOWHEIGHT - 280)
    SOLVE_SURF, SOLVE_RECT = makeText('Solve', TEXT, BGCOLOR, WINDOWWIDTH - 120, WINDOWHEIGHT - 250)

    mainBoard, solutionSeq = generateNewPuzzle(80)
    SOLVEDBOARD = getStartingBoard()  # a solved board is the same as the board in a start state.
    allMoves = []  # list of moves made from the solved configuration

    while True:  # main game loop
        slideTo = None  # the direction, if any, a tile should slide
        msg = 'Click tile or press arrow keys to slide.'  # contains the message to show in the upper left corner.
        if mainBoard == SOLVEDBOARD:
            msg = 'Solved!'

        drawBoard(mainBoard, msg)

        checkForQuit()
        for event in pygame.event.get():  # event handling loop
            if event.type == MOUSEBUTTONUP:
                spotx, spoty = getSpotClicked(mainBoard, event.pos[0], event.pos[1])

                if (spotx, spoty) == (None, None):
                    # check if the user clicked on an option button
                    if RESET_RECT.collidepoint(event.pos):
                        resetAnimation(mainBoard, allMoves)  # clicked on Reset button
                        allMoves = []
                    elif NEW_RECT.collidepoint(event.pos):
                        mainBoard, solutionSeq = generateNewPuzzle(80)  # clicked on New Game button
                        allMoves = []
                    elif SOLVE_RECT.collidepoint(event.pos):
                        resetAnimation(mainBoard, solutionSeq + allMoves)  # clicked on Solve button
                        allMoves = []
                else:
                    # check if the clicked tile was next to the blank spot
                    blankx, blanky = getBlankPosition(mainBoard)
                    if spotx == blankx + 1 and spoty == blanky:
                        slideTo = LEFT
                    elif spotx == blankx - 1 and spoty == blanky:
                        slideTo = RIGHT
                    elif spotx == blankx and spoty == blanky + 1:
                        slideTo = UP
                    elif spotx == blankx and spoty == blanky - 1:
                        slideTo = DOWN
            elif event.type == KEYUP:
                # check if the user pressed a key to slide a tile
                if event.key in (K_LEFT, K_a) and isValidMove(mainBoard, LEFT):
                    slideTo = LEFT
                elif event.key in (K_RIGHT, K_d) and isValidMove(mainBoard, RIGHT):
                    slideTo = RIGHT
                elif event.key in (K_UP, K_w) and isValidMove(mainBoard, UP):
                    slideTo = UP
                elif event.key in (K_DOWN, K_s) and isValidMove(mainBoard, DOWN):
                    slideTo = DOWN

        if slideTo:
            slideAnimation(mainBoard, slideTo, 'Click tile or press arrow keys to slide.', 8)  # show slide on screen
            makeMove(mainBoard, slideTo)
            allMoves.append(slideTo)  # record the slide
        pygame.display.update()
        FPSCLOCK.tick(FPS)
def terminate():
    pygame.quit()
    sys.exit()

def checkForQuit():
    for event in pygame.event.get(QUIT):  # get all the QUIT events
        terminate()  # terminate if any QUIT events are present
    for event in pygame.event.get(KEYUP):  # get all the KEYUP events
        if event.key == K_ESCAPE:
            terminate()  # terminate if the KEYUP event was for the Esc key
        pygame.event.post(event)  # put the other KEYUP event objects back
def getStartingBoard():
    # Return a board data structure with tiles in the solved state.
    # For example, if BOARDWIDTH and BOARDHEIGHT are both 3, this function
    # returns [[1, 4, 7], [2, 5, 8], [3, 6, BLANK]]
    counter = 1
    board = []
    for x in range(BOARDWIDTH):
        column = []
        for y in range(BOARDHEIGHT):
            column.append(counter)
            counter += BOARDWIDTH
        board.append(column)
        counter -= BOARDWIDTH * (BOARDHEIGHT - 1) + BOARDWIDTH - 1
    board[BOARDWIDTH-1][BOARDHEIGHT-1] = BLANK
    return board

def getBlankPosition(board):
    # Return the x and y of board coordinates of the blank space.
    for x in range(BOARDWIDTH):
        for y in range(BOARDHEIGHT):
            if board[x][y] == BLANK:
                return (x, y)
def makeMove(board, move):
    # This function does not check if the move is valid.
    blankx, blanky = getBlankPosition(board)

    if move == UP:
        board[blankx][blanky], board[blankx][blanky + 1] = board[blankx][blanky + 1], board[blankx][blanky]
    elif move == DOWN:
        board[blankx][blanky], board[blankx][blanky - 1] = board[blankx][blanky - 1], board[blankx][blanky]
    elif move == LEFT:
        board[blankx][blanky], board[blankx + 1][blanky] = board[blankx + 1][blanky], board[blankx][blanky]
    elif move == RIGHT:
        board[blankx][blanky], board[blankx - 1][blanky] = board[blankx - 1][blanky], board[blankx][blanky]

def isValidMove(board, move):
    blankx, blanky = getBlankPosition(board)
    return (move == UP and blanky != len(board[0]) - 1) or \
           (move == DOWN and blanky != 0) or \
           (move == LEFT and blankx != len(board) - 1) or \
           (move == RIGHT and blankx != 0)
def getRandomMove(board, lastMove=None):
    # start with a full list of all four moves
    validMoves = [UP, DOWN, LEFT, RIGHT]

    # remove moves from the list as they are disqualified
    if lastMove == UP or not isValidMove(board, DOWN):
        validMoves.remove(DOWN)
    if lastMove == DOWN or not isValidMove(board, UP):
        validMoves.remove(UP)
    if lastMove == LEFT or not isValidMove(board, RIGHT):
        validMoves.remove(RIGHT)
    if lastMove == RIGHT or not isValidMove(board, LEFT):
        validMoves.remove(LEFT)

    # return a random move from the list of remaining moves
    return random.choice(validMoves)

def getLeftTopOfTile(tileX, tileY):
    left = XMARGIN + (tileX * TILESIZE) + (tileX - 1)
    top = YMARGIN + (tileY * TILESIZE) + (tileY - 1)
    return (left, top)

def getSpotClicked(board, x, y):
    # from the x & y pixel coordinates, get the x & y board coordinates
    for tileX in range(len(board)):
        for tileY in range(len(board[0])):
            left, top = getLeftTopOfTile(tileX, tileY)
            tileRect = pygame.Rect(left, top, TILESIZE, TILESIZE)
            if tileRect.collidepoint(x, y):
                return (tileX, tileY)
    return (None, None)
def drawTile(tilex, tiley, number, adjx=0, adjy=0):
    # draw a tile at board coordinates tilex and tiley, optionally a few
    # pixels over (determined by adjx and adjy)
    left, top = getLeftTopOfTile(tilex, tiley)
    pygame.draw.rect(DISPLAYSURF, TILECOLOR, (left + adjx, top + adjy, TILESIZE, TILESIZE))
    textSurf = BASICFONT.render(str(number), True, TEXTCOLOR)
    textRect = textSurf.get_rect()
    textRect.center = left + int(TILESIZE / 2) + adjx, top + int(TILESIZE / 2) + adjy
    DISPLAYSURF.blit(textSurf, textRect)

def makeText(text, color, bgcolor, top, left):
    # create the Surface and Rect objects for some text.
    textSurf = BASICFONT.render(text, True, color, bgcolor)
    textRect = textSurf.get_rect()
    textRect.topleft = (top, left)
    return (textSurf, textRect)

def drawBoard(board, message):
    DISPLAYSURF.fill(BGCOLOR)
    if message:
        textSurf, textRect = makeText(message, MESSAGECOLOR, BGCOLOR, 5, 5)
        DISPLAYSURF.blit(textSurf, textRect)

    for tilex in range(len(board)):
        for tiley in range(len(board[0])):
            if board[tilex][tiley]:
                drawTile(tilex, tiley, board[tilex][tiley])

    left, top = getLeftTopOfTile(0, 0)
    width = BOARDWIDTH * TILESIZE
    height = BOARDHEIGHT * TILESIZE
    pygame.draw.rect(DISPLAYSURF, BORDERCOLOR, (left - 5, top - 5, width + 11, height + 11), 4)

    DISPLAYSURF.blit(RESET_SURF, RESET_RECT)
    DISPLAYSURF.blit(NEW_SURF, NEW_RECT)
    DISPLAYSURF.blit(SOLVE_SURF, SOLVE_RECT)
def slideAnimation(board, direction, message, animationSpeed):
    # Note: This function does not check if the move is valid.
    blankx, blanky = getBlankPosition(board)
    if direction == UP:
        movex = blankx
        movey = blanky + 1
    elif direction == DOWN:
        movex = blankx
        movey = blanky - 1
    elif direction == LEFT:
        movex = blankx + 1
        movey = blanky
    elif direction == RIGHT:
        movex = blankx - 1
        movey = blanky

    # prepare the base surface
    drawBoard(board, message)
    baseSurf = DISPLAYSURF.copy()
    # draw a blank space over the moving tile on the baseSurf Surface.
    moveLeft, moveTop = getLeftTopOfTile(movex, movey)
    pygame.draw.rect(baseSurf, BGCOLOR, (moveLeft, moveTop, TILESIZE, TILESIZE))

    for i in range(0, TILESIZE, animationSpeed):
        # animate the tile sliding over
        checkForQuit()
        DISPLAYSURF.blit(baseSurf, (0, 0))
        if direction == UP:
            drawTile(movex, movey, board[movex][movey], 0, -i)
        if direction == DOWN:
            drawTile(movex, movey, board[movex][movey], 0, i)
        if direction == LEFT:
            drawTile(movex, movey, board[movex][movey], -i, 0)
        if direction == RIGHT:
            drawTile(movex, movey, board[movex][movey], i, 0)

        pygame.display.update()
        FPSCLOCK.tick(FPS)
def generateNewPuzzle(numSlides):
    # From a starting configuration, make numSlides number of moves (and
    # animate these moves).
    sequence = []
    board = getStartingBoard()
    drawBoard(board, '')
    pygame.display.update()
    pygame.time.wait(500)  # pause 500 milliseconds for effect
    lastMove = None
    for i in range(numSlides):
        move = getRandomMove(board, lastMove)
        slideAnimation(board, move, 'Generating new puzzle...', animationSpeed=int(TILESIZE / 3))
        makeMove(board, move)
        sequence.append(move)
        lastMove = move
    return (board, sequence)
def resetAnimation(board, allMoves):
    # make all of the moves in allMoves in reverse.
    revAllMoves = allMoves[:]  # gets a copy of the list
    revAllMoves.reverse()

    for move in revAllMoves:
        if move == UP:
            oppositeMove = DOWN
        elif move == DOWN:
            oppositeMove = UP
        elif move == RIGHT:
            oppositeMove = LEFT
        elif move == LEFT:
            oppositeMove = RIGHT
        slideAnimation(board, oppositeMove, '', animationSpeed=int(TILESIZE / 2))
        makeMove(board, oppositeMove)

if __name__ == '__main__':
    main()
OUTPUT:
Practical-4
Implementation of uninformed search techniques in Python.
Uninformed Search Algorithms
Uninformed search is a class of general-purpose search algorithms which operate in a brute-force way. Uninformed search algorithms have no additional information about the state or search space other than how to traverse the tree, so they are also called blind search.
Following are the various types of uninformed search algorithms:
1. Breadth-first Search
2. Depth-first Search
3. Depth-limited Search
4. Iterative deepening depth-first search
5. Uniform cost search
6. Bidirectional Search
1. Breadth-first Search:
Breadth-first search is the most common search strategy for traversing a tree or graph. This algorithm searches breadthwise in a tree or graph, so it is called breadth-first search.
The BFS algorithm starts searching from the root node of the tree and expands all successor nodes at the current level before moving to nodes of the next level.
The breadth-first search algorithm is an example of a general-graph search algorithm.
Breadth-first search is implemented using a FIFO queue data structure.
Example:
In the tree structure below, we show the traversal of the tree using the BFS algorithm from the root node S to the goal node K. The BFS algorithm traverses in layers, so it follows the path shown by the dotted arrow, and the traversed path will be:
1. S---> A--->B---->C--->D---->G--->H--->E---->F---->I---->K
Time Complexity: The time complexity of BFS can be obtained from the number of nodes traversed until the shallowest goal node, where d is the depth of the shallowest solution and b is the branching factor (the number of successors at each node):
T(b) = 1 + b^2 + b^3 + ... + b^d = O(b^d)
Space Complexity: The space complexity of BFS is given by the memory size of the frontier, which is O(b^d).
Completeness: BFS is complete, which means if the shallowest goal node is at some finite
depth, then BFS will find a solution.
Optimality: BFS is optimal if the path cost is a non-decreasing function of the depth of the node.
CODE:
graph = {
'5' : ['3','7'],
'3' : ['2', '4'],
'7' : ['8'],
'2' : [],
'4' : ['8'],
'8' : []
}
visited = []  # List for visited nodes
queue = []    # Initialize a queue

def bfs(visited, graph, node):  # function for BFS
    visited.append(node)
    queue.append(node)
    while queue:  # loop to visit each node
        m = queue.pop(0)
        print(m, end=" ")
        for neighbour in graph[m]:
            if neighbour not in visited:
                visited.append(neighbour)
                queue.append(neighbour)
# Driver Code
print("Following is the Breadth-First Search")
bfs(visited, graph, '5') # function calling
OUTPUT:
Following is the Breadth-First Search
5 3 7 2 4 8
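The practical lists depth-first search as well; for comparison, a minimal recursive DFS over the same graph dictionary (an addition to the original listing, not part of it) could be written as:
visited_dfs = set()  # set to keep track of visited nodes

def dfs(visited, graph, node):
    if node not in visited:
        print(node, end=" ")
        visited.add(node)
        for neighbour in graph[node]:
            dfs(visited, graph, neighbour)

print("\nFollowing is the Depth-First Search")
dfs(visited_dfs, graph, '5')  # expected order for this graph: 5 3 2 4 8 7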
Practical-5
Implementation of heuristic search techniques in Python.
from constraint import Problem

problem = Problem()
problem.addVariable('a', range(10))
problem.addVariable('b', range(10))
problem.addConstraint(lambda a, b: a * 2 == b)
solutions = problem.getSolutions()
print(solutions)
[{'a': 4, 'b': 8}, {'a': 3, 'b': 6}, {'a': 2, 'b': 4}, {'a': 1, 'b': 2}, {'a': 0, 'b': 0}]
def magic_square(matrix_ms):
    iSize = len(matrix_ms[0])
    sum_list = []
    # sum of each column
    for col in range(iSize):
        sum_list.append(sum(row[col] for row in matrix_ms))
    # sum of each row
    sum_list.extend([sum(lines) for lines in matrix_ms])
    # sum of the main diagonal
    dlResult = 0
    for i in range(0, iSize):
        dlResult += matrix_ms[i][i]
    sum_list.append(dlResult)
    # sum of the anti-diagonal
    drResult = 0
    for i in range(iSize - 1, -1, -1):
        drResult += matrix_ms[i][iSize - 1 - i]
    sum_list.append(drResult)
    if len(set(sum_list)) > 1:
        return False
    return True
OUTPUT:
print(magic_square([[1,2,3], [4,5,6], [7,8,9]]))
You can observe that the output is False, as the rows, columns and diagonals do not all sum to the same number.
For a genuine magic square the output is True, as every sum equals the same number, which is 15 here (see the example below).
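The True case needs an actual magic square as input; a standard 3x3 magic square (chosen here for illustration, not necessarily the matrix used in the original) gives:
print(magic_square([[2, 7, 6], [9, 5, 1], [4, 3, 8]]))
# every row, column and diagonal sums to 15, so this prints True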
Practical-6
Python script for tokenizing text data.
CODE:
Line Tokenization
In the example below we divide a given text into separate sentences by using the function sent_tokenize.
import nltk
sentence_data = "The First sentence is about Python. The Second: about Django. You
can learn Python,Django and Data Ananlysis here. "
nltk_tokens = nltk.sent_tokenize(sentence_data)
print (nltk_tokens)
OUTPUT:
Non-English Tokenization
In the below example we tokenize the German text.
import nltk
german_tokenizer = nltk.data.load('tokenizers/punkt/german.pickle')
german_tokens=german_tokenizer.tokenize('Wie geht es Ihnen? Gut, danke.')
print(german_tokens)
OUTPUT:
Word Tokenization
We tokenize the words using the word_tokenize function available as part of nltk.
import nltk
word_data = "It originated from the idea that there are readers who prefer learning new skills
from the comforts of their drawing rooms"
nltk_tokens = nltk.word_tokenize(word_data)
print (nltk_tokens)
OUTPUT:
When we run the above program we get the following output −
['It', 'originated', 'from', 'the', 'idea', 'that', 'there', 'are', 'readers',
'who', 'prefer', 'learning', 'new', 'skills', 'from', 'the',
'comforts', 'of', 'their', 'drawing', 'rooms']
Practical-7
Extracting the frequency of terms using a Bag of Words model
Code:
1. A Quick Example
Let’s look at an easy example to understand the concepts previously explained. We could be
interested in analyzing the reviews about Game of Thrones:
Review 1: Game of Thrones is an amazing tv series!
Review 2: Game of Thrones is the best tv series!
Review 3: Game of Thrones is so great
In the table, I show all the calculations to obtain the Bag-Of-Words approach:
Each row corresponds to a different review, while the columns are the unique words contained in the three documents.
2. Implementation with Python
Let's import the libraries and define the variables that contain the reviews:
import pandas as pd
import numpy as np
import collections
doc1 = 'Game of Thrones is an amazing tv series!'
doc2 = 'Game of Thrones is the best tv series!'
doc3 = 'Game of Thrones is so great'
We need to remove punctuation, one of the steps I showed in the previous post about text pre-processing. We also transform each string into a list composed of words, as sketched below.
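The pre-processing snippet itself is not shown in this listing; a minimal sketch of that step, assuming simple regex punctuation removal and lowercasing, could be:
import re

# strip punctuation, lowercase, and split each review into a list of words
l_doc1 = re.sub(r"[^a-zA-Z0-9]", " ", doc1.lower()).split()
l_doc2 = re.sub(r"[^a-zA-Z0-9]", " ", doc2.lower()).split()
l_doc3 = re.sub(r"[^a-zA-Z0-9]", " ", doc3.lower()).split()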
After that, we obtain the Vocabulary, or wordset, which is composed of the unique words found in the three reviews.
wordset12 = np.union1d(l_doc1,l_doc2)
wordset = np.union1d(wordset12,l_doc3)
print(wordset)
We can finally define the function to extract the features of each document. Let's explain it step by step:
we define a dictionary whose keys are the words of the Vocabulary, each with the value 0.
we iterate over the words contained in the document and assign to each word its frequency within the review.
def calculateBOW(wordset, l_doc):
    tf_diz = dict.fromkeys(wordset, 0)
    for word in l_doc:
        tf_diz[word] = l_doc.count(word)
    return tf_diz
We can finally obtain the Bag-of-Words representations for the reviews. In the end, we
obtain a data frame, where each row corresponds to the extracted features of each document.
bow1 = calculateBOW(wordset,l_doc1)
bow2 = calculateBOW(wordset,l_doc2)
bow3 = calculateBOW(wordset,l_doc3)
df_bow = pd.DataFrame([bow1,bow2,bow3])
df_bow.head()
CountVectorizer provides the get_feature_names method, which contains the unique words of the vocabulary, taken into account later to create the desired document-term matrix X. To have an easier visualization, we transform it into a pandas data frame.
from sklearn.feature_extraction.text import CountVectorizer
# the vectorizer definition is missing from the extracted listing; a default CountVectorizer is assumed here
vectorizer = CountVectorizer()
X = vectorizer.fit_transform([doc1,doc2,doc3])
df_bow_sklearn = pd.DataFrame(X.toarray(),columns=vectorizer.get_feature_names())
df_bow_sklearn.head()
Results obtained with Sklearn
We can also do another experiment. One possibility is to take into account the bigrams,
instead of the unigrams. For example, the two words, “tv series”, match very well together
and are repeated in every review:
from sklearn.feature_extraction.text import CountVectorizer
vectorizer = CountVectorizer(stop_words='english',ngram_range=(2,2))
X = vectorizer.fit_transform([doc1,doc2,doc3])
df_bow_sklearn = pd.DataFrame(X.toarray(),columns=vectorizer.get_feature_names())
df_bow_sklearn.head()
Isn't the combination of words interesting? It makes sense for "tv series", while the "game thrones" bigram loses its meaning because the word "of" is dropped as a stop word. So, in some contexts, removing all the stop words isn't always convenient.
Practical-8
Predict the category to which a given piece of text belongs.
Code:
# import SentimentIntensityAnalyzer class
# from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer
from nltk.sentiment.vader import SentimentIntensityAnalyzer
import nltk
from PIL import Image
# nltk.download()
from tkinter import *
from tkinter import messagebox
root = Tk()
root.title("Sentiment Analysis")
root.configure(background='#FFE4C4')
label1 = Label(root, text="SENTIMENT ANALYSIS!", background='#FFE4C4', fg="#D2691E", font="Arial 50 bold").place(x=380, y=30)
label1 = Label(root, text="Text:", background='#FFE4C4', fg='#D2691E', font="Arial 35 bold").place(x=550, y=240)
large_font = ('verdana', 30)
entry1Var = StringVar(value='large_font')
ent1 = Entry(root, font=large_font)
ent1.place(x=750, y=250)
analyzer= SentimentIntensityAnalyzer()
# function to compute and display the sentiment scores
def sentiment_scores():
    sentence = ent1.get()
    sid_obj = SentimentIntensityAnalyzer()
    sentiment_dict = sid_obj.polarity_scores(sentence)
    print("Overall Sentiment Dictionary is:", sentiment_dict)
    print("Sentence was rated as:", sentiment_dict['neg']*100, "% Negative")
    print("Sentence was rated as:", sentiment_dict['neu']*100, "% Neutral")
    print("Sentence was rated as:", sentiment_dict['pos']*100, "% Positive")
    print("Sentence was overall rated as:", end=' ')
    if sentiment_dict['compound'] >= 0.05:
        print("Positive")
        result = "Positive"
    elif sentiment_dict['compound'] <= -0.05:
        print("Negative")
        result = "Negative"
    else:
        print("Neutral")
        result = "Neutral"
    messagebox.showinfo(title='Result', message="Sentence was overall rated as:%s" % (result))

button1 = Button(root, text="predict", command=sentiment_scores, width=10, background='#A0522D', font="Arial 15", fg="#D2691E")
button1.place(x=600, y=400)
button2 = Button(root, text="Quit", command=root.destroy, width=10, background='#A0522D', font="Arial 15", fg="#D2691E")
button2.place(x=800, y=400)
root.mainloop()
OUTPUT:
Practical-9
Python code for visualizing audio speech signal.
Code:
from google.colab import drive
drive.mount('/GD')
!pip install librosa
# Load imports
import IPython.display as ipd
import librosa
import librosa.display
import matplotlib.pyplot as plt
In [0]:
ipd.Audio('/GD/My Drive/.../audio/numb.m4a')
In [0]:
ipd.Audio('/GD/My Drive/.../audio/Michael Jackson Dangerous.m4a')
In [0]:
ipd.Audio('/GD/My Drive/.../audio/BlueOneLove.m4a')
In [0]:
# Numb - Linkin Park
filename1 = '/GD/My Drive/../audio/numb.m4a'
plt.figure(figsize=(15,4))
data1,sample_rate1 = librosa.load(filename1, sr=22050, mono=True, offset=0.0, duration=50, res_type='kaiser_best')
librosa.display.waveplot(data1,sr=sample_rate1, max_points=50000.0, x_axis='time', offset=0.0, max_sr=1000)
Out[0]:
<matplotlib.collections.PolyCollection at 0x7f13feecc278>
In [0]:
print(data1)
print(len(data1))
print(sample_rate1)
[0. 0. 0. ... 0.12693058 0.10350046 0.28775436]
1102500
22050
In [0]:
# Dangerous - Michael Jackson
filename2 = '/GD/My Drive/.../audio/Michael Jackson Dangerous.m4a'
plt.figure(figsize=(15,4))
data2,sample_rate2 = librosa.load(filename2, sr=22050, mono=True, offset=0.0, duration=180, res_type='kaiser_best')
librosa.display.waveplot(data2,sr=sample_rate2, max_points=50000.0, x_axis='time', offset=0.0, max_sr=1000)
Out[0]:
<matplotlib.collections.PolyCollection at 0x7f13fee24898>
In [0]:
print(data2)
print(len(data2))
print(sample_rate2)
[0. 0. 0. ... 0.18555094 0.19224082 0.13644062]
3969000
22050
In [0]:
# One Love - Blue
filename3 = '/GD/My Drive/.../audio/BlueOneLove.m4a'
plt.figure(figsize=(15,4))
data3,sample_rate3 = librosa.load(filename3, mono=True, offset=0.0, duration=180, res_type='kaiser_best')
librosa.display.waveplot(data3,sr=sample_rate3, max_points=50000.0, x_axis='time', offset=0.0, max_sr=1000)
Out[0]:
<matplotlib.collections.PolyCollection at 0x7f13fed6cc18>
In [0]:
print(data3)
print(len(data3))
print(sample_rate3)
[0. 0. 0. ... 0.16682993 0.15884563 0.18074134]
1191424
22050
Practical-10
Python code for generating audio signals
Code:
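The code listing for this practical did not survive extraction. A minimal sketch that writes a sine tone as headerless signed 8-bit mono samples at 44.1 kHz, matching the SoX flags shown below (the 440 Hz frequency, 2-second duration and file name are illustrative assumptions), could be:
import numpy as np

# Parameters assumed for illustration; only the sample format must match the SoX flags below
duration = 2.0          # seconds
sampling_rate = 44100   # Hz  (-r 44.1k)
frequency = 440.0       # Hz, tone to generate

t = np.arange(int(duration * sampling_rate)) / sampling_rate
signal = np.sin(2 * np.pi * frequency * t)

# Scale to the signed 8-bit range (-e signed -b 8) and write raw samples
# (-t raw, -c 1); despite the .wav name, the file contains headerless data
samples = (signal * 127).astype(np.int8)
samples.tofile('test.wav')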
Output:
How to play the generated audio file on the computer?
1. Command line using SoX
play -t raw -r 44.1k -e signed -b 8 -c 1 test.wav
where -r = sampling rate -b = sampling precision (bits) -c = number of channels
2. Use Audacity (check video)
Practical-11
Create a perceptron with an appropriate number of inputs and outputs. Train it using the fixed increment learning algorithm until no change in weights is required. Output the final weights.
# Make a prediction with weights
def predict(row, weights):
    activation = weights[0]
    for i in range(len(row)-1):
        activation += weights[i + 1] * row[i]
    return 1.0 if activation >= 0.0 else 0.0

# Estimate Perceptron weights using stochastic gradient descent
def train_weights(train, l_rate, n_epoch):
    weights = [0.0 for i in range(len(train[0]))]
    for epoch in range(n_epoch):
        sum_error = 0.0
        for row in train:
            prediction = predict(row, weights)
            error = row[-1] - prediction
            sum_error += error**2
            weights[0] = weights[0] + l_rate * error
            for i in range(len(row)-1):
                weights[i + 1] = weights[i + 1] + l_rate * error * row[i]
        print('>epoch=%d, lrate=%.3f, error=%.3f' % (epoch, l_rate, sum_error))
    return weights
# Calculate weights
dataset = [[2.7810836,2.550537003,0],
[1.465489372,2.362125076,0],
[3.396561688,4.400293529,0],
[1.38807019,1.850220317,0],
[3.06407232,3.005305973,0],
[7.627531214,2.759262235,1],
[5.332441248,2.088626775,1],
[6.922596716,1.77106367,1],
[8.675418651,-0.242068655,1],
[7.673756466,3.508563011,1]]
l_rate = 0.1
n_epoch = 5
weights = train_weights(dataset, l_rate, n_epoch)
print(weights)
Output:
>epoch=0, lrate=0.100, error=2.000
>epoch=1, lrate=0.100, error=1.000
>epoch=2, lrate=0.100, error=0.000
>epoch=3, lrate=0.100, error=0.000
>epoch=4, lrate=0.100, error=0.000
[-0.1, 0.20653640140000007, -0.23418117710000003]
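As a quick check (not part of the original listing), the learned weights can be applied back to the training data with the predict function defined above:
for row in dataset:
    prediction = predict(row, weights)
    print("Expected=%d, Predicted=%d" % (row[-1], prediction))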
Practical-12
Implement AND function using ADALINE with bipolar inputs and outputs.
import numpy as np
# the features for the AND model; here we have
# taken the possible values for combinations of
# two inputs
features = np.array([
    [-1, -1],
    [-1, 1],
    [1, -1],
    [1, 1]
])
# labels for the AND model; here the output for
# the features is taken as an array
labels = np.array([1, -1, -1, -1])
# to print the features and the labels for
# which the model has to be trained
print(features, labels)
# initialise weights, bias , learning rate, epoch
weight = [0.6, 0.5]
bias = 0.2
learning_rate = 0.3
epoch = 2
for i in range(epoch):
    # epoch is the number of times the model is trained
    # with the same data
    print("epoch :", i+1)
    # variable to check if there is no change in previous
    # weight and present calculated weight
    # initial error is kept as 0
    sum_squared_error = 0.0
    # for each of the possible inputs given in the features
    for j in range(features.shape[0]):
        # actual output to be obtained
        actual = labels[j]
        # the value of the two features as given in the
        # features array
        x1 = features[j][0]
        x2 = features[j][1]
        # net unit value computation performed to obtain the
        # sum of features multiplied with their weights
        unit = (x1 * weight[0]) + (x2 * weight[1]) + bias
        # error is computed so as to update the weights
        error = actual - unit
        # print statement to print the actual value, predicted
        # value and the error
        print("error =", error)
        # summation of squared error is calculated
        sum_squared_error += error * error
        # updation of weights: adding the product of learning rate,
        # error and feature value
        weight[0] += learning_rate * error * x1
        weight[1] += learning_rate * error * x2
        # updation of bias: adding the product of learning rate and error
        bias += learning_rate * error
    print("sum of squared error = ", sum_squared_error/4, "\n\n")

Output:
[[-1 -1]
 [-1  1]
 [ 1 -1]
 [ 1  1]] [ 1 -1 -1 -1]
epoch : 1
error = 1.9000000000000001
error = -1.67
error = -2.371
error = -0.5176999999999999
sum of squared error = 3.0721385725000006
epoch : 2
error = 1.2469899999999998
error = -1.097087
error = -0.7850131000000002
error = 0.88695703
sum of squared error = 1.0403805714767576
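A small verification sketch (not in the original listing): after training, the net input can be thresholded at zero to obtain bipolar predictions for each input pair:
# threshold the trained ADALINE's net input at zero to get bipolar outputs
for (x1, x2), target in zip(features, labels):
    unit = (x1 * weight[0]) + (x2 * weight[1]) + bias
    print((x1, x2), "target:", target, "predicted:", 1 if unit >= 0 else -1)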
Practical-13
Implement AND function using MADALINE with bipolar inputs and outputs.
import numpy as np
# the features for the and model , here we have
# taken the possible values for combination of
# two inputs
features = np.array([
    [-1, -1],
    [-1, 1],
    [1, -1],
    [1, 1]
])
# labels for the and model, here the output for
# the features is taken as an array
labels = np.array([1, -1, -1, -1])
# to print the features and the labels for
# which the model has to be trained
print(features, labels)
# initialise weights, bias , learning rate, epoch
weight = [0.5, 0.5]
bias = 0.1
learning_rate = 0.2
epoch = 10
for i in range(epoch):
    # epoch is the number of times the model is trained
    # with the same data
    print("epoch :", i+1)
    # variable to check if there is no change in previous
    # weight and present calculated weight
    # initial error is kept as 0
    sum_squared_error = 0.0
    # for each of the possible inputs given in the features
    for j in range(features.shape[0]):
        # actual output to be obtained
        actual = labels[j]
        # the value of the two features as given in the
        # features array
        x1 = features[j][0]
        x2 = features[j][1]
        # net unit value computation performed to obtain the
        # sum of features multiplied with their weights
        unit = (x1 * weight[0]) + (x2 * weight[1]) + bias
        # error is computed so as to update the weights
        error = actual - unit
        # print statement to print the actual value, predicted
        # value and the error
        print("error =", error)
        # summation of squared error is calculated
        sum_squared_error += error * error
        # updation of weights: adding the product of learning rate,
        # error and feature value
        weight[0] += learning_rate * error * x1
        weight[1] += learning_rate * error * x2
        # updation of bias: adding the product of learning rate and error
        bias += learning_rate * error
    print("sum of squared error = ", sum_squared_error/4, "\n\n")

Output:
[[-1 -1]
[-1 1]
[ 1 -1]
[ 1 1]] [ 1 -1 -1 -1]
epoch : 1
error = 1.9
error = -1.48
error = -1.776
error = -1.0688
sum of squared error = 2.52422736
epoch : 2
error = 1.19744
error = -0.972928
error = -0.9307136
error = 0.19269632000000003
sum of squared error = 0.8209527809376256
epoch : 3
error = 0.898243584
error = -0.7935019008
error = -0.7491738009599999
error = 0.565262385152
sum of squared error = 0.5793174377207398
epoch : 4
error = 0.7808850509824001
error = -0.7364650077388799
error = -0.716192009158656
error = 0.6728133676367873
sum of squared error = 0.5294427480312702
epoch : 5
error = 0.7374480972998247
error = -0.7198766979146055
error = -0.712504436233706
error = 0.7030911933443422
sum of squared error = 0.5160129885556626
epoch : 6
error = 0.7220737044184605
error = -0.715484545965144
error = -0.7131316632390716
error = 0.711374460062272
sum of squared error = 0.51197974041764
epoch : 7
error = 0.7168276156206818
error = -0.7144605661704628
error = -0.713785193666312
error = 0.7135644591164001
sum of squared error = 0.5107398177850964
epoch : 8
error = 0.7150930900389076
error = -0.7142727750325092
error = -0.7141001423040882
error = 0.7141189851216612
sum of squared error = 0.5103621656809864
epoch : 9
error = 0.7145356165072148
error = -0.7142600587995964
error = -0.7142229890073297
error = 0.7142513269114925
sum of squared error = 0.5102495037187852
epoch : 10
error = 0.7143611215465697
error = -0.7142711110129167
error = -0.7142659074971278
error = 0.71428015877592
sum of squared error = 0.5102167409596371
Practical-14
Construct and test an auto-associative network for input vectors using the Hebb rule.
def hebbian_learning(samples):
    print(f'{"INPUT":^5} {"TARGET":^9}{"WEIGHT CHANGES":^12}{"WEIGHTS":^16}')
    w1, w2, b = 0, 0, 0
    print(' ' * 45, f'({w1:2}, {w2:2}, {b:2})')
    for x1, x2, y in samples:
        w1 = w1 + x1 * y
        w2 = w2 + x2 * y
        b = b + y
        print(f'({x1:2}, {x2:2}) {y:2} ({x1:2}, {x2:2}, {y:2}) ({w1:2}, {w2:2}, {b:2})')
AND_samples = {
'binary_input_binary_output': [
[1, 1, 1],
[1, 0, 0],
[0, 1, 0],
[0, 0, 0] ],
'binary_input_bipolar_output': [
[1, 1, 1],
[1, 0, -1],
[0, 1, -1],
[0, 0, -1] ],
'bipolar_input_bipolar_output': [
[ 1, 1, 1],
[ 1, -1, -1],
[-1, 1, -1],
[-1, -1, -1] ]
}
print('----------------', 'HEBBIAN LEARNING', '---------------')
print('AND with Binary Input and Binary Output')
hebbian_learning(AND_samples['binary_input_binary_output'])
print('AND with Binary Input and Bipolar Output')
hebbian_learning(AND_samples['binary_input_bipolar_output'])
print('AND with Bipolar Input and Bipolar Output')
hebbian_learning(AND_samples['bipolar_input_bipolar_output'])
Output:
---------------- HEBBIAN LEARNING --------------------
AND with Binary Input and Binary Output
INPUT TARGET WEIGHT CHANGES WEIGHTS
( 0, 0, 0)
( 1, 1) 1 ( 1, 1, 1) ( 1, 1, 1)
( 1, 0) 0 ( 1, 0, 0) ( 1, 1, 1)
( 0, 1) 0 ( 0, 1, 0) ( 1, 1, 1)
( 0, 0) 0 ( 0, 0, 0) ( 1, 1, 1)
AND with Binary Input and Bipolar Output
INPUT TARGET WEIGHT CHANGES WEIGHTS
( 0, 0, 0)
( 1, 1) 1 ( 1, 1, 1) ( 1, 1, 1)
( 1, 0) -1 ( 1, 0, -1) ( 0, 1, 0)
( 0, 1) -1 ( 0, 1, -1) ( 0, 0, -1)
( 0, 0) -1 ( 0, 0, -1) ( 0, 0, -2)
AND with Bipolar Input and Bipolar Output
INPUT TARGET WEIGHT CHANGES WEIGHTS
( 0, 0, 0)
( 1, 1) 1 ( 1, 1, 1) ( 1, 1, 1)
( 1,-1) -1 ( 1, -1, -1) ( 0, 2, 0)
(-1, 1) -1 (-1, 1, -1) ( 1, 1, -1)
(-1,-1) -1 (-1, -1, -1) ( 2, 2, -2)
Practical-15
Construct and test an auto-associative network for input vectors using the outer product rule.
Program
clc;
clear;
x=[-1 -1 -1 -1;-1 -1 1 1];
t=[1 1 1 1];
w=zeros (4, 4);
for i=1:2
w=w + x(i,1:4)'*x(i,1:4);
end
yin = t*w;
for i=1:4
    if yin(i)>0
        y(i)=1;
    else
        y(i)=-1;
    end
end
disp ('The calculated weight matrix');
disp (w);
if x(1,1:4)==y(1:4) | x(2,1:4)==y(1:4)
disp ('The vector is a Known Vector');
else
    disp('The vector is an Unknown Vector');
end
Output
The calculated weight matrix
     2     2     0     0
     2     2     0     0
     0     0     2     2
     0     0     2     2
The vector is an unknown vector.
Practical-16
Construct and test hetero associative network for binary inputs and targets
Algorithm:
1. Enter input and output vector x and t
2. Initialize weight matrix.
3. Update weight matrix by using the formula wi(new)=wi(old)+xi*t
4. Display the calculated weight.
Program:
%Hetero-associative neural net for mapping input vectors to output vectors
clear;
clc;
x=[1 1 0 0 ; 1 0 1 0 ; 1 1 1 0 ; 0 1 1 0];
t=[1 0 ; 1 0 ; 0 1 ; 0 1];
w=zeros(4,2);
for i=1:4
w=w+x(i,1:4)'*t(i,1:2);
end
disp('Weight matrix:');
disp(w);
Output:
Weight matrix
2 1
1 2
1 2
0 0
Practical-17
Create a backpropagation network for a given input pattern and perform 3 epochs of operation.
# Calculate the derivative of a neuron's output (the sigmoid transfer derivative)
def transfer_derivative(output):
    return output * (1.0 - output)
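Only the derivative helper survived in this listing. A minimal sketch of the rest of a backpropagation network, reusing transfer_derivative above and running for 3 epochs, is given below; the single hidden layer, sigmoid units, layer sizes and the tiny two-row dataset are illustrative assumptions, not taken from the original.
from math import exp
from random import seed, random

# Initialize a network with one hidden layer (random weights, last weight is the bias)
def initialize_network(n_inputs, n_hidden, n_outputs):
    network = list()
    network.append([{'weights': [random() for _ in range(n_inputs + 1)]} for _ in range(n_hidden)])
    network.append([{'weights': [random() for _ in range(n_hidden + 1)]} for _ in range(n_outputs)])
    return network

# Weighted sum of inputs plus bias
def activate(weights, inputs):
    activation = weights[-1]
    for i in range(len(weights) - 1):
        activation += weights[i] * inputs[i]
    return activation

# Sigmoid transfer function
def transfer(activation):
    return 1.0 / (1.0 + exp(-activation))

# Forward propagate an input pattern through the network
def forward_propagate(network, row):
    inputs = row
    for layer in network:
        new_inputs = []
        for neuron in layer:
            neuron['output'] = transfer(activate(neuron['weights'], inputs))
            new_inputs.append(neuron['output'])
        inputs = new_inputs
    return inputs

# Backpropagate the error and store a delta in each neuron
def backward_propagate_error(network, expected):
    for i in reversed(range(len(network))):
        layer = network[i]
        errors = []
        if i != len(network) - 1:
            for j in range(len(layer)):
                error = 0.0
                for neuron in network[i + 1]:
                    error += neuron['weights'][j] * neuron['delta']
                errors.append(error)
        else:
            for j, neuron in enumerate(layer):
                errors.append(expected[j] - neuron['output'])
        for j, neuron in enumerate(layer):
            neuron['delta'] = errors[j] * transfer_derivative(neuron['output'])

# Update weights with gradient descent
def update_weights(network, row, l_rate):
    for i in range(len(network)):
        inputs = row[:-1] if i == 0 else [neuron['output'] for neuron in network[i - 1]]
        for neuron in network[i]:
            for j in range(len(inputs)):
                neuron['weights'][j] += l_rate * neuron['delta'] * inputs[j]
            neuron['weights'][-1] += l_rate * neuron['delta']

# Train for a fixed number of epochs (3 here, as the practical asks)
def train_network(network, train, l_rate, n_epoch, n_outputs):
    for epoch in range(n_epoch):
        sum_error = 0.0
        for row in train:
            outputs = forward_propagate(network, row)
            expected = [0 for _ in range(n_outputs)]
            expected[row[-1]] = 1
            sum_error += sum((expected[i] - outputs[i]) ** 2 for i in range(len(expected)))
            backward_propagate_error(network, expected)
            update_weights(network, row, l_rate)
        print('>epoch=%d, lrate=%.3f, error=%.3f' % (epoch, l_rate, sum_error))

seed(1)
dataset = [[2.7810836, 2.550537003, 0], [7.673756466, 3.508563011, 1]]  # illustrative patterns
network = initialize_network(2, 2, 2)
train_network(network, dataset, 0.5, 3, 2)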
Practical-18
Implement union, intersection, complement and difference operations on fuzzy sets. Also create a fuzzy relation by the Cartesian product of any two fuzzy sets and perform max-min composition on any two fuzzy relations.
1. Union :
Consider 2 Fuzzy Sets denoted by A and B, and let Y be the Union of them; then for every member of A and B, Y will be:
degree_of_membership(Y) = max(degree_of_membership(A), degree_of_membership(B))
EXAMPLE:
# Example to Demonstrate the
# Union of Two Fuzzy Sets
A = dict()
B = dict()
Y = dict()
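The listing stops after the dictionary declarations; a minimal completion (the membership values for A follow the complement example further below, while B is an illustrative set chosen here) could be:
A = {"a": 0.2, "b": 0.3, "c": 0.6, "d": 0.6}
B = {"a": 0.9, "b": 0.9, "c": 0.4, "d": 0.5}  # illustrative values
print('The First Fuzzy Set is :', A)
print('The Second Fuzzy Set is :', B)
for A_key, B_key in zip(A, B):
    A_value = A[A_key]
    B_value = B[B_key]
    if A_value > B_value:
        Y[A_key] = A_value
    else:
        Y[B_key] = B_value
print('Fuzzy Set Union is :', Y)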
2. Intersection :
Consider 2 Fuzzy Sets denoted by A and B, and let Y be the Intersection of them; then for every member of A and B, Y will be:
degree_of_membership(Y) = min(degree_of_membership(A), degree_of_membership(B))
EXAMPLE:
# Example to Demonstrate
# Intersection of Two Fuzzy Sets
A = dict()
B = dict()
Y = dict()
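As above, the body of the intersection example is missing; a minimal completion with the same illustrative sets takes the minimum membership instead:
A = {"a": 0.2, "b": 0.3, "c": 0.6, "d": 0.6}
B = {"a": 0.9, "b": 0.9, "c": 0.4, "d": 0.5}  # illustrative values
for A_key, B_key in zip(A, B):
    A_value = A[A_key]
    B_value = B[B_key]
    if A_value < B_value:
        Y[A_key] = A_value
    else:
        Y[B_key] = B_value
print('Fuzzy Set Intersection is :', Y)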
3. Complement :
Consider a Fuzzy Set denoted by A, and let Y be the Complement of it; then for every member of A, Y will be:
degree_of_membership(Y) = 1 - degree_of_membership(A)
EXAMPLE:
# Example to Demonstrate the
# Complement of a Fuzzy Set
A = dict()
Y = dict()
A = {"a": 0.2, "b": 0.3, "c": 0.6, "d": 0.6}
print('The Fuzzy Set is :', A)
for A_key in A:
    Y[A_key] = 1 - A[A_key]
print('Fuzzy Set Complement is :', Y)
Output
The Fuzzy Set is : {'a': 0.2, 'b': 0.3, 'c': 0.6, 'd': 0.6}
Fuzzy Set Complement is : {'a': 0.8, 'b': 0.7, 'c': 0.4, 'd': 0.4}
4. Difference :
Consider 2 Fuzzy Sets denoted by A and B, and let Y be the Difference between them; then for every member of A and B, Y will be:
degree_of_membership(Y) = min(degree_of_membership(A), 1 - degree_of_membership(B))
EXAMPLE:
# Example to Demonstrate the
# Difference Between Two Fuzzy Sets
A = dict()
B = dict()
Y = dict()
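The difference example also stops at the declarations; following the formula above with the same illustrative sets, a minimal completion is:
A = {"a": 0.2, "b": 0.3, "c": 0.6, "d": 0.6}
B = {"a": 0.9, "b": 0.9, "c": 0.4, "d": 0.5}  # illustrative values
for A_key, B_key in zip(A, B):
    A_value = A[A_key]
    B_value = 1 - B[B_key]
    if A_value < B_value:
        Y[A_key] = A_value
    else:
        Y[B_key] = B_value
print('Fuzzy Set Difference is :', Y)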
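5. Cartesian product and max-min composition:
The practical statement also asks for a fuzzy relation built by the Cartesian product of two fuzzy sets and for the max-min composition of two fuzzy relations. No listing for this part appears in the extracted document, so the following is a minimal sketch with illustrative sets and relations:
# Cartesian product of fuzzy sets P and Q: R(x, y) = min(P(x), Q(y))
P = {"x1": 0.2, "x2": 0.7}   # illustrative fuzzy set on universe X
Q = {"y1": 0.5, "y2": 0.9}   # illustrative fuzzy set on universe Y
R = {(x, y): min(P[x], Q[y]) for x in P for y in Q}
print('Fuzzy relation R = P x Q :', R)

# A second illustrative relation S on Y x Z
C = {"z1": 0.6, "z2": 0.4}   # illustrative fuzzy set on universe Z
S = {(y, z): min(Q[y], C[z]) for y in Q for z in C}
print('Fuzzy relation S = Q x C :', S)

# Max-min composition T = R o S : T(x, z) = max over y of min(R(x, y), S(y, z))
T = {(x, z): max(min(R[(x, y)], S[(y, z)]) for y in Q) for x in P for z in C}
print('Max-min composition R o S :', T)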
Practical-19
Maximize the function f(x) = x^2 using a genetic algorithm, where x ranges from 0 to 25. Perform 6 iterations.
%program for Genetic algorithm to maximize the function f(x)=sin(x)
clear all;
clc;
%x ranges from 0 to 3.14
%five bits are enough to represent x in binary representation
n=input('Enter no. of population in each iteration');
nit=input('Enter no. of iterations');
%Generate the initial population
[oldchrom]=initbp(n,5);
%The population in binary is converted to integer
FieldD=[5;0;3.14;0;0;1;1]
for i=1:nit
phen=bindecod(oldchrom,FieldD,3);% phen gives the integer value of the
%binary population
%obtain fitness value
FitnV=sin(phen);
%apply roulette wheel selection
Nsel=4;
newchrix=selrws(FitnV, Nsel);
newchrom=oldchrom(newchrix,:);
%Perform Crossover
crossoverrate=1;
newchromc=recsp(newchrom,crossoverrate);%new population after crossover
%Perform mutation
vlub=0:31;
mutationrate=0.001;
newchromm=mutrandbin(newchromc,vlub,mutationrate);%new population
%after mutation
disp('For iteration');
i
disp('Population');
oldchrom
disp('X');
phen
disp('f(X)');
FitnV
oldchrom=newchromm;
end
Output:
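The MATLAB listing above relies on toolbox routines (initbp, bindecod, selrws, recsp, mutrandbin) and maximizes sin(x). For the problem as stated, f(x) = x^2 on the range 0 to 25 with 6 iterations, a self-contained Python sketch (population size, mutation rate and the 5-bit encoding are illustrative choices) could be:
import random

CHROM_LEN = 5          # five bits represent integers 0..31; values above 25 are clipped
POP_SIZE = 6
N_ITERATIONS = 6
MUTATION_RATE = 0.01

def decode(chrom):
    # binary string -> integer x, clipped to the stated range 0..25
    return min(int(chrom, 2), 25)

def fitness(chrom):
    x = decode(chrom)
    return x * x           # f(x) = x^2

def roulette_select(population):
    # roulette wheel selection proportional to fitness
    total = sum(fitness(c) for c in population)
    if total == 0:
        return random.choice(population)
    pick = random.uniform(0, total)
    current = 0.0
    for c in population:
        current += fitness(c)
        if current >= pick:
            return c
    return population[-1]

def crossover(p1, p2):
    # single-point crossover
    point = random.randint(1, CHROM_LEN - 1)
    return p1[:point] + p2[point:], p2[:point] + p1[point:]

def mutate(chrom):
    # flip each bit with a small probability
    return ''.join(bit if random.random() > MUTATION_RATE else ('1' if bit == '0' else '0') for bit in chrom)

population = [''.join(random.choice('01') for _ in range(CHROM_LEN)) for _ in range(POP_SIZE)]
for iteration in range(1, N_ITERATIONS + 1):
    next_gen = []
    while len(next_gen) < POP_SIZE:
        c1, c2 = crossover(roulette_select(population), roulette_select(population))
        next_gen.extend([mutate(c1), mutate(c2)])
    population = next_gen[:POP_SIZE]
    best = max(population, key=fitness)
    print('Iteration', iteration, 'best x =', decode(best), 'f(x) =', fitness(best))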