AI Final Lab Manual

The document contains multiple Python programs addressing various algorithmic problems including the water jug problem, 4-Queens problem, A* search for pathfinding, minimax for two-player games, and image processing using OpenCV. It also includes implementations of machine learning classifiers such as Decision Trees and Naive Bayes, as well as K-Means clustering. Additionally, it covers NLP tasks using NLTK such as tokenization, stop word filtering, stemming, part of speech tagging, chunking, and named entity recognition.


1. Write a Python program to solve the water jug problem.

from collections import deque

jug1_capacity = 4
jug2_capacity = 3
target_volume = 2

def water_jug_bfs():
    queue = deque([(0, 0)])
    parent = {(0, 0): None}  # records how each state was first reached

    while queue:
        state = queue.popleft()
        jug1, jug2 = state

        if jug1 == target_volume or jug2 == target_volume:
            # Walk back through the parents to print the solution path
            path = []
            while state is not None:
                path.append(state)
                state = parent[state]
            for j1, j2 in reversed(path):
                print(f"Jug1: {j1}L, Jug2: {j2}L")
            print("Solution found!")
            return

        # All possible moves: fill a jug, empty a jug, or pour one into the other
        actions = [
            (jug1_capacity, jug2),  # fill jug 1
            (jug1, jug2_capacity),  # fill jug 2
            (0, jug2),              # empty jug 1
            (jug1, 0),              # empty jug 2
            (min(jug1 + jug2, jug1_capacity), max(0, jug2 - (jug1_capacity - jug1))),  # pour jug 2 into jug 1
            (max(0, jug1 - (jug2_capacity - jug2)), min(jug1 + jug2, jug2_capacity))   # pour jug 1 into jug 2
        ]

        for new_state in actions:
            if new_state not in parent:
                parent[new_state] = (jug1, jug2)
                queue.append(new_state)

    print("No solution found")

water_jug_bfs()

Output-
Jug1: 0L, Jug2: 0L
Jug1: 0L, Jug2: 3L
Jug1: 3L, Jug2: 0L
Jug1: 3L, Jug2: 3L
Jug1: 4L, Jug2: 2L
Solution found!

2. Write a Python program to solve the 4-Queens problem.

def print_board(board):
    for row in board:
        print(" ".join(row))
    print()

def is_safe(board, row, col, n):
    # Check the column above this row
    for i in range(row):
        if board[i][col] == 'Q':
            return False

    # Check the upper-left diagonal
    for i, j in zip(range(row, -1, -1), range(col, -1, -1)):
        if board[i][j] == 'Q':
            return False

    # Check the upper-right diagonal
    for i, j in zip(range(row, -1, -1), range(col, n)):
        if board[i][j] == 'Q':
            return False

    return True

def solve_4_queens(board, row, n):
    # Every row holds a queen: print the completed placement
    if row >= n:
        print_board(board)
        return True

    res = False
    for col in range(n):
        if is_safe(board, row, col, n):
            board[row][col] = 'Q'
            res = solve_4_queens(board, row + 1, n) or res
            board[row][col] = '.'  # backtrack and try the next column

    return res

def solve_n_queens(n):
    board = [['.' for _ in range(n)] for _ in range(n)]
    if not solve_4_queens(board, 0, n):
        print("No solution exists")

if __name__ == "__main__":
    n = 4  # Size of the chessboard (4x4)
    solve_n_queens(n)

Output-
. Q . .
. . . Q
Q . . .
. . Q .

. . Q .
Q . . .
. . . Q
. Q . .
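
As a sanity check, the count of distinct 4-Queens solutions can be verified by brute force. The sketch below is an illustrative addition (not part of the lab program): every valid placement puts one queen per row and per column, i.e. a permutation of columns, so it suffices to filter permutations for diagonal clashes.

from itertools import permutations

def count_solutions(n):
    count = 0
    for cols in permutations(range(n)):
        # Queens sit at (i, cols[i]); a shared diagonal means |cols[i] - cols[j]| == i - j
        if all(abs(cols[i] - cols[j]) != i - j
               for i in range(n) for j in range(i)):
            count += 1
    return count

print(count_solutions(4))  # prints 2, matching the two boards above
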
3. Write a Python program to find the optimum path from source to destination using A* search.
import heapq

graph = {
    'A': {'B': 1, 'C': 4},
    'B': {'A': 1, 'C': 2, 'D': 5},
    'C': {'A': 4, 'B': 2, 'D': 1},
    'D': {'B': 5, 'C': 1, 'E': 3},
    'E': {'D': 3}
}

heuristic = {
    'A': 7,
    'B': 6,
    'C': 2,
    'D': 1,
    'E': 0
}

def a_star(start, goal):
    queue = []
    heapq.heappush(queue, (0, start, [start]))
    cost_to_node = {start: 0}

    while queue:
        current_cost, current_node, path = heapq.heappop(queue)
        if current_node == goal:
            return path

        for neighbor, travel_cost in graph[current_node].items():
            new_cost = cost_to_node[current_node] + travel_cost
            if neighbor not in cost_to_node or new_cost < cost_to_node[neighbor]:
                cost_to_node[neighbor] = new_cost
                # f(n) = g(n) + h(n): cost so far plus heuristic estimate to the goal
                estimated_total_cost = new_cost + heuristic[neighbor]
                heapq.heappush(queue, (estimated_total_cost, neighbor, path + [neighbor]))

    return None

start_node = 'A'
goal_node = 'E'

path = a_star(start_node, goal_node)

if path:
    print("Optimal path:", " -> ".join(path))
else:
    print("No path found from start to goal.")

Output-
Optimal path: A -> B -> C -> D -> E
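
As a quick cross-check (reusing the graph and path variables above), the cost of the returned path can be summed directly. Because every heuristic value here is at most the true remaining cost to E (the heuristic is admissible), A* is guaranteed to return this cheapest path.

# Sum the edge costs along the returned path: 1 + 2 + 1 + 3 = 7
cost = sum(graph[a][b] for a, b in zip(path, path[1:]))
print("Path cost:", cost)
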
4. Write a Python program to implement minimax search for two-player games.
def minimax(node, depth, is_maximizing):
    # A leaf node (or the depth limit) returns its static value
    if depth == 0 or isinstance(node, int):
        return node
    if is_maximizing:
        best_value = float('-inf')
        for child in node:
            value = minimax(child, depth - 1, False)
            best_value = max(best_value, value)
        return best_value
    else:
        best_value = float('inf')
        for child in node:
            value = minimax(child, depth - 1, True)
            best_value = min(best_value, value)
        return best_value

game_tree = [
    [3, 5, 6],
    [9, 1, 2],
    [0, -1, 10]
]
optimal_value = minimax(game_tree, depth=2, is_maximizing=True)
print("Optimal value for the maximizing player:", optimal_value)

Output-
Optimal value for the maximizing player: 3
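
A natural extension of the same search is alpha-beta pruning. The sketch below is an illustrative addition (reusing the game_tree above): it returns the same value while skipping branches that cannot change the outcome.

def alphabeta(node, depth, alpha, beta, is_maximizing):
    if depth == 0 or isinstance(node, int):
        return node
    if is_maximizing:
        value = float('-inf')
        for child in node:
            value = max(value, alphabeta(child, depth - 1, alpha, beta, False))
            alpha = max(alpha, value)
            if alpha >= beta:
                break  # beta cutoff: the minimizer will never allow this branch
        return value
    else:
        value = float('inf')
        for child in node:
            value = min(value, alphabeta(child, depth - 1, alpha, beta, True))
            beta = min(beta, value)
            if alpha >= beta:
                break  # alpha cutoff: the maximizer already has a better option
        return value

print(alphabeta(game_tree, 2, float('-inf'), float('inf'), True))  # also prints 3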


5. Image processing using OpenCV
a. Image resizing
import cv2
image = cv2.imread("C:\\Users\\bfgc2\\Pictures\\image.png")
width = 200
height = 800
resized_image = cv2.resize(image, (width, height))
cv2.imshow('Original image.jpg', image)
cv2.imshow('resized_image.jpg', resized_image)
cv2.waitKey(0)
cv2.destroyAllWindows()

Output-
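
A practical caveat for all the OpenCV programs in this section: cv2.imread returns None instead of raising an error when the path is wrong or the format is unsupported. A minimal defensive-loading sketch, using the same example path:

import cv2

image = cv2.imread("C:\\Users\\bfgc2\\Pictures\\image.png")
if image is None:  # imread fails silently, so check before any processing
    raise FileNotFoundError("Image not found; check the path passed to cv2.imread")
print("Loaded image with shape:", image.shape)  # (height, width, channels)
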
b. Blurring of image
import cv2
image = cv2.imread('C:\\Users\\bfgc2\\Pictures\\image.png')
cv2.imshow('Original Image', image)

Gaussian = cv2.GaussianBlur(image, (7, 7), 0)
cv2.imshow('Gaussian Blurring', Gaussian)

median = cv2.medianBlur(image, 5)
cv2.imshow('Median Blurring', median)

bilateral = cv2.bilateralFilter(image, 9, 75, 75)
cv2.imshow('Bilateral Blurring', bilateral)
cv2.waitKey(0)
cv2.destroyAllWindows()

Output-
c. Grayscaling of image
import cv2

image = cv2.imread('C:\\Users\\bfgc2\\Pictures\\image.png')
cv2.imshow('Original', image)
gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
cv2.imshow('Grayscale', gray_image)
cv2.waitKey(0)
cv2.destroyAllWindows()

Output-

d. Scaling and rotation


import cv2
image = cv2.imread('C:\\Users\\bfgc2\\Pictures\\image.png')
height, width = image.shape[:2]
center = (width / 2, height / 2)
rotate_matrix = cv2.getRotationMatrix2D(center=center, angle=45, scale=1)
rotated_image = cv2.warpAffine(src=image, M=rotate_matrix, dsize=(width, height))
cv2.imshow('Original image', image)
cv2.imshow('Rotated image', rotated_image)
cv2.waitKey(0)
cv2.imwrite('rotated_image.jpg', rotated_image)
Output-

e. Edge detection
import cv2
img = cv2.imread('C:\\Users\\bfgc2\\Pictures\\image.png', flags=0)  # flags=0 loads the image in grayscale
sobelx = cv2.Sobel(src=img, ddepth=cv2.CV_64F, dx=1, dy=0, ksize=5)   # Sobel edges along the x axis
sobely = cv2.Sobel(src=img, ddepth=cv2.CV_64F, dx=0, dy=1, ksize=5)   # Sobel edges along the y axis
sobelxy = cv2.Sobel(src=img, ddepth=cv2.CV_64F, dx=1, dy=1, ksize=5)  # combined x and y
cv2.imshow('Sobel X.jpg', sobelx)
cv2.imshow('Sobel Y', sobely)
cv2.imshow('Sobel X Y using Sobel() function', sobelxy)
cv2.waitKey(0)

Output-
f. Segmentation using thresholding
import cv2 as cv
from matplotlib import pyplot as plt

img = cv.imread('C:\\Users\\bfgc2\\Pictures\\image.png', cv.IMREAD_GRAYSCALE)
assert img is not None, "file could not be read, check with os.path.exists()"
ret, thresh1 = cv.threshold(img, 127, 255, cv.THRESH_BINARY)
ret, thresh2 = cv.threshold(img, 127, 255, cv.THRESH_BINARY_INV)
ret, thresh3 = cv.threshold(img, 127, 255, cv.THRESH_TRUNC)
ret, thresh4 = cv.threshold(img, 127, 255, cv.THRESH_TOZERO)
ret, thresh5 = cv.threshold(img, 127, 255, cv.THRESH_TOZERO_INV)

titles = ['Original Image', 'BINARY', 'BINARY_INV', 'TRUNC', 'TOZERO', 'TOZERO_INV']
images = [img, thresh1, thresh2, thresh3, thresh4, thresh5]

for i in range(6):
    plt.subplot(2, 3, i + 1)
    plt.imshow(images[i], 'gray', vmin=0, vmax=255)
    plt.title(titles[i])
    plt.xticks([]), plt.yticks([])

plt.show()
Output-
g. Background subtraction
import cv2
import numpy as np
image = cv2.imread("C:\\Users\\bfgc2\\Pictures\\image.png")
original_image = image.copy()
mask = np.zeros(image.shape[:2], np.uint8)
bgd_model = np.zeros((1, 65), np.float64)
fgd_model = np.zeros((1, 65), np.float64)
height, width = image.shape[:2]
rect = (10, 10, width - 20, height - 20)
cv2.grabCut(image, mask, rect, bgd_model, fgd_model, iterCount=50, mode=cv2.GC_INIT_WITH_RECT)
binary_mask = np.where((mask == 2) | (mask == 0), 0, 1).astype('uint8')
# Clean up the mask with a morphological close before extracting the foreground
kernel = np.ones((5, 5), np.uint8)
binary_mask = cv2.morphologyEx(binary_mask, cv2.MORPH_CLOSE, kernel)
foreground = original_image * binary_mask[:, :, np.newaxis]
cv2.imshow("Original Image", original_image)
cv2.imshow("Foreground Image", foreground)
cv2.waitKey(0)
cv2.destroyAllWindows()
Output-

h. Morphological operations
import cv2
import numpy as np
from matplotlib import pyplot as plt
image = cv2.imread('C:\\Users\\bfgc2\\Pictures\\image.png', 0)  # change to your image file path; 0 loads grayscale
kernel = np.ones((5,5), np.uint8)
erosion = cv2.erode(image, kernel, iterations=1)
dilation = cv2.dilate(image, kernel, iterations=1)
opening = cv2.morphologyEx(image, cv2.MORPH_OPEN, kernel)
closing = cv2.morphologyEx(image, cv2.MORPH_CLOSE, kernel)
titles = ['Original Image', 'Erosion', 'Dilation', 'Opening', 'Closing']
images = [image, erosion, dilation, opening, closing]
for i in range(5):
    plt.subplot(2, 3, i + 1)
    plt.imshow(images[i], cmap='gray')
    plt.title(titles[i])
    plt.xticks([]), plt.yticks([])

plt.show()
Output-
7. Use a decision tree classifier to classify the dataset
In a terminal, install the required libraries first:
pip install scikit-learn
pip install pandas

import pandas as pd
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import accuracy_score, classification_report, confusion_matrix

iris = load_iris()
X = pd.DataFrame(iris.data, columns=iris.feature_names)  # Features
y = iris.target
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)

clf = DecisionTreeClassifier(random_state=42)
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)

accuracy = accuracy_score(y_test, y_pred)
print("Accuracy:", accuracy)
print("Classification Report:\n", classification_report(y_test, y_pred))
print("Confusion Matrix:\n", confusion_matrix(y_test, y_pred))

Output-
Accuracy: 1.0
Confusion Matrix:
[[10  0  0]
 [ 0  9  0]
 [ 0  0 11]]

Process finished with exit code 0
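
Optionally, the learned rules can be inspected as plain text. A short sketch (assuming the clf and iris objects from the program above) using scikit-learn's export_text:

from sklearn.tree import export_text

# Print the decision rules the classifier learned, with readable feature names
rules = export_text(clf, feature_names=list(iris.feature_names))
print(rules)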

8. Use a Naive Bayes classifier to classify the dataset.


import pandas as pd
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn.naive_bayes import GaussianNB
from sklearn.metrics import accuracy_score, confusion_matrix

iris = load_iris()
X = pd.DataFrame(iris.data, columns=iris.feature_names)  # Features
y = iris.target
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)

nb = GaussianNB()
nb.fit(X_train, y_train)
y_pred = nb.predict(X_test)

accuracy = accuracy_score(y_test, y_pred)
print("Accuracy:", accuracy)
print("Confusion Matrix:\n", confusion_matrix(y_test, y_pred))

Output-
Accuracy: 1.0
Confusion Matrix:
[[10  0  0]
 [ 0  9  0]
 [ 0  0 11]]

Process finished with exit code 0
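
A short follow-up sketch (assuming nb and X_test from above): GaussianNB also exposes per-class probabilities, which show how confident each prediction is rather than just the hard label.

# One row per test sample, one column per iris class; each row sums to 1
probabilities = nb.predict_proba(X_test[:3])
print(probabilities.round(3))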


9. Implement the K-Means clustering algorithm.
from sklearn.cluster import KMeans
import numpy as np

X = np.array([
    [1.0, 2.0], [1.5, 1.8], [5.0, 8.0], [8.0, 8.0],
    [1.0, 0.6], [9.0, 11.0], [8.0, 2.0], [10.0, 2.0],
    [9.0, 3.0]
])

k = 3
kmeans = KMeans(n_clusters=k, random_state=0)
kmeans.fit(X)
centroids = kmeans.cluster_centers_
labels = kmeans.labels_

print("Final centroids:\n", centroids)
print("Cluster assignments:", labels)

Output-
Final centroids:
[[7.33333333 9.        ]
 [9.         2.33333333]
 [1.16666667 1.46666667]]
Cluster assignments: [2 2 0 0 2 0 1 1 1]

Process finished with exit code 0
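
The choice of k = 3 can be motivated with the elbow method. A minimal sketch (reusing the X array above): fit K-Means for several values of k and look for the point where the inertia (within-cluster sum of squared distances) stops dropping sharply.

from sklearn.cluster import KMeans

for k in range(1, 6):
    model = KMeans(n_clusters=k, n_init=10, random_state=0).fit(X)
    print(f"k={k}: inertia={model.inertia_:.2f}")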


10. Using Python NLTK, perform the following NLP tasks on any textual content:

1. Tokenizing
2. Filtering stop words
3. Stemming
4. Part-of-speech tagging
5. Chunking
6. Named entity recognition

import nltk
from nltk.tokenize import word_tokenize, sent_tokenize
from nltk.corpus import stopwords
from nltk.stem import PorterStemmer
from nltk import pos_tag, ne_chunk
from nltk.tree import Tree
nltk.download('punkt')
nltk.download('stopwords')
nltk.download('averaged_perceptron_tagger')
nltk.download('maxent_ne_chunker')
nltk.download('words')

# Sample textual content
text = """John is studying at Stanford University in California. He is working on Artificial Intelligence projects."""

# 1. Tokenizing
words = word_tokenize(text)
sentences = sent_tokenize(text)
print("Word Tokens:", words)
print("Sentence Tokens:", sentences)

Output-
Word Tokens: ['John', 'is', 'studying', 'at', 'Stanford', 'University', 'in', 'California', '.', 'He', 'is', 'working', 'on', 'Artificial', 'Intelligence', 'projects', '.']
Sentence Tokens: ['John is studying at Stanford University in California.', 'He is working on Artificial Intelligence projects.']

# 2. Filtering stop words
stop_words = set(stopwords.words('english'))
filtered_words = [word for word in words if word.lower() not in stop_words]
print("Filtered Words:", filtered_words)
Output-
Filtered Words: ['John', 'studying', 'Stanford', 'University', 'California', '.', 'working', 'Artificial', 'Intelligence', 'projects', '.']

# 3. Stemming
stemmer = PorterStemmer()
stemmed_words = [stemmer.stem(word) for word in filtered_words]
print("Stemmed Words:", stemmed_words)

Output-
Stemmed Words: ['john', 'studi', 'stanford', 'univers', 'california', '.', 'work', 'artifici', 'intellig', 'project', '.']
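
For comparison, lemmatization returns dictionary forms rather than the stemmer's chopped suffixes ('studi', 'univers'). A minimal sketch, assuming nltk.download('wordnet') has been run:

from nltk.stem import WordNetLemmatizer

lemmatizer = WordNetLemmatizer()
print([lemmatizer.lemmatize(w, pos='v') for w in ['studying', 'working']])  # ['study', 'work']
print([lemmatizer.lemmatize(w) for w in ['projects', 'universities']])      # ['project', 'university']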

# 4. Part-of-speech tagging
pos_tags = pos_tag(words)
print("POS Tags:", pos_tags)

Output-
POS Tags: [('John', 'NNP'), ('is', 'VBZ'), ('studying', 'VBG'), ('at', 'IN'), ('Stanford', 'NNP'), ('University', 'NNP'), ('in', 'IN'), ('California', 'NNP'), ('.', '.'), ('He', 'PRP'), ('is', 'VBZ'), ('working', 'VBG'), ('on', 'IN'), ('Artificial', 'NNP'), ('Intelligence', 'NNP'), ('projects', 'NNS'), ('.', '.')]

# 5. Chunking (e.g., NP chunking)
chunk_grammar = "NP: {<DT>?<JJ>*<NN>}"
chunk_parser = nltk.RegexpParser(chunk_grammar)
chunked = chunk_parser.parse(pos_tags)
print("Chunking:")
print(chunked)

Output-
Chunking:
(S
  John/NNP
  is/VBZ
  studying/VBG
  at/IN
  Stanford/NNP
  University/NNP
  in/IN
  California/NNP
  ./.
  He/PRP
  is/VBZ
  working/VBG
  on/IN
  Artificial/NNP
  Intelligence/NNP
  projects/NNS
  ./.)

# 6. Named entity recognition
ner_tree = ne_chunk(pos_tags)
print("Named Entity Recognition:")
for subtree in ner_tree:
    if isinstance(subtree, Tree):
        print(" ".join(c[0] for c in subtree), "-", subtree.label())

Output-
Named Entity Recognition:
John - PERSON
Stanford University - ORGANIZATION
California - GPE
Artificial Intelligence - ORGANIZATION

Process finished with exit code 0
