AI Final Lab Manual

1. Write a Python program to solve the water jug problem using BFS.
from collections import deque

def water_jug_bfs(jug1_capacity=4, jug2_capacity=3, target=2):
    # BFS over (jug1, jug2) states; each queue entry carries the path that reached it
    queue, visited = deque([((0, 0), [(0, 0)])]), set()
    while queue:
        (jug1, jug2), path = queue.popleft()
        if (jug1, jug2) in visited: continue
        visited.add((jug1, jug2))
        if jug2 == target:  # goal: measure `target` litres in jug2
            for a, b in path:
                print(f"Jug1: {a}L, Jug2: {b}L")
            print("Solution found!")
            return
        actions = [
            (jug1_capacity, jug2),  # fill jug1
            (jug1, jug2_capacity),  # fill jug2
            (0, jug2),  # empty jug1
            (jug1, 0),  # empty jug2
            (min(jug1 + jug2, jug1_capacity), max(0, jug2 - (jug1_capacity - jug1))),  # pour jug2 into jug1
            (max(0, jug1 - (jug2_capacity - jug2)), min(jug1 + jug2, jug2_capacity))  # pour jug1 into jug2
        ]
        queue.extend((state, path + [state]) for state in actions)

water_jug_bfs()
Output-
Jug1: 0L, Jug2: 0L
Jug1: 0L, Jug2: 3L
Jug1: 3L, Jug2: 0L
Jug1: 3L, Jug2: 3L
Jug1: 4L, Jug2: 2L
Solution found!
2. Write a Python program to solve the 4-queens problem.
def print_board(board):
    for row in board:
        print(" ".join(row))
    print()

def is_safe(board, row, col, n):
    # Check the column and both upper diagonals for an existing queen
    for i in range(row):
        if board[i][col] == 'Q':
            return False
    for i, j in zip(range(row - 1, -1, -1), range(col - 1, -1, -1)):
        if board[i][j] == 'Q':
            return False
    for i, j in zip(range(row - 1, -1, -1), range(col + 1, n)):
        if board[i][j] == 'Q':
            return False
    return True

def solve_4_queens(board, row, n):
    if row >= n:
        print_board(board)
        return True
    res = False
    for col in range(n):
        if is_safe(board, row, col, n):
            board[row][col] = 'Q'
            res = solve_4_queens(board, row + 1, n) or res
            board[row][col] = '.'
    return res

def solve_n_queens(n):
    board = [['.' for _ in range(n)] for _ in range(n)]
    if not solve_4_queens(board, 0, n):
        print("No solution exists.")

if __name__ == "__main__":
    n = 4  # Size of the chessboard (4x4)
    solve_n_queens(n)
Output-
. Q . .
. . . Q
Q . . .
. . Q .

. . Q .
Q . . .
. . . Q
. Q . .
3. Write a Python program to find the optimum path from source to destination using A* search.
import heapq

graph = {
    'A': {'B': 1, 'C': 4},
    'B': {'A': 1, 'C': 2, 'D': 5},
    'C': {'A': 4, 'B': 2, 'D': 1},
    'D': {'B': 5, 'C': 1, 'E': 3},
    'E': {'D': 3}
}
heuristic = {
    'A': 7,
    'B': 6,
    'C': 2,
    'D': 1,
    'E': 0
}

def a_star_search(start, goal):
    # Priority queue of (f, node, path) entries, where f = cost so far + heuristic
    queue = [(heuristic[start], start, [start])]
    while queue:
        current_cost, current_node, path = heapq.heappop(queue)
        if current_node == goal:
            return path
        g = current_cost - heuristic[current_node]  # recover the cost so far
        for neighbor, edge_cost in graph[current_node].items():
            f = g + edge_cost + heuristic[neighbor]
            heapq.heappush(queue, (f, neighbor, path + [neighbor]))
    return None
start_node = 'A'
goal_node = 'E'
path = a_star_search(start_node, goal_node)
if path:
    print("Optimal path:", " -> ".join(path))
else:
    print("No path found from start to goal.")
Output-
Optimal path: A -> B -> C -> D -> E
4. Write a Python program to implement minimax search for two-player games.
def minimax(node, depth, is_maximizing):
    # Leaves are plain integers; internal nodes are lists of child subtrees
    if depth == 0 or isinstance(node, int):
        return node
    if is_maximizing:
        best_value = float('-inf')
        for child in node:
            value = minimax(child, depth - 1, False)
            best_value = max(best_value, value)
        return best_value
    else:
        best_value = float('inf')
        for child in node:
            value = minimax(child, depth - 1, True)
            best_value = min(best_value, value)
        return best_value
game_tree = [
[3, 5, 6],
[9, 1, 2],
[0, -1, 10]
]
optimal_value = minimax(game_tree, depth=2, is_maximizing=True)
print("Optimal value for the maximizing player:", optimal_value)
Output-
Optimal value for the maximizing player: 3
b. Blurring of image
import cv2
image = cv2.imread('C:\\Users\\bfgc2\\Pictures\\image.png')
cv2.imshow('Original Image', image)
median = cv2.medianBlur(image, 5)  # median filter with a 5x5 aperture
cv2.imshow('Median Blurring', median)
cv2.waitKey(0)
cv2.destroyAllWindows()
Output-
c. Grayscaling of image
import cv2
image = cv2.imread('C:\\Users\\bfgc2\\Pictures\\image.png')
cv2.imshow('Original', image)
gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
cv2.imshow('Grayscale', gray_image)
cv2.waitKey(0)
cv2.destroyAllWindows()
Output-
e. Edge detection
import cv2
img = cv2.imread('C:\\Users\\bfgc2\\Pictures\\image.png', flags=0)  # flags=0 loads the image in grayscale
sobelx = cv2.Sobel(src=img, ddepth=cv2.CV_64F, dx=1, dy=0, ksize=5)  # Sobel edges along X
sobely = cv2.Sobel(src=img, ddepth=cv2.CV_64F, dx=0, dy=1, ksize=5)  # Sobel edges along Y
sobelxy = cv2.Sobel(src=img, ddepth=cv2.CV_64F, dx=1, dy=1, ksize=5)  # Sobel edges along X and Y
cv2.imshow('Sobel X', sobelx)
cv2.imshow('Sobel Y', sobely)
cv2.imshow('Sobel X Y using Sobel() function', sobelxy)
cv2.waitKey(0)
cv2.destroyAllWindows()
Output-
f. Segmentation using thresholding
import cv2 as cv
from matplotlib import pyplot as plt
img = cv.imread('C:\\Users\\bfgc2\\Pictures\\image.png', cv.IMREAD_GRAYSCALE)
assert img is not None, "file could not be read, check with os.path.exists()"
ret, thresh1 = cv.threshold(img, 127, 255, cv.THRESH_BINARY)
ret, thresh2 = cv.threshold(img, 127, 255, cv.THRESH_BINARY_INV)
ret, thresh3 = cv.threshold(img, 127, 255, cv.THRESH_TRUNC)
ret, thresh4 = cv.threshold(img, 127, 255, cv.THRESH_TOZERO)
ret, thresh5 = cv.threshold(img, 127, 255, cv.THRESH_TOZERO_INV)
titles = ['Original Image', 'BINARY', 'BINARY_INV', 'TRUNC', 'TOZERO', 'TOZERO_INV']
images = [img, thresh1, thresh2, thresh3, thresh4, thresh5]
for i in range(6):
    plt.subplot(2, 3, i + 1), plt.imshow(images[i], 'gray', vmin=0, vmax=255)
    plt.title(titles[i])
    plt.xticks([]), plt.yticks([])
plt.show()
Output-
g. Background subtraction
import cv2
import numpy as np
image = cv2.imread("C:\\Users\\bfgc2\\Pictures\\image.png")
original_image = image.copy()
mask = np.zeros(image.shape[:2], np.uint8)
bgd_model = np.zeros((1, 65), np.float64)
fgd_model = np.zeros((1, 65), np.float64)
height, width = image.shape[:2]
rect = (10, 10, width - 20, height - 20)
cv2.grabCut(image, mask, rect, bgd_model, fgd_model, iterCount=50, mode=cv2.GC_INIT_WITH_RECT)
binary_mask = np.where((mask == 2) | (mask == 0), 0, 1).astype('uint8')  # GrabCut marks background as 0 or 2
kernel = np.ones((5, 5), np.uint8)
binary_mask = cv2.morphologyEx(binary_mask, cv2.MORPH_CLOSE, kernel)  # close small holes in the mask
foreground = original_image * binary_mask[:, :, np.newaxis]
cv2.imshow("Original Image", original_image)
cv2.imshow("Foreground Image", foreground)
cv2.waitKey(0)
cv2.destroyAllWindows()
Output-
h. Morphological operations
import cv2
import numpy as np
from matplotlib import pyplot as plt
image = cv2.imread('C:\\Users\\bfgc2\\Pictures\\image.png', 0)  # change the path to your image file; 0 loads it in grayscale
kernel = np.ones((5,5), np.uint8)
erosion = cv2.erode(image, kernel, iterations=1)
dilation = cv2.dilate(image, kernel, iterations=1)
opening = cv2.morphologyEx(image, cv2.MORPH_OPEN, kernel)
closing = cv2.morphologyEx(image, cv2.MORPH_CLOSE, kernel)
titles = ['Original Image', 'Erosion', 'Dilation', 'Opening', 'Closing']
images = [image, erosion, dilation, opening, closing]
for i in range(5):
    plt.subplot(2, 3, i+1)
    plt.imshow(images[i], cmap='gray')
    plt.title(titles[i])
    plt.xticks([]), plt.yticks([])
plt.show()
Output-
7. Use a decision tree classifier to classify a dataset
Go to the terminal and install the required libraries with the commands below:
pip install scikit-learn
pip install pandas
import pandas as pd
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import accuracy_score, classification_report, confusion_matrix
iris = load_iris()
X = pd.DataFrame(iris.data, columns=iris.feature_names) # Features
y = iris.target
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
clf = DecisionTreeClassifier(random_state=42)
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
print("Accuracy:", accuracy_score(y_test, y_pred))
print("Confusion Matrix:")
print(confusion_matrix(y_test, y_pred))
Output-
Accuracy: 1.0
Confusion Matrix:
[[10 0 0]
[ 0 9 0]
[ 0 0 11]]
8. Use a Naive Bayes classifier to classify the same dataset
from sklearn.naive_bayes import GaussianNB
iris = load_iris()
X = pd.DataFrame(iris.data, columns=iris.feature_names)  # Features
y = iris.target
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
nb = GaussianNB()
nb.fit(X_train, y_train)
y_pred = nb.predict(X_test)
print("Accuracy:", accuracy_score(y_test, y_pred))
print("Confusion Matrix:")
print(confusion_matrix(y_test, y_pred))
Output-
Accuracy: 1.0
Confusion Matrix:
[[10 0 0]
[ 0 9 0]
[ 0 0 11]]
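The next exercise clusters 2-D points with k-means. A minimal sketch using scikit-learn's KMeans, assuming the widely used nine sample points whose cluster means reproduce the centroids shown below (the numeric cluster labels can be permuted depending on initialization):

import numpy as np
from sklearn.cluster import KMeans

# Hypothetical sample points; their per-cluster means match the centroids below
X = np.array([[1, 2], [1.5, 1.8], [5, 8], [8, 8], [1, 0.6],
              [9, 11], [8, 2], [10, 2], [9, 3]])
kmeans = KMeans(n_clusters=3, n_init=10, random_state=0)
kmeans.fit(X)
print("Final centroids:")
print(kmeans.cluster_centers_)
print("Cluster assignments:", kmeans.labels_)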
Output-
Final centroids:
[[7.33333333 9. ]
[9. 2.33333333]
[1.16666667 1.46666667]]
Cluster assignments: [2 2 0 0 2 0 1 1 1]
1. Tokenizing
2. Stop-word removal
3. Stemming
4. POS tagging
5. Chunking
6. Named entity recognition
import nltk
from nltk.tokenize import word_tokenize, sent_tokenize
from nltk.corpus import stopwords
from nltk.stem import PorterStemmer
from nltk import pos_tag, ne_chunk
from nltk.tree import Tree
nltk.download('punkt')
nltk.download('stopwords')
nltk.download('averaged_perceptron_tagger')
nltk.download('maxent_ne_chunker')
nltk.download('words')
text = "John is studying at Stanford University in California. He is working on Artificial Intelligence projects."

# 1. Tokenizing
words = word_tokenize(text)
sentences = sent_tokenize(text)
print("Word Tokens:", words)
print("Sentence Tokens:", sentences)
Output-
Word Tokens: ['John', 'is', 'studying', 'at', 'Stanford', 'University', 'in', 'California', '.', 'He', 'is',
'working', 'on', 'Artificial', 'Intelligence', 'projects', '.']
Sentence Tokens: ['John is studying at Stanford University in California.', 'He is working on Artificial
Intelligence projects.']
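A minimal sketch for step 2, assuming NLTK's built-in English stop-word list (case-insensitive filtering that keeps punctuation, consistent with the output below):

# 2. Stop-word removal
stop_words = set(stopwords.words('english'))
filtered_words = [word for word in words if word.lower() not in stop_words]
print("Filtered Words:", filtered_words)
Output-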
Filtered Words: ['John', 'studying', 'Stanford', 'University', 'California', '.', 'working', 'Artificial',
'Intelligence', 'projects', '.']
# 3. Stemming
stemmer = PorterStemmer()
stemmed_words = [stemmer.stem(word) for word in filtered_words]
print("Stemmed Words:", stemmed_words)
Output-
Stemmed Words: ['john', 'studi', 'stanford', 'univers', 'california', '.', 'work', 'artifici', 'intellig', 'project',
'.']
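A minimal sketch for step 4, assuming NLTK's default perceptron tagger applied to the word tokens from step 1:

# 4. POS tagging
pos_tags = pos_tag(words)
print("POS Tags:", pos_tags)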
Output-
POS Tags: [('John', 'NNP'), ('is', 'VBZ'), ('studying', 'VBG'), ('at', 'IN'), ('Stanford', 'NNP'), ('University',
'NNP'), ('in', 'IN'), ('California', 'NNP'), ('.', '.'), ('He', 'PRP'), ('is', 'VBZ'), ('working', 'VBG'), ('on', 'IN'),
('Artificial', 'NNP'), ('Intelligence', 'NNP'), ('projects', 'NNS'), ('.', '.')]
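A sketch for step 5. The flat tree in the output suggests a RegexpParser with a simple noun-phrase grammar (the grammar here is an assumption); no token in this sentence matches it, so every word stays at the top level:

# 5. Chunking
grammar = "NP: {<DT>?<JJ>*<NN>}"  # assumed noun-phrase grammar
chunk_parser = nltk.RegexpParser(grammar)
chunks = chunk_parser.parse(pos_tags)
print("Chunking:")
print(chunks)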
Output-
Chunking:
(S
John/NNP
is/VBZ
studying/VBG
at/IN
Stanford/NNP
University/NNP
in/IN
California/NNP
./.
He/PRP
is/VBZ
working/VBG
on/IN
Artificial/NNP
Intelligence/NNP
projects/NNS
./.)
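A minimal sketch for step 6, extracting labelled entities from ne_chunk's tree (which entities the chunker reports depends on the NLTK model in use):

# 6. Named entity recognition
ner_tree = ne_chunk(pos_tags)
for subtree in ner_tree:
    if isinstance(subtree, Tree):  # named entities appear as labelled subtrees
        entity = " ".join(token for token, tag in subtree.leaves())
        print(entity, "-", subtree.label())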
Output-
John - PERSON
California - GPE