AI_Lab_File_Vivek_pandey

The document outlines several programming experiments, each with a specific aim and a corresponding Python code implementation. The experiments cover the Water-Jug problem, the Tower of Hanoi, the 8-Puzzle problem, Breadth First Search, Depth First Search, the A* algorithm, the ID3 decision tree, the naïve Bayesian classifier, the k-Nearest Neighbour algorithm, and a backpropagation neural network. Each section provides the aim and the code used.


EXPERIMENT NO.-1

AIM:
Write a Program to Implement the Water-Jug problem.

PROGRAM:
from collections import deque

# Function to find the minimum operations to obtain d liters in one jug
def min_steps(m, n, d):
    if d > max(m, n):
        return -1

    # Queue for BFS: (jug1, jug2, steps)
    q = deque([(0, 0, 0)])

    # For tracking the visited states
    visited = [[False] * (n + 1) for _ in range(m + 1)]
    visited[0][0] = True

    while q:
        jug1, jug2, steps = q.popleft()
        if jug1 == d or jug2 == d:
            return steps

        # 1: Fill jug1
        if not visited[m][jug2]:
            visited[m][jug2] = True
            q.append((m, jug2, steps + 1))

        # 2: Fill jug2
        if not visited[jug1][n]:
            visited[jug1][n] = True
            q.append((jug1, n, steps + 1))

        # 3: Empty jug1
        if not visited[0][jug2]:
            visited[0][jug2] = True
            q.append((0, jug2, steps + 1))

        # 4: Empty jug2
        if not visited[jug1][0]:
            visited[jug1][0] = True
            q.append((jug1, 0, steps + 1))

        # 5: Pour jug1 into jug2
        pour1to2 = min(jug1, n - jug2)
        if not visited[jug1 - pour1to2][jug2 + pour1to2]:
            visited[jug1 - pour1to2][jug2 + pour1to2] = True
            q.append((jug1 - pour1to2, jug2 + pour1to2, steps + 1))

        # 6: Pour jug2 into jug1
        pour2to1 = min(jug2, m - jug1)
        if not visited[jug1 + pour2to1][jug2 - pour2to1]:
            visited[jug1 + pour2to1][jug2 - pour2to1] = True
            q.append((jug1 + pour2to1, jug2 - pour2to1, steps + 1))

    return -1


if __name__ == "__main__":
    vol1 = int(input("Enter Volume of first jug: "))
    vol2 = int(input("Enter Volume of second jug: "))
    reqVol = int(input("Enter the required Volume in a jug: "))

    result = min_steps(vol1, vol2, reqVol)

    print()
    if result == -1:
        print("This operation cannot be performed")
    else:
        print("Minimum operations to obtain", reqVol, "liters in one jug:", result)
OUTPUT:

EXPERIMENT NO.-2

AIM:
Write a Program to Implement the Tower of Hanoi.

PROGRAM:
# Recursive Python function to solve the Tower of Hanoi
def TowerOfHanoi(n, from_rod, to_rod, aux_rod):
    if n == 0:
        return

    TowerOfHanoi(n - 1, from_rod, aux_rod, to_rod)
    print("Move disk", n, "from rod", from_rod, "to rod", to_rod)
    TowerOfHanoi(n - 1, aux_rod, to_rod, from_rod)


# Driver code
N = 3

# A, B, C are the names of the rods
TowerOfHanoi(N, 'A', 'C', 'B')
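As a brief aside (added, not from the original file): the recursion above makes exactly 2**n - 1 moves for n disks. A small counting wrapper, sketched below with the illustrative name count_moves, confirms this without printing every move.

# Illustrative check that n disks take 2**n - 1 moves (sketch, not part of the lab sheet)
def count_moves(n):
    # Same recursion as TowerOfHanoi, but counting moves instead of printing them
    if n == 0:
        return 0
    return count_moves(n - 1) + 1 + count_moves(n - 1)

for disks in range(1, 6):
    assert count_moves(disks) == 2 ** disks - 1
print("Move counts match 2**n - 1 for n = 1..5")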

OUTPUT:

EXPERIMENT NO.-3

AIM:
Write a Program to Implement the 8-Puzzle problem.

PROGRAM:
import copy
from heapq import heappush, heappop

n = 3
row = [1, 0, -1, 0]
col = [0, -1, 0, 1]


class priorityQueue:
    def __init__(self):
        self.heap = []

    def push(self, k):
        heappush(self.heap, k)

    def pop(self):
        return heappop(self.heap)

    def empty(self):
        if not self.heap:
            return True
        else:
            return False


class node:
    def __init__(self, parent, mat, empty_tile_pos, cost, level):
        self.parent = parent
        self.mat = mat
        self.empty_tile_pos = empty_tile_pos
        self.cost = cost
        self.level = level

    def __lt__(self, nxt):
        return self.cost < nxt.cost


def calculateCost(mat, final) -> int:
    count = 0
    for i in range(n):
        for j in range(n):
            if (mat[i][j]) and (mat[i][j] != final[i][j]):
                count += 1
    return count


def newNode(mat, empty_tile_pos, new_empty_tile_pos, level, parent, final) -> node:
    new_mat = copy.deepcopy(mat)

    x1 = empty_tile_pos[0]
    y1 = empty_tile_pos[1]
    x2 = new_empty_tile_pos[0]
    y2 = new_empty_tile_pos[1]
    new_mat[x1][y1], new_mat[x2][y2] = new_mat[x2][y2], new_mat[x1][y1]

    cost = calculateCost(new_mat, final)
    new_node = node(parent, new_mat, new_empty_tile_pos, cost, level)
    return new_node


def printMatrix(mat):
    for i in range(n):
        for j in range(n):
            print("%d " % (mat[i][j]), end=" ")
        print()


def isSafe(x, y):
    return x >= 0 and x < n and y >= 0 and y < n


def printPath(root):
    if root == None:
        return

    printPath(root.parent)
    printMatrix(root.mat)
    print()


def solve(initial, empty_tile_pos, final):
    pq = priorityQueue()
    cost = calculateCost(initial, final)
    root = node(None, initial, empty_tile_pos, cost, 0)

    pq.push(root)
    while not pq.empty():
        minimum = pq.pop()

        if minimum.cost == 0:
            printPath(minimum)
            return

        for i in range(4):
            new_tile_pos = [
                minimum.empty_tile_pos[0] + row[i],
                minimum.empty_tile_pos[1] + col[i], ]

            if isSafe(new_tile_pos[0], new_tile_pos[1]):
                child = newNode(minimum.mat, minimum.empty_tile_pos, new_tile_pos,
                                minimum.level + 1, minimum, final,)
                pq.push(child)


initial = [[1, 2, 3],
           [8, 6, 0],
           [7, 5, 4]]

final = [[1, 2, 3],
         [8, 0, 4],
         [7, 6, 5]]

empty_tile_pos = [1, 2]

solve(initial, empty_tile_pos, final)
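A short note on the cost function (added here for clarity, not from the original listing): calculateCost counts the non-blank tiles that are out of place, i.e. the misplaced-tiles heuristic. For the initial and final boards defined above it evaluates to 3 (tiles 6, 5 and 4 are misplaced), and the search expands states in increasing order of this cost.

# Illustrative check of the misplaced-tiles heuristic for the boards above
# (assumes calculateCost, initial and final from the program are in scope)
print("Misplaced tiles in the start state:", calculateCost(initial, final))  # expected: 3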

OUTPUT:

EXPERIMENT NO.-4

AIM:
Write a Program to Implement Breadth First Search.

PROGRAM:
from collections import deque


def bfs(adj, s, visited):
    q = deque()
    visited[s] = True
    q.append(s)

    while q:
        curr = q.popleft()
        print(curr, end=" ")

        for x in adj[curr]:
            if not visited[x]:
                visited[x] = True
                q.append(x)


def add_edge(adj, u, v):
    adj[u].append(v)
    adj[v].append(u)  # Undirected graph


# Perform BFS for the entire graph
def bfs_disconnected(adj):
    visited = [False] * len(adj)

    for i in range(len(adj)):
        if not visited[i]:
            bfs(adj, i, visited)


V = 5
adj = [[] for _ in range(V)]

# Add edges to the graph
add_edge(adj, 0, 1)
add_edge(adj, 0, 4)
add_edge(adj, 1, 2)
add_edge(adj, 1, 4)
add_edge(adj, 2, 3)
add_edge(adj, 2, 4)
add_edge(adj, 3, 4)

# Perform BFS traversal for the entire graph
print("BFS Traversal of given graph:")
bfs_disconnected(adj)
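An optional illustration (not part of the lab sheet): because bfs_disconnected restarts the search from every unvisited vertex, the same code also covers graphs with more than one component. The small two-component graph below is an assumed example, not taken from the original program.

# Illustrative run on a disconnected graph (assumes bfs_disconnected and add_edge above are in scope)
adj2 = [[] for _ in range(4)]
add_edge(adj2, 0, 1)   # component {0, 1}
add_edge(adj2, 2, 3)   # component {2, 3}
print("\nBFS Traversal of a disconnected graph:")
bfs_disconnected(adj2)  # visits both components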

OUTPUT:

EXPERIMENT NO.-5

AIM:
Write a Program to Implement Depth First Search.

PROGRAM:
class Graph:
    def __init__(self, vertices):
        self.adj = [[] for _ in range(vertices)]

    def add_edge(self, s, t):
        self.adj[s].append(t)
        self.adj[t].append(s)

    def dfs_rec(self, visited, s):
        visited[s] = True
        print(s, end=" ")

        for i in self.adj[s]:
            if not visited[i]:
                self.dfs_rec(visited, i)

    def dfs(self):
        visited = [False] * len(self.adj)

        # Loop through all vertices to handle disconnected graphs
        for i in range(len(self.adj)):
            if not visited[i]:
                self.dfs_rec(visited, i)


if __name__ == "__main__":
    # Number of vertices
    V = 5

    graph = Graph(V)

    # Define the edges of the graph
    edges = [(0, 1), (0, 4), (1, 2), (1, 4), (2, 3), (2, 4), (3, 4)]

    # Populate the adjacency list with edges
    for edge in edges:
        graph.add_edge(edge[0], edge[1])

    # Perform DFS
    print("DFS Traversal of given graph:")
    graph.dfs()
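Side note (added, not from the original file): the traversal above is recursive, so very deep graphs can hit Python's recursion limit. An equivalent iterative version using an explicit stack is a common alternative; the sketch below, with the illustrative name dfs_iterative, is one such variant and is not the lab's own method.

# Iterative DFS sketch using an explicit stack (illustrative alternative)
def dfs_iterative(adj, start):
    visited = [False] * len(adj)
    stack = [start]
    while stack:
        v = stack.pop()
        if not visited[v]:
            visited[v] = True
            print(v, end=" ")
            # push neighbours in reverse so they come off the stack in adjacency-list order
            for u in reversed(adj[v]):
                if not visited[u]:
                    stack.append(u)

if __name__ == "__main__":
    print("\nIterative DFS from vertex 0:")
    dfs_iterative(graph.adj, 0)  # reuses the Graph instance built above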

OUTPUT:

EXPERIMENT NO.-6

AIM:
Write a Program to Implement the A* algorithm.

PROGRAM:
import math
import heapq


class Cell:
    def __init__(self):
        self.parent_i = 0
        self.parent_j = 0
        # Total cost of the cell (g + h)
        self.f = float('inf')
        # Cost from start to this cell
        self.g = float('inf')
        # Heuristic cost from this cell to destination
        self.h = 0


# Define the size of the grid
ROW = 4
COL = 8


def is_valid(row, col):
    return (row >= 0) and (row < ROW) and (col >= 0) and (col < COL)


def is_unblocked(grid, row, col):
    return grid[row][col] == 1


def is_destination(row, col, dest):
    return row == dest[0] and col == dest[1]


def calculate_h_value(row, col, dest):
    return ((row - dest[0]) ** 2 + (col - dest[1]) ** 2) ** 0.5


def trace_path(cell_details, dest):
    print("The Path is ")
    path = []
    row = dest[0]
    col = dest[1]

    # Trace the path from destination to source using parent cells
    while not (cell_details[row][col].parent_i == row and cell_details[row][col].parent_j == col):
        path.append((row, col))
        temp_row = cell_details[row][col].parent_i
        temp_col = cell_details[row][col].parent_j
        row = temp_row
        col = temp_col

    # Add the source cell to the path
    path.append((row, col))
    # Reverse the path to get the path from source to destination
    path.reverse()

    # Print the path
    for i in path:
        print("->", i, end=" ")
    print()


def a_star_search(grid, src, dest):
    # Check if the source and destination are valid
    if not is_valid(src[0], src[1]) or not is_valid(dest[0], dest[1]):
        print("Source or destination is invalid")
        return

    # Check if the source and destination are unblocked
    if not is_unblocked(grid, src[0], src[1]) or not is_unblocked(grid, dest[0], dest[1]):
        print("Source or the destination is blocked")
        return

    # Check if we are already at the destination
    if is_destination(src[0], src[1], dest):
        print("We are already at the destination")
        return

    # Initialize the closed list (visited cells)
    closed_list = [[False for _ in range(COL)] for _ in range(ROW)]

    # Initialize the details of each cell
    cell_details = [[Cell() for _ in range(COL)] for _ in range(ROW)]

    # Initialize the start cell details
    i = src[0]
    j = src[1]
    cell_details[i][j].f = 0
    cell_details[i][j].g = 0
    cell_details[i][j].h = 0
    cell_details[i][j].parent_i = i
    cell_details[i][j].parent_j = j

    # Initialize the open list (cells to be visited) with the start cell
    open_list = []
    heapq.heappush(open_list, (0.0, i, j))

    # Initialize the flag for whether destination is found
    found_dest = False

    # Main loop of A* search algorithm
    while len(open_list) > 0:
        # Pop the cell with the smallest f value from the open list
        p = heapq.heappop(open_list)

        # Mark the cell as visited
        i = p[1]
        j = p[2]
        closed_list[i][j] = True

        # For each direction, check the successors
        directions = [(0, 1), (0, -1), (1, 0), (-1, 0),
                      (1, 1), (1, -1), (-1, 1), (-1, -1)]
        for dir in directions:
            new_i = i + dir[0]
            new_j = j + dir[1]

            # If the successor is valid, unblocked, and not visited
            if is_valid(new_i, new_j) and is_unblocked(grid, new_i, new_j) and not closed_list[new_i][new_j]:
                # If the successor is the destination
                if is_destination(new_i, new_j, dest):
                    # Set the parent of the destination cell
                    cell_details[new_i][new_j].parent_i = i
                    cell_details[new_i][new_j].parent_j = j
                    print("The destination cell is found")
                    # Trace and print the path from source to destination
                    trace_path(cell_details, dest)
                    found_dest = True
                    return
                else:
                    # Calculate the new f, g, and h values
                    g_new = cell_details[i][j].g + 1.0
                    h_new = calculate_h_value(new_i, new_j, dest)
                    f_new = g_new + h_new

                    # If the cell is not in the open list or the new f value is smaller
                    if cell_details[new_i][new_j].f == float('inf') or cell_details[new_i][new_j].f > f_new:
                        # Add the cell to the open list
                        heapq.heappush(open_list, (f_new, new_i, new_j))
                        # Update the cell details
                        cell_details[new_i][new_j].f = f_new
                        cell_details[new_i][new_j].g = g_new
                        cell_details[new_i][new_j].h = h_new
                        cell_details[new_i][new_j].parent_i = i
                        cell_details[new_i][new_j].parent_j = j

    # If the destination is not found after visiting all cells
    if not found_dest:
        print("Failed to find the destination cell")


def main():
    # Define the grid (1 for unblocked, 0 for blocked)
    grid = [
        [1, 1, 1, 1, 0, 0, 0, 0],
        [1, 1, 1, 1, 1, 0, 0, 0],
        [0, 0, 1, 0, 0, 1, 1, 1],
        [0, 1, 1, 1, 1, 1, 1, 1]
    ]

    # Define the source and destination
    src = [0, 0]
    dest = [3, 7]

    # Run the A* search algorithm
    a_star_search(grid, src, dest)


if __name__ == "__main__":
    main()
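One clarification worth adding (not in the original write-up): calculate_h_value is the straight-line Euclidean distance, while every step, including diagonal steps, is charged a uniform cost of 1.0 in g_new. For the source (0, 0) and destination (3, 7) used above, the heuristic at the start is sqrt(3**2 + 7**2) = sqrt(58) ≈ 7.62.

# Illustrative evaluation of the heuristic at the start cell
# (assumes calculate_h_value from the program above is in scope)
h_start = calculate_h_value(0, 0, [3, 7])
print("h(start) =", round(h_start, 2))  # sqrt(9 + 49) ≈ 7.62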

OUTPUT:

EXPERIMENT NO.-7

AIM:
Write a program to demonstrate the working of the decision tree-based ID3 algorithm. Use an
appropriate data set for building the decision tree and apply this knowledge to
classify a new sample.

PROGRAM:
import numpy as np
import math
import csv


def read_data(filename):
    with open(filename, 'r') as csvfile:
        datareader = csv.reader(csvfile, delimiter=',')
        headers = next(datareader)
        metadata = []
        traindata = []
        for name in headers:
            metadata.append(name)
        for row in datareader:
            traindata.append(row)

    return (metadata, traindata)


class Node:
    def __init__(self, attribute):
        self.attribute = attribute
        self.children = []
        self.answer = ""

    def __str__(self):
        return self.attribute


def subtables(data, col, delete):
    dict = {}
    items = np.unique(data[:, col])
    count = np.zeros((items.shape[0], 1), dtype=np.int32)

    for x in range(items.shape[0]):
        for y in range(data.shape[0]):
            if data[y, col] == items[x]:
                count[x] += 1

    for x in range(items.shape[0]):
        dict[items[x]] = np.empty((int(count[x]), data.shape[1]), dtype="|S32")
        pos = 0
        for y in range(data.shape[0]):
            if data[y, col] == items[x]:
                dict[items[x]][pos] = data[y]
                pos += 1
        if delete:
            dict[items[x]] = np.delete(dict[items[x]], col, 1)

    return items, dict


def entropy(S):
    items = np.unique(S)

    if items.size == 1:
        return 0

    counts = np.zeros((items.shape[0], 1))
    sums = 0

    for x in range(items.shape[0]):
        counts[x] = sum(S == items[x]) / (S.size * 1.0)

    for count in counts:
        sums += -1 * count * math.log(count, 2)
    return sums


def gain_ratio(data, col):
    items, dict = subtables(data, col, delete=False)

    total_size = data.shape[0]
    entropies = np.zeros((items.shape[0], 1))
    intrinsic = np.zeros((items.shape[0], 1))

    for x in range(items.shape[0]):
        ratio = dict[items[x]].shape[0] / (total_size * 1.0)
        entropies[x] = ratio * entropy(dict[items[x]][:, -1])
        intrinsic[x] = ratio * math.log(ratio, 2)

    total_entropy = entropy(data[:, -1])
    iv = -1 * sum(intrinsic)

    for x in range(entropies.shape[0]):
        total_entropy -= entropies[x]

    return total_entropy / iv


def create_node(data, metadata):
    if (np.unique(data[:, -1])).shape[0] == 1:
        node = Node("")
        node.answer = np.unique(data[:, -1])[0]
        return node

    gains = np.zeros((data.shape[1] - 1, 1))

    for col in range(data.shape[1] - 1):
        gains[col] = gain_ratio(data, col)

    split = np.argmax(gains)

    node = Node(metadata[split])
    metadata = np.delete(metadata, split, 0)

    items, dict = subtables(data, split, delete=True)

    for x in range(items.shape[0]):
        child = create_node(dict[items[x]], metadata)
        node.children.append((items[x], child))

    return node


def empty(size):
    s = ""
    for x in range(size):
        s += " "
    return s


def print_tree(node, level):
    if node.answer != "":
        print(empty(level), node.answer)
        return
    print(empty(level), node.attribute)
    for value, n in node.children:
        print(empty(level + 1), value)
        print_tree(n, level + 2)


metadata, traindata = read_data("data.csv")
data = np.array(traindata)
node = create_node(data, metadata)
print_tree(node, 0)
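The aim asks that the learned tree also be used to classify a new sample, which the listing above does not show. A minimal sketch is given below; it assumes data.csv is a play-tennis style table whose last column is the class label, and the classify helper and the new_sample dictionary are illustrative names, not part of the original program.

# Sketch: classify a new sample by walking the learned tree
# (assumes Node and the root 'node' built above; deeper branch values are byte
#  strings because subtables builds arrays with dtype="|S32")
def classify(tree_node, sample):
    # Leaf node: return the stored class label
    if tree_node.answer != "":
        answer = tree_node.answer
        return answer.decode() if isinstance(answer, bytes) else str(answer)
    value = sample[tree_node.attribute]
    for branch_value, child in tree_node.children:
        key = branch_value.decode() if isinstance(branch_value, bytes) else str(branch_value)
        if key == value:
            return classify(child, sample)
    return None  # attribute value not seen during training

# Hypothetical new sample using play-tennis style attribute names and values
new_sample = {"Outlook": "Sunny", "Temperature": "Cool",
              "Humidity": "Normal", "Windy": "Weak"}
print("Predicted class:", classify(node, new_sample))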

OUTPUT:

EXPERIMENT NO.-8

AIM:
Write a program to implement the naïve Bayesian classifier algorithm.

PROGRAM:
import pandas as pd
from sklearn import tree
from sklearn.preprocessing import LabelEncoder
from sklearn.naive_bayes import GaussianNB

# load data from CSV
data = pd.read_csv('data.csv')
print("The first 5 values of data is :\n", data.head())

# obtain Train data and Train output
X = data.iloc[:, :-1]
print("\nThe First 5 values of train data is\n", X.head())
y = data.iloc[:, -1]
print("\nThe first 5 values of Train output is\n", y.head())

# Convert them in numbers
le_outlook = LabelEncoder()
X.Outlook = le_outlook.fit_transform(X.Outlook)
le_Temperature = LabelEncoder()
X.Temperature = le_Temperature.fit_transform(X.Temperature)
le_Humidity = LabelEncoder()
X.Humidity = le_Humidity.fit_transform(X.Humidity)
le_Windy = LabelEncoder()
X.Windy = le_Windy.fit_transform(X.Windy)

print("\nNow the Train data is :\n", X.head())

le_PlayTennis = LabelEncoder()
y = le_PlayTennis.fit_transform(y)
print("\nNow the Train output is\n", y)

from sklearn.model_selection import train_test_split

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.20)

classifier = GaussianNB()
classifier.fit(X_train, y_train)

from sklearn.metrics import accuracy_score

print("Accuracy is:", accuracy_score(classifier.predict(X_test), y_test))

OUTPUT:

EXPERIMENT NO.-9

AIM:
Write a program to implement the k-Nearest Neighbour algorithm.

PROGRAM:

from sklearn.datasets import load_iris
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import train_test_split
import numpy as np

dataset = load_iris()
X_train, X_test, y_train, y_test = train_test_split(dataset["data"], dataset["target"], random_state=0)

kn = KNeighborsClassifier(n_neighbors=1)
kn.fit(X_train, y_train)

for i in range(len(X_test)):
    x = X_test[i]
    x_new = np.array([x])
    prediction = kn.predict(x_new)
    print("TARGET=", y_test[i], dataset["target_names"][y_test[i]],
          "PREDICTED=", prediction, dataset["target_names"][prediction])

print(kn.score(X_test, y_test))
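Optional follow-up (not part of the lab file): n_neighbors=1 is the simplest choice; a short loop over a few values of k shows how the held-out accuracy varies and is a common way to pick k.

# Sketch: compare a few values of k on the same train/test split (illustrative)
for k in (1, 3, 5, 7):
    model = KNeighborsClassifier(n_neighbors=k)
    model.fit(X_train, y_train)
    print("k =", k, "accuracy =", model.score(X_test, y_test))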

OUTPUT:

EXPERIMENT NO.-10

AIM:
Build an Artificial Neural Network by implementing the Backpropagation
algorithm and test the same using appropriate data sets.

PROGRAM:
import numpy as np

X = np.array(([2, 9], [1, 5], [3, 6]), dtype=float)  # X = (hours sleeping, hours studying)
y = np.array(([92], [86], [89]), dtype=float)  # y = score on test
# scale units
X = X / np.amax(X, axis=0)  # maximum of X array
y = y / 100  # max test score is 100


class Neural_Network(object):
    def __init__(self):
        # Parameters
        self.inputSize = 2
        self.outputSize = 1
        self.hiddenSize = 3
        # Weights
        self.W1 = np.random.randn(self.inputSize, self.hiddenSize)  # (2x3) weight matrix from input to hidden layer
        self.W2 = np.random.randn(self.hiddenSize, self.outputSize)  # (3x1) weight matrix from hidden to output layer

    def forward(self, X):
        # forward propagation through our network
        self.z = np.dot(X, self.W1)  # dot product of X (input) and first set of 2x3 weights
        self.z2 = self.sigmoid(self.z)  # activation function
        self.z3 = np.dot(self.z2, self.W2)  # dot product of hidden layer (z2) and second set of 3x1 weights
        o = self.sigmoid(self.z3)  # final activation function
        return o

    def sigmoid(self, s):
        return 1 / (1 + np.exp(-s))  # activation function

    def sigmoidPrime(self, s):
        return s * (1 - s)  # derivative of sigmoid

    def backward(self, X, y, o):
        # backward propagate through the network
        self.o_error = y - o  # error in output
        self.o_delta = self.o_error * self.sigmoidPrime(o)  # applying derivative of sigmoid to output error
        self.z2_error = self.o_delta.dot(self.W2.T)  # z2 error: how much our hidden layer weights contributed to output error
        self.z2_delta = self.z2_error * self.sigmoidPrime(self.z2)  # applying derivative of sigmoid to z2 error
        self.W1 += X.T.dot(self.z2_delta)  # adjusting first set (input --> hidden) weights
        self.W2 += self.z2.T.dot(self.o_delta)  # adjusting second set (hidden --> output) weights

    def train(self, X, y):
        o = self.forward(X)
        self.backward(X, y, o)


NN = Neural_Network()
print("\nInput: \n" + str(X))
print("\nActual Output: \n" + str(y))
print("\nPredicted Output: \n" + str(NN.forward(X)))
print("\nLoss: \n" + str(np.mean(np.square(y - NN.forward(X)))))  # mean squared loss
NN.train(X, y)
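Worth noting (added, not from the original): the listing calls train only once, so the weights receive a single gradient update. In practice the forward/backward pair is repeated for many epochs; a minimal loop under that assumption is sketched below, with the epoch count chosen arbitrarily.

# Sketch: repeat the forward/backward pass for several epochs (illustrative)
for epoch in range(1000):
    NN.train(X, y)

print("\nLoss after 1000 training steps: \n" + str(np.mean(np.square(y - NN.forward(X)))))
print("\nPredicted Output after training: \n" + str(NN.forward(X)))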

OUTPUT:
