AD3311 Lab Manual
EX.No. 1.a Implement basic search strategies – 8-Puzzle Problem
DATE:
AIM
To implement basic search strategies – 8-Puzzle Problem.
ALGORITHM
1. The code starts by creating a Solution class and defining the method solve().
2. The method takes a board as its argument: a list of rows representing the positions on the board.
3. The board is flattened into a single tuple, and a dictionary is created that maps this starting state to depth 0.
4. The search then proceeds breadth-first: every state reachable in cnt moves is expanded to produce the states reachable in cnt + 1 moves, each recorded in the dictionary with its depth.
5. The loop runs over all states at the current depth until either the goal is reached or there are no more states left to expand (i.e., when len(current_nodes) == 0).
6. If no states remain, -1 is returned (the puzzle is unsolvable); otherwise, for each state the index of the blank tile (0) is stored in pos_0 so that the moves available from that state can be generated.
7. Successor states are produced by the find_next() function, which takes a single state as an argument and returns every state reachable from it in one move; this is what drives the BFS.
8. For example, if pos_0 = 1 (the top-middle cell of the 3x3 board), the blank can swap with the cells at indices 0, 2 and 4, i.e. moves[1] = [0, 2, 4].
9. Finally, the code prints the minimum number of moves needed to reach the goal state.
PROGRAM
class Solution:
    def solve(self, board):
        state_dict = {}  # Better name than 'dict' (which shadows the built-in dict type)
        flatten = []
        # Flatten the board into a 1D list and convert to tuple for immutability
        for i in range(len(board)):
            flatten += board[i]
        flatten = tuple(flatten)
        state_dict[flatten] = 0
        if flatten == (0, 1, 2, 3, 4, 5, 6, 7, 8):
            return 0
        return self.get_paths(state_dict)

    def get_paths(self, state_dict):
        cnt = 0
        while True:
            # Get all the nodes at the current depth level (cnt)
            current_nodes = [x for x in state_dict if state_dict[x] == cnt]
            if len(current_nodes) == 0:  # no unexplored states left: unsolvable
                return -1
            for node in current_nodes:
                next_moves = self.find_next(node)
                for move in next_moves:
                    if move not in state_dict:
                        state_dict[move] = cnt + 1
                    if move == (0, 1, 2, 3, 4, 5, 6, 7, 8):
                        return cnt + 1
            cnt += 1

    def find_next(self, node):
        # Adjacency of each cell index on the 3x3 board
        moves = {0: [1, 3], 1: [0, 2, 4], 2: [1, 5],
                 3: [0, 4, 6], 4: [1, 3, 5, 7], 5: [2, 4, 8],
                 6: [3, 7], 7: [4, 6, 8], 8: [5, 7]}
        results = []
        pos_0 = node.index(0)  # Find the position of the blank (0)
        # Generate possible next moves by swapping the blank with its valid neighbors
        for move in moves[pos_0]:
            new_node = list(node)
            new_node[move], new_node[pos_0] = new_node[pos_0], new_node[move]
            results.append(tuple(new_node))  # Convert back to tuple for immutability
        return results

# Example usage:
ob = Solution()
matrix = [
    [3, 1, 2],
    [4, 7, 5],
    [6, 8, 0]
]
print("NO OF MOVES==", ob.solve(matrix))
OUTPUT:
NO OF MOVES== 4
RESULT:
Thus the program to implement the 8-Puzzle search strategy is implemented and executed successfully.
EX.No. 1.b Implement basic search strategies – 8-Queens Problem
DATE:
AIM
To implement basic search strategies – 8-Queens Problem.
ALGORITHM
PROGRAM
def print_board(board):
    """Helper function to print the board."""
    for row in board:
        print(" ".join("Q" if col == 1 else "." for col in row))
    print()

def is_attacked(board, row, col, N):
    """Return True if a queen already on the board attacks square (row, col)."""
    # Check the row and the column
    for k in range(N):
        if board[row][k] == 1 or board[k][col] == 1:
            return True
    # Check diagonals
    for k in range(N):
        for l in range(N):
            if (k + l == row + col) or (k - l == row - col):
                if board[k][l] == 1:
                    return True
    return False

def N_queens(board, n, N):
    """Place the remaining n queens on the board using backtracking."""
    if n == 0:
        return True
    for i in range(N):
        for j in range(N):
            if not is_attacked(board, i, j, N) and board[i][j] != 1:
                board[i][j] = 1
                if N_queens(board, n - 1, N):
                    return True
                # Backtrack
                board[i][j] = 0
    return False

N = 8
board = [[0] * N for _ in range(N)]
if N_queens(board, N, N):
    print_board(board)
else:
    print("No solution exists.")
RESULT
Thus the program to implement the 8-Queens search strategy is implemented and executed successfully.
EX.No.3 Implement basic search strategies – Crypt arithmetic
DATE:
AIM
To implement basic search strategies – Crypt arithmetic.
ALGORITHM
PROGRAM
def solve(word1, word2, result):
    # Collect the distinct letters used in the puzzle
    letters = sorted(set(word1) | set(word2) | set(result))
    # If there are more than 10 unique letters, it's not possible to assign digits
    if len(letters) > 10:
        print('0 Solutions!')
        return
    solutions = []
    _solve(word1, word2, result, letters, {}, solutions)
if solutions:
print('\nSolutions:')
for soln in solutions:
print(f'{soln[0]}\t{soln[1]}')
else:
print('0 Solutions!')
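The recursive helper _solve is not shown in the extract. A minimal backtracking sketch matching the call above is given below; the exact record appended for each solution (the solved equation plus the letter-to-digit mapping) is an assumption.

def _solve(word1, word2, result, letters, assignment, solutions):
    # Assign a digit to the next unassigned letter; check the sum once all letters have digits
    if len(assignment) == len(letters):
        # Reject assignments that give any word a leading zero
        if 0 in (assignment[word1[0]], assignment[word2[0]], assignment[result[0]]):
            return
        to_num = lambda word: int(''.join(str(assignment[ch]) for ch in word))
        if to_num(word1) + to_num(word2) == to_num(result):
            solutions.append((f'{word1} + {word2} = {result}', dict(assignment)))
        return
    letter = letters[len(assignment)]
    for digit in range(10):
        if digit not in assignment.values():  # each digit may be used only once
            assignment[letter] = digit
            _solve(word1, word2, result, letters, assignment, solutions)
            del assignment[letter]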
if __name__ == '__main__':
print('CRYPTARITHMETIC PUZZLE SOLVER')
print('WORD1 + WORD2 = RESULT')
word1 = input('Enter WORD1: ').upper()
word2 = input('Enter WORD2: ').upper()
    result = input('Enter RESULT: ').upper()
    solve(word1, word2, result)
RESULT
Thus the program to implement the cryptarithmetic search strategy is implemented and executed successfully.
EX.No. 2 Implement A* Algorithm
DATE:
AIM
To Implement A* Algorithm.
ALGORITHM
PROGRAM
from queue import PriorityQueue

# Base Class
class State(object):
    def __init__(self, value, parent, start=0, goal=0):
self.children = []
self.parent = parent
self.value = value
self.dist = 0
if parent:
self.start = parent.start
self.goal = parent.goal
self.path = parent.path[:] # Copy the parent's path
self.path.append(value) # Add the current value to the path
else:
self.path = [value]
self.start = start
self.goal = goal
def GetDistance(self):
pass
def CreateChildren(self):
pass
# Subclass of State for string puzzles: rearrange the letters of the start word into the goal word
class State_String(State):
    def __init__(self, value, parent, start=0, goal=0):
        super(State_String, self).__init__(value, parent, start, goal)
        self.dist = self.GetDistance()

    def GetDistance(self):
if self.value == self.goal:
return 0
dist = 0
for i in range(len(self.goal)):
letter = self.goal[i]
if letter in self.value:
dist += abs(i - self.value.index(letter))
else:
dist += len(self.goal) # If letter is not found, add maximum possible distance
return dist
def CreateChildren(self):
if not self.children:
for i in range(len(self.goal) - 1):
val = self.value
# Swap adjacent letters
val = val[:i] + val[i+1] + val[i] + val[i+2:]
child = State_String(val, self) # Create new child state
self.children.append(child)
# A* Solver Class
class A_Star_Solver:
    def __init__(self, start, goal):
self.path = []
self.visitedQueue = []
self.priorityQueue = PriorityQueue()
self.start = start
self.goal = goal
def Solve(self):
startState = State_String(self.start, None, self.start, self.goal)
count = 0
self.priorityQueue.put((0, count, startState)) # Start state with priority 0
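        # Sketch of the search loop omitted from the extract (an assumption based on the
        # standard A* pattern this class follows): repeatedly pop the most promising state,
        # stop when a state with zero distance (the goal) is found, otherwise expand its
        # children into the priority queue.
        while not self.path and self.priorityQueue.qsize():
            closestChild = self.priorityQueue.get()[2]
            closestChild.CreateChildren()
            self.visitedQueue.append(closestChild.value)
            for child in closestChild.children:
                if child.value not in self.visitedQueue:
                    count += 1
                    if not child.dist:
                        self.path = child.path
                        break
                    self.priorityQueue.put((child.dist, count, child))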
if not self.path:
print("Goal is not possible: " + self.goal)
else:
print("Goal found! Path to solution:")
for i in range(len(self.path)):
print("{0}) {1}".format(i, self.path[i]))
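The driver code is missing from the extract; a minimal driver (the start and goal words are taken from the sample output below) could be:

if __name__ == "__main__":
    start1 = "secure"
    goal1 = "rescue"
    print("Starting....")
    a = A_Star_Solver(start1, goal1)
    a.Solve()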
OUTPUT
Starting....
0)secure
1)secrue
2)sercue
3)srecue
4)rsecue
5)rescue
RESULT
Thus the program to implement the A* Algorithm is implemented and executed successfully.
EX.No. Implement Minimax algorithm for game playing
DATE:
AIM
To implement the Minimax algorithm for game playing.
ALGORITHM
PROGRAM
MAX, MIN = 1000, -1000  # Stand-ins for +infinity and -infinity used by alpha-beta pruning

# Returns optimal value for the current player (initially called for the root and the maximizing player)
def minimax(depth, nodeIndex, maximizingPlayer, values, alpha, beta):
# Terminating condition. i.e, leaf node is reached
if depth == 3:
return values[nodeIndex]
if maximizingPlayer:
best = MIN
# Recur for left and right children (Maximizing player)
for i in range(2):
val = minimax(depth + 1, nodeIndex * 2 + i, False, values, alpha, beta)
best = max(best, val)
alpha = max(alpha, best)
# Alpha-Beta Pruning
if beta <= alpha:
break
return best
else:
best = MAX
# Recur for left and right children (Minimizing player)
for i in range(2):
val = minimax(depth + 1, nodeIndex * 2 + i, True, values, alpha, beta)
best = min(best, val)
beta = min(beta, best)
# Alpha-Beta Pruning
if beta <= alpha:
break
return best
# Driver Code
if __name__ == "__main__":
values = [3, 5, 6, 9, 1, 2, 0, -1] # Leaf node values
print("The optimal value is:", minimax(0, 0, True, values, MIN, MAX))
OUTPUT
The optimal value is: 5
RESULT
Thus the program to implement Minimax algorithm for game playing is implemented and executed
successfully
EX.No.4 Solve constraint satisfaction problems
DATE:
AIM
To solve constraint satisfaction problems.
ALGORITHM
PROGRAM
def backtrack(assignment):
    # Check if assignment is complete
    if len(assignment) == len(VARIABLES):
        return assignment
    var = select_unassigned_variable(assignment)
    # Try each value in the domain for the selected variable
    for value in DOMAINS:
        if consistent(var, value, assignment):
            assignment[var] = value
            result = backtrack(assignment)
            if result is not None:
                return result
            del assignment[var]  # Undo the assignment and backtrack
    return None

def select_unassigned_variable(assignment):
    # Select the next unassigned variable (can be optimized with MRV)
    for var in VARIABLES:
        if var not in assignment:
            return var
    return None
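The data and the consistency check that backtrack() relies on are not shown in the extract. A minimal sketch follows; the variable and domain names are taken from the sample output, while the CONSTRAINTS pairs and the consistent() helper are assumptions chosen so that a run reproduces that output.

VARIABLES = ["csc", "maths", "phy", "che", "tam", "eng", "bio"]
DOMAINS = ["Monday", "Tuesday", "Wednesday"]
# Pairs of subjects that must not be scheduled on the same day (assumed for illustration)
CONSTRAINTS = [
    ("csc", "maths"), ("csc", "phy"),
    ("maths", "che"), ("che", "eng"),
    ("phy", "eng"), ("tam", "eng"),
    ("tam", "bio"), ("eng", "bio"),
]

def consistent(var, value, assignment):
    # A value is consistent if no constrained neighbour already has the same day
    for (x, y) in CONSTRAINTS:
        if x == var and y in assignment and assignment[y] == value:
            return False
        if y == var and x in assignment and assignment[x] == value:
            return False
    return True

solution = backtrack({})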
print("Solution:")
print(solution)
OUTPUT
{'csc': 'Monday', 'maths': 'Tuesday', 'phy': 'Tuesday', 'che': 'Monday', 'tam': 'Monday', 'eng': 'Wednesday',
'bio': 'Tuesday'}
RESULT
Thus the program to solve constraint satisfaction problem is implemented and executed successfully.
EX.No. 5 Propositional Model Checking Algorithms
DATE:
AIM
ALGORITHM
1. The Literal class has attributes name and sign, which denote whether the literal is used positively or negatively.
2. The neg (__neg__) method returns a new literal with the same name as its parent literal but the opposite sign.
3. The repr (__repr__) method returns the literal's name as a string (prefixed with a negative sign when the literal is negative) whenever an instance of the literal is printed.
4. The CNFconvert function converts the KB from a list of sets to a list of lists for easier computing.
5. The VariableSet function finds all the literals used in the KB, to assist with running the DPLL algorithm.
6. The Negativeofx function returns the negative form of a literal, for use in the DPLL algorithm.
7. The pickX function picks a literal from the variable set and works with it as a node in the tree.
8. Next, define the functions splitFalseLiterals() and splitTrueLiteral(), which simplify the clause set under the assumption that the chosen literal is false or true, respectively.
9. Create the function dpll() that performs the dpll algorithm recursively.
10. Finally call the function to execute the code.
PROGRAM
import re
class Literal:
    def __init__(self, name, sign=True):
        self.name = str(name)
        self.sign = sign

    def __neg__(self):
        # Returns a new literal with the same name but the opposite sign
        return Literal(self.name, not self.sign)

    def __repr__(self):
        # String form of the literal: its name, prefixed with '-' when the sign is negative
        return str(self.name) if self.sign else '-' + str(self.name)
def CNFconvert(KB):
# Converts the KB from a list of sets to a list of lists for easier computing
storage = []
for i in KB:
i = list(i)
        i = list(map(str, i))  # Convert every literal to its string form ('A' or '-A')
storage.append(i)
return storage
def VariableSet(KB):
# Finds all the used literals in the KB
KB = CNFconvert(KB)
storage = []
for obj in KB:
for item in obj:
if item[0] == '-' and item[1:] not in storage:
storage.append(str(item[1:]))
elif item not in storage and item[0] != '-':
storage.append(str(item))
return storage
def Negativeofx(x):
# Holds the negative form of the literal
return str(x[1:]) if x[0] == '-' else '-' + str(x)
def unitResolution(clauses):
    literalholder = {}  # dictionary of unit literals and their forced truth values
    i = 0
    while i < len(clauses):
        newClauses = []
        clause = clauses[i]
        # A clause with a single literal is a unit clause
        if len(clause) == 1:
            literal = str(clause[0])
            pattern = re.match("-", literal)
            # Record the forced truth value of the literal
            if pattern:
                nx = literal[1:]
                literalholder[nx] = False
            else:
                nx = "-" + literal
                literalholder[literal] = True
            # Remove the complementary literal from the remaining clauses
            for item in clauses:
                if item != clauses[i]:
                    if nx in item:
                        item.remove(nx)
                    newClauses.append(item)
            i = 0
            clauses = newClauses
        else:
            i += 1
    return literalholder, clauses
def DPLL(KB):
KB = CNFconvert(KB)
varList = VariableSet(KB)
result = dpll(KB, varList)
if result == 'notsatisfiable':
return [False, {}]
else:
for i in varList:
if i in result and result[i] == True:
result[i] = 'true'
elif i in result and result[i] == False:
result[i] = 'false'
else:
result[i] = 'free'
return [True, result]
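The helper functions pickX, splitTrueLiteral, splitFalseLiterals and dpll called above are not included in the extract. A minimal recursive sketch consistent with steps 7-9 of the algorithm is given below; the branching order (true first, then false) is an assumption.

def pickX(literals, varList):
    # Pick the next variable to branch on: the first one not already forced by unit resolution
    for x in varList:
        if x not in literals:
            return x
    return None

def splitTrueLiteral(cnf, x):
    # Simplify the clause set under the assumption that x is true
    nx = Negativeofx(x)
    holder = []
    for item in cnf:
        if x in item:
            continue  # clause already satisfied
        holder.append([k for k in item if k != nx])
    return holder

def splitFalseLiterals(cnf, x):
    # Simplify the clause set under the assumption that x is false
    nx = Negativeofx(x)
    holder = []
    for item in cnf:
        if nx in item:
            continue  # clause already satisfied
        holder.append([k for k in item if k != x])
    return holder

def dpll(clauses, varList):
    # Recursive DPLL: unit resolution first, then branch on an unassigned variable
    literals, cnf = unitResolution(clauses)
    if cnf == []:
        return literals  # every clause is satisfied
    if [] in cnf:
        return 'notsatisfiable'  # an empty clause signals a contradiction
    x = pickX(literals, varList)
    if x is None:
        return 'notsatisfiable'
    rest = [v for v in varList if v != x]
    # Try x = True first, then x = False
    for split, value in ((splitTrueLiteral, True), (splitFalseLiterals, False)):
        res = dpll(split([list(c) for c in cnf], x), rest)
        if res != 'notsatisfiable':
            res[x] = value
            res.update(literals)
            return res
    return 'notsatisfiable'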
# Example usage
A = Literal('A')
B = Literal('B')
C = Literal('C')
D = Literal('D')
KB = [{A, B}, {A, -C}, {-A, B, D}]
print(DPLL(KB))
OUTPUT
RESULT
Thus the program to implement Propositional Model checking Algorithm is implemented and executed
successfully.
EX.No. 6 Implement Forward Chaining Algorithm
DATE:
AIM
To implement the Forward Chaining Algorithm.
ALGORITHM
PROGRAM
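The function main() below refers to database, knowbase and a display() helper that are not included in the extract; a minimal sketch of them, with contents inferred from the sample output, is:

database = ["Croaks", "Eat Flies", "Shrimps", "Sings"]
knowbase = ["Frog", "Canary", "Green", "Yellow"]

def display():
    print("\n X is \n1.Croaks \n2.Eat Flies \n3.shrimps \n4.Sings", end='')
    print("\n Select One ", end='')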
def main():
print("*-----Forward--Chaining ---- *", end='')
display()
x = int(input())
print(" \n", end='')
if x == 1 or x == 2:
print(" Chance Of Frog ", end='')
elif x == 3 or x == 4:
print(" Chance of Canary ", end='')
else:
        print("\n-------Invalid Option Selected---------", end='')
if x >= 1 and x <= 4:
print("\n X is ", end='')
print(database[x-1], end='')
print("\n Color Is 1.Green 2.Yellow", end='')
print("\n Select Option ", end='')
k = int(input())
        if k == 1 and (x == 1 or x == 2):  # frog (knowbase[0]) and green (knowbase[2])
print(" yes it is ", end='')
print(knowbase[0], end='')
print(" And Color Is ", end='')
print(knowbase[2], end='')
        elif k == 2 and (x == 3 or x == 4):  # canary (knowbase[1]) and yellow (knowbase[3])
print(" yes it is ", end='')
print(knowbase[1], end='')
print(" And Color Is ", end='')
print(knowbase[3], end='')
else:
print("\n---InValid Knowledge Database", end='')
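main() is defined above but never invoked in the extract; a call such as the following (an assumption) runs the program:

if __name__ == "__main__":
    main()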
OUTPUT
*-----Forward--Chaining ---- *
X is
1.Croaks
2.Eat Flies
3.shrimps
4.Sings
Select One 1
Chance Of Frog
X is Croaks
Color Is
1.Green
2.Yellow
Select Option 1
yes it is Frog And Color Is Green
RESULT
Thus the program to implement the Forward Chaining Algorithm is implemented and executed successfully.
EX.No. 7 Implement backward Chaining Algorithm
DATE:
AIM
To implement the Backward Chaining Algorithm.
ALGORITHM
1. The code starts with a function called display(), which prints the hypotheses "X is 1.frog 2.canary".
2. The user is then asked to select one of the two hypotheses.
3. The next line, x = int(input()), reads the value entered by the user into the variable x.
4. The if statement checks whether x equals 1 or 2.
5. If the user enters 1, the program prints "Chance of eating flies".
6. Otherwise, for 2, it prints "Chance of shrimping".
7. The program then asks for the colour (1. green or 2. yellow) and confirms the facts that support the chosen hypothesis, e.g. "yes it is in Green colour and will Croak".
PROGRAM
database = ["Frog", "Canary"]

def display():
    """Displays the hypotheses the user can choose from."""
    print("\n X is \n1.frog \n2.canary", end='')
    print("\n Select One ", end='')

def main():
    """Main function to drive the backward chaining logic."""
    print("*-----Backward--Chaining ---- *", end='')
    display()
    x = int(input())
    print(" \n", end='')
    if x == 1:
        print(" Chance Of eating flies ", end='')
    elif x == 2:
        print(" Chance of shrimping ", end='')
    else:
        print("\n-------Invalid Option Selected---------", end='')
        return
    print("\n X is ", end='')
    print(database[x - 1], end='')
    print("\n 1.green \n 2.yellow", end='')
    print("\n Select Option ", end='')
    k = int(input())
    if k == 1 and x == 1:  # frog and green
        print("\n yes it is in Green colour and will Croak ", end='')
    elif k == 2 and x == 2:  # canary and yellow
        print("\n yes it is in Yellow colour and will Sing ", end='')
    else:
        print("\n---Invalid Knowledge Database", end='')

if __name__ == "__main__":
    main()
OUTPUT
*-----Backward--Chaining ---- *
X is
1.frog
2.canary
Select One 1
Chance Of eating flies
X is Frog
1.green
2.yellow
1
yes it is in Green colour and will Croak
RESULT
Thus the program to implement backward chaining algorithm is implemented and executed successfully.
EX.No. 8 Implement Naïve Bayes Models
DATE:
AIM
To implement Naïve Bayes models.
ALGORITHM
PROGRAM
# Split the dataset into training and testing sets (1/3 for testing)
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.33)
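Only the train/test split appears in the extract. A self-contained sketch of the surrounding program is given below; using scikit-learn's Iris dataset with GaussianNB is an assumption, chosen because it is consistent with the three-class output shown.

from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn.naive_bayes import GaussianNB
from sklearn.metrics import confusion_matrix

# Load the Iris dataset (150 samples, 3 classes)
X, Y = load_iris(return_X_y=True)

# Split the dataset into training and testing sets (1/3 for testing)
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.33)

# Train a Gaussian Naive Bayes classifier and predict on the test set
model = GaussianNB()
model.fit(X_train, Y_train)
Y_pred = model.predict(X_test)

print("Predicted Values:")
print(Y_pred)
print("Actual Values:")
print(Y_test)
print("Confusion Matrix:")
print(confusion_matrix(Y_test, Y_pred))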
OUTPUT
Predicted Values:
[1 0 2 2 2 0 1 0 1 1 2 2 0 2 2 1 1 2 1 0 2 2 0 2 1 0 1 2 1 2 2 0 0 2 1 0 1
2 1 2 2 1 2 2 1 1 2 2 0 1]
Actual Values:
[1 0 2 2 2 0 1 0 1 1 2 2 0 2 2 1 1 2 1 0 2 2 0 2 2 0 1 1 1 1 2 0 0 2 1 0 1
2 1 2 2 1 2 2 2 1 2 2 0 1]
Confusion Matrix:
[[11 0 0]
[ 0 15 2]
[ 0 2 20]]
RESULT
Thus the program to implement the Naïve Bayes model is implemented and executed successfully.
EX.No. 9 Implement Bayesian Networks and perform inferences
DATE:
AIM:
To Implement Bayesian Networks and perform inferences.
ALGORITHM:
19. Next, it calculates the cross entropy loss between target and predicted values.
20. Then, it calculates the cost function which is then minimized using Adam optimizer.
21. Finally, it prints out the predicted value and total cost after every iteration of optimization process.
22. The code starts by defining a function called draw_graph that takes in a predicted value.
23. The code then creates two subplots on the same figure, one for the real labels and one for the predicted labels.
24. The first subplot is created with fig_1 and has an index of 1, which places it as the left-hand plot in the figure.
25. This plot scatters the first two input features of the data against each other.
26. The points are coloured according to the true class labels.
27. The second subplot is created with fig_2 and has an index of 2, which places it as the right-hand plot in the figure.
28. It shows the same scatter of features, but coloured according to the predicted class labels.
29. The surrounding code computes, at each iteration, the predicted labels together with the accuracy, cross-entropy and KL values.
30. The accuracy calculation uses target_tensor.size(0), which gives the total number of samples in the dataset.
31. Next, the plotting code draws a figure with two subplots: one for the real labels and one for the predicted labels.
32. The last lines print statistics showing how accurate the prediction was.
PROGRAM
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import torchbnn as bnn
import matplotlib.pyplot as plt
from sklearn import datasets
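The extract omits the data loading and the model definition. A minimal sketch follows; the Iris dataset and the two-layer torchbnn network below are assumptions consistent with the three-class output shown later.

# Load the Iris dataset: 4 input features, 3 classes
iris = datasets.load_iris()
data = iris.data
target = iris.target

# A small Bayesian neural network built from torchbnn Bayesian linear layers
model = nn.Sequential(
    bnn.BayesLinear(prior_mu=0, prior_sigma=0.1, in_features=4, out_features=100),
    nn.ReLU(),
    bnn.BayesLinear(prior_mu=0, prior_sigma=0.1, in_features=100, out_features=3),
)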
# Convert to tensors
data_tensor = torch.from_numpy(data).float()
target_tensor = torch.from_numpy(target).long()
# Loss functions
cross_entropy_loss = nn.CrossEntropyLoss()
klloss = bnn.BKLLoss(reduction='mean', last_layer_only=False)
# Hyperparameters
klweight = 0.01
optimizer = optim.Adam(model.parameters(), lr=0.01)
# Training loop (3000 steps chosen for illustration; the sample output reports progress ten times)
for step in range(3000):
    # Forward pass
    models = model(data_tensor)
    cross_entropy = cross_entropy_loss(models, target_tensor)
    kl = klloss(model)
    # Total loss
    total_cost = cross_entropy + klweight * kl
    # Zero the gradients, perform the backward pass, and update the weights
    optimizer.zero_grad()
    total_cost.backward()
    optimizer.step()
    # Calculate accuracy
    _, predicted = torch.max(models.data, 1)
    final = target_tensor.size(0)
    correct = (predicted == target_tensor).sum().item()
    accuracy = 100 * float(correct) / final
    if step % 300 == 0:
        print('- Accuracy: %.2f%%' % accuracy)
        print('- CE : %2.2f, KL : %2.2f' % (cross_entropy.item(), kl.item()))
# Plot the real labels and the predicted labels side by side
fig = plt.figure(figsize=(16, 5))

# Real data
fig_1 = fig.add_subplot(1, 2, 1)
z1_plot = fig_1.scatter(data[:, 0], data[:, 1], c=target, marker='v')
plt.colorbar(z1_plot, ax=fig_1)
fig_1.set_title("REAL")
# Predicted data
fig_2 = fig.add_subplot(1, 2, 2)
z2_plot = fig_2.scatter(data[:, 0], data[:, 1], c=predicted, marker='v')
plt.colorbar(z2_plot, ax=fig_2)
fig_2.set_title("PREDICT")
plt.show()
OUTPUT:
- Accuracy: 33.33%
- CE : 2.31, KL : 2.91
- Accuracy: 95.33%
- CE : 0.14, KL : 3.19
- Accuracy: 95.33%
- CE : 0.10, KL : 3.74
- Accuracy: 98.00%
- CE : 0.06, KL : 3.79
- Accuracy: 94.00%
- CE : 0.16, KL : 3.86
- Accuracy: 98.00%
- CE : 0.06, KL : 3.76
- Accuracy: 98.00%
- CE : 0.06, KL : 3.62
- Accuracy: 98.67%
- CE : 0.05, KL : 3.47
- Accuracy: 98.67%
- CE : 0.06, KL : 3.34
- Accuracy: 98.67%
- CE : 0.06, KL : 3.24
RESULT
Thus, the program to implement Bayesian Networks and perform inferences is implemented and executed
successfully.