AIML LAB Final
1. Implementation of A* Algorithm
class Graph:
    def __init__(self, adjac_lis):
        self.adjac_lis = adjac_lis

    def get_neighbours(self, v):
        return self.adjac_lis[v]

    # Heuristic function: estimated cost from node n to the goal
    def h(self, n):
        H = {'A': 1, 'B': 1, 'C': 1, 'D': 1}
        return H[n]

    def a_star_algorithm(self, start, stop):
        open_lst = set([start])
        closed_lst = set([])
        dist = {}              # g(n): cheapest known cost from start to n
        dist[start] = 0
        prenode = {}           # parent map used to reconstruct the path
        prenode[start] = start
        while len(open_lst) > 0:
            n = None
            # Pick the open node with the lowest f(n) = g(n) + h(n)
            for v in open_lst:
                if n is None or dist[v] + self.h(v) < dist[n] + self.h(n):
                    n = v
            if n is None:
                print("Path does not exist")
                return None
            # Goal reached: walk the parent map back to the start
            if n == stop:
                reconst_path = []
                while prenode[n] != n:
                    reconst_path.append(n)
                    n = prenode[n]
                reconst_path.append(start)
                reconst_path.reverse()
                print("path found:{}".format(reconst_path))
                return reconst_path
            for (m, weight) in self.get_neighbours(n):
                if m not in open_lst and m not in closed_lst:
                    open_lst.add(m)
                    prenode[m] = n
                    dist[m] = dist[n] + weight
                else:
                    # Cheaper route to m found: update it and reopen if closed
                    if dist[m] > dist[n] + weight:
                        dist[m] = dist[n] + weight
                        prenode[m] = n
                        if m in closed_lst:
                            closed_lst.remove(m)
                            open_lst.add(m)
            open_lst.remove(n)
            closed_lst.add(n)
        print("Path does not exist")
        return None

adjac_lis = {'A': [('B', 1), ('C', 3), ('D', 7)], 'B': [('D', 5)], 'C': [('D', 12)]}
graph1 = Graph(adjac_lis)
graph1.a_star_algorithm('A', 'D')
Output
path found:['A', 'B', 'D']
['A', 'B', 'D']
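Note that the heuristic above is constant (h(n) = 1 for every node), so the f(n) = g(n) + h(n) ordering reduces to plain cheapest-cost-first search. A minimal sketch of a non-constant replacement for h(), with illustrative (assumed) estimates that never overestimate the true remaining cost to 'D', which keeps the search admissible and therefore optimal:

    def h(self, n):
        # Illustrative admissible estimates toward goal 'D' (assumed values):
        # each one is <= the true cheapest remaining cost in the graph above.
        H = {'A': 3, 'B': 2, 'C': 4, 'D': 0}
        return H[n]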
2. Implementation of AO* Algorithm
def recAOStar(n):
    global finalPath
    print("Expanding Node:", n)
    and_nodes = []
    or_nodes = []
    if n in allNodes:
        if 'AND' in allNodes[n]:
            and_nodes = allNodes[n]['AND']
        if 'OR' in allNodes[n]:
            or_nodes = allNodes[n]['OR']
    if len(and_nodes) == 0 and len(or_nodes) == 0:
        return
    solvable = False
    marked = {}
    while not solvable:
        # Every successor group tried already: settle for the cheapest overall
        if len(marked) == len(and_nodes) + len(or_nodes):
            min_cost_least, min_cost_group_least = least_cost_group(and_nodes, or_nodes, {})
            solvable = True
            change_heuristic(n, min_cost_least)
            optimal_child_group[n] = min_cost_group_least
            continue
        min_cost, min_cost_group = least_cost_group(and_nodes, or_nodes, marked)
        is_expanded = False
        if len(min_cost_group) > 1:
            # AND group: expand both children if they have successors
            if min_cost_group[0] in allNodes:
                is_expanded = True
                recAOStar(min_cost_group[0])
            if min_cost_group[1] in allNodes:
                is_expanded = True
                recAOStar(min_cost_group[1])
        else:
            if min_cost_group in allNodes:
                is_expanded = True
                recAOStar(min_cost_group)
        if is_expanded:
            # Re-evaluate after expansion; accept only if still the cheapest group
            min_cost_verify, min_cost_group_verify = least_cost_group(and_nodes, or_nodes, {})
            if min_cost_group == min_cost_group_verify:
                solvable = True
                change_heuristic(n, min_cost_verify)
                optimal_child_group[n] = min_cost_group
        else:
            # Children are leaves: the current estimate is final
            solvable = True
            change_heuristic(n, min_cost)
            optimal_child_group[n] = min_cost_group
        marked[min_cost_group] = 1
    return heuristic(n)
def least_cost_group(and_nodes, or_nodes, marked):
    node_wise_cost = {}
    for node_pair in and_nodes:
        if not node_pair[0] + node_pair[1] in marked:
            cost = 0
            cost = cost + heuristic(node_pair[0]) + heuristic(node_pair[1]) + 2
            node_wise_cost[node_pair[0] + node_pair[1]] = cost
    for node in or_nodes:
        if not node in marked:
            cost = 0
            cost = cost + heuristic(node) + 1
            node_wise_cost[node] = cost
    min_cost = 999999
    min_cost_group = None
    for costKey in node_wise_cost:
        if node_wise_cost[costKey] < min_cost:
            min_cost = node_wise_cost[costKey]
            min_cost_group = costKey
    return [min_cost, min_cost_group]

def heuristic(n):
    return H_dist[n]

def change_heuristic(n, cost):
    H_dist[n] = cost
    return
def print_path(node):
    print(optimal_child_group[node], end="")
    node = optimal_child_group[node]
    if len(node) > 1:
        if node[0] in optimal_child_group:
            print("->", end="")
            print_path(node[0])
        if node[1] in optimal_child_group:
            print("->", end="")
            print_path(node[1])
    else:
        if node in optimal_child_group:
            print("->", end="")
            print_path(node)
H_dist = {
'A': -1,
'B': 4,
'C': 2,
'D': 3,
'E': 6,
'F': 8,
'G': 2,
'H': 0,
'I': 0,
'J': 0
}
allNodes = {
    'A': {'AND': [('C', 'D')], 'OR': ['B']},
    'B': {'OR': ['E', 'F']},
    'C': {'OR': ['G'], 'AND': [('H', 'I')]},
    'D': {'OR': ['J']}
}
optimal_child_group = {}
optimal_cost = recAOStar('A')
print('Nodes which gives optimal cost are')
print_path('A')
print('\nOptimal Cost is :: ', optimal_cost)
Output
Expanding Node: A
Expanding Node: B
Expanding Node: C
Expanding Node: D
Nodes which gives optimal cost are
CD->HI->J
Optimal Cost is :: 5
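Each candidate group's cost is the sum of its children's current heuristic values plus one unit per connecting edge: two for an AND pair, one for a lone OR child. With the initial H_dist above, A's groups evaluate to cost(C AND D) = 2 + 3 + 2 = 7 and cost(B) = 4 + 1 = 5, which is why B is expanded first. A quick check of this arithmetic (run it before recAOStar('A'), since the search overwrites entries in H_dist):

    # Evaluate A's successor groups against the initial heuristic values
    print(least_cost_group([('C', 'D')], ['B'], {}))   # -> [5, 'B']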
3. For A Given Set Of Training Data Examples Stored In A .Csv File,
Implement And Demonstrate The Candidate-Elimination Algorithm
To Output A Description Of The Set Of All Hypotheses Consistent With The Training Examples:
import numpy as np
import pandas as pd
# Loading Data from a CSV File
data = pd.read_csv('trainingdata.csv')
print(data)
# Separating concept features from Target
concepts = np.array(data.iloc[:,0:-1])
print(concepts)
# Isolating target into a separate DataFrame
# copying last column to target array
target = np.array(data.iloc[:,-1])
print(target)
def learn(concepts, target):
    '''
    learn() implements the learning phase of the Candidate-Elimination
    algorithm.
    Arguments:
        concepts - array holding all the attribute values
        target - array holding the corresponding output values
    '''
    # Initialise specific_h with the first instance from concepts.
    # .copy() makes sure a new array is created instead of just pointing
    # to the same memory location.
    specific_h = concepts[0].copy()
    print("\nInitialization of specific_h and general_h")
    print(specific_h)
    general_h = [["?" for i in range(len(specific_h))] for i in range(len(specific_h))]
    print(general_h)
    # The learning iterations
    for i, h in enumerate(concepts):
        # Positive example: generalise specific_h where it disagrees
        if target[i] == "Yes":
            for x in range(len(specific_h)):
                # Change values in S & G only if values change
                if h[x] != specific_h[x]:
                    specific_h[x] = '?'
                    general_h[x][x] = '?'
        # Negative example: specialise general_h where it disagrees
        if target[i] == "No":
            for x in range(len(specific_h)):
                # For a negative hypothesis, change values only in G
                if h[x] != specific_h[x]:
                    general_h[x][x] = specific_h[x]
                else:
                    general_h[x][x] = '?'
        print("\nSteps of Candidate Elimination Algorithm", i + 1)
        print(specific_h)
        print(general_h)
    # Find indices of rows that are all '?', i.e. those left unchanged
    indices = [i for i, val in enumerate(general_h) if val == ['?', '?', '?', '?', '?', '?']]
    for i in indices:
        # Remove those rows from general_h
        general_h.remove(['?', '?', '?', '?', '?', '?'])
    # Return final values
    return specific_h, general_h
s_final, g_final = learn(concepts, target)
print("\nFinal Specific_h:", s_final, sep="\n")
print("\nFinal General_h:", g_final, sep="\n")
Output
sky airTemp humidity wind water forecast enjoySport
0 Sunny Warm Normal Strong Warm Same Yes
1 Sunny Warm High Strong Warm Same Yes
2 Rainy Cold High Strong Warm Change No
3 Sunny Warm High Strong Cool Change Yes
[['Sunny' 'Warm' 'Normal' 'Strong' 'Warm' 'Same']
['Sunny' 'Warm' 'High' 'Strong' 'Warm' 'Same']
['Rainy' 'Cold' 'High' 'Strong' 'Warm' 'Change']
['Sunny' 'Warm' 'High' 'Strong' 'Cool' 'Change']]
['Yes' 'Yes' 'No' 'Yes']
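For reference, the trainingdata.csv behind the run above would look like the sketch below (reconstructed from the printed DataFrame; the actual file is not included in this manual):

    sky,airTemp,humidity,wind,water,forecast,enjoySport
    Sunny,Warm,Normal,Strong,Warm,Same,Yes
    Sunny,Warm,High,Strong,Warm,Same,Yes
    Rainy,Cold,High,Strong,Warm,Change,No
    Sunny,Warm,High,Strong,Cool,Change,Yes

4. Write A Program To Demonstrate The Working Of The Decision Tree Based ID3 Algorithm Using An Appropriate Data Set.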
def empty(size):
    s = ""
    for x in range(size):
        s += " "
    return s

def print_tree(node, level):
    # Leaf node: print the class label
    if node.answer != "":
        print(empty(level), node.answer)
        return
    # Internal node: print the attribute, then each value and its subtree
    print(empty(level), node.attribute)
    for value, n in node.children:
        print(empty(level + 1), value)
        print_tree(n, level + 2)

metadata, traindata = read_data("tennisdata.csv")
data = np.array(traindata)
node = create_node(data, metadata)
print_tree(node, 0)
Output
Outlook
 Overcast
   b'Yes'
 Rain
   Windy
    b'Strong'
      b'No'
    b'Weak'
      b'Yes'
 Sunny
   Humidity
    b'High'
      b'No'
    b'Normal'
      b'Yes'
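The listing above calls read_data() and create_node(), which are not defined anywhere in this manual. Below is a minimal sketch of what those helpers could look like: a standard ID3 implementation assuming tennisdata.csv has a header row with the class label in the last column. It is illustrative, not necessarily the code that produced the output above (the b'...' prefixes there come from reading the CSV as numpy byte strings, which this sketch does not do):

    import csv
    import math
    import numpy as np   # needed by the np.array(traindata) call in the listing

    class Node:
        def __init__(self, attribute):
            self.attribute = attribute
            self.children = []   # list of (attribute value, subtree) pairs
            self.answer = ""     # non-empty only at leaf nodes

    def read_data(csv_path):
        with open(csv_path, "r") as f:
            rows = list(csv.reader(f))
        metadata = rows[0]       # column names
        traindata = rows[1:]     # training instances
        return metadata, traindata

    def entropy(labels):
        # Shannon entropy of a list of class labels
        total = len(labels)
        counts = {}
        for lab in labels:
            counts[lab] = counts.get(lab, 0) + 1
        return -sum((c / total) * math.log2(c / total) for c in counts.values())

    def create_node(data, metadata):
        data = [list(row) for row in data]   # accept lists or numpy arrays
        labels = [row[-1] for row in data]
        # All instances share one label: make a leaf
        if len(set(labels)) == 1:
            node = Node("")
            node.answer = labels[0]
            return node
        # No attributes left: fall back to the majority label
        if len(metadata) == 1:
            node = Node("")
            node.answer = max(set(labels), key=labels.count)
            return node
        # Pick the attribute with the highest information gain
        base = entropy(labels)
        best_gain, best_col = -1.0, 0
        for col in range(len(metadata) - 1):
            remainder = 0.0
            for v in set(row[col] for row in data):
                subset = [row[-1] for row in data if row[col] == v]
                remainder += (len(subset) / len(data)) * entropy(subset)
            gain = base - remainder
            if gain > best_gain:
                best_gain, best_col = gain, col
        node = Node(metadata[best_col])
        # Split on the chosen attribute and recurse on each partition
        for v in sorted(set(row[best_col] for row in data)):
            subtable = [row[:best_col] + row[best_col + 1:] for row in data if row[best_col] == v]
            submeta = metadata[:best_col] + metadata[best_col + 1:]
            node.children.append((v, create_node(subtable, submeta)))
        return node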
5. Build An Artificial Neural Network By Implementing The
Backpropagation Algorithm And Test The Same Using Appropriate
Datasets.
import numpy as np

X = np.array(([2, 9], [1, 5], [3, 6]), dtype=float)  # X = (hours sleeping, hours studying)
y = np.array(([92], [86], [89]), dtype=float)        # y = score on test
# Scale units
X = X / np.amax(X, axis=0)  # divide by column-wise maximum of X
y = y / 100                 # max test score is 100

class Neural_Network(object):
    def __init__(self):
        # Parameters
        self.inputSize = 2
        self.outputSize = 1
        self.hiddenSize = 3
        # Weights
        self.W1 = np.random.randn(self.inputSize, self.hiddenSize)   # (2x3) weight matrix from input to hidden layer
        self.W2 = np.random.randn(self.hiddenSize, self.outputSize)  # (3x1) weight matrix from hidden to output layer

    def forward(self, X):
        # Forward propagation through our network
        self.z = np.dot(X, self.W1)         # dot product of X (input) and first set of 2x3 weights
        self.z2 = self.sigmoid(self.z)      # activation function
        self.z3 = np.dot(self.z2, self.W2)  # dot product of hidden layer (z2) and second set of 3x1 weights
        o = self.sigmoid(self.z3)           # final activation function
        return o

    def sigmoid(self, s):
        return 1 / (1 + np.exp(-s))  # activation function

    def sigmoidPrime(self, s):
        return s * (1 - s)  # derivative of sigmoid (s is already a sigmoid output)

    def backward(self, X, y, o):
        # Backward propagate through the network
        self.o_error = y - o  # error in output
        self.o_delta = self.o_error * self.sigmoidPrime(o)  # applying derivative of sigmoid to output error
        self.z2_error = self.o_delta.dot(self.W2.T)  # z2 error: how much the hidden layer weights contributed to output error
        self.z2_delta = self.z2_error * self.sigmoidPrime(self.z2)  # applying derivative of sigmoid to z2 error
        self.W1 += X.T.dot(self.z2_delta)       # adjusting first set (input --> hidden) weights
        self.W2 += self.z2.T.dot(self.o_delta)  # adjusting second set (hidden --> output) weights

    def train(self, X, y):
        o = self.forward(X)
        self.backward(X, y, o)

NN = Neural_Network()
print("\nInput: \n" + str(X))
print("\nActual Output: \n" + str(y))
print("\nPredicted Output: \n" + str(NN.forward(X)))
print("\nLoss: \n" + str(np.mean(np.square(y - NN.forward(X)))))  # mean squared loss
NN.train(X, y)
Output
Input:
[[0.66666667 1. ]
[0.33333333 0.55555556]
[1. 0.66666667]]
Actual Output:
[[0.92]
[0.86]
[0.89]]
Predicted Output:
[[0.66284228]
[0.66027834]
[0.59831052]]
Loss:
0.0637005291727847
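The listing performs only a single training step, so the printed prediction is essentially the untrained network's output. A minimal extension (the epoch count of 1000 is an assumed, illustrative value) that calls train() repeatedly and usually drives the loss far lower:

    NN = Neural_Network()
    for i in range(1000):  # epoch count is an assumption; tune as needed
        NN.train(X, y)
    print("Predicted Output after training:\n" + str(NN.forward(X)))
    print("Loss: " + str(np.mean(np.square(y - NN.forward(X)))))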
6. Write A Program To Implement The Naïve Bayesian Classifier For A
Sample Training Data Set Stored As A .Csv File. Compute The
Accuracy Of The Classifier, Considering A Few Test Data Sets.
# Import necessary libraries
import pandas as pd
from sklearn.preprocessing import LabelEncoder
from sklearn.naive_bayes import GaussianNB
# load data from CSV
data = pd.read_csv('tennisdata.csv')
print("THe first 5 values of data is :\n",data.head())
# obtain Train data and Train output
X = data.iloc[:,:-1].copy()  # .copy() avoids pandas chained-assignment warnings
print("\nThe First 5 values of train data is\n",X.head())
y = data.iloc[:,-1]
print("\nThe first 5 values of Train output is\n",y.head())
# Convert the categorical text values into numbers
le_outlook = LabelEncoder()
X.Outlook = le_outlook.fit_transform(X.Outlook)
le_Temperature = LabelEncoder()
X.Temperature = le_Temperature.fit_transform(X.Temperature)
le_Humidity = LabelEncoder()
X.Humidity = le_Humidity.fit_transform(X.Humidity)
le_Windy = LabelEncoder()
X.Windy = le_Windy.fit_transform(X.Windy)
print("\nNow the Train data is :\n",X.head())
le_PlayTennis = LabelEncoder()
y = le_PlayTennis.fit_transform(y)
print("\nNow the Train output is\n",y)
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X,y, test_size=0.20)
classifier = GaussianNB()
classifier.fit(X_train,y_train)
from sklearn.metrics import accuracy_score
print("Accuracy is:", accuracy_score(y_test, classifier.predict(X_test)))
Output
The first 5 values of data is :
Outlook Temperature Humidity Windy PlayTennis
0 Sunny Hot High False No
1 Sunny Hot High True No
2 Overcast Hot High False Yes
3 Rainy Mild High False Yes
4 Rainy Cool Normal False Yes
The First 5 values of train data is
Outlook Temperature Humidity Windy
0 Sunny Hot High False
1 Sunny Hot High True
2 Overcast Hot High False
3 Rainy Mild High False
4 Rainy Cool Normal False
The first 5 values of Train output is
0 No
1 No
2 Yes
3 Yes
4 Yes
Name: PlayTennis, dtype: object
Now the Train data is :
   Outlook  Temperature  Humidity  Windy
0        2            1         0      0
1        2            1         0      1
2        0            1         0      0
3        1            2         0      0
4        1            0         1      0
Now the Train output is
[0 0 1 1 1 0 1 0 1 1 1 1 1 0]
Accuracy is: 0.3333333333333333
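With only 14 instances, the 20% test split holds just 3 rows, so the reported accuracy swings widely from run to run. A small sketch (random_state=42 is an assumed value, not part of the lab code) that pins the split so the number is reproducible:

    # Fixing random_state makes train_test_split deterministic across runs
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.20, random_state=42)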