AIML Lab Program

The document contains nine lab programs on artificial intelligence and machine learning algorithms: A* search for shortest paths in a graph, AO* search over AND-OR graphs, concept learning with the Candidate Elimination algorithm, decision-tree construction with ID3, neural-network training with backpropagation, a Gaussian Naive Bayes classifier, clustering with K-Means and Gaussian mixture models, K-nearest-neighbour classification on the Iris dataset, and locally weighted regression.


1st Pgm: (A*)

def aStarAlgo(start_node, stop_node):
    open_set = {start_node}          # nodes discovered but not yet expanded
    closed_set = set()               # nodes already expanded
    g = {}                           # cost from start to each node
    parents = {}                     # parent map for path reconstruction
    g[start_node] = 0
    parents[start_node] = start_node

    while len(open_set) > 0:
        n = None
        # pick the open node with the lowest f(n) = g(n) + h(n)
        for v in open_set:
            if n is None or g[v] + heuristic(v) < g[n] + heuristic(n):
                n = v
        if n == stop_node or Graph_nodes.get(n) is None:
            pass
        else:
            for (m, weight) in get_neighbours(n):
                if m not in open_set and m not in closed_set:
                    open_set.add(m)
                    parents[m] = n
                    g[m] = g[n] + weight
                else:
                    # a cheaper route to m was found: update it and reopen if needed
                    if g[m] > g[n] + weight:
                        g[m] = g[n] + weight
                        parents[m] = n
                        if m in closed_set:
                            closed_set.remove(m)
                            open_set.add(m)

        if n is None:
            print('Path does not exist!')
            return None

        if n == stop_node:
            # walk the parent map back from the goal to the start
            path = []
            while parents[n] != n:
                path.append(n)
                n = parents[n]
            path.append(start_node)
            path.reverse()
            print('Path found : {}'.format(path))
            return path

        open_set.remove(n)
        closed_set.add(n)

    print('Path does not exist!')
    return None

def get_neighbours(v):
    if v in Graph_nodes:
        return Graph_nodes[v]
    else:
        return None

def heuristic(n):
    H_dist = {
        'A': 14,   # corrected: the original listed 'S', a node the graph never uses
        'B': 12,
        'C': 11,
        'D': 6,
        'F': 11,
        'E': 4,
        'G': 0,
    }
    return H_dist[n]

Graph_nodes = {
    'A': [('B', 2), ('E', 3)],
    'B': [('A', 2), ('C', 1), ('G', 9)],
    'C': [('B', 1)],
    'D': [('E', 6), ('G', 1)],
    'E': [('A', 3), ('D', 6)],
    'G': [('B', 9), ('D', 1)],
}

aStarAlgo('A', 'G')
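
A quick sanity check (an addition, not part of the original lab): print f(n) = g(n) + h(n) for the start node's immediate neighbours, taking g as the direct edge weight out of 'A'. With the heuristic table above this prints B -> 14 and E -> 7, which is why the search expands E first.

# Sanity check: f(n) = g(n) + h(n) for A's direct neighbours.
for node, weight in Graph_nodes['A']:
    print(node, '->', weight + heuristic(node))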

2nd Pgm: (AO*)

class Graph:
    def __init__(self, graph, heuristicNodeList, startNode):
        self.graph = graph
        self.H = heuristicNodeList
        self.start = startNode
        self.parent = {}
        self.status = {}
        self.solutionGraph = {}

    def applyAOStar(self):
        self.aoStar(self.start, False)

    def getNeighbors(self, v):
        return self.graph.get(v, '')

    def getStatus(self, v):
        return self.status.get(v, 0)

    def setStatus(self, v, val):
        self.status[v] = val

    def getHeuristicNodeValue(self, n):
        return self.H.get(n, 0)

    def setHeuristicNodeValue(self, n, value):
        self.H[n] = value

    def printSolution(self):
        print('FOR GRAPH SOLUTION, TRAVERSE THE GRAPH FROM THE START NODE:', self.start)
        print('--------------------------------------------------------------------')
        print(self.solutionGraph)
        print('--------------------------------------------------------------------')

    def computeMinimumCostChildNodes(self, v):
        # each neighbour entry is an AND-arc: a list of (child, edge-cost) pairs
        minimumCost = 0
        costToChildNodeListDict = {}
        costToChildNodeListDict[minimumCost] = []
        flag = True
        for nodeInfoTupleList in self.getNeighbors(v):
            cost = 0
            nodeList = []
            for c, weight in nodeInfoTupleList:
                cost = cost + self.getHeuristicNodeValue(c) + weight
                nodeList.append(c)
            if flag:
                minimumCost = cost
                costToChildNodeListDict[minimumCost] = nodeList
                flag = False
            else:
                if minimumCost > cost:
                    minimumCost = cost
                    costToChildNodeListDict[minimumCost] = nodeList
        return minimumCost, costToChildNodeListDict[minimumCost]

    def aoStar(self, v, backTracking):
        print('HEURISTIC VALUES:', self.H)
        print('SOLUTION GRAPH:', self.solutionGraph)
        print('PROCESSING NODE:', v)
        print('-------------------------------------------------------')
        if self.getStatus(v) >= 0:
            minimumCost, childNodeList = self.computeMinimumCostChildNodes(v)
            self.setHeuristicNodeValue(v, minimumCost)
            self.setStatus(v, len(childNodeList))
            solved = True
            for childNode in childNodeList:
                self.parent[childNode] = v
                if self.getStatus(childNode) != -1:
                    solved = solved & False
            if solved:
                # status -1 marks a solved node
                self.setStatus(v, -1)
                self.solutionGraph[v] = childNodeList
            if v != self.start:
                # propagate the revised cost back up to the parent
                self.aoStar(self.parent[v], True)
            if not backTracking:
                for childNode in childNodeList:
                    self.setStatus(childNode, 0)
                    self.aoStar(childNode, False)

h1 = {'A': 1, 'B': 6, 'C': 2, 'D': 12, 'E': 2, 'F': 1, 'G': 5, 'H': 7, 'I': 7, 'J': 1}
graph1 = {
    'A': [[('B', 1), ('C', 1)], [('D', 1)]],
    'B': [[('G', 1)], [('H', 1)]],
    'C': [[('J', 1)]],
    'D': [[('E', 1), ('F', 1)]],
    'G': [[('I', 1)]]
}
G1 = Graph(graph1, h1, 'A')
G1.applyAOStar()
G1.printSolution()
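
A minimal sanity check (an addition, not in the original lab): recompute the cost of each AND-arc out of 'A' from the initial heuristic table, mirroring what computeMinimumCostChildNodes does. A fresh copy of the initial values is used because applyAOStar mutates h1 in place.

# Hypothetical check of the initial AND-arc costs out of 'A'.
h_initial = {'A': 1, 'B': 6, 'C': 2, 'D': 12, 'E': 2, 'F': 1, 'G': 5, 'H': 7, 'I': 7, 'J': 1}
for and_arc in graph1['A']:
    print([c for c, _ in and_arc], '->', sum(h_initial[c] + w for c, w in and_arc))
# Prints ['B', 'C'] -> 10 and ['D'] -> 13, so the B-C AND-arc is expanded first.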

3rd Pgm: (Candidate Elimination)

import numpy as np
import pandas as pd

data = pd.read_csv('dataset.csv')
concepts = np.array(data.iloc[:, 0:-1])
target = np.array(data.iloc[:, -1])

def learn(concepts, target):
    print("initialization of specific_h and general_h")
    l = len(concepts[0])
    specific_h = ['0'] * l
    print(specific_h)
    general_h = ['?'] * l
    print(general_h, "\n")
    # seed specific_h with the first training example; general_h starts fully general
    specific_h = concepts[0].copy()
    general_h = [["?" for i in range(l)] for i in range(l)]
    for i, h in enumerate(concepts):
        if target[i] == "Yes":
            # generalise specific_h just enough to cover the positive example
            for x in range(l):
                if h[x] != specific_h[x]:
                    specific_h[x] = '?'
                    general_h[x][x] = '?'
        if target[i] == "No":
            # specialise general_h just enough to exclude the negative example
            for x in range(l):
                if h[x] != specific_h[x]:
                    general_h[x][x] = specific_h[x]
                else:
                    general_h[x][x] = '?'
        print("Step", i + 1, "of Candidate Elimination Algorithm")
        print(specific_h)
        print(general_h)
    # drop hypotheses that remained fully general
    # (the original hardcoded six '?' entries; this works for any attribute count)
    general_h = [hyp for hyp in general_h if hyp != ['?'] * l]
    return specific_h, general_h

s_final, g_final = learn(concepts, target)
print("\nFinal Specific_h : ", s_final, sep="\n")
print("Final General_h : ", g_final, sep="\n")

4th Pgm: (Decision Tree, ID3)

import pandas as pd
import math
import numpy as np

data = pd.read_csv("tennis.csv")
features = [feat for feat in data]
features.remove("answer")

class Node:
    def __init__(self):
        self.children = []
        self.value = ""
        self.isLeaf = False
        self.pred = ""

def entropy(examples):
    pos = 0.0
    neg = 0.0
    for _, row in examples.iterrows():
        if row["answer"] == "yes":
            pos += 1
        else:
            neg += 1
    if pos == 0.0 or neg == 0.0:
        return 0.0
    else:
        p = pos / (pos + neg)
        n = neg / (pos + neg)
        return -(p * math.log(p, 2) + n * math.log(n, 2))

def info_gain(examples, attr):
    # gain = entropy(S) - sum over values v of |Sv|/|S| * entropy(Sv)
    uniq = np.unique(examples[attr])
    gain = entropy(examples)
    for u in uniq:
        subdata = examples[examples[attr] == u]
        sub_e = entropy(subdata)
        gain -= (float(len(subdata)) / float(len(examples))) * sub_e
    return gain

def ID3(examples, attrs):
    root = Node()
    max_gain = 0
    max_feat = ""
    # split on the attribute with the highest information gain
    for feature in attrs:
        gain = info_gain(examples, feature)
        if gain > max_gain:
            max_gain = gain
            max_feat = feature
    root.value = max_feat
    uniq = np.unique(examples[max_feat])
    for u in uniq:
        subdata = examples[examples[max_feat] == u]
        if entropy(subdata) == 0.0:
            # pure subset: make a leaf carrying the class prediction
            newnode = Node()
            newnode.isLeaf = True
            newnode.value = u
            newnode.pred = np.unique(subdata["answer"])
            root.children.append(newnode)
        else:
            dummynode = Node()
            dummynode.value = u
            new_attrs = attrs.copy()
            new_attrs.remove(max_feat)
            child = ID3(subdata, new_attrs)
            dummynode.children.append(child)
            root.children.append(dummynode)
    return root

def printtree(root: Node, depth=0):
    for i in range(depth):
        print("\t", end="")
    print(root.value, end="")
    if root.isLeaf:
        print("->", root.pred)
    print()
    for child in root.children:
        printtree(child, depth + 1)

def classify(root: Node, new):
    for child in root.children:
        if child.value == new[root.value]:
            if child.isLeaf:
                print("predicted label for new example", new, "is:", child.pred)
                return   # was a bare 'exit', which does nothing
            else:
                classify(child.children[0], new)

root = ID3(data, features)
print("decision tree is:")
printtree(root)
print("------------------")

new = {"outlook": "sunny", "temperature": "hot", "humidity": "high", "wind": "strong"}
classify(root, new)
new1 = {"outlook": "overcast", "temperature": "hot", "humidity": "high", "wind": "strong"}
classify(root, new1)
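
The script reads tennis.csv with an "answer" column holding "yes"/"no" and lowercase attribute values (the classify calls above use "sunny", "hot", "high", "strong"). The first rows of the classic play-tennis data in that assumed layout would look like:

outlook,temperature,humidity,wind,answer
sunny,hot,high,weak,no
sunny,hot,high,strong,no
overcast,hot,high,weak,yes
rainy,mild,high,weak,yes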

5th Pgm: (Backpropagation)

import numpy as np

x = np.array(([2, 9], [1, 5], [3, 6]), dtype=float)
y = np.array(([92], [86], [89]), dtype=float)
x = x / np.amax(x, axis=0)   # normalise features to [0, 1]
y = y / 100                  # normalise targets to [0, 1]

def sigmoid(x):
    return 1 / (1 + np.exp(-x))

def derivatives_sigmoid(x):
    # derivative of the sigmoid expressed in terms of its output
    return x * (1 - x)

epoch = 5000
lr = 0.1
inputlayer_neurons = 2
hiddenlayer_neurons = 3
output_neurons = 1

wh = np.random.uniform(size=(inputlayer_neurons, hiddenlayer_neurons))
bh = np.random.uniform(size=(1, hiddenlayer_neurons))
wout = np.random.uniform(size=(hiddenlayer_neurons, output_neurons))
bout = np.random.uniform(size=(1, output_neurons))

for i in range(epoch):
    # forward pass
    hinp = np.dot(x, wh) + bh
    hlayer_act = sigmoid(hinp)
    outinp = np.dot(hlayer_act, wout) + bout
    output = sigmoid(outinp)
    # backward pass: propagate the error through the two layers
    EO = y - output
    d_output = EO * derivatives_sigmoid(output)
    EH = d_output.dot(wout.T)
    d_hiddenlayer = EH * derivatives_sigmoid(hlayer_act)
    # weight updates (this lab version leaves the biases bh and bout fixed)
    wout += hlayer_act.T.dot(d_output) * lr
    wh += x.T.dot(d_hiddenlayer) * lr

print("Input : \n" + str(x))
print("Output : \n" + str(y))
print("Predicted Output : \n", output)

6th Pgm: (Naive Bayes)

import csv
import random
import math

def loadcsv(filename):
    reader = csv.reader(open(filename, "r"))
    dataset = []
    for row in reader:
        # every field is numeric, so the file must not have a header row
        dataset.append([float(value) for value in row])
    return dataset

def splitDataset(dataset, splitratio):
    trainSize = int(len(dataset) * splitratio)
    trainSet = []
    copy = list(dataset)
    while len(trainSet) < trainSize:
        index = random.randrange(len(copy))
        trainSet.append(copy.pop(index))
    return [trainSet, copy]

def separateByClass(dataset):
    # group rows by their class label (the last column)
    separated = {}
    for i in range(len(dataset)):
        vector = dataset[i]
        if vector[-1] not in separated:
            separated[vector[-1]] = []
        separated[vector[-1]].append(vector)
    return separated

def mean(numbers):
    return sum(numbers) / float(len(numbers))

def stdev(numbers):
    if len(numbers) == 0:
        return 0
    avg = mean(numbers)
    variance = sum([pow(x - avg, 2) for x in numbers]) / float(len(numbers) - 1)
    return math.sqrt(variance)

def summarize(dataset):
    # per-attribute (mean, stdev); drop the summary of the class column
    summaries = [(mean(attribute), stdev(attribute)) for attribute in zip(*dataset)]
    del summaries[-1]
    return summaries

def summarizeByClass(dataset):
    separated = separateByClass(dataset)
    summaries = {}
    for classValue, instances in separated.items():
        summaries[classValue] = summarize(instances)
    return summaries

def calculateProbability(x, mean, stdev):
    # Gaussian probability density of x under N(mean, stdev^2)
    exponent = math.exp(-(math.pow(x - mean, 2) / (2 * math.pow(stdev, 2))))
    return (1 / (math.sqrt(2 * math.pi) * stdev)) * exponent

def calculateClassProbabilities(summaries, inputVector):
    # naive-Bayes assumption: multiply the per-attribute densities per class
    probabilities = {}
    for classValue, classSummaries in summaries.items():
        probabilities[classValue] = 1
        for i in range(len(classSummaries)):
            mean, stdev = classSummaries[i]
            x = inputVector[i]
            probabilities[classValue] *= calculateProbability(x, mean, stdev)
    return probabilities

def predict(summaries, inputVector):
    probabilities = calculateClassProbabilities(summaries, inputVector)
    bestLabel, bestprob = None, -1
    for classValue, probability in probabilities.items():
        if bestLabel is None or probability > bestprob:
            bestprob = probability
            bestLabel = classValue
    return bestLabel

def getpredictions(summaries, testSet):   # parameter renamed: the body uses testSet
    predictions = []
    for i in range(len(testSet)):
        result = predict(summaries, testSet[i])
        predictions.append(result)
    return predictions

def getAccuracy(testSet, predictions):
    correct = 0
    for i in range(len(testSet)):
        if testSet[i][-1] == predictions[i]:
            correct += 1
    return (correct / float(len(testSet))) * 100.0

filename = 'PI_Diabetes.csv'
splitRatio = 0.9
dataset = loadcsv(filename)
print("\n The length of the Data Set: ", len(dataset))
print("\n The Data Set is split into Training and Testing sets \n")
trainingset, testSet = splitDataset(dataset, splitRatio)
print("\n Number of Rows in Training Set: {0} rows".format(len(trainingset)))
print("\n Number of Rows in Testing Set: {0} rows".format(len(testSet)))
summaries = summarizeByClass(trainingset)
print("\n Model summaries:\n", summaries)
predictions = getpredictions(summaries, testSet)
print("\n Predictions:\n", predictions)
accuracy = getAccuracy(testSet, predictions)
print("\n Accuracy: {0}%".format(accuracy))

7th Pgm: (KMeans & GMM)

import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
from sklearn import preprocessing
from sklearn.mixture import GaussianMixture
import sklearn.metrics as sm
import pandas as pd
import numpy as np

iris_dataset = pd.read_csv('iris(1).csv')
iris_dataset['Targets'] = iris_dataset.Class.map(
    {'Iris-setosa': 0, 'Iris-versicolor': 1, 'Iris-virginica': 2})
x = iris_dataset[['Sepal_Length', 'Sepal_Width', 'Petal_Length', 'Petal_Width']]
y = iris_dataset[['Targets']]

model = KMeans(n_clusters=3)
model.fit(x)
print('Model Labels:\n', model.labels_)

# the GMM is fit on standardised features
scaler = preprocessing.StandardScaler()
scaler.fit(x)
xs = scaler.transform(x)
gmm = GaussianMixture(n_components=3)
gmm.fit(xs)
y_gmm = gmm.predict(xs)
print('GMM Labels:\n', y_gmm)

plt.figure(figsize=(10, 10))
colormap = np.array(['red', 'lime', 'black'])

plt.subplot(2, 2, 1)
plt.scatter(x.Petal_Length, x.Petal_Width, c=colormap[y.Targets], s=40)
plt.title('Real Classification')
plt.xlabel('Petal Length')
plt.ylabel('Petal Width')

plt.subplot(2, 2, 2)
plt.scatter(x.Petal_Length, x.Petal_Width, c=colormap[model.labels_], s=40)
plt.title('K Means Clustering')
plt.xlabel('Petal Length')
plt.ylabel('Petal Width')

plt.subplot(2, 2, 3)
plt.scatter(x.Petal_Length, x.Petal_Width, c=colormap[y_gmm], s=40)
plt.title('GMM Based Clustering')
plt.xlabel('Petal Length')
plt.ylabel('Petal Width')
plt.show()   # added so the figure actually appears when run as a script

print('Evaluation of K-Means with ground truth classification of Iris Dataset')
print('Adjusted Rand Index: %f' % sm.adjusted_rand_score(y.Targets, model.labels_))
print('Homogeneity Score: %f' % sm.homogeneity_score(y.Targets, model.labels_))
print('Completeness Score: %f' % sm.completeness_score(y.Targets, model.labels_))
print('V-Measure: %f' % sm.v_measure_score(y.Targets, model.labels_))
print('Evaluation of GMM with ground truth classification of Iris dataset')
print('Adjusted Rand Index: %f' % sm.adjusted_rand_score(y.Targets, y_gmm))
print('Homogeneity Score: %f' % sm.homogeneity_score(y.Targets, y_gmm))
print('Completeness Score: %f' % sm.completeness_score(y.Targets, y_gmm))
print('V-Measure: %f' % sm.v_measure_score(y.Targets, y_gmm))
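
The column names used above imply an iris(1).csv of the following shape (an assumption based on the code, with one sample row per class):

Sepal_Length,Sepal_Width,Petal_Length,Petal_Width,Class
5.1,3.5,1.4,0.2,Iris-setosa
7.0,3.2,4.7,1.4,Iris-versicolor
6.3,3.3,6.0,2.5,Iris-virginica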

8th Pgm: (KNN)

from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
from sklearn import datasets

iris = datasets.load_iris()
print("Iris Dataset Loaded....")
x_train, x_test, y_train, y_test = train_test_split(iris.data, iris.target, test_size=0.1)
print("Dataset is split into training and testing samples...")
print("Size of training data and its label:", x_train.shape, y_train.shape)
print("Size of testing data and its label:", x_test.shape, y_test.shape)
for i in range(len(iris.target_names)):
    print("Label", i, "-", str(iris.target_names[i]))

classifier = KNeighborsClassifier(n_neighbors=1)
classifier.fit(x_train, y_train)
y_pred = classifier.predict(x_test)
print("Results of classification using K-NN with K=1")
for r in range(0, len(x_test)):
    print("Sample:", str(x_test[r]), "Actual_label:", str(y_test[r]),
          "Predicted_label:", str(y_pred[r]))
print("Classification accuracy:", classifier.score(x_test, y_test))

9th Pgm: (Locally Weighted Regression)

import matplotlib.pyplot as plt
import pandas as pd
import numpy as np

def kernel(point, xmat, k):
    # Gaussian kernel: weight each training point by its distance to 'point'
    m, n = np.shape(xmat)
    weights = np.mat(np.eye(m))
    for j in range(m):
        diff = point - xmat[j]   # was the global X; use the argument instead
        weights[j, j] = np.exp(diff * diff.T / (-2.0 * k ** 2))
    return weights

def localWeight(point, xmat, ymat, k):
    # solve the weighted normal equations for this query point
    wei = kernel(point, xmat, k)
    W = (xmat.T * (wei * xmat)).I * (xmat.T * (wei * ymat.T))
    return W

def localWeightRegression(xmat, ymat, k):
    m, n = np.shape(xmat)
    ypred = np.zeros(m)
    for i in range(m):
        ypred[i] = xmat[i] * localWeight(xmat[i], xmat, ymat, k)
    return ypred

def graphPlot(X, ypred):
    sortindex = X[:, 1].argsort(0)
    xsort = X[sortindex][:, 0]
    fig = plt.figure()
    ax = fig.add_subplot(1, 1, 1)
    ax.scatter(bill, tip, color='green')
    ax.plot(xsort[:, 1], ypred[sortindex], color='red', linewidth=5)
    plt.xlabel('Total bill')
    plt.ylabel('Tip')
    plt.show()

data = pd.read_csv('Tips.csv')
bill = np.array(data.total_bill)
tip = np.array(data.tip)
mbill = np.mat(bill)
mtip = np.mat(tip)
m = np.shape(mbill)[1]
one = np.mat(np.ones(m))
X = np.hstack((one.T, mbill.T))   # design matrix [1, bill]

print('\n ypred for k=3')
ypred = localWeightRegression(X, mtip, 3)
graphPlot(X, ypred)
print('\n ypred for k=9')
ypred = localWeightRegression(X, mtip, 9)
graphPlot(X, ypred)
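
The script only uses the total_bill and tip columns of Tips.csv, so any CSV with at least these two numeric columns works; the first rows of the well-known restaurant tips dataset (an assumed source) look like:

total_bill,tip
16.99,1.01
10.34,1.66
21.01,3.50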
