AIML Lab Program
1st Pgm:(A*)
def aStarAlgo(start_node, stop_node):
    # the listing began mid-function; the standard initialisation is restored here
    open_set = set([start_node])
    closed_set = set()
    g = {start_node: 0}                  # cost from start along the best known path
    parents = {start_node: start_node}
    while len(open_set) > 0:
        # pick the open node with the lowest f(n) = g(n) + h(n)
        n = None
        for v in open_set:
            if n is None or g[v] + heuristic(v) < g[n] + heuristic(n):
                n = v
        if n == stop_node or Graph_nodes[n] is None:
            pass
        else:
            for (m, weight) in get_neighbours(n):
                if m not in open_set and m not in closed_set:
                    open_set.add(m)
                    parents[m] = n
                    g[m] = g[n] + weight
                else:
                    if g[m] > g[n] + weight:
                        g[m] = g[n] + weight
                        parents[m] = n
                        if m in closed_set:
                            closed_set.remove(m)
                            open_set.add(m)
        if n is None:
            print('Path does not exist!')
            return None
        if n == stop_node:
            path = []
            while parents[n] != n:
                path.append(n)
                n = parents[n]
            path.append(start_node)
            path.reverse()
            print('Path found : {}'.format(path))
            return path
        open_set.remove(n)
        closed_set.add(n)
    print('Path does not exist!')
    return None
def get_neighbours(v):
    if v in Graph_nodes:
        return Graph_nodes[v]
    else:
        return None
def heuristic(n):
    # the listing's table belonged to a different graph (it listed 'S' and 'F',
    # neither of which appears in Graph_nodes below, and omitted 'A', so the run
    # would crash); the values usually paired with this graph are restored here
    H_dist = {
        'A': 11,
        'B': 6,
        'C': 99,
        'D': 1,
        'E': 7,
        'G': 0,
    }
    return H_dist[n]
Graph_nodes={
'A':[('B',2),('E',3)],
'B':[('A',2),('C',1),('G',9)],
'C':[('B',1)],
'D':[('E',6),('G',1)],
'E':[('A',3),('D',6)],
'G':[('B',9),('D',1)],
}
aStarAlgo('A','G')
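As a quick sanity check on the A* result, the sketch below (an addition, not part of the original lab listing) exhaustively enumerates every simple path in Graph_nodes and prints the cheapest one; aStarAlgo should agree with it whenever the heuristic is admissible, and a mismatch would mean the heuristic overestimates somewhere.

def cheapest_path_bruteforce(start, goal):
    # exhaustive DFS over simple paths; fine for a graph this small
    best = (float('inf'), None)
    stack = [(start, [start], 0)]
    while stack:
        node, path, cost = stack.pop()
        if node == goal:
            best = min(best, (cost, path))
            continue
        for nxt, w in Graph_nodes.get(node, []):
            if nxt not in path:
                stack.append((nxt, path + [nxt], cost + w))
    return best

print('Brute-force optimum (cost, path):', cheapest_path_bruteforce('A', 'G'))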
2nd Pgm:(AO*)
class graph:
    def __init__(self, graph, heuristicNodeList, startNode):
        self.graph = graph
        self.H = heuristicNodeList
        self.start = startNode
        self.parent = {}
        self.status = {}
        self.solutionGraph = {}
    def applyAOStar(self):
        self.aoStar(self.start, False)
    def getNeighbors(self, v):
        return self.graph.get(v, '')
    def getStatus(self, v):
        return self.status.get(v, 0)
    def setStatus(self, v, val):
        self.status[v] = val
    def getHeuristicNodeValue(self, n):
        return self.H.get(n, 0)
    def setHeuristicNodeValue(self, n, value):
        self.H[n] = value
    def printSolution(self):
        print('FOR GRAPH SOLUTION, TRAVERSE THE GRAPH FROM THE START NODE:', self.start)
        print('--------------------------------------------------------------------')
        print(self.solutionGraph)
        print('--------------------------------------------------------------------')
    def computeMinimumCostChildNodes(self, v):
        # evaluate each AND/OR child group of v and keep the cheapest one
        minimumCost = 0
        costToChildNodeListDict = {minimumCost: []}
        flag = True
        for nodeInfoTupleList in self.getNeighbors(v):
            cost = 0
            nodeList = []
            for c, weight in nodeInfoTupleList:
                cost = cost + self.getHeuristicNodeValue(c) + weight
                nodeList.append(c)
            if flag or cost < minimumCost:
                minimumCost = cost
                costToChildNodeListDict[minimumCost] = nodeList
                flag = False
        return minimumCost, costToChildNodeListDict[minimumCost]
    def aoStar(self, v, backTracking):
        # the listing kept only the tail of this method; the standard body is restored
        if self.getStatus(v) >= 0:
            minimumCost, childNodeList = self.computeMinimumCostChildNodes(v)
            self.setHeuristicNodeValue(v, minimumCost)
            self.setStatus(v, len(childNodeList))
            solved = True
            for childNode in childNodeList:
                self.parent[childNode] = v
                if self.getStatus(childNode) != -1:
                    solved = False
            if solved:
                self.setStatus(v, -1)
                self.solutionGraph[v] = childNodeList
            if v != self.start:
                self.aoStar(self.parent[v], True)
            if backTracking == False:
                for childNode in childNodeList:
                    self.setStatus(childNode, 0)
                    self.aoStar(childNode, False)
h1 = {'A': 1, 'B': 6, 'C': 2, 'D': 12, 'E': 2, 'F': 1, 'G': 5, 'H': 7, 'I': 7, 'J': 1}
graph1 = {
    'A': [[('B', 1), ('C', 1)], [('D', 1)]],
    'B': [[('G', 1)], [('H', 1)]],
    'C': [[('J', 1)]],
    'D': [[('E', 1), ('F', 1)]],
    'G': [[('I', 1)]],
}
G1 = graph(graph1, h1, 'A')
G1.applyAOStar()
G1.printSolution()
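In graph1, each inner list is one connector: a list with two tuples is an AND arc (both children must be solved together), while a single tuple is an OR arc. A small helper (not part of the original listing) makes that structure explicit:

for node, connectors in graph1.items():
    for group in connectors:
        kind = 'AND' if len(group) > 1 else 'OR'
        print(node, '->', kind, [child for child, cost in group])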
4th Pgm:(DT)
import pandas as pd
import math
import numpy as np
data= pd.read_csv("tennis.csv")
features=[feat for feat in data]
features.remove("answer")
class Node:
    def __init__(self):
        self.children = []
        self.value = ""
        self.isLeaf = False
        self.pred = ""
def entropy(examples):
    pos = 0.0
    neg = 0.0
    for _, row in examples.iterrows():
        if row["answer"] == "yes":
            pos += 1
        else:
            neg += 1
    if pos == 0.0 or neg == 0.0:
        return 0.0
    else:
        p = pos / (pos + neg)
        n = neg / (pos + neg)
        return -(p * math.log(p, 2) + n * math.log(n, 2))
def info_gain(examples, attr):
    uniq = np.unique(examples[attr])
    gain = entropy(examples)
    for u in uniq:
        subdata = examples[examples[attr] == u]
        sub_e = entropy(subdata)
        gain -= (float(len(subdata)) / float(len(examples))) * sub_e
    return gain
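To see the split criterion in action, here is a worked check on a made-up four-row frame (purely illustrative, not part of the lab data): with three "yes" and one "no", entropy is -(0.75*log2 0.75 + 0.25*log2 0.25) ≈ 0.811, and splitting on "windy" leaves one pure branch and one 50/50 branch, so the gain is 0.811 - 0.5*0 - 0.5*1 ≈ 0.311.

toy = pd.DataFrame({
    "windy":  ["no", "no", "yes", "yes"],
    "answer": ["yes", "yes", "no", "yes"],
})
print(entropy(toy))             # ~0.811
print(info_gain(toy, "windy"))  # ~0.311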
def ID3(examples, attrs):
    root = Node()
    max_gain = 0
    max_feat = ""
    for feature in attrs:
        gain = info_gain(examples, feature)
        if gain > max_gain:
            max_gain = gain
            max_feat = feature
    root.value = max_feat
    uniq = np.unique(examples[max_feat])
    for u in uniq:
        subdata = examples[examples[max_feat] == u]
        if entropy(subdata) == 0.0:
            newnode = Node()
            newnode.isLeaf = True
            newnode.value = u
            newnode.pred = np.unique(subdata["answer"])
            root.children.append(newnode)
        else:
            dummynode = Node()
            dummynode.value = u
            new_attrs = attrs.copy()
            new_attrs.remove(max_feat)
            child = ID3(subdata, new_attrs)
            dummynode.children.append(child)
            root.children.append(dummynode)
    return root
def printtree(root: Node, depth=0):
    for i in range(depth):
        print("\t", end="")
    print(root.value, end="")
    if root.isLeaf:
        print(" ->", root.pred)
    print()
    for child in root.children:
        printtree(child, depth + 1)
def classify(root: Node, new):
    for child in root.children:
        if child.value == new[root.value]:
            if child.isLeaf:
                print("Predicted label for new example", new, "is:", child.pred)
                return  # the bare 'exit' in the listing was a no-op
            else:
                classify(child.children[0], new)
root=ID3(data,features)
print("decision tree is:")
printtree(root)
print("------------------")
new={"outlook":"sunny","temperature":"hot","humidity":"high","wind":"strong"}
classify(root,new)
new1={"outlook":"overcast","temperature":"hot","humidity":"high","wind":"strong"}
classify(root,new1)
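This program assumes a tennis.csv laid out like Quinlan's classic play-tennis dataset, with the target column named answer and lowercase values matching the test examples above. An illustrative excerpt of that shape (the full dataset has 14 rows) looks like:

outlook,temperature,humidity,wind,answer
sunny,hot,high,weak,no
sunny,hot,high,strong,no
overcast,hot,high,weak,yes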
5th Pgm:(Backpropagation)
import numpy as np
x=np.array(([2,9],[1,5],[3,6]),dtype=float)
y=np.array(([92],[86],[89]),dtype=float)
x=x/np.amax(x,axis=0)
y=y/100
def sigmoid(x):
    return 1 / (1 + np.exp(-x))
def derivatives_sigmoid(x):
    return x * (1 - x)
epoch=5000
lr=0.1
inputlayer_neurons=2
hiddenlayer_neurons=3
output_neurons=1
wh=np.random.uniform(size=(inputlayer_neurons,hiddenlayer_neurons))
bh=np.random.uniform(size=(1,hiddenlayer_neurons))
wout=np.random.uniform(size=(hiddenlayer_neurons,output_neurons))
bout=np.random.uniform(size=(1,output_neurons))
for i in range(epoch):
    # forward pass
    hinp1 = np.dot(x, wh)
    hinp = hinp1 + bh
    hlayer_act = sigmoid(hinp)
    outinp1 = np.dot(hlayer_act, wout)
    outinp = outinp1 + bout
    output = sigmoid(outinp)
    # backward pass
    EO = y - output
    outgrad = derivatives_sigmoid(output)
    d_output = EO * outgrad
    EH = d_output.dot(wout.T)
    hiddengrad = derivatives_sigmoid(hlayer_act)
    d_hiddenlayer = EH * hiddengrad
    # weight and bias updates; the bias updates were missing from the listing
    wout += hlayer_act.T.dot(d_output) * lr
    bout += np.sum(d_output, axis=0, keepdims=True) * lr
    wh += x.T.dot(d_hiddenlayer) * lr
    bh += np.sum(d_hiddenlayer, axis=0, keepdims=True) * lr
print("Input : \n"+str(x))
print("Output : \n"+str(y))
print("Predicted Output : \n",output)
6th Pgm:(Naive Bayes)
import csv
import random
import math
def loadcsv(filename):
    reader = csv.reader(open(filename, "r"))
    dataset = []
    for row in reader:
        inlist = []
        for i in range(len(row)):
            inlist.append(float(row[i]))
        dataset.append(inlist)
    return dataset
def splitDataset(dataset, splitratio):
    trainSize = int(len(dataset) * splitratio)
    trainSet = []
    copy = list(dataset)
    while len(trainSet) < trainSize:
        index = random.randrange(len(copy))
        trainSet.append(copy.pop(index))
    return [trainSet, copy]
def separateByClass(dataset):
    separated = {}
    for i in range(len(dataset)):
        vector = dataset[i]
        if vector[-1] not in separated:
            separated[vector[-1]] = []
        separated[vector[-1]].append(vector)
    return separated
def mean(numbers):
    return sum(numbers) / float(len(numbers))
def stdev(numbers):
    if len(numbers) == 0:
        return 0
    avg = mean(numbers)
    variance = sum([pow(x - avg, 2) for x in numbers]) / float(len(numbers) - 1)
    return math.sqrt(variance)
def summarize(dataset):
    summaries = [(mean(attribute), stdev(attribute)) for attribute in zip(*dataset)]
    del summaries[-1]  # drop the statistics of the class column
    return summaries
def summarizeByClass(dataset):
    separated = separateByClass(dataset)
    summaries = {}
    for classValue, instances in separated.items():
        summaries[classValue] = summarize(instances)
    return summaries
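The listing breaks off after summarizeByClass. A minimal completion in the same style, following the standard Gaussian naive Bayes recipe, is sketched below; the filename naivedata.csv is a placeholder, since the original does not say which CSV it used.

def calculateProbability(x, mean, stdev):
    # Gaussian probability density of x under N(mean, stdev^2)
    exponent = math.exp(-(math.pow(x - mean, 2) / (2 * math.pow(stdev, 2))))
    return (1 / (math.sqrt(2 * math.pi) * stdev)) * exponent

def calculateClassProbabilities(summaries, inputVector):
    probabilities = {}
    for classValue, classSummaries in summaries.items():
        probabilities[classValue] = 1
        for i in range(len(classSummaries)):
            m, s = classSummaries[i]
            probabilities[classValue] *= calculateProbability(inputVector[i], m, s)
    return probabilities

def predict(summaries, inputVector):
    probabilities = calculateClassProbabilities(summaries, inputVector)
    bestLabel, bestProb = None, -1
    for classValue, probability in probabilities.items():
        if bestLabel is None or probability > bestProb:
            bestProb = probability
            bestLabel = classValue
    return bestLabel

def getAccuracy(testSet, predictions):
    correct = sum(1 for i in range(len(testSet)) if testSet[i][-1] == predictions[i])
    return correct / float(len(testSet)) * 100.0

dataset = loadcsv('naivedata.csv')  # placeholder filename
trainingSet, testSet = splitDataset(dataset, 0.67)
summaries = summarizeByClass(trainingSet)
predictions = [predict(summaries, row) for row in testSet]
print('Accuracy:', getAccuracy(testSet, predictions))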
7th Pgm:(K-Means & EM)
# the listing began mid-program; the usual Iris setup for this exercise is restored here
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn import datasets, preprocessing
from sklearn.cluster import KMeans
from sklearn.mixture import GaussianMixture
iris = datasets.load_iris()
x = pd.DataFrame(iris.data, columns=['Sepal_Length', 'Sepal_Width', 'Petal_Length', 'Petal_Width'])
y = pd.DataFrame(iris.target, columns=['Targets'])
model = KMeans(n_clusters=3)
model.fit(x)
print('Model Labels:\n', model.labels_)
scaler = preprocessing.StandardScaler()
scaler.fit(x)
xs = scaler.transform(x)
gmm = GaussianMixture(n_components=3)
gmm.fit(xs)
y_gmm = gmm.predict(xs)
print('GMM Labels:\n', y_gmm)
plt.figure(figsize = (10,10))
colormap = np.array(['red','lime','black'])
plt.subplot(2,2,1)
plt.scatter(x.Petal_Length,x.Petal_Width,c=colormap[y.Targets], s=40)
plt.title('Real Classification')
plt.xlabel('Petal Length')
plt.ylabel('Petal Width')
plt.subplot(2,2,2)
plt.scatter(x.Petal_Length,x.Petal_Width,c=colormap[model.labels_], s=40)
plt.title('K Means Clustering')
plt.xlabel('Petal Length')
plt.ylabel('Petal Width')
plt.subplot(2,2,3)
plt.scatter(x.Petal_Length,x.Petal_Width,c=colormap[y_gmm], s=40)
plt.title('GMM Based Clustering')
plt.xlabel('Petal Length')
plt.ylabel('Petal Width')
plt.show()
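To quantify how well each clustering recovers the true species (an added check, not in the original listing), scikit-learn's adjusted Rand index can be compared across the two models:

from sklearn.metrics import adjusted_rand_score
print('K-Means ARI:', adjusted_rand_score(iris.target, model.labels_))
print('GMM ARI:', adjusted_rand_score(iris.target, y_gmm))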
8th Pgm:(KNN)
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
from sklearn import datasets
iris = datasets.load_iris()
print("Iris Dataset Loaded....")
x_train, x_test, y_train, y_test = train_test_split(iris.data, iris.target, test_size=0.1)
print("Dataset is split into training and testing samples...")
print("Size of training data and its label:", x_train.shape, y_train.shape)
print("Size of testing data and its label:", x_test.shape, y_test.shape)
for i in range(len(iris.target_names)):
    print("Label", i, "-", str(iris.target_names[i]))
classifier = KNeighborsClassifier(n_neighbors=1)
classifier.fit(x_train, y_train)
y_pred = classifier.predict(x_test)
print("Results of classification using K-NN with K=1")
for r in range(0, len(x_test)):
    print("Sample:", str(x_test[r]), "Actual label:", str(y_test[r]), "Predicted label:", str(y_pred[r]))
print("Classification accuracy:", classifier.score(x_test, y_test))
9th Pgm:(LWR)
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
def kernel(point, xmat, k):
    m, n = np.shape(xmat)
    weights = np.mat(np.eye(m))
    for j in range(m):
        diff = point - xmat[j]  # the listing used the global X here
        weights[j, j] = np.exp(diff * diff.T / (-2.0 * k ** 2))
    return weights
def localWeight(point, xmat, ymat, k):
    wei = kernel(point, xmat, k)
    W = (xmat.T * (wei * xmat)).I * (xmat.T * (wei * ymat.T))
    return W
def localWeightRegression(xmat, ymat, k):
    m, n = np.shape(xmat)
    ypred = np.zeros(m)
    for i in range(m):
        ypred[i] = xmat[i] * localWeight(xmat[i], xmat, ymat, k)
    return ypred
def graphPlot(X, ypred):
    sortindex = X[:, 1].argsort(0)
    xsort = X[sortindex][:, 0]
    fig = plt.figure()
    ax = fig.add_subplot(1, 1, 1)
    ax.scatter(bill, tip, color='green')
    ax.plot(xsort[:, 1], ypred[sortindex], color='red', linewidth=5)
    plt.xlabel('Total bill')
    plt.ylabel('Tip')
    plt.show()
data = pd.read_csv('Tips.csv')
bill = np.array(data.total_bill)
tip = np.array(data.tip)
mbill = np.mat(bill)
mtip = np.mat(tip)
m = np.shape(mbill)[1]
one = np.mat(np.ones(m))
X = np.hstack((one.T, mbill.T))  # design matrix: [1, total_bill]
print('\n ypred for k=3')
ypred = localWeightRegression(X, mtip, 3)
graphPlot(X, ypred)
print('\n ypred for k=9')
ypred = localWeightRegression(X, mtip, 9)
graphPlot(X, ypred)
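For reference, comparing the local fits against a single global least-squares line (an added illustration using numpy's polyfit) highlights what the bandwidth k buys:

slope, intercept = np.polyfit(bill, tip, 1)
print('Global linear fit: tip = %.3f * bill + %.3f' % (slope, intercept))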