AI & ML Programs
CONTENTS
Sl.No  Name of the Experiment
1 Implementation of Uninformed search algorithms (BFS, DFS)
2 Implementation of Informed search algorithms (A*, memory-bounded A*)
3 Implement naïve Bayes models
4 Implement Bayesian Networks
5 Build Regression models
6 Build decision trees and random forests
7 Build SVM models
8 Implement ensembling techniques
9 Implement clustering algorithms
10 Implement EM for Bayesian networks
11 Build simple NN models
Program to implement the given graph traversal using DFS
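The listing shows only the driver code; the recursive dfs function and the graph it traverses are omitted. A minimal version, assuming the same six-node graph used in the BFS program below, could be:

graph = {
    '5': ['3', '7'],
    '3': ['2', '4'],
    '7': ['8'],
    '2': [],
    '4': ['8'],
    '8': []
}
visited = set()  # set to keep track of visited nodes

def dfs(visited, graph, node):
    if node not in visited:
        print(node, end=" ")
        visited.add(node)
        for neighbour in graph[node]:
            dfs(visited, graph, neighbour)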
# Driver Code
print("Following is the Depth-First Search")
dfs(visited, graph, '5')
Program to implement the given graph traversal using BFS
graph = {
'5' : ['3','7'],
'3' : ['2', '4'],
'7' : ['8'],
'2' : [],
'4' : ['8'],
'8' : []
}
visited = []  # list for visited nodes
queue = []    # initialize a queue

def bfs(visited, graph, node):  # function for BFS
    visited.append(node)
    queue.append(node)
    while queue:  # loop until the queue is empty, visiting each node
        m = queue.pop(0)
        print(m, end=" ")
        for neighbour in graph[m]:
            if neighbour not in visited:
                visited.append(neighbour)
                queue.append(neighbour)
# Driver Code
print("Following is the Breadth-First Search")
bfs(visited, graph, '5')

Program to implement A* Algorithm
def aStarAlgo(start_node, stop_node):
    open_set = set(start_node)
    closed_set = set()
    g = {start_node: 0}                 # cost from the start node
    parents = {start_node: start_node}  # parent map for path reconstruction
    while len(open_set) > 0:
        # pick the open node with the lowest f(n) = g(n) + h(n)
        n = min(open_set, key=lambda v: g[v] + heuristic(v))
        if n != stop_node and Graph_nodes[n] is not None:
            for (m, weight) in get_neighbors(n):
                # compare m's distance from start, g(m), with the
                # distance from start going through n
                if m not in open_set and m not in closed_set:
                    open_set.add(m)
                    parents[m] = n
                    g[m] = g[n] + weight
                elif g[m] > g[n] + weight:
                    g[m] = g[n] + weight  # update g(m)
                    parents[m] = n        # change parent of m to n
                    if m in closed_set:
                        closed_set.remove(m)
                        open_set.add(m)
        if n == stop_node:  # goal reached: walk the parents back to the start
            path = []
            while parents[n] != n:
                path.append(n)
                n = parents[n]
            path.append(start_node)
            path.reverse()
            print('Path found: {}'.format(path))
            return path
        open_set.remove(n)
        closed_set.add(n)
    print('Path does not exist!')
    return None
def get_neighbors(v):
    if v in Graph_nodes:
        return Graph_nodes[v]
    else:
        return None
def heuristic(n):
    H_dist = {
        'A': 10,
        'B': 8,
        'C': 5,
        'D': 7,
        'E': 3,
        'F': 6,
        'G': 5,
        'H': 3,
        'I': 1,
        'J': 0
    }
    return H_dist[n]
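Graph_nodes is referenced throughout but never defined in the listing; a weighted adjacency list consistent with the A-J heuristic above (the edges and weights here are assumptions) might be:

Graph_nodes = {
    'A': [('B', 6), ('F', 3)],
    'B': [('C', 3), ('D', 2)],
    'C': [('D', 1), ('E', 5)],
    'D': [('C', 1), ('E', 8)],
    'E': [('I', 5), ('J', 5)],
    'F': [('G', 1), ('H', 7)],
    'G': [('I', 3)],
    'H': [('I', 2)],
    'I': [('E', 5), ('J', 3)],
    'J': None,  # goal node, no outgoing edges
}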
aStarAlgo('A', 'J')
Program to implement Memory-bounded A* Algorithm
from queue import PriorityQueue

def SMA_star(problem):
    start_state = problem['start']
    goal_state = problem['goal']
    graph = problem['graph']
    heuristic = problem['heuristic']
    memory_limit = 5  # maximum number of nodes kept in memory
    queue = PriorityQueue()
    # each entry: (f-cost, state, parent, g-cost, path so far)
    queue.put((0, start_state, None, 0, []))

def heuristic(state):
    # heuristic estimate of the remaining cost to the goal
    h = {'A': 5, 'B': 4, 'C': 3, 'D': 2, 'E': 1, 'F': 0}
    return h[state]
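As listed, SMA_star only sets up its priority queue; a usage sketch showing the problem dictionary it expects (the graph edges here are assumptions) would be:

problem = {
    'start': 'A',
    'goal': 'F',
    'graph': {'A': [('B', 1), ('C', 2)], 'B': [('D', 3)], 'C': [('E', 2)],
              'D': [('F', 4)], 'E': [('F', 1)], 'F': []},
    'heuristic': heuristic,
}
SMA_star(problem)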
Program to implement Bayesian Networks
# Import libraries
import pgmpy.models
import pgmpy.inference
import networkx as nx
import matplotlib.pyplot as plt
from pgmpy import factors
import numpy as np
# Probability that John calls (True, False) given that the alarm has sounded
cpd_john = pgmpy.factors.discrete.TabularCPD('JohnCalls', 2, [[0.90, 0.05],
[0.10, 0.95]],
evidence=['Alarm'],
evidence_card=[2])
# Probability that Mary calls (True, False) given that the alarm has sounded
cpd_mary = pgmpy.factors.discrete.TabularCPD('MaryCalls', 2, [[0.70, 0.01],
[0.30, 0.99]],
evidence=['Alarm'],
evidence_card=[2])
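The network structure, the remaining CPDs, and the model object used by the inference step below are missing from the listing. A reconstruction of the classic burglary-alarm network, using the textbook probabilities as an assumption (note that newer pgmpy versions name the class BayesianNetwork rather than BayesianModel):

# Burglary and Earthquake cause Alarm; Alarm causes John and Mary to call
model = pgmpy.models.BayesianModel([('Burglary', 'Alarm'),
                                    ('Earthquake', 'Alarm'),
                                    ('Alarm', 'JohnCalls'),
                                    ('Alarm', 'MaryCalls')])
cpd_burglary = pgmpy.factors.discrete.TabularCPD('Burglary', 2, [[0.001], [0.999]])
cpd_earthquake = pgmpy.factors.discrete.TabularCPD('Earthquake', 2, [[0.002], [0.998]])
cpd_alarm = pgmpy.factors.discrete.TabularCPD('Alarm', 2,
                                              [[0.95, 0.94, 0.29, 0.001],
                                               [0.05, 0.06, 0.71, 0.999]],
                                              evidence=['Burglary', 'Earthquake'],
                                              evidence_card=[2, 2])
model.add_cpds(cpd_burglary, cpd_earthquake, cpd_alarm, cpd_john, cpd_mary)
model.check_model()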
infer = pgmpy.inference.VariableElimination(model)
# Probability of a burglary given that both John and Mary call (0: True, 1: False)
posterior_probability = infer.query(['Burglary'], evidence={'JohnCalls': 0, 'MaryCalls': 0})
print(posterior_probability)
# Probability of the alarm sounding given a burglary and an earthquake (0: True, 1: False)
posterior_probability = infer.query(['Alarm'], evidence={'Burglary': 0, 'Earthquake': 0})
print(posterior_probability)
Program to build Regression models
import pandas as pd
import matplotlib.pyplot as plt
dataset = pd.read_csv('Salary_Data.csv')
dataset.head()
# data preprocessing
X = dataset.iloc[:, :-1].values  # independent variable array
y = dataset.iloc[:, 1].values    # dependent variable vector
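The listing jumps from preprocessing straight to plotting; the split-and-fit step it appears to assume, using scikit-learn's LinearRegression, might be:

from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression

# hold out a third of the data for testing
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=1/3, random_state=0)
regressor = LinearRegression()
regressor.fit(X_train, y_train)       # fit the line to the training data
y_pred = regressor.predict(X_test)    # predicted salaries for the test set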
plt.scatter(X_test, y_test, color='red')  # actual test-set points
plt.plot(X_train, regressor.predict(X_train), color='blue') # plotting the regression line
plt.title("Salary vs Experience (Testing set)")
plt.xlabel("Years of experience")
plt.ylabel("Salaries")
plt.show()
Program to implement SVM model
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
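The listing never loads X and y; a placeholder loading step (the file name and column layout are assumptions) could be:

dataset = pd.read_csv('data.csv')  # hypothetical file name
X = dataset.iloc[:, :-1].values    # all columns except the last as features
y = dataset.iloc[:, -1].values     # last column as the class label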
# Split the X and y datasets into the training set and test set
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.25, random_state = 0)
# Build the SVM model on the training set; the default kernel is 'rbf'
from sklearn.svm import SVC
classifier = SVC(kernel = 'rbf', random_state = 0)
classifier.fit(X_train, y_train)
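A typical way to check the fitted classifier, using standard scikit-learn metrics, is:

from sklearn.metrics import confusion_matrix, accuracy_score

y_pred = classifier.predict(X_test)      # predict classes for the test set
print(confusion_matrix(y_test, y_pred))  # rows: actual, columns: predicted
print("Accuracy:", accuracy_score(y_test, y_pred))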
Program to implement ensembling techniques
# Utility
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.metrics import roc_auc_score,accuracy_score
from sklearn.preprocessing import StandardScaler
# machine learning
from sklearn.ensemble import RandomForestClassifier, VotingClassifier
from sklearn.linear_model import LogisticRegression, SGDClassifier
#seed
seed = 40
df = pd.read_csv("heart.csv")
#target
target = df["output"]
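The split, scaling, and base models that the VotingClassifier below expects are missing from the listing; one plausible setup (the model names follow the estimators list below) is:

from sklearn.neighbors import KNeighborsClassifier

features = df.drop(columns=["output"])
X_train, X_test, y_train, y_test = train_test_split(
    features, target, test_size=0.2, random_state=seed)
# standardize features so KNN and logistic regression behave well
scaler = StandardScaler()
X_train = scaler.fit_transform(X_train)
X_test = scaler.transform(X_test)
# base models, named to match the VotingClassifier below
model_1 = RandomForestClassifier(random_state=seed)
model_2 = LogisticRegression(max_iter=1000, random_state=seed)
model_4 = KNeighborsClassifier()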
# voting classifier
final_model = VotingClassifier(
    estimators=[("rf", model_1), ("lr", model_2), ("knn", model_4)],
    voting="hard",
)
# training
final_model.fit(X_train, y_train)
# prediction
prediction = final_model.predict(X_test)
# evaluation
accuracy = round(accuracy_score(y_test, prediction) * 100, 3)
auc = round(roc_auc_score(y_test, prediction), 3)
print("Accuracy:", accuracy, "AUC:", auc)
Program to implement clustering algorithms
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
data = {
'x': [25, 34, 22, 27, 33, 33, 31, 22, 35, 34, 67, 54, 57, 43, 50, 57, 59, 52, 65, 47, 49, 48, 35, 33, 44, 45,
38, 43, 51, 46],
'y': [79, 51, 53, 78, 59, 74, 73, 57, 69, 75, 51, 32, 40, 47, 53, 36, 35, 58, 59, 50, 25, 20, 14, 12, 20, 5, 29,
27, 8, 7]
}
df = pd.DataFrame(data)
kmeans = KMeans(n_clusters=3).fit(df)
centroids = kmeans.cluster_centers_
print(centroids)
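matplotlib is imported above but never used; a typical way to visualize the clusters and their centroids is:

# color each point by its assigned cluster, and mark the centroids in red
plt.scatter(df['x'], df['y'], c=kmeans.labels_.astype(float), s=50, alpha=0.5)
plt.scatter(centroids[:, 0], centroids[:, 1], c='red', s=50)
plt.show()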
Program to implement EM for Bayesian networks
import numpy as np
import pandas as pd
import csv
from pgmpy.estimators import MaximumLikelihoodEstimator
from pgmpy.models import BayesianModel
from pgmpy.inference import VariableElimination
heartDisease = pd.read_csv('heart.csv')
heartDisease = heartDisease.replace('?', np.nan)
model = BayesianModel([('age', 'heartdisease'), ('sex', 'heartdisease'),
                       ('exang', 'heartdisease'), ('cp', 'heartdisease'),
                       ('heartdisease', 'restecg'), ('heartdisease', 'chol')])
print('\nLearning CPD using Maximum likelihood estimators')
model.fit(heartDisease, estimator=MaximumLikelihoodEstimator)
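VariableElimination is imported but never used in the fragment; the exercise presumably finishes with a query against the fitted model, for example (the evidence value here is illustrative):

print('\nInferencing with Bayesian Network:')
HeartDisease_infer = VariableElimination(model)
# probability of heart disease given a restecg reading of 1
q = HeartDisease_infer.query(variables=['heartdisease'], evidence={'restecg': 1})
print(q)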
Program to build simple NN models
# forward propagation
def forward_propagation(self, inputs):
    # weighted sum of the inputs passed through the tanh activation
    return self.tanh(np.dot(inputs, self.weight_matrix))

# Driver Code
if __name__ == "__main__":
    neural_network = NeuralNetwork()
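The fragment above references a NeuralNetwork class that never appears in the listing; a minimal single-neuron version consistent with its tanh and weight_matrix names (the layer shape is an assumption) could be:

import numpy as np

class NeuralNetwork:
    def __init__(self):
        np.random.seed(1)
        # 3x1 weight matrix with values in [-1, 1); the shape is an assumption
        self.weight_matrix = 2 * np.random.random((3, 1)) - 1

    def tanh(self, x):
        return np.tanh(x)

    def forward_propagation(self, inputs):
        # weighted sum of inputs passed through tanh
        return self.tanh(np.dot(inputs, self.weight_matrix))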
import numpy as np
X = np.array(([2, 9], [1, 5], [3, 6]), dtype=float) # two inputs [sleep,study]
y = np.array(([92], [86], [89]), dtype=float) # one output [Expected % in Exams]
X = X/np.amax(X,axis=0) # normalize each feature column by its maximum
y = y/100
# Sigmoid Function
def sigmoid(x):
    return 1 / (1 + np.exp(-x))
# Variable initialization
epoch = 5000              # number of training iterations
lr = 0.1                  # learning rate
inputlayer_neurons = 2    # number of features in the dataset
hiddenlayer_neurons = 3   # number of neurons in the hidden layer
output_neurons = 1        # number of neurons at the output layer
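The listing never initializes the weights and biases used below, nor defines the sigmoid derivative; the usual setup for this program is the following, and the forward/backward pass below would normally run inside a loop such as for i in range(epoch):

def derivatives_sigmoid(x):
    # derivative of the sigmoid, expressed in terms of its output x
    return x * (1 - x)

# random weight and bias initialization
wh = np.random.uniform(size=(inputlayer_neurons, hiddenlayer_neurons))
bh = np.random.uniform(size=(1, hiddenlayer_neurons))
wout = np.random.uniform(size=(hiddenlayer_neurons, output_neurons))
bout = np.random.uniform(size=(1, output_neurons))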
# Forward Propagation
hinp1 = np.dot(X, wh)
hinp = hinp1 + bh
hlayer_act = sigmoid(hinp)          # hidden layer activations
outinp1 = np.dot(hlayer_act, wout)
outinp = outinp1 + bout
output = sigmoid(outinp)            # network prediction
# Backpropagation
EO = y - output                     # error at the output layer
outgrad = derivatives_sigmoid(output)
d_output = EO * outgrad
EH = d_output.dot(wout.T)
# how much hidden layer weights contributed to the error
hiddengrad = derivatives_sigmoid(hlayer_act)
d_hiddenlayer = EH * hiddengrad
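The listing ends mid-program; the standard closing steps of this exercise update the weights with the computed gradients and print the prediction:

# gradient-descent weight and bias updates
wout += hlayer_act.T.dot(d_output) * lr
bout += np.sum(d_output, axis=0, keepdims=True) * lr
wh += X.T.dot(d_hiddenlayer) * lr
bh += np.sum(d_hiddenlayer, axis=0, keepdims=True) * lr
print("Input: \n" + str(X))
print("Actual Output: \n" + str(y))
print("Predicted Output: \n" + str(output))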