
CS3491 ARTIFICIAL INTELLIGENCE AND MACHINE LEARNING

CONTENTS

Sl.No   Name of the Experiment
1 Implementation of Uninformed search algorithms (BFS, DFS)
2 Implementation of Informed search algorithms (A*, memory-bounded A*)
3 Implement naïve Bayes models
4 Implement Bayesian Networks
5 Build Regression models
6 Build decision trees and random forests
7 Build SVM models
8 Implement ensembling techniques
9 Implement clustering algorithms
10 Implement EM for Bayesian networks
11 Build simple NN models
Program to implement the given graph traversal using DFS

# Using a Python dictionary to act as an adjacency list
graph = {
    '5' : ['3','7'],
    '3' : ['2', '4'],
    '7' : ['8'],
    '2' : [],
    '4' : ['8'],
    '8' : []
}

visited = set()  # Set to keep track of visited nodes of the graph.

def dfs(visited, graph, node):  # function for DFS
    if node not in visited:
        print(node)
        visited.add(node)
        for neighbour in graph[node]:
            dfs(visited, graph, neighbour)

# Driver Code
print("Following is the Depth-First Search")
dfs(visited, graph, '5')
Program to implement shortest path between two nodes of the given graph using DFS

graph = {'A': set(['B', 'C']),
         'B': set(['A', 'D', 'E']),
         'C': set(['A', 'F']),
         'D': set(['B']),
         'E': set(['B', 'F']),
         'F': set(['C', 'E'])}

def dfs_paths(graph, start, goal):
    stack = [(start, [start])]
    while stack:
        (vertex, path) = stack.pop()
        for next in graph[vertex] - set(path):
            if next == goal:
                yield path + [next]
            else:
                stack.append((next, path + [next]))
print(list(dfs_paths(graph, 'A', 'F')))
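
The generator above enumerates every simple path from 'A' to 'F'; since DFS alone does not guarantee that the first path found is the shortest, a simple way to recover the shortest path is to take the minimum-length path from the enumeration. A minimal sketch (the helper name shortest_dfs_path is illustrative, not part of the original listing):

def shortest_dfs_path(graph, start, goal):
    # enumerate all simple paths and keep the one with the fewest nodes
    paths = list(dfs_paths(graph, start, goal))
    return min(paths, key=len) if paths else None

print(shortest_dfs_path(graph, 'A', 'F'))   # expected: ['A', 'C', 'F']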
Program to implement the given graph traversal using BFS

graph = {
'5' : ['3','7'],
'3' : ['2', '4'],
'7' : ['8'],
'2' : [],
'4' : ['8'],
'8' : []
}
visited = []  # List for visited nodes.
queue = []    # Initialize a queue

def bfs(visited, graph, node):  # function for BFS
    visited.append(node)
    queue.append(node)
    while queue:  # Creating loop to visit each node
        m = queue.pop(0)
        print(m, end=" ")
        for neighbour in graph[m]:
            if neighbour not in visited:
                visited.append(neighbour)
                queue.append(neighbour)

# Driver Code

print("Following is the Breadth-First Search")


bfs(visited, graph, '5') # function calling
Program to implement shortest path between two nodes of the given graph using BFS

graph = {'A': set(['B', 'C']),
         'B': set(['A', 'D', 'E']),
         'C': set(['A', 'F']),
         'D': set(['B']),
         'E': set(['B', 'F']),
         'F': set(['C', 'E'])}

def bfs_paths(graph, start, goal):
    queue = [(start, [start])]
    while queue:
        (vertex, path) = queue.pop(0)
        for next in graph[vertex] - set(path):
            if next == goal:
                yield path + [next]
            else:
                queue.append((next, path + [next]))

print(list(bfs_paths(graph, 'A', 'F')))
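
Because BFS extends paths in order of increasing length, the first path the generator yields is already a shortest path in this unweighted graph, so the full list need not be built. A minimal sketch using the same graph:

# The first yielded path is a shortest path between the two nodes.
print(next(bfs_paths(graph, 'A', 'F')))   # expected: ['A', 'C', 'F']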


Program to implement A Star Search Algorithm in python

def aStarAlgo(start_node, stop_node):

    open_set = set(start_node)   # {'A'}
    closed_set = set()
    g = {}                       # store the distance from the starting node
    parents = {}
    g[start_node] = 0
    parents[start_node] = start_node   # parents['A'] = 'A'

    while len(open_set) > 0:
        n = None

        # find the node in the open set with the lowest f(v) = g(v) + h(v)
        for v in open_set:
            if n == None or g[v] + heuristic(v) < g[n] + heuristic(n):
                n = v

        if n == stop_node or Graph_nodes[n] == None:
            pass
        else:
            for (m, weight) in get_neighbors(n):

                # nodes 'm' not in the open or closed set are added to the open set
                # and n is set as their parent
                if m not in open_set and m not in closed_set:
                    open_set.add(m)
                    parents[m] = n
                    g[m] = g[n] + weight

                # for each node m, compare its distance from start, i.e. g(m),
                # to the distance from start through n
                else:
                    if g[m] > g[n] + weight:
                        # update g(m)
                        g[m] = g[n] + weight
                        # change parent of m to n
                        parents[m] = n

                        # if m is in the closed set, remove it and add it to the open set
                        if m in closed_set:
                            closed_set.remove(m)
                            open_set.add(m)

        if n == None:
            print('Path does not exist!')
            return None

        # if the current node is the stop_node
        # then we begin reconstructing the path from it to the start_node
        if n == stop_node:
            path = []

            while parents[n] != n:
                path.append(n)
                n = parents[n]

            path.append(start_node)
            path.reverse()

            print('Path found: {}'.format(path))
            return path

        # remove n from the open set and add it to the closed set
        # because all of its neighbours were inspected
        open_set.remove(n)
        closed_set.add(n)

    print('Path does not exist!')
    return None

# define a function to return a node's neighbours and their distances
# from the passed node
def get_neighbors(v):
    if v in Graph_nodes:
        return Graph_nodes[v]
    else:
        return None

# for simplicity we will consider the heuristic distances as given
# and this function returns the heuristic distance for all nodes

def heuristic(n):
H_dist = {
'A': 10,
'B': 8,
'C': 5,
'D': 7,
'E': 3,
'F': 6,
'G': 5,
'H': 3,
'I': 1,
'J': 0
}

return H_dist[n]

#Describe your graph here


Graph_nodes = {
    'A': [('B', 6), ('F', 3)],
    'B': [('C', 3), ('D', 2)],
    'C': [('D', 1), ('E', 5)],
    'D': [('C', 1), ('E', 8)],
    'E': [('I', 5), ('J', 5)],
    'F': [('G', 1), ('H', 7)],
    'G': [('I', 3)],
    'H': [('I', 2)],
    'I': [('E', 5), ('J', 3)],
}
aStarAlgo('A', 'J')
Program to implement Memory-bounded A* Algorithm

from queue import PriorityQueue

def SMA_star(problem):
    start_state = problem['start']
    goal_state = problem['goal']
    graph = problem['graph']
    heuristic = problem['heuristic']
    memory_limit = 5
    Queue = PriorityQueue()
    Queue.put((0, start_state, None, 0, []))

    while not Queue.empty():
        _, current_node, parent_node, depth, path = Queue.get()
        if current_node == goal_state:
            return path + [current_node]
        successors = graph[current_node]
        if depth == memory_limit:
            f = float('inf')
        else:
            f = float('inf')
            for successor in successors:
                g = graph[current_node][successor]
                h = heuristic(successor)
                f_s = max(g + h, heuristic(current_node))
                if f_s < f:
                    f = f_s
        if parent_node is not None:
            parent_successors = graph[parent_node]
            parent_successors[current_node] = f
            if all([node in parent_successors for node in graph[parent_node]]):
                g_parent = min([graph[parent_node][node] for node in graph[parent_node]])
                h_parent = heuristic(parent_node)
                f_parent = max(g_parent + h_parent, heuristic(current_node))
                Queue.put((f_parent, parent_node, None, depth - 1, path[:-1]))
        for successor in successors:
            g = graph[current_node][successor]
            h = heuristic(successor)
            f_s = max(g + h, heuristic(current_node))
            Queue.put((f_s, successor, current_node, depth + 1, path + [current_node]))
            if Queue.qsize() > memory_limit:
                node_to_remove = Queue.get()
                parent_successors = graph[node_to_remove[2]]
                del parent_successors[node_to_remove[1]]
                if not all([node in parent_successors for node in graph[node_to_remove[2]]]):
                    Queue.put(node_to_remove[:3] + (depth - 1, path[:-1]))

    return None  # Failure

# Define the problem


start_state = 'A'
goal_state = 'F'
graph = {
    'A': {'B': 2, 'C': 3},
    'B': {'D': 4, 'E': 2},
    'C': {'F': 1},
    'D': {'F': 2},
    'E': {'F': 3},
    'F': {}
}

def heuristic(state):
# Define the heuristic function for estimating the remaining cost
h = {'A': 5, 'B': 4, 'C': 3, 'D': 2, 'E': 1, 'F': 0}
return h[state]

# Create the problem object


problem = {'start': start_state, 'goal': goal_state, 'graph': graph, 'heuristic': heuristic}

# Call the SMA_star function


solution = SMA_star(problem)

# Print the solution


if solution is not None:
print(' -> '.join(solution))
else:
print('No solution found.')
Program to implement naïve Bayes models
import csv
import random
import math
def loadcsv(filename):
    lines = csv.reader(open(filename, "r"))
    dataset = list(lines)
    for i in range(len(dataset)):
        # converting strings into numbers for processing
        dataset[i] = [float(x) for x in dataset[i]]
    return dataset

def splitdataset(dataset, splitratio):
    # 67% training size
    trainsize = int(len(dataset) * splitratio)
    trainset = []
    copy = list(dataset)
    while len(trainset) < trainsize:
        # generate indices for the dataset list randomly to pick elements for training data
        index = random.randrange(len(copy))
        trainset.append(copy.pop(index))
    return [trainset, copy]
def separatebyclass(dataset):
separated = {} #dictionary of classes 1 and 0
#creates a dictionary of classes 1 and 0 where the values are
#the instances belonging to each class
for i in range(len(dataset)):
vector = dataset[i]
if (vector[-1] not in separated):
separated[vector[-1]] = []
separated[vector[-1]].append(vector)
return separated
def mean(numbers):
return sum(numbers)/float(len(numbers))
def stdev(numbers):
avg = mean(numbers)
variance = sum([pow(x-avg,2) for x in numbers])/float(len(numbers)-1)
return math.sqrt(variance)
def summarize(dataset): #creates a dictionary of classes
summaries = [(mean(attribute), stdev(attribute)) for attribute in zip(*dataset)];
del summaries[-1] #excluding labels +ve or -ve
return summaries
def summarizebyclass(dataset):
    separated = separatebyclass(dataset)
    #print(separated)
    summaries = {}
    for classvalue, instances in separated.items():
        # for key, value in dict.items()
        # summaries is a dict of tuples (mean, std) for each class value
        summaries[classvalue] = summarize(instances)  # summarize is used to calculate mean and std
    return summaries

def calculateprobability(x, mean, stdev):
    exponent = math.exp(-(math.pow(x - mean, 2) / (2 * math.pow(stdev, 2))))
    return (1 / (math.sqrt(2 * math.pi) * stdev)) * exponent
def calculateclassprobabilities(summaries, inputvector):
probabilities = {} # probabilities contains the all prob of all class of test data
for classvalue, classsummaries in summaries.items():
#class and attribute information as mean and sd
probabilities[classvalue] = 1
for i in range(len(classsummaries)):
mean, stdev = classsummaries[i]
#take mean and sd of every attribute for class 0 and 1 seperaely
x = inputvector[i] #testvector's first attribute
probabilities[classvalue] *= calculateprobability(x, mean, stdev);
#use normal dist
return probabilities
def predict(summaries, inputvector):  # training summaries and a test vector are passed
    probabilities = calculateclassprobabilities(summaries, inputvector)
    bestLabel, bestProb = None, -1
    for classvalue, probability in probabilities.items():  # assigns the class which has the highest probability
if bestLabel is None or probability > bestProb:
bestProb = probability
bestLabel = classvalue
return bestLabel
def getpredictions(summaries, testset):
predictions = []
for i in range(len(testset)):
result = predict(summaries, testset[i])
predictions.append(result)
return predictions
def getaccuracy(testset, predictions):
correct = 0
for i in range(len(testset)):
if testset[i][-1] == predictions[i]:
correct += 1
return (correct/float(len(testset))) * 100.0
def main():
    filename = 'naivedata.csv'
    splitratio = 0.67
    dataset = loadcsv(filename)
    trainingset, testset = splitdataset(dataset, splitratio)
    print('Split {0} rows into train={1} and test={2} rows'.format(len(dataset), len(trainingset), len(testset)))
    # prepare model
    summaries = summarizebyclass(trainingset)
    #print(summaries)
    # test model
    predictions = getpredictions(summaries, testset)
    # find the predictions of test data with the training data
    accuracy = getaccuracy(testset, predictions)
    print('Accuracy of the classifier is : {0}%'.format(accuracy))

main()
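
The script assumes naivedata.csv contains only numeric fields, with the class label (for example 0 or 1) as the last value of every row, as in the Pima Indians Diabetes dataset that is typically used for this exercise; the exact file name and layout are assumptions, not fixed by the listing. A quick sanity check of the file before calling main():

rows = loadcsv('naivedata.csv')   # every field must parse as a float
print(len(rows), 'rows,', len(rows[0]) - 1, 'features + 1 class label per row')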
Program to Implement Bayesian Networks

# Import libraries

import pgmpy.models
import pgmpy.inference
import networkx as nx
import matplotlib.pyplot as plt
import pylab as plt
from pgmpy import factors
import numpy as np

# Create a bayesian network


model = pgmpy.models.BayesianModel([('Burglary', 'Alarm'),
('Earthquake', 'Alarm'),('Alarm', 'JohnCalls'), ('Alarm', 'MaryCalls')])

# Define conditional probability distributions (CPDs)

# Probability of burglary (True, False)
cpd_burglary = pgmpy.factors.discrete.TabularCPD('Burglary', 2, [[0.001],[0.999]])

# Probability of earthquake (True, False)


cpd_earthquake = pgmpy.factors.discrete.TabularCPD('Earthquake', 2, [[0.002],[0.998]])

# Probability of alarm going off (True, False) given a burglary and/or earthquake


cpd_alarm = pgmpy.factors.discrete.TabularCPD('Alarm', 2, [[0.95, 0.94, 0.29, 0.001],
[0.05, 0.06, 0.71, 0.999]],
evidence=['Burglary', 'Earthquake'],
evidence_card=[2, 2])

# Probability that John calls (True, False) given that the alarm has sounded
cpd_john = pgmpy.factors.discrete.TabularCPD('JohnCalls', 2, [[0.90, 0.05],
[0.10, 0.95]],
evidence=['Alarm'],
evidence_card=[2])

# Probability that Mary calls (True, False) given that the alarm has sounded
cpd_mary = pgmpy.factors.discrete.TabularCPD('MaryCalls', 2, [[0.70, 0.01],
[0.30, 0.99]],
evidence=['Alarm'],
evidence_card=[2])

# Add CPDs to the network structure


model.add_cpds(cpd_burglary, cpd_earthquake, cpd_alarm, cpd_john, cpd_mary)

# Check if the model is valid, throw an exception otherwise
model.check_model()

# Print probability distributions


print('Probability distribution, P(Burglary)')
print(cpd_burglary)
print()
print('Probability distribution, P(Earthquake)')
print(cpd_earthquake)
print()
print('Joint probability distribution, P(Alarm | Burglary, Earthquake)')
print(cpd_alarm)
print()
print('Joint probability distribution, P(JohnCalls | Alarm)')
print(cpd_john)
print()
print('Joint probability distribution, P(MaryCalls | Alarm)')
print(cpd_mary)
print()

infer = pgmpy.inference.VariableElimination(model)
# Calculate the probability of a burglary if John and Mary call (0: True, 1: False)
posterior_probability = infer.query(['Burglary'], evidence={'JohnCalls': 0, 'MaryCalls': 0})

# Print posterior probability


print('Posterior probability of Burglary if JohnCalls(True) and MaryCalls(True)')
print(posterior_probability)
print()

# Calculate the probability of alarm starting if there is a burglary and an earthquake (0: True, 1: False)
posterior_probability = infer.query(['Alarm'], evidence={'Burglary': 0, 'Earthquake': 0})

# Print posterior probability


print('Posterior probability of Alarm sounding if Burglary(True) and Earthquake(True)')
print(posterior_probability)
print()
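Note that recent pgmpy releases rename BayesianModel to BayesianNetwork; if the constructor above raises a deprecation error, the equivalent call (assuming a newer pgmpy is installed) is:

from pgmpy.models import BayesianNetwork   # newer pgmpy API

model = BayesianNetwork([('Burglary', 'Alarm'), ('Earthquake', 'Alarm'),
                         ('Alarm', 'JohnCalls'), ('Alarm', 'MaryCalls')])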
Program to implement simple linear regression

import numpy as nmp


import matplotlib.pyplot as mtplt

def estimate_coeff(p, q):
    # Here, we will estimate the total number of points or observations
    n1 = nmp.size(p)
    # Now, we will calculate the mean of the p and q vectors
    m_p = nmp.mean(p)
    m_q = nmp.mean(q)

    # Here, we will calculate the cross deviation and deviation about p
    SS_pq = nmp.sum(q * p) - n1 * m_q * m_p
    SS_pp = nmp.sum(p * p) - n1 * m_p * m_p

    # Here, we will calculate the regression coefficients
    b_1 = SS_pq / SS_pp
    b_0 = m_q - b_1 * m_p

    return (b_0, b_1)

def plot_regression_line(p, q, b):
    # Now, we will plot the actual points or observations as a scatter plot
    mtplt.scatter(p, q, color = "m", marker = "o", s = 30)

    # Here, we will calculate the predicted response vector
    q_pred = b[0] + b[1] * p

    # Here, we will plot the regression line
    mtplt.plot(p, q_pred, color = "g")

    # Here, we will put the labels
    mtplt.xlabel('p')
    mtplt.ylabel('q')

    # Here, we will call the function to show the plot
    mtplt.show()

def main():
    # entering the observation points or data
    p = nmp.array([10, 11, 12, 13, 14, 15, 16, 17, 18, 19])
    q = nmp.array([11, 13, 12, 15, 17, 18, 18, 19, 20, 22])

    # now, we will estimate the coefficients
    b = estimate_coeff(p, q)
    print("Estimated coefficients are :\nb_0 = {} \nb_1 = {}".format(b[0], b[1]))

    # Now, we will plot the regression line
    plot_regression_line(p, q, b)

if __name__ == "__main__":
    main()
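
As an optional cross-check of the closed-form estimate (not part of the original listing), numpy's polyfit fits the same least-squares line, so its slope and intercept should agree with b_1 and b_0:

p = nmp.array([10, 11, 12, 13, 14, 15, 16, 17, 18, 19])
q = nmp.array([11, 13, 12, 15, 17, 18, 18, 19, 20, 22])
b_0, b_1 = estimate_coeff(p, q)
slope, intercept = nmp.polyfit(p, q, 1)   # slope should match b_1, intercept should match b_0
print(b_0, b_1, "vs", intercept, slope)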
Program to implement regression model

# importing the dataset


import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

dataset = pd.read_csv('Salary_Data.csv')
dataset.head()

# data preprocessing
X = dataset.iloc[:, :-1].values #independent variable array
y = dataset.iloc[:,1].values #dependent variable vector

# splitting the dataset


from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X,y,test_size=1/3,random_state=0)
# fitting the regression model
from sklearn.linear_model import LinearRegression
regressor = LinearRegression()
regressor.fit(X_train, y_train)  # actually produces the linear eqn for the data

# predicting the test set results


y_pred = regressor.predict(X_test)
print(y_pred)

print(y_test)

# visualizing the results


#plot for the TRAIN

plt.scatter(X_train, y_train, color='red') # plotting the observation line


plt.plot(X_train, regressor.predict(X_train), color='blue') # plotting the regression line
plt.title("Salary vs Experience (Training set)") # stating the title of the graph
plt.xlabel("Years of experience") # adding the name of x-axis
plt.ylabel("Salaries") # adding the name of y-axis
plt.show() # specifies end of graph

#plot for the TEST

plt.scatter(X_test, y_test, color='red')
plt.plot(X_train, regressor.predict(X_train), color='blue') # plotting the regression line
plt.title("Salary vs Experience (Testing set)")

plt.xlabel("Years of experience")
plt.ylabel("Salaries")
plt.show()
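
To put a number on how well the fitted line generalises (an optional addition, not in the original listing), the usual regression metrics can be computed on the test set:

from sklearn.metrics import mean_squared_error, r2_score
print("MSE :", mean_squared_error(y_test, y_pred))
print("R^2 :", r2_score(y_test, y_pred))   # closer to 1 means a better linear fit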
Program to implement SVM model

# Import the Libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd

# Load the Dataset
dataset = pd.read_csv('Social_Network_Ads.csv')

# Split Dataset into X and y
X = dataset.iloc[:, [2, 3]].values
y = dataset.iloc[:, 4].values

# Split the X and y Dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.25, random_state = 0)

# Normalize the data within a particular range
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
X_train = sc.fit_transform(X_train)
X_test = sc.transform(X_test)

# Build the SVM model on the Training set. The default value of the kernel is 'rbf'.
from sklearn.svm import SVC
classifier = SVC(kernel = 'rbf', random_state = 0)
classifier.fit(X_train, y_train)

# Predict the Test Set Results
y_pred = classifier.predict(X_test)

# Make the Confusion Matrix
from sklearn.metrics import confusion_matrix, accuracy_score
cm = confusion_matrix(y_test, y_pred)
print(cm)
print(accuracy_score(y_test, y_pred))
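
Beyond the confusion matrix and accuracy, per-class precision and recall can be printed with a single extra call (an optional addition, not in the original listing):

from sklearn.metrics import classification_report
print(classification_report(y_test, y_pred))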
Program to implement Basic ensemble learning techniques

#Utility
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.metrics import roc_auc_score,accuracy_score
from sklearn.preprocessing import StandardScaler
# machine learning
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression, SGDClassifier
#seed
seed = 40
df = pd.read_csv("heart.csv")

#target
target = df["output"]

# getting scaled train data


train = df.drop("output", axis=1)
scaled_train = StandardScaler().fit_transform(train)

# Splitting the data into training and validation


X_train, X_test, y_train, y_test = train_test_split(
    train, target, test_size=0.20, random_state=seed
)
#building
model_1 = RandomForestClassifier(random_state=seed)
model_2 = LogisticRegression(random_state=seed, max_iter=1000)
model_3 = SGDClassifier(random_state=seed)
# training
model_1.fit(X_train, y_train)
model_2.fit(X_train, y_train)
model_3.fit(X_train, y_train)
# predicting
pred_1 = model_1.predict(X_test)
pred_2 = model_2.predict(X_test)
pred_3 = model_3.predict(X_test)
# averaging
pred_final = np.round((pred_1 + pred_2 + pred_3) / 3)

# evalution
accuracy = round(accuracy_score(y_test, pred_final) * 100, 3)
auc = round(roc_auc_score(y_test, pred_final), 3)

print(f" Accuracy: {accuracy}%")


print(f" AUC score: {auc}")
# Weighted Average:
# give the highest weight to best-performing models and the lowest weight to
# lower-performing models while taking an average
#             model_1   model_2   model_3
# Weightage     30%       60%       10%

pred_final = np.round(0.3*pred_1 + 0.6*pred_2 + 0.1*pred_3)

# evalution
accuracy = round(accuracy_score(y_test, pred_final) * 100, 3)
auc = round(roc_auc_score(y_test, pred_final), 3)

print(f" Accuracy: {accuracy}


%") print(f" AUC score: {auc}")
#Max Voting
from sklearn.ensemble import VotingClassifier
from sklearn.neighbors import KNeighborsClassifier

# building the model


model_4 = KNeighborsClassifier()

# voting classifier
final_model = VotingClassifier(
    estimators=[("rf", model_1), ("lr", model_2), ("knn", model_4)],
    voting="hard",
)
# training
final_model.fit(X_train, y_train)

# prediction
prediction = final_model.predict(X_test)

# evaluation
accuracy = round(accuracy_score(y_test, prediction) * 100, 3)
auc = round(roc_auc_score(y_test, prediction), 3)

print(f" Accuracy: {accuracy}%")


print(f" AUC score: {auc}")
Program to implement k-Means algorithm

import pandas as pd
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans

data = {
'x': [25, 34, 22, 27, 33, 33, 31, 22, 35, 34, 67, 54, 57, 43, 50, 57, 59, 52, 65, 47, 49, 48, 35, 33, 44, 45,
38, 43, 51, 46],
'y': [79, 51, 53, 78, 59, 74, 73, 57, 69, 75, 51, 32, 40, 47, 53, 36, 35, 58, 59, 50, 25, 20, 14, 12, 20, 5, 29,
27, 8, 7]
}

df = pd.DataFrame(data)

kmeans = KMeans(n_clusters=3).fit(df)
centroids = kmeans.cluster_centers_
print(centroids)

plt.scatter(df['x'], df['y'], c=kmeans.labels_.astype(float), s=50, alpha=0.5)


plt.scatter(centroids[:, 0], centroids[:, 1], c='red', s=50)
plt.show()
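
The number of clusters is fixed at 3 above; a common way to sanity-check that choice (an optional addition, not part of the original listing) is the elbow method, which plots the within-cluster sum of squares for several values of k:

# Elbow method: inertia_ is the within-cluster sum of squares for each k
inertias = [KMeans(n_clusters=k, n_init=10).fit(df).inertia_ for k in range(1, 10)]
plt.plot(range(1, 10), inertias, marker='o')
plt.xlabel('k')
plt.ylabel('inertia')
plt.show()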
Program to Implement EM for Bayesian networks

Program:
import numpy as np
import pandas as pd
import csv
from pgmpy.estimators import MaximumLikelihoodEstimator
from pgmpy.models import BayesianModel
from pgmpy.inference import VariableElimination

heartDisease = pd.read_csv('heart.csv')
heartDisease = heartDisease.replace('?', np.nan)

print('Sample instances from the dataset are given below')


print(heartDisease.head())

print('\n Attributes and datatypes')


print(heartDisease.dtypes)

model = BayesianModel([('age','heartdisease'),('sex','heartdisease'),('exang','heartdisease'),('cp','heartdisease'),('heartdisease','restecg'),('heartdisease','chol')])
print('\nLearning CPD using Maximum likelihood estimators')
model.fit(heartDisease,estimator=MaximumLikelihoodEstimator)

print('\n Inferencing with Bayesian Network:')


HeartDiseasetest_infer = VariableElimination(model)

print('\n 1. Probability of HeartDisease given evidence= restecg')


q1=HeartDiseasetest_infer.query(variables=['heartdisease'],evidence={'restecg':1})
print(q1)

print('\n 2. Probability of HeartDisease given evidence= cp ')


q2=HeartDiseasetest_infer.query(variables=['heartdisease'],evidence={'cp':2})
print(q2)
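
The listing fits the CPDs with maximum-likelihood estimation, which works here because heart.csv is fully observed. For data with missing values or latent variables, newer pgmpy releases also ship an EM-based estimator; a minimal sketch, assuming pgmpy.estimators.ExpectationMaximization is available in the installed version:

from pgmpy.estimators import ExpectationMaximization

em = ExpectationMaximization(model, heartDisease)
cpds_em = em.get_parameters()   # CPDs estimated by expectation-maximization
print(cpds_em)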
Program to implement single neuron neural network

# single neuron neural network


from numpy import exp, array, random, dot, tanh

# Class to create a neural network with a single neuron
class NeuralNetwork():

    def __init__(self):
        # Using seed to make sure it'll generate the same weights in every run
        random.seed(1)

        # 3x1 Weight matrix
        self.weight_matrix = 2 * random.random((3, 1)) - 1

    # tanh as activation function
    def tanh(self, x):
        return tanh(x)

    # derivative of tanh function.
    # Needed to calculate the gradients.
    def tanh_derivative(self, x):
        return 1.0 - tanh(x) ** 2

    # forward propagation
    def forward_propagation(self, inputs):
        return self.tanh(dot(inputs, self.weight_matrix))

    # training the neural network.
    def train(self, train_inputs, train_outputs, num_train_iterations):
        # Number of iterations we want to perform for this set of inputs.
        for iteration in range(num_train_iterations):
            output = self.forward_propagation(train_inputs)

            # Calculate the error in the output.
            error = train_outputs - output

            # multiply the error by the input and then by the gradient of the
            # tanh function to calculate the adjustment that needs to be made
            # to the weights
            adjustment = dot(train_inputs.T, error * self.tanh_derivative(output))

            # Adjust the weight matrix
            self.weight_matrix += adjustment

# Driver Code
if __name__ == "__main__":

    neural_network = NeuralNetwork()

    print('Random weights at the start of training')
    print(neural_network.weight_matrix)

    train_inputs = array([[0, 0, 1], [1, 1, 1], [1, 0, 1], [0, 1, 1]])
    train_outputs = array([[0, 1, 1, 0]]).T

    neural_network.train(train_inputs, train_outputs, 10000)

    print('New weights after training')
    print(neural_network.weight_matrix)

    # Test the neural network with a new situation.
    print("Testing network on new examples ->")
    print(neural_network.forward_propagation(array([1, 0, 0])))
Program to implement Backpropagation algorithm

import numpy as np
X = np.array(([2, 9], [1, 5], [3, 6]), dtype=float) # two inputs [sleep,study]
y = np.array(([92], [86], [89]), dtype=float) # one output [Expected % in Exams]
X = X/np.amax(X,axis=0) # maximum of X array longitudinally
y = y/100

#Sigmoid Function
def sigmoid (x):
return 1/(1 + np.exp(-x))

#Derivative of Sigmoid Function


def derivatives_sigmoid(x):
return x * (1 - x)

#Variable initialization
epoch = 5000               # Setting training iterations
lr = 0.1                   # Setting learning rate
inputlayer_neurons = 2     # number of features in the data set
hiddenlayer_neurons = 3    # number of hidden layer neurons
output_neurons = 1         # number of neurons at the output layer

#weight and bias initialization
# (np.random.uniform draws a random range of numbers uniformly of dim x*y)
wh = np.random.uniform(size=(inputlayer_neurons, hiddenlayer_neurons))   # weights of the links from input nodes to hidden nodes
bh = np.random.uniform(size=(1, hiddenlayer_neurons))                    # bias of the links from input nodes to hidden nodes
wout = np.random.uniform(size=(hiddenlayer_neurons, output_neurons))     # weights of the links from hidden nodes to the output node
bout = np.random.uniform(size=(1, output_neurons))                       # bias of the links from hidden nodes to the output node

for i in range(epoch):
    #Forward Propogation
    hinp1 = np.dot(X, wh)
    hinp = hinp1 + bh
    hlayer_act = sigmoid(hinp)
    outinp1 = np.dot(hlayer_act, wout)
    outinp = outinp1 + bout
    output = sigmoid(outinp)

    #Backpropagation
    EO = y - output
    outgrad = derivatives_sigmoid(output)
    d_output = EO * outgrad
    EH = d_output.dot(wout.T)
    # how much the hidden layer weights contributed to the error
    hiddengrad = derivatives_sigmoid(hlayer_act)
    d_hiddenlayer = EH * hiddengrad

    # dot product of next-layer error and current-layer output
    wout += hlayer_act.T.dot(d_output) * lr
    wh += X.T.dot(d_hiddenlayer) * lr

print("Input: \n" + str(X))


print("Actual Output: \n" + str(y))
