AIML Sample Programs
graph = {
    '5' : ['3', '7'],
    '3' : ['2', '4'],
    '7' : ['8'],
    '2' : [],
    '4' : ['8'],
    '8' : []
}

visited = []  # List of visited nodes
queue = []    # Initialize a queue

def bfs(visited, graph, node):  # Function for BFS
    visited.append(node)
    queue.append(node)
    while queue:  # Loop until every reachable node is visited
        m = queue.pop(0)
        print(m, end=" ")
        for neighbour in graph[m]:
            if neighbour not in visited:
                visited.append(neighbour)
                queue.append(neighbour)

# Driver code
print("Following is the Breadth-First Search")
bfs(visited, graph, '5')
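Running this program should print the traversal order 5 3 7 2 4 8: node '5' is dequeued first, then its neighbours level by level. Note that list.pop(0) is O(n); for larger graphs, collections.deque with popleft() is the idiomatic queue.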
import math
import random
import pandas as pd
import numpy as np

# Encode string class labels as integer indices (in the last column)
def encode_class(mydata):
    classes = []
    for i in range(len(mydata)):
        if mydata[i][-1] not in classes:
            classes.append(mydata[i][-1])
    for i in range(len(classes)):
        for j in range(len(mydata)):
            if mydata[j][-1] == classes[i]:
                mydata[j][-1] = i
    return mydata

# Randomly split the data into train and test sets according to the given ratio
def splitting(mydata, ratio):
    train_num = int(len(mydata) * ratio)
    train = []
    test = list(mydata)
    while len(train) < train_num:
        index = random.randrange(len(test))
        train.append(test.pop(index))
    return train, test

# Group instances by their class label
def groupUnderClass(mydata):
    data_dict = {}
    for i in range(len(mydata)):
        if mydata[i][-1] not in data_dict:
            data_dict[mydata[i][-1]] = []
        data_dict[mydata[i][-1]].append(mydata[i])
    return data_dict

# Mean and standard deviation of a list of numbers
def MeanAndStdDev(numbers):
    avg = np.mean(numbers)
    stddev = np.std(numbers)
    return avg, stddev

# Per-class mean and standard deviation of every attribute column
def MeanAndStdDevForClass(mydata):
    info = {}
    data_dict = groupUnderClass(mydata)
    for classValue, instances in data_dict.items():
        info[classValue] = [MeanAndStdDev(attribute) for attribute in zip(*instances)]
    return info

# Gaussian probability density of x under N(mean, stdev)
def calculateGaussianProbability(x, mean, stdev):
    epsilon = 1e-10  # Guards against division by zero when stdev is 0
    expo = math.exp(-(math.pow(x - mean, 2) / (2 * math.pow(stdev + epsilon, 2))))
    return (1 / (math.sqrt(2 * math.pi) * (stdev + epsilon))) * expo
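The listing stops at the per-attribute Gaussian likelihood; the prediction and evaluation steps are missing. A minimal sketch of how they might look, reusing the helpers above (the names calculateClassProbabilities, predict, getPredictions, and accuracy_rate are assumptions, not part of the original):

# Hypothetical completion of the Naive Bayes classifier
def calculateClassProbabilities(info, test_instance):
    probabilities = {}
    for classValue, classSummaries in info.items():
        probabilities[classValue] = 1
        # Skip the last column, which holds the encoded class label
        for i in range(len(classSummaries) - 1):
            mean, std_dev = classSummaries[i]
            probabilities[classValue] *= calculateGaussianProbability(test_instance[i], mean, std_dev)
    return probabilities

def predict(info, test_instance):
    probabilities = calculateClassProbabilities(info, test_instance)
    return max(probabilities, key=probabilities.get)  # Class with the highest score

def getPredictions(info, test_set):
    return [predict(info, instance) for instance in test_set]

def accuracy_rate(test_set, predictions):
    correct = sum(1 for i in range(len(test_set)) if test_set[i][-1] == predictions[i])
    return correct / float(len(test_set)) * 100.0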
import pgmpy.models
import pgmpy.inference
import pgmpy.factors.discrete
import networkx as nx
import pylab as plt
# Create a Bayesian network (recent pgmpy versions rename BayesianModel to BayesianNetwork)
model = pgmpy.models.BayesianModel([('Burglary', 'Alarm'),
                                    ('Earthquake', 'Alarm'),
                                    ('Alarm', 'JohnCalls'),
                                    ('Alarm', 'MaryCalls')])
cpd_burglary = pgmpy.factors.discrete.TabularCPD('Burglary', 2, [[0.001], [0.999]])
cpd_earthquake = pgmpy.factors.discrete.TabularCPD('Earthquake', 2, [[0.002], [0.998]])
cpd_alarm = pgmpy.factors.discrete.TabularCPD('Alarm', 2,
                                              [[0.95, 0.94, 0.29, 0.001],
                                               [0.05, 0.06, 0.71, 0.999]],
                                              evidence=['Burglary', 'Earthquake'],
                                              evidence_card=[2, 2])
cpd_john = pgmpy.factors.discrete.TabularCPD('JohnCalls', 2,
                                             [[0.90, 0.05], [0.10, 0.95]],
                                             evidence=['Alarm'], evidence_card=[2])
cpd_mary = pgmpy.factors.discrete.TabularCPD('MaryCalls', 2,
                                             [[0.70, 0.01], [0.30, 0.99]],
                                             evidence=['Alarm'], evidence_card=[2])
model.add_cpds(cpd_burglary, cpd_earthquake, cpd_alarm, cpd_john, cpd_mary)
model.check_model()
# Print probability distributions
print('Probability distribution, P(Burglary)')
print(cpd_burglary)
print()
print('Probability distribution, P(Earthquake)')
print(cpd_earthquake)
print()
print('Conditional probability distribution, P(Alarm | Burglary, Earthquake)')
print(cpd_alarm)
print()
print('Conditional probability distribution, P(JohnCalls | Alarm)')
print(cpd_john)
print()
print('Conditional probability distribution, P(MaryCalls | Alarm)')
print(cpd_mary)
print()
infer = pgmpy.inference.VariableElimination(model)
# Calculate the probability of a burglary given that both John and Mary call (state 0: True, 1: False)
posterior_probability = infer.query(['Burglary'], evidence={'JohnCalls': 0, 'MaryCalls': 0})
# Print posterior probability
print('Posterior probability of Burglary if JohnCalls(True) and MaryCalls(True)')
print(posterior_probability)
print()
# Calculate the probability of the alarm sounding given a burglary and an earthquake (state 0: True, 1: False)
posterior_probability = infer.query(['Alarm'], evidence={'Burglary': 0, 'Earthquake': 0})
# Print posterior probability
print('Posterior probability of Alarm sounding if Burglary(True) and Earthquake(True)')
print(posterior_probability)
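The networkx and pylab imports above are otherwise unused, so the original program presumably also drew the network. A minimal sketch of that step (the layout and styling choices are assumptions):

# Draw the network structure (hypothetical visualization step)
nx_graph = nx.DiGraph(model.edges())
nx.draw(nx_graph, with_labels=True, node_size=3000, node_color='lightblue')
plt.show()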
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline

df = pd.read_csv('/content/tips.csv')
features = np.array(df.total_bill)
labels = np.array(df.tip)

# Diagonal weight matrix: w_j = exp(-||x_j - x||^2 / (2 k^2))
def kernel(data, point, xmat, k):
    m, n = np.shape(xmat)
    ws = np.mat(np.eye(m))
    for j in range(m):
        diff = point - data[j]
        ws[j, j] = np.exp(float(diff * diff.T) / (-2.0 * k**2))
    return ws

# Weighted least-squares solution at one query point
def local_weight(data, point, xmat, ymat, k):
    wei = kernel(data, point, xmat, k)
    return (data.T * (wei * data)).I * (data.T * (wei * ymat.T))

# Fit a local model at every training point
def local_weight_regression(xmat, ymat, k):
    m, n = np.shape(xmat)
    ypred = np.zeros(m)
    for i in range(m):
        # float() collapses the 1x1 matrix product to a scalar
        ypred[i] = float(xmat[i] * local_weight(xmat, xmat[i], xmat, ymat, k))
    return ypred

m = features.shape[0]
mtip = np.mat(labels)
data = np.hstack((np.ones((m, 1)), np.mat(features).T))  # Prepend a bias column
ypred = local_weight_regression(data, mtip, 0.5)  # Bandwidth k = 0.5

# Sort by the feature value so the fitted curve plots cleanly
indices = data[:, 1].argsort(0)
xsort = data[indices][:, 0]

fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.scatter(features, labels, color='blue')
ax.plot(xsort[:, 1], ypred[indices], color='red', linewidth=3)
plt.xlabel('Total bill')
plt.ylabel('Tip')
plt.show()
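The bandwidth k (0.5 here) controls how local the fit is: a smaller k weights only nearby bills and yields a wigglier curve, while a larger k approaches an ordinary least-squares line.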
Output: a scatter plot of total bill vs. tip, with the locally weighted regression curve drawn in red.
Ex. No: 6) Build Decision Trees and Random Forest
import pandas as pd
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error
# Load data
data = pd.read_csv('data.csv')
# Split data into training and test sets
X = data.drop(['target'], axis=1)
y = data['target']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
# Build decision tree
dt = DecisionTreeRegressor()
dt.fit(X_train, y_train)
# Predict on test set
y_pred = dt.predict(X_test)
# Evaluate performance
mse = mean_squared_error(y_test, y_pred)
print(f"Decision Tree Mean Squared Error: {mse:.4f}")
# Build random forest
rf = RandomForestRegressor()
rf.fit(X_train, y_train)
# Predict on test set
y_pred = rf.predict(X_test)
# Evaluate performance
mse = mean_squared_error(y_test, y_pred)
print(f"Random Forest Mean Squared Error: {mse:.4f}")
Output: the mean squared error printed for each model; the values depend on the contents of data.csv.
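A hypothetical follow-up, not in the original listing: random forests expose per-feature importances, which help explain what the fitted model relies on.

# Inspect which columns the forest relied on (assumes X and rf from above)
importances = pd.Series(rf.feature_importances_, index=X.columns)
print(importances.sort_values(ascending=False))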
import tensorflow as tf
from tensorflow import keras
# Load the MNIST dataset
(x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data()
# Normalize the input data
x_train = x_train / 255.0
x_test = x_test / 255.0
# Define the model architecture
model = keras.Sequential([
    keras.layers.Flatten(input_shape=(28, 28)),
    keras.layers.Dense(128, activation='relu'),
    keras.layers.Dense(10, activation='softmax')
])
# Compile the model
model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])
# Train the model
model.fit(x_train, y_train, epochs=10, validation_data=(x_test, y_test))
# Evaluate the model
test_loss, test_acc = model.evaluate(x_test, y_test, verbose=2)
print('Test accuracy:', test_acc)
Output: per-epoch training logs, followed by the test loss and test accuracy.
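A hypothetical usage example, not part of the original listing: once training finishes, the model can classify individual images.

# Classify the first test image with the trained model
probs = model.predict(x_test[:1])   # Softmax probabilities, shape (1, 10)
print('Predicted digit:', probs.argmax())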
Ex. No: 12) Build Deep Learning NN Models
import tensorflow as tf
from tensorflow import keras
# Load the MNIST dataset
(x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data()
# Normalize the input data
x_train = x_train / 255.0
x_test = x_test / 255.0
# Define the model architecture
model = keras.Sequential([
    keras.layers.Flatten(input_shape=(28, 28)),
    keras.layers.Dense(128, activation='relu'),
    keras.layers.Dropout(0.2),   # Randomly zero 20% of activations during training
    keras.layers.Dense(10)       # Raw logits; no softmax on the output layer
])
# Compile the model (the loss expects logits, hence from_logits=True)
model.compile(optimizer='adam',
              loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
              metrics=['accuracy'])
# Train the model
model.fit(x_train, y_train, epochs=10, validation_data=(x_test, y_test))
# Evaluate the model
test_loss, test_acc = model.evaluate(x_test, y_test, verbose=2)
print('Test accuracy:', test_acc)
Output: per-epoch training logs, followed by the test loss and test accuracy.
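This model differs from the previous one in two ways: Dropout regularizes training, and the final Dense layer outputs raw logits, which is why the loss is built with from_logits=True. To recover probabilities at inference time, the standard approach is to append a Softmax layer; a minimal sketch:

# Wrap the trained model with a softmax so predictions are probabilities
probability_model = keras.Sequential([model, keras.layers.Softmax()])
print(probability_model.predict(x_test[:1]))  # One probability per digit class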