EX NO: UNINFORMED SEARCH ALGORITHMS (BFS)
DATE:
AIM:
ALGORITHM:
PROGRAM AND OUTPUT:
GRAPH = {'A': ['B', 'C'], 'B': ['D', 'E'], 'C': ['F'], 'D': [], 'E': ['F'], 'F': []}
visited = []   # nodes already discovered
queue = []     # FIFO frontier

def BFS(visited, graph, node):
    visited.append(node)
    queue.append(node)
    print("BFS:\t")
    while queue:
        p = queue.pop(0)
        print(p, end=" ")
        for child in graph[p]:           # the loop over neighbours was lost in extraction
            if child not in visited:     # enqueue each node at most once
                visited.append(child)
                queue.append(child)

BFS(visited, GRAPH, 'A')
BFS:
A B C D E F
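Note: list.pop(0) is O(n) on each dequeue. A minimal alternative sketch using collections.deque (an addition for reference, not part of the original program):

from collections import deque

def bfs_deque(graph, start):
    visited = {start}
    queue = deque([start])
    order = []
    while queue:
        node = queue.popleft()            # O(1) dequeue from the left
        order.append(node)
        for child in graph[node]:
            if child not in visited:      # enqueue each node at most once
                visited.add(child)
                queue.append(child)
    return order

print(bfs_deque(GRAPH, 'A'))   # ['A', 'B', 'C', 'D', 'E', 'F']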
RESULT:
EX NO: UNINFORMED SEARCH ALGORITHMS (DFS)
DATE:
AIM:
ALGORITHM:
PROGRAM AND OUTPUT:
GRAPH = {'0': set(['1', '2']), '1': set(['3', '4']), '2': set(['0']), '3': set(['2', '3']), '4': set([])}

def DFS(graph, node, visited=None):
    if visited is None:
        visited = set()
    visited.add(node)
    print(node, end=" ")
    for neighbour in graph[node] - visited:   # recurse into unvisited neighbours
        DFS(graph, neighbour, visited)
    return visited

DFS(GRAPH, '0')
0 2 1 3 4
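For reference, the same traversal can be written without recursion; a minimal iterative sketch with an explicit stack (an addition, not part of the original record):

def dfs_iterative(graph, start):
    visited, stack = set(), [start]
    while stack:
        node = stack.pop()                        # LIFO pop gives depth-first order
        if node not in visited:
            visited.add(node)
            print(node, end=" ")
            stack.extend(graph[node] - visited)   # push unvisited neighbours
    return visited

dfs_iterative(GRAPH, '0')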
RESULT:
EX NO: INFORMED SEARCH ALGORITHMS (A*)
DATE:
AIM:
ALGORITHM:
PROGRAM AND OUTPUT:
def astaralgo(start_node, stop_node):
    open_set = set(start_node)
    closed_set = set()
    g = {}               # cost from the start node to each discovered node
    parents = {}         # parent map used to reconstruct the path
    g[start_node] = 0
    parents[start_node] = start_node
    while len(open_set) > 0:
        n = None
        # pick the open node with the lowest f(v) = g(v) + h(v)
        for v in open_set:
            if n is None or g[v] + h(v) < g[n] + h(n):
                n = v
        if n == stop_node or Graph_nodes.get(n) is None:
            pass
        else:
            for (m, weight) in get_neighbors(n):
                if m not in open_set and m not in closed_set:
                    open_set.add(m)
                    parents[m] = n
                    g[m] = g[n] + weight
                else:
                    if g[m] > g[n] + weight:     # found a cheaper route to m
                        g[m] = g[n] + weight
                        parents[m] = n
                        if m in closed_set:
                            closed_set.remove(m)
                            open_set.add(m)
        if n is None:
            print("path does not exist!")
            return None
        if n == stop_node:
            path = []
            while parents[n] != n:
                path.append(n)
                n = parents[n]
            path.append(start_node)
            path.reverse()
            print("Path found: {}".format(path))
            return path
        open_set.remove(n)
        closed_set.add(n)
    print("path does not exist!")
    return None
def get_neighbors(v):
    if v in Graph_nodes:
        return Graph_nodes[v]
    else:
        return None
def h(n):
    H_dist = {
        'A': 10, 'B': 8, 'C': 5, 'D': 7, 'E': 3,
        'F': 6, 'G': 5, 'H': 3, 'I': 1, 'J': 0
    }
    return H_dist[n]
Graph_nodes = {
    'A': [('B', 6), ('F', 3)],
    'B': [('C', 3), ('D', 2)],
    'C': [('E', 5), ('D', 1)],
    'E': [('D', 6)],
    'D': [('E', 8)],
    'F': [('G', 1), ('H', 7)],
    'G': [('I', 3)],
    'H': [('I', 2)],
    'I': [('J', 3)]
}
astaralgo('A', 'J')
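A worked check of the expected result (derived from the graph and heuristic above, not copied from the original record): the search expands A, then F (g=3), G (g=4), I (g=7), and reaches J with g=10, so the call should print Path found: ['A', 'F', 'G', 'I', 'J'] with total cost 10.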
RESULT:
EX NO: INFORMED SEARCH ALGORITHMS (MEMORY-BOUNDED A*)
DATE:
AIM:
ALGORITHM:
PROGRAM AND OUTPUT:
import math

graph = {
    'S': [('A', 1), ('B', 3)], 'A': [('B', 1), ('C', 3)], 'B': [('C', 1), ('D', 2)],
    'C': [('D', 1), ('G', 2)], 'D': [('E', 3)],
    'E': [], 'G': []            # terminal entries assumed; the original listing was truncated
}
heuristic = {'S': 6, 'A': 5, 'B': 4, 'C': 2, 'D': 3, 'E': 4, 'G': 0}  # values assumed; the original table was lost

def ma_star(graph, heuristic, start, goal, max_memory):
    # Reconstruction from the surviving fragments: best-first search whose
    # frontier is never allowed to grow beyond max_memory entries.
    stack = [(heuristic[start], 0, start, [start])]
    min_fcost = {start: 0}
    while stack:
        stack.sort(reverse=True)                # cheapest f-cost at the end
        f, cost, node, path = stack.pop()
        if node == goal:
            return path
        if len(stack) >= max_memory:            # memory bound reached: skip expansion
            continue
        for adjacent, weight in graph[node]:
            g = cost + weight
            if adjacent in min_fcost and min_fcost[adjacent] <= g:
                continue                        # already reached more cheaply
            f = g + heuristic[adjacent]
            min_fcost[adjacent] = g
            stack.append((f, g, adjacent, path + [adjacent]))
    return None
start = 'S'
goal = 'G'
max_memory = 8
path = ma_star(graph, heuristic, start, goal, max_memory)
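A quick usage check (hedged: the returned route depends on the heuristic values assumed above, under which the cheapest path costs 1+1+1+2 = 5):

print("Path:", path)   # expected: Path: ['S', 'A', 'B', 'C', 'G']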
RESULT:
EX NO: NAÏVE BAYES MODELS
DATE:
AIM:
ALGORITHM:
PROGRAM AND OUTPUT:
import pandas as pd
df = pd.read_csv("/content/loan_data.csv")
df.head()
df.info()
<class 'pandas.core.frame.DataFrame'>
import seaborn as sns
import matplotlib.pyplot as plt

sns.countplot(data=df, x='purpose', hue='not.fully.paid')
plt.xticks(rotation=45, ha='right');
pre_df = pd.get_dummies(df,columns=['purpose'],drop_first=True)
pre_df
X = pre_df.drop('not.fully.paid', axis=1)
y = pre_df['not.fully.paid']
from sklearn.model_selection import train_test_split
from sklearn.naive_bayes import GaussianNB
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=125)  # split assumed
model = GaussianNB()
model.fit(X_train, y_train)
from sklearn.metrics import (
    accuracy_score,
    confusion_matrix,
    ConfusionMatrixDisplay,
    f1_score,
    classification_report,
)
y_pred = model.predict(X_test)
accuracy = accuracy_score(y_pred, y_test)
f1 = f1_score(y_pred, y_test, average="weighted")   # restored metric lines
print("Accuracy:", accuracy)
print("F1 Score:", f1)
Accuracy: 0.8206263840556786
F1 Score: 0.8686606980013266
cm = confusion_matrix(y_test, y_pred)
disp = ConfusionMatrixDisplay(confusion_matrix=cm)
disp.plot();
RESULT:
EX NO: BAYESIAN NETWORKS
DATE:
AIM:
ALGORITHM:
PROGRAM AND OUTPUT:
import numpy as np
import pandas as pd
heartDisease = pd.read_csv("C:/Users/Student/Desktop/Heartdisease.csv")
print(heartDisease.head())
[the first five rows of the dataframe are printed here; the tabular output was garbled beyond recovery in the source]
print(heartDisease.dtypes)
age int64
gender int64
cp int64
trestbps int64
chol int64
fbs int64
restecg int64
thalach int64
exang int64
oldpeak float64
slope int64
ca object
thal object
heartdisease int64
dtype: object
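The cells that construct the network were lost in extraction; the following is a hedged reconstruction based on the standard version of this exercise (the edge list is an assumption):

from pgmpy.models import BayesianNetwork
from pgmpy.estimators import MaximumLikelihoodEstimator
from pgmpy.inference import VariableElimination

# Assumed structure: demographic and symptom variables influence heartdisease,
# which in turn influences restecg and chol
model = BayesianNetwork([('age', 'heartdisease'), ('gender', 'heartdisease'),
                         ('exang', 'heartdisease'), ('cp', 'heartdisease'),
                         ('heartdisease', 'restecg'), ('heartdisease', 'chol')])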
model.fit(heartDisease, estimator=MaximumLikelihoodEstimator)
HeartDisease_test_infer = VariableElimination(model)
q1 = HeartDisease_test_infer.query(variables=['heartdisease'], evidence={'restecg': 1})  # evidence value assumed
print(q1)
+-----------------+---------------------+
| heartdisease | phi(heartdisease) |
+=================+=====================+
| heartdisease(0) | 0.1386 |
+-----------------+---------------------+
| heartdisease(1) | 0.0000 |
+-----------------+---------------------+
| heartdisease(2) | 0.2403 |
+-----------------+---------------------+
| heartdisease(3) | 0.2174 |
+-----------------+---------------------+
| heartdisease(4) | 0.4036 |
+-----------------+---------------------+
q2 = HeartDisease_test_infer.query(variables=['heartdisease'], evidence={'cp': 2})  # evidence value assumed
print(q2)
+-----------------+---------------------+
| heartdisease | phi(heartdisease) |
+=================+=====================+
| heartdisease(0) | 0.4433 |
+-----------------+---------------------+
| heartdisease(1) | 0.1888 |
+-----------------+---------------------+
| heartdisease(2) | 0.1189 |
+-----------------+---------------------+
| heartdisease(3) | 0.1377 |
+-----------------+---------------------+
| heartdisease(4) | 0.1114 |
+-----------------+---------------------+
RESULT:
EX NO: BUILD REGRESSION MODELS : LINEAR REGRESSION
DATE:
AIM:
ALGORITHM:
PROGRAM AND OUTPUT:
import pandas as pd
import numpy as np
data = pd.read_csv('Salary_Data.csv')
X = data[['YearsExperience']].values
y = data['Salary'].values
print(data.head())
YearsExperience Salary
0 1.1 39343.0
1 1.3 46205.0
2 1.5 37731.0
3 2.0 43525.0
4 2.2 39891.0
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression

X_train, X_test, Y_train, Y_test = train_test_split(X, y, test_size=10, random_state=0)
regressor = LinearRegression()
regressor.fit(X_train, Y_train)
y_pre = regressor.predict(X_test)
import matplotlib.pyplot as plt
plt.scatter(X_train, Y_train, color='red')                    # plot calls restored
plt.plot(X_train, regressor.predict(X_train), color='blue')
plt.xlabel('Years of Experience')
plt.ylabel('Salary')
plt.show()
plt.scatter(X_test, Y_test, color='red')
plt.plot(X_train, regressor.predict(X_train), color='blue')   # regression line restored
plt.xlabel('Years of Experience')
plt.ylabel('Salary')
plt.show()
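As a quick numeric check (an addition, not in the original record), the fitted line's parameters can be printed:

print("Slope:", regressor.coef_[0])        # salary increment per year of experience
print("Intercept:", regressor.intercept_)  # predicted salary at zero experience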
RESULT:
EX NO: BUILD REGRESSION MODELS: MULTIPLE LINEAR REGRESSION
DATE:
AIM:
ALGORITHM:
PROGRAM AND OUTPUT:
import pandas as pd
import numpy as np
data = pd.read_csv('Student_Performance.csv')
data.head()
# Feature list truncated in the source; the numeric columns of this dataset are assumed here
X = data[['Hours Studied', 'Previous Scores', 'Sleep Hours',
          'Sample Question Papers Practiced']].values
y = data['Performance Index'].values
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression

X_train, X_test, Y_train, Y_test = train_test_split(X, y, test_size=10, random_state=0)
regressor = LinearRegression()
regressor.fit(X_train, Y_train)
y_pred = regressor.predict(X_test)
regressor.score(X_test,Y_test)
0.984498422313455
import matplotlib.pyplot as plt
plt.scatter(Y_test, y_pred)                                   # plot calls restored
plt.plot([y.min(), y.max()], [y.min(), y.max()], 'r--', label='Ideal')
plt.legend()
plt.show()
RESULT:
EX NO: BUILD REGRESSION MODELS: LOGISTIC REGRESSION
DATE:
AIM:
ALGORITHM:
PROGRAM AND OUTPUT:
import pandas as pd
df=pd.read_csv("insurance_data.csv")
df.head()
X = df[["age"]]
y = df.bought_insurance
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)  # split assumed
model = LogisticRegression()
model.fit(X_train.values.reshape(-1, 1), y_train)
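The record for this exercise ends at the fit; a hedged evaluation snippet consistent with the names above (the evaluation cell itself is an assumption):

y_pred = model.predict(X_test.values.reshape(-1, 1))
print(model.score(X_test.values.reshape(-1, 1), y_test))   # mean accuracy on the held-out set
print(model.predict_proba(X_test.values.reshape(-1, 1)))   # per-class probabilities
RESULT:
EX NO: DECISION TREES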
DATE:
AIM:
ALGORITHM:
PROGRAM AND OUTPUT:
import pandas as pd
path='/content/drive/MyDrive/diabetes.csv'
df=pd.read_csv(path)
df.head()
df.shape
(768, 9)
feature_col = ['Pregnancies', 'Glucose', 'BloodPressure', 'SkinThickness', 'Insulin',
               'BMI', 'DiabetesPedigreeFunction', 'Age', 'Outcome']
X = df[feature_col]   # note: feature_col includes 'Outcome', the label itself,
Y = df.Outcome        # which leaks the target and explains the 1.0 accuracy below
from sklearn.model_selection import train_test_split
from sklearn import metrics
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.3)  # split assumed
model = DecisionTreeClassifier()
model.fit(X_train, Y_train)
Y_pred = model.predict(X_test)
print("Accuracy: ",metrics.accuracy_score(Y_test,Y_pred))
Accuracy: 1.0
import pydotplus
from io import StringIO
from sklearn.tree import export_graphviz
from IPython.display import Image
dot_data = StringIO()
export_graphviz(model, out_file=dot_data,
filled=True, rounded=True,
special_characters=True,feature_names = feature_col,class_names=['0','1'])
graph = pydotplus.graph_from_dot_data(dot_data.getvalue())
graph.write_png('diabetes.png')
Image(graph.create_png())
path = 'iris.csv'   # assumed; the line re-pointing path at the iris file was lost
df = pd.read_csv(path)
df.head()
df.shape
(150, 5)
feature_col=['sepal_length', 'sepal_width', 'petal_length', 'petal_width']
X=df[feature_col]
Y=df.species
model = DecisionTreeClassifier()
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.3)  # split assumed
model.fit(X_train, Y_train)
Y_pred = model.predict(X_test)
print("Accuracy: ",metrics.accuracy_score(Y_test,Y_pred))
import pydotplus
dot_data = StringIO()
export_graphviz(model, out_file=dot_data,
filled=True, rounded=True,
special_characters=True,feature_names = feature_col,class_names=['setosa',
'versicolor', 'virginica'])
graph = pydotplus.graph_from_dot_data(dot_data.getvalue())
graph.write_png('iris.png')   # the original wrote 'diabetes.png', a leftover filename
Image(graph.create_png())
RESULT:
EX NO: RANDOM FORESTS
DATE:
AIM:
ALGORITHM:
PROGRAM AND OUTPUT:
from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor
# (the cells preparing X_train_class/y_train_class and X_train_reg/y_train_reg were lost)
rf_class = RandomForestClassifier()
rf_class.fit(X_train_class, y_train_class)
y_pred_class = rf_class.predict(X_test_class)
rf_reg = RandomForestRegressor()
rf_reg.fit(X_train_reg, y_train_reg)
y_pred_reg = rf_reg.predict(X_test_reg)
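Since the data-preparation cells are missing, here is a self-contained sketch of the same calls on synthetic data (make_classification/make_regression are stand-ins, not the original datasets):

from sklearn.datasets import make_classification, make_regression
from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score, mean_squared_error

# Classification: 500 samples, 10 features, binary target
Xc, yc = make_classification(n_samples=500, n_features=10, random_state=0)
X_train_class, X_test_class, y_train_class, y_test_class = train_test_split(Xc, yc, random_state=0)
rf_class = RandomForestClassifier().fit(X_train_class, y_train_class)
print("Accuracy:", accuracy_score(y_test_class, rf_class.predict(X_test_class)))

# Regression: continuous target
Xr, yr = make_regression(n_samples=500, n_features=10, noise=10.0, random_state=0)
X_train_reg, X_test_reg, y_train_reg, y_test_reg = train_test_split(Xr, yr, random_state=0)
rf_reg = RandomForestRegressor().fit(X_train_reg, y_train_reg)
print("MSE:", mean_squared_error(y_test_reg, rf_reg.predict(X_test_reg)))
RESULT:
EX NO: SVM MODELS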
DATE:
AIM:
ALGORITHM:
PROGRAM AND OUTPUT:
import pandas as pd
path = 'winequality-red.csv'   # assumed filename; the original path cell was lost
df = pd.read_csv(path)
df.head(5)
X=df[['fixed acidity', 'volatile acidity', 'citric acid', 'residual sugar', 'chlorides', 'free sulfur dioxide',
'total sulfur dioxide', 'density', 'pH', 'sulphates', 'alcohol']] # Features
y = df['quality']
from sklearn.model_selection import train_test_split
from sklearn.svm import SVC
from sklearn.metrics import confusion_matrix, classification_report
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)  # split assumed
model = SVC()
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
print(confusion_matrix(y_test,y_pred))
[[ 0 0 1 3 0 0]
[ 0 0 3 8 0 0]
[ 0 0 67 76 0 0]
[ 0 0 19 103 0 0]
[ 0 0 2 35 0 0]
[ 0 0 1 2 0 0]]
print(classification_report(y_test, y_pred))
data_p=pd.DataFrame({'Actual':y_test, 'Predicted':y_pred})
data_p
from sklearn import metrics
print("Accuracy:",metrics.accuracy_score(y_test, y_pred))
Accuracy: 0.53125
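A consistency check on the printed confusion matrix: the diagonal sums to 67 + 103 = 170 correct predictions out of 320 test samples, and 170/320 = 0.53125, matching the accuracy above.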
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt

# Decision-boundary plot; the cells defining model_viz and the meshgrid were lost,
# so a two-feature binary setup is assumed (decision_function must return a
# single surface for the reshape below to work)
X2d = df[['alcohol', 'sulphates']].values            # assumed feature pair
y_bin = (df['quality'] >= 6).astype(int)             # assumed binary target
model_viz = SVC(kernel='linear')
model_viz.fit(X2d, y_bin)
plt.figure(figsize=(8, 6))
plt.scatter(X2d[:, 0], X2d[:, 1], c=y_bin, s=10)
ax = plt.gca()
xlim = ax.get_xlim()
ylim = ax.get_ylim()
XX, YY = np.meshgrid(np.linspace(xlim[0], xlim[1], 30), np.linspace(ylim[0], ylim[1], 30))
xy = np.vstack([XX.ravel(), YY.ravel()]).T
Z = model_viz.decision_function(xy).reshape(XX.shape)
ax.contour(XX, YY, Z, levels=[-1, 0, 1], linestyles=['--', '-', '--'])
plt.xlabel('Feature 1')
plt.ylabel('Feature 2')
plt.show()
RESULT:
EX NO: ENSEMBLING TECHNIQUES: CLASSIFIER
DATE:
AIM:
ALGORITHM:
PROGRAM AND OUTPUT:
import pandas as pd
import numpy as np
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
import statistics as st
import warnings
warnings.filterwarnings('ignore')
# (the cell loading and encoding the PlayTennis dataframe was lost)
X = df.drop('PlayTennis', axis=1)
y = df['PlayTennis']
0.6666666666666666
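The voting cell itself is missing from the record; given the imports above, a hedged max-voting sketch consistent with them (the split and the mode-based combination are reconstructions):

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)

# Train three base classifiers on the same split
model1 = DecisionTreeClassifier()
model2 = KNeighborsClassifier()
model3 = LogisticRegression()
for m in (model1, model2, model3):
    m.fit(X_train, y_train)

# Max voting: each test sample gets the most common of the three predictions
final_pred = np.array([st.mode([p1, p2, p3])
                       for p1, p2, p3 in zip(model1.predict(X_test),
                                             model2.predict(X_test),
                                             model3.predict(X_test))])
from sklearn.metrics import accuracy_score
print(accuracy_score(y_test, final_pred))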
RESULT:
EX NO: ENSEMBLING TECHNIQUES: REGRESSION
DATE:
AIM:
ALGORITHM:
PROGRAM AND OUTPUT:
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error
df = pd.read_csv('car_data.csv')   # assumed filename; the load cell was lost
df.shape
(100, 13)
#print(df['Brand'].drop_duplicates())
Brand_c = df['Brand'].unique() # c:categorical
print(Brand_c)
# Label Encoding
from sklearn import preprocessing
label_encoder = preprocessing.LabelEncoder()
df['Brand']= label_encoder.fit_transform(df['Brand'])
Brand_l = df['Brand'].unique() # Brand_l: after label coding
print(Brand_l)
[ 9 3 2 6 4 8 5 10 0 1 7]
Model_c = df['Model'].unique()   # categories before encoding (line restored)
df['Model'] = label_encoder.fit_transform(df['Model'])
Model_l = df['Model'].unique()
print(pd.DataFrame({'Model_c': Model_c, 'Model_l': Model_l}))
Model_c Model_l
0 Corolla 15
1 Civic 14
2 Mustang 30
3 Swift 42
4 Sonata 41
5 Nexon 31
6 Scorpio 40
7 Polo 33
8 A4 4
9 X1 53
10 C-Class 11
11 Endeavour 20
12 Creta 16
13 Harrier 27
14 Ertiga 21
15 City 13
16 Tiguan 47
17 Q3 34
18 5 Series 1
19 GLC 25
20 Innova 28
21 Figo 22
22 Verna 50
23 Altroz 7
24 Thar 44
25 Passat 32
26 A6 6
27 X3 54
28 E-Class 17
29 Fortuner 23
30 Aspire 9
31 Elantra 19
32 Safari 38
33 Vitara 51
34 WR-V 52
35 Ameo 8
36 A3 3
37 7 Series 2
38 GLE 26
39 Yaris 57
40 Ranger 36
41 Santro 39
42 Tigor 46
43 S-Cross 37
44 BR-V 10
45 T-Roc 43
46 Q7 35
47 X5 55
48 GLA 24
49 Camry 12
50 Venue 49
FuelType_c = df['Fuel_Type'].unique()   # line restored
print(FuelType_c)
['Petrol' 'Diesel']
df['Fuel_Type'] = label_encoder.fit_transform(df['Fuel_Type'])
FuelType_l = df['Fuel_Type'].unique()
print(pd.DataFrame({'FuelType_c': FuelType_c, 'FuelType_l': FuelType_l}))
FuelType_c FuelType_l
0 Petrol 1
1 Diesel 0
Transmission_c = df['Transmission'].unique()   # line restored
print(Transmission_c)
['Manual' 'Automatic']
df['Transmission'] = label_encoder.fit_transform(df['Transmission'])
Transmission_l = df['Transmission'].unique()
print(pd.DataFrame({'Transmission_c': Transmission_c, 'Transmission_l': Transmission_l}))
Transmission_c Transmission_l
0 Manual 1
1 Automatic 0
OwnerType_c = df['Owner_Type'].unique()   # line restored
df['Owner_Type'] = label_encoder.fit_transform(df['Owner_Type'])
OwnerType_l = df['Owner_Type'].unique()
print(pd.DataFrame({'OwnerType_c': OwnerType_c, 'OwnerType_l': OwnerType_l}))
OwnerType_c OwnerType_l
0 First 0
1 Second 1
2 Third 2
# 'data' is assumed to be df with the encoded columns renamed; that cell was lost
data = df.rename(columns={'Brand': 'Brand_l', 'Model': 'Model_l', 'Fuel_Type': 'FuelType_l',
                          'Transmission': 'Transmission_l', 'Owner_Type': 'OwnerType_l'})
# Class label
y = data["Price"]
# Feature set
X = data[['Car_ID', 'Brand_l', 'Model_l', 'Year', 'Kilometers_Driven', 'FuelType_l',
          'Transmission_l', 'OwnerType_l', 'Mileage', 'Engine', 'Power', 'Seats']]
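The cells training the base regressors and averaging their predictions are missing; a hedged sketch of the usual averaging ensemble for this exercise (model choices and split are assumptions):

from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeRegressor
from sklearn.neighbors import KNeighborsRegressor
from sklearn.linear_model import LinearRegression

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=1)

# Ensemble Technique: Averaging - mean of three regressors' predictions
m1 = DecisionTreeRegressor().fit(X_train, y_train)
m2 = KNeighborsRegressor().fit(X_train, y_train)
m3 = LinearRegression().fit(X_train, y_train)
pred_Avg = (m1.predict(X_test) + m2.predict(X_test) + m3.predict(X_test)) / 3
print(pd.DataFrame({'y_test': y_test, 'y_pred': pred_Avg}))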
# printing the mean squared error between real value and predicted value
print(mean_squared_error(y_test, pred_Avg))
y_test y_pred
62 1200000 805000.0
5 750000 725000.0
32 1300000 1450000.0
56 2800000 2960000.0
87 1800000 1990000.0
96 1400000 1400000.0
50 2700000 2690000.0
82 850000 820000.0
17 1900000 1605000.0
49 1800000 1990000.0
67 2500000 2050000.0
34 750000 760000.0
63 1600000 1900000.0
89 550000 540000.0
90 500000 505000.0
74 2000000 1960000.0
24 1200000 805000.0
41 450000 490000.0
22 850000 780000.0
9 2700000 2555000.0
42142500000.0
0.9258511480601742
# Ensemble Technique 3: Boosting (ADABOOST); the model cell was lost
from sklearn.ensemble import AdaBoostRegressor
boost = AdaBoostRegressor().fit(X_train, y_train)
pred_boost = boost.predict(X_test)
print(pd.DataFrame({'y_test': y_test, 'y_pred': pred_boost}))
y_test y_pred
62 1200000 805000.0
5 750000 725000.0
32 1300000 1450000.0
56 2800000 2960000.0
87 1800000 1990000.0
96 1400000 1400000.0
50 2700000 2690000.0
82 850000 820000.0
17 1900000 1605000.0
49 1800000 1990000.0
67 2500000 2050000.0
34 750000 760000.0
63 1600000 1900000.0
89 550000 540000.0
90 500000 505000.0
74 2000000 1960000.0
42142500000.0
0.9189869247528335
RESULT:
EX NO: CLUSTERING ALGORITHM: KNN
DATE:
AIM:
ALGORITHM:
PROGRAM AND OUTPUT:
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import sklearn
df = pd.read_csv("/content/drive/MyDrive/AIML/DATASET-AIML/Social_Network_Ads.csv")
print(df)
print(df.shape)
400
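The cells that select features, split, scale, and construct the classifier are missing; a hedged reconstruction (column indices and random_state are assumptions):

from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.neighbors import KNeighborsClassifier

X = df.iloc[:, 2:4].values   # assumed: Age and EstimatedSalary columns
y = df.iloc[:, -1].values    # Purchased

# An 80/20 split leaves 80 test samples, matching the arrays printed below
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)
sc = StandardScaler()
X_train = sc.fit_transform(X_train)
X_test = sc.transform(X_test)

classifier = KNeighborsClassifier()
classifier.fit(X_train, y_train)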
KNeighborsClassifier()
y_pred = classifier.predict(X_test)
print(y_test,y_pred)
[0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 1 0 0 1 0 1 0 1 0 0 0 0 0 1 1 0 0 0 0
 0 0 1 0 0 0 0 1 0 0 1 0 1 1 0 0 0 1 1 0 0 1 0 0 1 0 1 0 1 0 0 0 0 1 0 0 1
 0 0 0 0 1 1] [0 0 0 0 0 0 0 1 0 1 0 0 0 0 0 1 0 0 1 0 0 1 0 1 0 1 0 0 0 0 0 0 1 0 0 0 0
 0 0 1 0 0 0 0 1 0 0 1 0 1 1 0 0 1 1 1 0 0 1 0 0 1 0 1 0 1 0 0 0 0 1 0 0 1
 0 0 0 0 1 1]
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(y_test, y_pred)   # restored; the metrics cell was truncated
print(cm)
[[55 3]
[ 1 21]]
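As a check, the matrix gives (55 + 21) / 80 = 0.95 accuracy on the 80 test samples.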
RESULT:
EX NO: CLUSTERING ALGORITHM: K MEANS
DATE:
AIM:
ALGORITHM:
PROGRAM AND OUTPUT:
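The cells defining the sample points are missing; they can be recovered exactly from the list printed below:

import matplotlib.pyplot as plt
from sklearn.cluster import KMeans

# Ten 2-D points, recovered from the printed data
x = [4, 5, 10, 4, 3, 11, 14, 6, 10, 12]
y = [21, 19, 24, 17, 16, 25, 24, 22, 21, 21]
data = list(zip(x, y))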
plt.scatter(x, y)
plt.show()
[(4, 21), (5, 19), (10, 24), (4, 17), (3, 16), (11, 25), (14, 24), (6, 22), (10, 21), (12, 21)]
inertias = []
for i in range(1, 11):
    kmeans = KMeans(n_clusters=i)
    kmeans.fit(data)
    inertias.append(kmeans.inertia_)

plt.plot(range(1, 11), inertias, marker='o')   # elbow plot (restored)
plt.show()

kmeans = KMeans(n_clusters=2)   # K read off the elbow; the original value was lost
kmeans.fit(data)
plt.scatter(x, y, c=kmeans.labels_)
plt.show()
RESULT:
EX NO: EM FOR BAYESIAN NETWORKS
DATE:
AIM:
ALGORITHM:
PROGRAM AND OUTPUT:
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns

# The second component's parameters and sample sizes were lost; values assumed
np.random.seed(0)
mu1, sigma1 = 2, 1
mu2, sigma2 = 9, 2
X1 = np.random.normal(mu1, sigma1, 500)
X2 = np.random.normal(mu2, sigma2, 500)
X = np.concatenate([X1, X2])
sns.kdeplot(X)
plt.xlabel('X')
plt.ylabel('Density')
plt.show()
from scipy.stats import norm

# Initialize parameters (initial guesses assumed; the originals were lost)
mu1_hat, sigma1_hat, mu2_hat, sigma2_hat = 1, 1, 8, 1
pi1_hat, pi2_hat = 0.5, 0.5
num_epochs = 20
log_likelihoods = []
for epoch in range(num_epochs):
    # E-step: responsibility of each component for every point
    gamma1 = pi1_hat * norm.pdf(X, mu1_hat, sigma1_hat)
    gamma2 = pi2_hat * norm.pdf(X, mu2_hat, sigma2_hat)
    total = gamma1 + gamma2
    gamma1 /= total
    gamma2 /= total
    # M-step: re-estimate weights, means, and standard deviations
    pi1_hat = np.mean(gamma1)
    pi2_hat = np.mean(gamma2)
    mu1_hat = np.sum(gamma1 * X) / np.sum(gamma1)
    mu2_hat = np.sum(gamma2 * X) / np.sum(gamma2)
    sigma1_hat = np.sqrt(np.sum(gamma1 * (X - mu1_hat) ** 2) / np.sum(gamma1))
    sigma2_hat = np.sqrt(np.sum(gamma2 * (X - mu2_hat) ** 2) / np.sum(gamma2))
    # Compute log-likelihood
    log_likelihoods.append(np.sum(np.log(total)))

plt.plot(range(num_epochs), log_likelihoods)
plt.xlabel('Epoch')
plt.ylabel('Log-Likelihood')
plt.show()
# Plot the fitted mixture density over the data (reconstructed)
X_sorted = np.sort(X)
density = pi1_hat * norm.pdf(X_sorted, mu1_hat, sigma1_hat) + pi2_hat * norm.pdf(X_sorted, mu2_hat, sigma2_hat)
plt.plot(X_sorted, density)
plt.xlabel('X')
plt.ylabel('Density')
plt.show()
RESULT:
EX NO: SIMPLE NN MODELS
DATE:
AIM:
ALGORITHM:
PROGRAM AND OUTPUT:
import numpy as np
import pandas as pd
import tensorflow as tf
from keras.models import Sequential
from keras.layers import Dense
tf.__version__
df = pd.read_csv('Churn_Modelling.csv')   # assumed filename, matching the columns listed below
df.info()
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 10000 entries, 0 to 9999
Data columns (total 14 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 RowNumber 10000 non-null int64
1 CustomerId 10000 non-null int64
2 Surname 10000 non-null object
3 CreditScore 10000 non-null int64
4 Geography 10000 non-null object
5 Gender 10000 non-null object
6 Age 10000 non-null int64
7 Tenure 10000 non-null int64
8 Balance 10000 non-null float64
9 NumOfProducts 10000 non-null int64
10 HasCrCard 10000 non-null int64
11 IsActiveMember 10000 non-null int64
12 EstimatedSalary 10000 non-null float64
13 Exited 10000 non-null int64
dtypes: float64(2), int64(9), object(3)
memory usage: 1.1+ MB
X=df.iloc[:, 3:-1].values
y=df.iloc[:, -1].values
print(X)
print(y)
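Between printing X and the feature scaling, the record is missing the encoding and split cells; a hedged reconstruction following the standard template for this dataset:

from sklearn.preprocessing import LabelEncoder, OneHotEncoder
from sklearn.compose import ColumnTransformer
from sklearn.model_selection import train_test_split

le = LabelEncoder()
X[:, 2] = le.fit_transform(X[:, 2])                  # encode Gender (column index 2)
ct = ColumnTransformer(transformers=[('encoder', OneHotEncoder(), [1])],
                       remainder='passthrough')      # one-hot encode Geography
X = np.array(ct.fit_transform(X))

# An 8000/2000 split matches the 250 batches per epoch in the log below
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)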
#Feature Scaling
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
X_train = sc.fit_transform(X_train)
X_test = sc.transform(X_test)
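The network definition and training call are also missing; a hedged reconstruction of the standard architecture for this exercise (layer sizes are the template's, an assumption here):

ann = tf.keras.models.Sequential([
    tf.keras.layers.Dense(units=6, activation='relu'),    # hidden layer 1
    tf.keras.layers.Dense(units=6, activation='relu'),    # hidden layer 2
    tf.keras.layers.Dense(units=1, activation='sigmoid')  # binary churn output
])
ann.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
ann.fit(X_train, y_train, batch_size=32, epochs=100)      # 8000/32 = 250 steps per epoch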
Epoch 1/100
250/250 ━━━━━━━━━━━━━━━━━━━━ 4s 4ms/step - accuracy: 0.7984 - loss: 0.5569
Epoch 2/100
250/250 ━━━━━━━━━━━━━━━━━━━━ 1s 5ms/step - accuracy: 0.7957 - loss: 0.4642
Epoch 3/100
250/250 ━━━━━━━━━━━━━━━━━━━━ 1s 4ms/step - accuracy: 0.7962 - loss: 0.4495
Epoch 4/100
250/250 ━━━━━━━━━━━━━━━━━━━━ 1s 4ms/step - accuracy: 0.7941 - loss: 0.4365
Epoch 5/100
250/250 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step - accuracy: 0.7948 - loss: 0.4449
Epoch 6/100
250/250 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 0.8011 - loss: 0.4261
Epoch 7/100
250/250 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step - accuracy: 0.7969 - loss: 0.4341
Epoch 8/100
250/250 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step - accuracy: 0.8093 - loss: 0.4222
Epoch 9/100
250/250 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step - accuracy: 0.8112 - loss: 0.4314
Epoch 10/100
250/250 ━━━━━━━━━━━━━━━━━━━━ 1s 3ms/step - accuracy: 0.8187 - loss: 0.4205
Epoch 11/100
250/250 ━━━━━━━━━━━━━━━━━━━━ 1s 3ms/step - accuracy: 0.8206 - loss: 0.4079
Epoch 12/100
250/250 ━━━━━━━━━━━━━━━━━━━━ 1s 3ms/step - accuracy: 0.8228 - loss: 0.4037
Epoch 13/100
250/250 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step - accuracy: 0.8153 - loss: 0.4113
Epoch 14/100
250/250 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step - accuracy: 0.8185 - loss: 0.4078
Epoch 15/100
250/250 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step - accuracy: 0.8205 - loss: 0.4091
Epoch 16/100
250/250 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step - accuracy: 0.8256 - loss: 0.4032
Epoch 17/100
250/250 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step - accuracy: 0.8281 - loss: 0.3939
Epoch 18/100
250/250 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step - accuracy: 0.8375 - loss: 0.3902
Epoch 19/100
250/250 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step - accuracy: 0.8285 - loss: 0.3988
Epoch 20/100
250/250 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step - accuracy: 0.8415 - loss: 0.3791
Epoch 21/100
250/250 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step - accuracy: 0.8419 - loss: 0.3844
Epoch 22/100
250/250 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step - accuracy: 0.8456 - loss: 0.3760
Epoch 23/100
250/250 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step - accuracy: 0.8471 - loss: 0.3657
Epoch 24/100
250/250 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step - accuracy: 0.8568 - loss: 0.3605
Epoch 25/100
250/250 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step - accuracy: 0.8570 - loss: 0.3438
Epoch 26/100
250/250 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step - accuracy: 0.8556 - loss: 0.3488
Epoch 27/100
250/250 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step - accuracy: 0.8465 - loss: 0.3560
Epoch 28/100
250/250 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step - accuracy: 0.8561 - loss: 0.3550
Epoch 29/100
250/250 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step - accuracy: 0.8602 - loss: 0.3424
Epoch 30/100
250/250 ━━━━━━━━━━━━━━━━━━━━ 1s 4ms/step - accuracy: 0.8563 - loss: 0.3561
Epoch 31/100
250/250 ━━━━━━━━━━━━━━━━━━━━ 1s 3ms/step - accuracy: 0.8606 - loss: 0.3404
Epoch 32/100
250/250 ━━━━━━━━━━━━━━━━━━━━ 1s 4ms/step - accuracy: 0.8612 - loss: 0.3388
Epoch 33/100
250/250 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step - accuracy: 0.8676 - loss: 0.3251
Epoch 34/100
250/250 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step - accuracy: 0.8592 - loss: 0.3437
Epoch 35/100
250/250 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step - accuracy: 0.8603 - loss: 0.3438
Epoch 36/100
250/250 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step - accuracy: 0.8645 - loss: 0.3438
Epoch 37/100
250/250 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step - accuracy: 0.8681 - loss: 0.3344
Epoch 38/100
250/250 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step - accuracy: 0.8619 - loss: 0.3438
Epoch 39/100
250/250 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step - accuracy: 0.8614 - loss: 0.3416
Epoch 40/100
250/250 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step - accuracy: 0.8651 - loss: 0.3367
Epoch 41/100
250/250 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step - accuracy: 0.8631 - loss: 0.3348
Epoch 42/100
250/250 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step - accuracy: 0.8571 - loss: 0.3488
Epoch 43/100
250/250 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step - accuracy: 0.8591 - loss: 0.3417
Epoch 44/100
250/250 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 0.8581 - loss: 0.3417
Epoch 45/100
250/250 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step - accuracy: 0.8632 - loss: 0.3343
Epoch 46/100
250/250 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step - accuracy: 0.8608 - loss: 0.3418
Epoch 47/100
250/250 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step - accuracy: 0.8608 - loss: 0.3418
Epoch 48/100
250/250 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step - accuracy: 0.8657 - loss: 0.3341
Epoch 49/100
250/250 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step - accuracy: 0.8647 - loss: 0.3335
Epoch 50/100
250/250 ━━━━━━━━━━━━━━━━━━━━ 1s 3ms/step - accuracy: 0.8591 - loss: 0.3367
Epoch 51/100
250/250 ━━━━━━━━━━━━━━━━━━━━ 1s 3ms/step - accuracy: 0.8610 - loss: 0.3492
Epoch 52/100
250/250 ━━━━━━━━━━━━━━━━━━━━ 1s 3ms/step - accuracy: 0.8626 - loss: 0.3373
Epoch 53/100
250/250 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step - accuracy: 0.8647 - loss: 0.3338
Epoch 54/100
250/250 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step - accuracy: 0.8632 - loss: 0.3304
Epoch 55/100
250/250 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step - accuracy: 0.8608 - loss: 0.3381
Epoch 56/100
250/250 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step - accuracy: 0.8540 - loss: 0.3467
Epoch 57/100
250/250 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step - accuracy: 0.8581 - loss: 0.3466
Epoch 58/100
250/250 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 0.8565 - loss: 0.3452
Epoch 59/100
250/250 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step - accuracy: 0.8664 - loss: 0.3379
Epoch 60/100
250/250 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 0.8612 - loss: 0.3429
Epoch 61/100
250/250 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 0.8690 - loss: 0.3258
Epoch 62/100
250/250 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 0.8603 - loss: 0.3418
Epoch 63/100
250/250 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step - accuracy: 0.8645 - loss: 0.3390
Epoch 64/100
250/250 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step - accuracy: 0.8574 - loss: 0.3419
Epoch 65/100
250/250 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 0.8597 - loss: 0.3426
Epoch 66/100
250/250 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step - accuracy: 0.8700 - loss: 0.3288
Epoch 67/100
250/250 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step - accuracy: 0.8624 - loss: 0.3388
Epoch 68/100
250/250 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step - accuracy: 0.8657 - loss: 0.3301
Epoch 69/100
250/250 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step - accuracy: 0.8609 - loss: 0.3402
Epoch 70/100
250/250 ━━━━━━━━━━━━━━━━━━━━ 1s 3ms/step - accuracy: 0.8734 - loss: 0.3198
Epoch 71/100
250/250 ━━━━━━━━━━━━━━━━━━━━ 1s 3ms/step - accuracy: 0.8664 - loss: 0.3340
Epoch 72/100
250/250 ━━━━━━━━━━━━━━━━━━━━ 1s 3ms/step - accuracy: 0.8657 - loss: 0.3323
Epoch 73/100
250/250 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step - accuracy: 0.8647 - loss: 0.3363
Epoch 74/100
250/250 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step - accuracy: 0.8671 - loss: 0.3336
Epoch 75/100
250/250 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step - accuracy: 0.8651 - loss: 0.3367
Epoch 76/100
250/250 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step - accuracy: 0.8602 - loss: 0.3431
Epoch 77/100
250/250 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step - accuracy: 0.8637 - loss: 0.3347
Epoch 78/100
250/250 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step - accuracy: 0.8652 - loss: 0.3326
Epoch 79/100
250/250 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step - accuracy: 0.8639 - loss: 0.3244
Epoch 80/100
250/250 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step - accuracy: 0.8654 - loss: 0.3303
Epoch 81/100
250/250 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step - accuracy: 0.8598 - loss: 0.3403
Epoch 82/100
250/250 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step - accuracy: 0.8694 - loss: 0.3307
Epoch 83/100
250/250 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step - accuracy: 0.8588 - loss: 0.3399
Epoch 84/100
250/250 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step - accuracy: 0.8629 - loss: 0.3320
Epoch 85/100
250/250 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step - accuracy: 0.8683 - loss: 0.3248
Epoch 86/100
250/250 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step - accuracy: 0.8691 - loss: 0.3348
Epoch 87/100
250/250 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step - accuracy: 0.8625 - loss: 0.3360
Epoch 88/100
250/250 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step - accuracy: 0.8671 - loss: 0.3346
Epoch 89/100
250/250 ━━━━━━━━━━━━━━━━━━━━ 1s 3ms/step - accuracy: 0.8631 - loss: 0.3424
Epoch 90/100
250/250 ━━━━━━━━━━━━━━━━━━━━ 1s 3ms/step - accuracy: 0.8672 - loss: 0.3263
Epoch 91/100
250/250 ━━━━━━━━━━━━━━━━━━━━ 1s 3ms/step - accuracy: 0.8629 - loss: 0.3347
Epoch 92/100
250/250 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step - accuracy: 0.8616 - loss: 0.3391
Epoch 93/100
250/250 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step - accuracy: 0.8658 - loss: 0.3354
Epoch 94/100
250/250 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 0.8686 - loss: 0.3255
Epoch 95/100
250/250 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step - accuracy: 0.8617 - loss: 0.3353
Epoch 96/100
250/250 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step - accuracy: 0.8607 - loss: 0.3451
Epoch 97/100
250/250 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step - accuracy: 0.8618 - loss: 0.3402
Epoch 98/100
250/250 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step - accuracy: 0.8652 - loss: 0.3317
Epoch 99/100
250/250 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step - accuracy: 0.8641 - loss: 0.3331
Epoch 100/100
250/250 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step - accuracy: 0.8722 - loss: 0.3193
<keras.src.callbacks.history.History at 0x7f8d06a5a750>
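The single-customer prediction cell is missing; the usual template for this dataset is sketched here (every feature value below is the template's example, an assumption, not data from the original record):

# Geography='France' (one-hot 1,0,0), CreditScore=600, Gender=Male(1), Age=40,
# Tenure=3, Balance=60000, NumOfProducts=2, HasCrCard=1, IsActiveMember=1,
# EstimatedSalary=50000
print(ann.predict(sc.transform([[1, 0, 0, 600, 1, 40, 3, 60000, 2, 1, 1, 50000]])) > 0.5)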
Therefore, our ANN model predicts that this customer stays in the bank!
y_pred = (ann.predict(X_test) > 0.5)   # restored test-set evaluation lines
from sklearn.metrics import confusion_matrix, accuracy_score
cm = confusion_matrix(y_test, y_pred)
print(cm)
[[1503 92]
[ 188 217]]
print(accuracy_score(y_test, y_pred))
0.86
RESULT:
EX NO: DEEP LEARNING NN MODELS
DATE:
AIM:
ALGORITHM:
PROGRAM AND OUTPUT:
import numpy as np
import keras
from keras import layers
# Load the data and split it between train and test sets
(x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data()
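Between loading and the model definition, the preprocessing cells are missing; reconstructed from the standard Keras MNIST convnet example that this program follows:

num_classes = 10
input_shape = (28, 28, 1)

# Scale images to the [0, 1] range and add a trailing channels dimension
x_train = x_train.astype("float32") / 255
x_test = x_test.astype("float32") / 255
x_train = np.expand_dims(x_train, -1)
x_test = np.expand_dims(x_test, -1)

# One-hot encode the labels
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)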
model = keras.Sequential(
[
keras.Input(shape=input_shape),
layers.Conv2D(32, kernel_size=(3, 3), activation="relu"),
layers.MaxPooling2D(pool_size=(2, 2)),
layers.Conv2D(64, kernel_size=(3, 3), activation="relu"),
layers.MaxPooling2D(pool_size=(2, 2)),
layers.Flatten(),
layers.Dropout(0.5),
layers.Dense(num_classes, activation="softmax"),
]
)
model.summary()
Model: "sequential"
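The compile/fit cell is missing; in the standard example it is the following, which is consistent with the log below (batch_size 128 gives 54000/128 ≈ 422 steps per epoch under a 0.1 validation split):

model.compile(loss="categorical_crossentropy", optimizer="adam", metrics=["accuracy"])
model.fit(x_train, y_train, batch_size=128, epochs=15, validation_split=0.1)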
Epoch 1/15
422/422 ━━━━━━━━━━━━━━━━━━━━ 48s 109ms/step - accuracy: 0.7681 - loss: 0.7563 - val_accuracy: 0.9772 - val_loss: 0.0895
Epoch 2/15
422/422 ━━━━━━━━━━━━━━━━━━━━ 78s 102ms/step - accuracy: 0.9606 - loss: 0.1286 - val_accuracy: 0.9840 - val_loss: 0.0600
Epoch 3/15
422/422 ━━━━━━━━━━━━━━━━━━━━ 88s 116ms/step - accuracy: 0.9731 - loss: 0.0862 - val_accuracy: 0.9868 - val_loss: 0.0475
Epoch 4/15
422/422 ━━━━━━━━━━━━━━━━━━━━ 76s 102ms/step - accuracy: 0.9773 - loss: 0.0727 - val_accuracy: 0.9875 - val_loss: 0.0450
Epoch 5/15
422/422 ━━━━━━━━━━━━━━━━━━━━ 80s 98ms/step - accuracy: 0.9809 - loss: 0.0609 - val_accuracy: 0.9890 - val_loss: 0.0396
Epoch 6/15
422/422 ━━━━━━━━━━━━━━━━━━━━ 83s 100ms/step - accuracy: 0.9830 - loss: 0.0571 - val_accuracy: 0.9878 - val_loss: 0.0388
Epoch 7/15
422/422 ━━━━━━━━━━━━━━━━━━━━ 81s 99ms/step - accuracy: 0.9823 - loss: 0.0561 - val_accuracy: 0.9890 - val_loss: 0.0390
Epoch 8/15
422/422 ━━━━━━━━━━━━━━━━━━━━ 82s 100ms/step - accuracy: 0.9853 - loss: 0.0474 - val_accuracy: 0.9910 - val_loss: 0.0334
Epoch 9/15
422/422 ━━━━━━━━━━━━━━━━━━━━ 83s 102ms/step - accuracy: 0.9856 - loss: 0.0455 - val_accuracy: 0.9907 - val_loss: 0.0341
Epoch 10/15
422/422 ━━━━━━━━━━━━━━━━━━━━ 82s 103ms/step - accuracy: 0.9867 - loss: 0.0421 - val_accuracy: 0.9918 - val_loss: 0.0320
Epoch 11/15
422/422 ━━━━━━━━━━━━━━━━━━━━ 80s 100ms/step - accuracy: 0.9880 - loss: 0.0386 - val_accuracy: 0.9913 - val_loss: 0.0311
Epoch 12/15
422/422 ━━━━━━━━━━━━━━━━━━━━ 82s 100ms/step - accuracy: 0.9879 - loss: 0.0378 - val_accuracy: 0.9910 - val_loss: 0.0297
Epoch 13/15
422/422 ━━━━━━━━━━━━━━━━━━━━ 82s 99ms/step - accuracy: 0.9889 - loss: 0.0351 - val_accuracy: 0.9923 - val_loss: 0.0273
Epoch 14/15
422/422 ━━━━━━━━━━━━━━━━━━━━ 82s 99ms/step - accuracy: 0.9889 - loss: 0.0336 - val_accuracy: 0.9910 - val_loss: 0.0314
Epoch 15/15
422/422 ━━━━━━━━━━━━━━━━━━━━ 82s 99ms/step - accuracy: 0.9887 - loss: 0.0337 - val_accuracy: 0.9922 - val_loss: 0.0285
<keras.src.callbacks.history.History at 0x79aaffd14510>
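The record ends at the History object; in the standard example the next cell evaluates on the held-out test set (a hedged addition):

score = model.evaluate(x_test, y_test, verbose=0)
print("Test loss:", score[0])
print("Test accuracy:", score[1])
RESULT: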