AIML
EX 1:
(A) IMPLEMENTATION OF UNINFORMED SEARCH ALGORITHMS:(BFS)
CODE:
graph={
'A':['B','C'],
'B':['D','E'],
'C':['B','F'],
'D':[],
'E':['F'],
'F':[]
}
visited=[]
queue=[]
def bfs(visited, graph, node):
    visited.append(node)
    queue.append(node)
    while queue:
        s = queue.pop(0)
        print(s, end=" ")
        # Enqueue unvisited neighbours (this loop is required to reach
        # B..F; it was missing from the record)
        for neighbour in graph[s]:
            if neighbour not in visited:
                visited.append(neighbour)
                queue.append(neighbour)
bfs(visited,graph,'A')
OUTPUT:
A B C D E F
(B) IMPLEMENTATION OF UNINFORMED SEARCH ALGORITHMS:(DFS)
CODE:
def dfs(graph, start, visited=None):
    if visited is None:
        visited = set()
    visited.add(start)
    print(start)
    # Recurse into neighbours not yet visited when the loop starts
    # (this loop and the return were missing from the record)
    for next_node in graph[start] - visited:
        dfs(graph, next_node, visited)
    return visited
graph={
'0':set(['1','2']),
'1':set(['0','3','4']),
'2':set(['0']),
'3':set(['1']),
'4':set(['2','3'])
}
dfs(graph,'0')
OUTPUT:
0
1
4
2
3
3
2
{'0', '1', '2', '3', '4'}
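NOTE: Nodes 3 and 2 are printed twice because `graph[start] - visited` is evaluated once when each loop starts, so a node visited deeper in the recursion can still be re-entered (and re-printed) by an outer call. The final line is the visited set returned by dfs().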
EX 2:
(A) IMPLEMENTATION OF INFORMED SEARCH ALGORITHMS:(A*)
CODE:
def astaralgo(start, stop):
    # Initialisation of the open/closed sets, g-costs and parent links
    # (these lines were missing from the record and are reconstructed here)
    open_set = {start}
    closed_set = set()
    g = {start: 0}
    parents = {start: start}
    while open_set:
        n = min(open_set, key=lambda v: g[v] + h(v))
        if n == stop:
            path = []
            while n != start:
                path.append(n)
                n = parents[n]
            path.append(start)
            print("Path found:", path[::-1])  # This line reverses the path for correct order
            return path[::-1]  # Return reversed path directly
        open_set.remove(n)
        closed_set.add(n)
        for m, weight in get_neighbors(n):
            if m not in closed_set and (m not in open_set or g[m] > g[n] + weight):
                open_set.add(m)
                g[m], parents[m] = g[n] + weight, n
    print("Path does not exist!")
    return None
def get_neighbors(v):
    # `or []` guards against nodes mapped to None (e.g. 'C' below)
    return Graph_nodes.get(v) or []
def h(n):
    return {'A': 11, 'B': 6, 'C': 99, 'D': 1, 'E': 7, 'G': 0}.get(n, float('inf'))
Graph_nodes = {
'A': [('B', 2), ('E', 3)],
'B': [('C', 1), ('G', 9)],
'C': None,
'E': [('D', 6)],
'D': [('G', 1)]
}
astaralgo('A', 'G')
OUTPUT:
Path found: ['A', 'E', 'D', 'G']
(B) IMPLEMENTATION OF INFORMED SEARCH ALGORITHMS:(MEMORY-BOUNDED A*)
CODE:
import math
graph = {
'S': [('A', 1), ('B', 3)],
'A': [('B', 1), ('C', 3)],
'B': [('C', 1), ('D', 2)],
'C': [('D', 1), ('G', 2)],
'D': [('E', 3)],
'E': [('G', 2)],
'G': []
}
heuristic = {
'S': 7,
'A': 6,
'B': 2,
'C': 4,
'D': 2,
'E': 1,
'G': 0
}
start = 'S'
goal = 'G'
max_memory = 8
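The search routine itself did not survive in this record. Below is a minimal sketch consistent with the data above: it expands nodes in order of f = g + h and trims the frontier to max_memory entries, a simplification of SMA* rather than a full implementation.
import heapq

def memory_bounded_astar(graph, heuristic, start, goal, max_memory):
    # Frontier entries are (f, g, node, path); the heap pops the smallest f first
    frontier = [(heuristic[start], 0, start, [start])]
    while frontier:
        f, g, node, path = heapq.heappop(frontier)
        if node == goal:
            return path, g
        for neighbour, cost in graph[node]:
            heapq.heappush(frontier, (g + cost + heuristic[neighbour],
                                      g + cost, neighbour, path + [neighbour]))
        if len(frontier) > max_memory:
            # Respect the memory bound by keeping only the best entries
            frontier = heapq.nsmallest(max_memory, frontier)
            heapq.heapify(frontier)
    return None, math.inf

print(memory_bounded_astar(graph, heuristic, start, goal, max_memory))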
OUTPUT:
EX 3:
IMPLEMENT NAIVE BAYES MODELS
CODE:
import pandas as pd
import numpy as np
path = '/content/drive/MyDrive/PlayTennis.csv'
df = pd.read_csv(path)
df
OUTPUT:
CODE:
OUTPUT:
ENCODES CATEGORICAL FEATURES AS NUMERIC VALUES:
CODE:
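(The original cell is missing; a minimal sketch, assuming the PlayTennis columns Outlook, Temperature, Humidity, Wind and PlayTennis:)
from sklearn.preprocessing import LabelEncoder
le = LabelEncoder()
# Encode each categorical column to integer codes (column names assumed)
for col in ['Outlook', 'Temperature', 'Humidity', 'Wind', 'PlayTennis']:
    df[col] = le.fit_transform(df[col])
df.head()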
OUTPUT:
TRAINS A GAUSSIAN NAIVE BAYES MODEL ON THE DATASET:
CODE:
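(The training cell is also missing; a minimal sketch consistent with the 4-feature predictions below:)
from sklearn.naive_bayes import GaussianNB
X = df[['Outlook', 'Temperature', 'Humidity', 'Wind']]  # features (assumed)
y = df['PlayTennis']                                    # target (assumed)
model = GaussianNB()
model.fit(X, y)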
OUTPUT:
PREDICTS OUTCOMES USING THE TRAINED MODEL:
CODE:
pred2 = model.predict([[1,0,1,0]])
print("Predicted result: ", pred2)
pred3 = model.predict([[2,2,0,0]])
print("Predicted result: ", pred3)
OUTPUT:
EX 4:
IMPLEMENT BAYESIAN NETWORK
PROGRAM:
import numpy as np
import pandas as pd
from pgmpy.estimators import MaximumLikelihoodEstimator
from pgmpy.models import BayesianModel
from pgmpy.inference import VariableElimination
heartDisease = pd.read_csv("C:/Users/Student/Desktop/Heartdisease.csv")
heartDisease = heartDisease.replace('?', np.nan)
print('Sample instances from the dataset are given below')
print(heartDisease.head())
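The program ends here in the record. A minimal sketch of the usual continuation, assuming a handful of UCI heart-disease column names (age, sex, cp, chol, restecg, heartdisease); the actual network structure used in the lab may differ:
model = BayesianModel([('age', 'heartdisease'), ('sex', 'heartdisease'),
                       ('cp', 'heartdisease'), ('heartdisease', 'restecg'),
                       ('heartdisease', 'chol')])
# Learn the CPDs from the data by maximum likelihood
model.fit(heartDisease, estimator=MaximumLikelihoodEstimator)
print('\nInferencing with Bayesian Network:')
infer = VariableElimination(model)
print(infer.query(variables=['heartdisease'], evidence={'restecg': 1}))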
EX 5:(A)
LINEAR REGRESSION MODEL
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error, r2_score
# df is assumed to be the Pima diabetes dataset loaded as in the other exercises
X = df[['Glucose']]
y = df['Outcome']
# Split, fit and predict (these steps were missing from the record)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)
model = LinearRegression()
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
y_pred
EX 5:(B)
MULTIPLE LINEAR REGRESSION
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error, r2_score
# Assumes df is loaded and X, y are defined with several feature columns
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)
model = LinearRegression()
model.fit(X_train, y_train)
# Make predictions
y_pred = model.predict(X_test)
y_pred
EX 5:(C)
LOGISTIC REGRESSION
# import necessary libraries
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import confusion_matrix, accuracy_score, classification_report
# Split and create the classifier (these steps were missing from the record;
# a 20% test split of the 768-row dataset gives the 154 samples printed below)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)
logreg = LogisticRegression(max_iter=1000)
logreg.fit(X_train, y_train)
y_pred = logreg.predict(X_test)
print('y_test', np.array(y_test))
print('y_pred', y_pred)
y_test [0 0 0 0 0 0 0 0 0 0 1 0 1 0 0 1 0 0 1 1 0 0 0 0 0 1 0 0 1 0 1 1 1 1 0 1 1 1 0 1 0 0 0 1 0 1 1 0 0 0 0 1 1 1 0 0 0 0 0 1 1 0 0 1 0 0 0 1 0 1 0 0 0 1 0 0 0 0 0 0 1 1 0 0 0 1 1 1 0 0 0 0 0 1 0 0 0 0 1 0 0 0 1 0 1 0 0 1 1 1 0 0 0 1 0 0 1 0 0 0 0 0 1 0 0 0 1 1 1 0 1 1 0 1 1 0 1 1 1 0 0 0 0 0 0 0 1 0 0 1 0 0 1 0]
y_pred [0 0 0 0 0 0 0 1 1 1 0 1 0 0 0 0 0 0 1 1 0 0 1 0 1 1 0 0 0 0 1 1 1 1 1 1 1 0 1 1 0 1 1 0 0 1 1 0 0 1 0 1 1 0 0 0 1 0 0 1 1 0 0 0 0 1 0 1 0 1 1 0 0 0 0 1 0 0 0 0 1 0 0 0 0 1 1 0 0 0 0 0 0 1 1 1 0 0 1 0 1 0 1 1 1 0 0 1 0 1 0 0 0 1 0 0 1 0 0 1 0 0 0 0 0 0 0 1 1 1 1 1 0 0 1 0 0 1 1 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0]
Accuracy: 74.68%
# Confusion matrix
conf_matrix = confusion_matrix(y_test, y_pred)
conf_matrix
array([[78, 21],
[18, 37]])
EX 6:(A)
BUILD DECISION TREE AND RANDOM FOREST:
DECISION TREE:
import pandas as pd
from sklearn.tree import DecisionTreeClassifier
from sklearn.model_selection import train_test_split
from sklearn import metrics
path = '/content/drive/MyDrive/AIML/2024-25/Lab/diabetes.csv'
df = pd.read_csv(path)
df.head()
OUTPUT:
df.shape
OUTPUT:
(768, 9)
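The cell that defines the features and class label is missing from the record; a minimal sketch using the Pima diabetes column names (assumed; feature_cols is referenced again by the graphviz cell below):
feature_cols = ['Pregnancies', 'Glucose', 'BloodPressure', 'SkinThickness',
                'Insulin', 'BMI', 'DiabetesPedigreeFunction', 'Age']
X = df[feature_cols]  # predictors
y = df['Outcome']     # class label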
# Splitting Data
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=1)
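The training cell is also missing; a sketch consistent with the accuracy reported below:
model = DecisionTreeClassifier()
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
print("Accuracy:", metrics.accuracy_score(y_test, y_pred))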
OUTPUT:
Accuracy: 0.7012987012987013
# Imports required by this cell (missing from the record)
from io import StringIO
from sklearn.tree import export_graphviz
from IPython.display import Image
import pydotplus
dot_data = StringIO()
export_graphviz(model, out_file=dot_data,
                filled=True, rounded=True,
                special_characters=True, feature_names=feature_cols, class_names=['0', '1'])
graph = pydotplus.graph_from_dot_data(dot_data.getvalue())
graph.write_png('diabetes.png')
Image(graph.create_png())
OUTPUT:
EX 6:(B)
RANDOM FOREST
CODE:
# Imports required by this cell (missing from the record)
from sklearn.datasets import make_classification, make_regression
from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score, mean_squared_error

# Classification example
X_class, y_class = make_classification(n_samples=1000, n_features=20, n_classes=2, random_state=42)
X_train_class, X_test_class, y_train_class, y_test_class = train_test_split(X_class, y_class, test_size=0.2, random_state=23)
rf_class = RandomForestClassifier()
rf_class.fit(X_train_class, y_train_class)
y_pred_class = rf_class.predict(X_test_class)
print("Classification Accuracy Score:", accuracy_score(y_test_class, y_pred_class))

# Regression example
X_reg, y_reg = make_regression(n_samples=1000, n_features=20, noise=0.1, random_state=42)
X_train_reg, X_test_reg, y_train_reg, y_test_reg = train_test_split(X_reg, y_reg, test_size=0.2, random_state=23)
rf_reg = RandomForestRegressor()
rf_reg.fit(X_train_reg, y_train_reg)
y_pred_reg = rf_reg.predict(X_test_reg)
print("Regression MSE:", mean_squared_error(y_test_reg, y_pred_reg))  # (evaluation print added)
OUTPUT:
EX 7:
BUILD SVM MODELS
import pandas as pd
# Load the red-wine quality dataset (file name assumed; the 320-row test set
# in the confusion matrix below matches a 20% split of its 1599 rows)
df = pd.read_csv('winequality-red.csv')
df.head()
OUTPUT:
X = df[['fixed acidity', 'volatile acidity', 'citric acid', 'residual sugar', 'chlorides',
        'free sulfur dioxide', 'total sulfur dioxide', 'density', 'pH', 'sulphates', 'alcohol']]  # Features
y = df['quality']  # Labels
from sklearn.svm import SVC  # SVC is scikit-learn's support-vector classifier;
                             # with its default RBF kernel it can fit non-linear decision boundaries
from sklearn.model_selection import train_test_split
# Split parameters assumed; a 20% test set gives the 320 samples evaluated below
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)
model = SVC()
model.fit(X_train, y_train)
OUTPUT:
PREDICTION AND EVALUATION
from sklearn.metrics import confusion_matrix, classification_report
y_pred = model.predict(X_test)
print(confusion_matrix(y_test, y_pred))
OUTPUT:
[[ 0 0 0 1 0 0]
[ 0 0 3 10 0 0]
[ 0 0 61 76 0 0]
[ 0 0 28 92 0 0]
[ 0 0 2 42 0 0]
[ 0 0 1 4 0 0]]
CLASSIFICATION REPORT
print(classification_report(y_test, y_pred))
OUTPUT:
data_p=pd.DataFrame({'Actual':y_test, 'Predicted':y_pred})
data_p
OUTPUT:
MODEL ACCURACY
# Import scikit-learn metrics module for accuracy calculation
from sklearn import metrics
print("Accuracy:", metrics.accuracy_score(y_test, y_pred))
OUTPUT:
Accuracy: 0.478125
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.svm import SVC
from sklearn.metrics import classification_report, confusion_matrix
from sklearn import metrics
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_blobs
# ... (your existing code for data loading, model training, and evaluation)
# The fragment below matches scikit-learn's SVM margin-plot example; it is
# completed here on a synthetic make_blobs dataset (an assumption, since the
# original cell did not survive in full)
X_blobs, y_blobs = make_blobs(n_samples=40, centers=2, random_state=6)
clf = SVC(kernel='linear', C=1000)
clf.fit(X_blobs, y_blobs)
plt.scatter(X_blobs[:, 0], X_blobs[:, 1], c=y_blobs, s=30, cmap=plt.cm.Paired)
ax = plt.gca()
xlim = ax.get_xlim()
ylim = ax.get_ylim()
# Evaluate the decision function on a grid and draw the margins
xx = np.linspace(xlim[0], xlim[1], 30)
yy = np.linspace(ylim[0], ylim[1], 30)
YY, XX = np.meshgrid(yy, xx)
xy = np.vstack([XX.ravel(), YY.ravel()]).T
Z = clf.decision_function(xy).reshape(XX.shape)
ax.contour(XX, YY, Z, colors='k', levels=[-1, 0, 1], alpha=0.5, linestyles=['--', '-', '--'])
plt.show()
OUTPUT:
EX 8:(A)
IMPLEMENT ENSEMBLING TECHNIQUE
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error
df=pd.read_csv('/content/drive/MyDrive/AIML/car_price.csv')
print(df.head())
print(df.shape)
OUTPUT:
[5 rows x 13 columns]
(100, 13)
Brand_c=df['Brand'].unique()
print(Brand_c)
OUTPUT:
['Toyota' 'Honda' 'Ford' 'Maruti' 'Hyundai' 'Tata' 'Mahindra' 'Volkswagen' 'Audi' 'BMW' 'Mercedes']
from sklearn.preprocessing import LabelEncoder
le = LabelEncoder()  # (definition missing from the record)
df['Brand'] = le.fit_transform(df['Brand'])
Brand_1=df["Brand"].unique()
print(Brand_1)
OUTPUT:
[ 9 3 2 6 4 8 5 10 0 1 7]
df['Brand']= le.fit_transform(df['Brand'])
Brand_1 = df['Brand'].unique()
print(pd.DataFrame({'Brand_c':Brand_c, 'Brand_1':Brand_1}))
OUTPUT:
Brand_c Brand_1
0 Toyota 9
1 Honda 3
2 Ford 2
3 Maruti 6
4 Hyundai 4
5 Tata 8
6 Mahindra 5
7 Volkswagen 10
8 Audi 0
9 BMW 1
10 Mercedes 7
Model_c=df["Model"].unique()
print(Model_c)
OUTPUT:
df['Model']=le.fit_transform(df['Model'])
Model_1=df["Model"].unique()
print(Model_1)
OUTPUT:
[15 14 30 42 41 31 40 33 4 53 11 20 16 27 21 13 47 34 1 25 28 22 50 7 44 32 6 54 17 23 9 19 38 51 52 8 3 2 26 57 36 39 46 37 10 43 35 55 24 12 49 45 56 48 5 0 29 18]
df['Model'] = le.fit_transform(df['Model'])
Model_l = df['Model'].unique()
print(pd.DataFrame({'Model_c': Model_c, 'Model_l': Model_l}))
OUTPUT:
Model_c Model_l
0 Corolla 15
1 Civic 14
2 Mustang 30
3 Swift 42
4 Sonata 41
5 Nexon 31
6 Scorpio 40
7 Polo 33
8 A4 4
9 X1 53
10 C-Class 11
11 Endeavour 20
12 Creta 16
13 Harrier 27
14 Ertiga 21
15 City 13
16 Tiguan 47
17 Q3 34
18 5 Series 1
19 GLC 25
20 Innova 28
21 Figo 22
22 Verna 50
23 Altroz 7
24 Thar 44
25 Passat 32
26 A6 6
27 X3 54
28 E-Class 17
29 Fortuner 23
30 Aspire 9
31 Elantra 19
32 Safari 38
33 Vitara 51
34 WR-V 52
35 Ameo 8
36 A3 3
37 7 Series 2
38 GLE 26
39 Yaris 57
40 Ranger 36
41 Santro 39
42 Tigor 46
43 S-Cross 37
44 BR-V 10
45 T-Roc 43
46 Q7 35
47 X5 55
48 GLA 24
49 Camry 12
50 Venue 49
51 Tiago 45
52 XUV300 56
53 Vento 48
54 A5 5
55 3 Series 0
56 Innova Crysta 29
57 EcoSport 18
Fuel_type_c=df['Fuel_Type'].unique()
print(Fuel_type_c)
OUTPUT:
['Petrol' 'Diesel']
df['Fuel_Type']=le.fit_transform(df['Fuel_Type'])
Fuel_Type_1=df["Fuel_Type"].unique()
print(Fuel_Type_1)
OUTPUT:
[1 0]
df['Fuel_Type']= le.fit_transform(df['Fuel_Type'])
Fuel_Type_1 = df['Fuel_Type'].unique()
print(pd.DataFrame({'Fuel_type_c':Fuel_type_c, 'Fuel_Type_1':Fuel_Type_1}))
OUTPUT:
Fuel_type_c Fuel_Type_1
0 Petrol 1
1 Diesel 0
Transmission_c=df['Transmission'].unique()
print(Transmission_c)
OUTPUT:
['Manual' 'Automatic']
df['Transmission']=le.fit_transform(df['Transmission'])
Transmission_1=df["Transmission"].unique()
print(Transmission_1)
OUTPUT:
[1 0]
df['Transmission']= le.fit_transform(df['Transmission'])
Transmission_1 = df['Transmission'].unique()
print(pd.DataFrame({'Transmission_c':Transmission_c, 'Transmission_1':Transmission_1}))
OUTPUT:
Transmission_c Transmission_1
0 Manual 1
1 Automatic 0
Owner_Type_c=df['Owner_Type'].unique()
print(Owner_Type_c)
OUTPUT:
['First' 'Second' 'Third']
df['Owner_Type']=le.fit_transform(df['Owner_Type'])
Owner_Type_1=df["Owner_Type"].unique()
print(Owner_Type_1)
OUTPUT:
[0 1 2]
df['Owner_Type']= le.fit_transform(df['Owner_Type'])
Owner_Type_1 = df['Owner_Type'].unique()
print(pd.DataFrame({'Owner_Type_c':Owner_Type_c, 'Owner_Type_1':Owner_Type_1}))
OUTPUT:
Owner_Type_c Owner_Type_1
0 First 0
1 Second 1
2 Third 2
OUTPUT:
EX 8:(B)
ENSEMBLE LEARNING REGRESSION
LOADING DATA FOR ENSEMBLE AVERAGING
# df is loaded from the same file used in part (A)
df = pd.read_csv('/content/drive/MyDrive/AIML/car_price.csv')
df.shape
OUTPUT:
(100, 13)
#print(df['Brand'].drop_duplicates())
Brand_c = df['Brand'].unique() # c:categorical
print(Brand_c)
# Label Encoding
from sklearn import preprocessing
label_encoder = preprocessing.LabelEncoder()
df['Brand']= label_encoder.fit_transform(df['Brand'])
Brand_l = df['Brand'].unique() # Brand_l: after label coding
print(Brand_l)
OUTPUT:
[ 9 3 2 6 4 8 5 10 0 1 7]
Model_c = df['Model'].unique()  # (definition missing from the record)
df['Model']= label_encoder.fit_transform(df['Model'])
Model_l = df['Model'].unique()
print(pd.DataFrame({'Model_c':Model_c, 'Model_l':Model_l}))
OUTPUT:
Model_c Model_l
0 Corolla 15
1 Civic 14
2 Mustang 30
3 Swift 42
4 Sonata 41
5 Nexon 31
6 Scorpio 40
7 Polo 33
8 A4 4
9 X1 53
10 C-Class 11
11 Endeavour 20
12 Creta 16
13 Harrier 27
14 Ertiga 21
15 City 13
16 Tiguan 47
17 Q3 34
18 5 Series 1
19 GLC 25
20 Innova 28
21 Figo 22
22 Verna 50
23 Altroz 7
24 Thar 44
25 Passat 32
26 A6 6
27 X3 54
28 E-Class 17
29 Fortuner 23
30 Aspire 9
31 Elantra 19
32 Safari 38
33 Vitara 51
34 WR-V 52
35 Ameo 8
36 A3 3
37 7 Series 2
38 GLE 26
39 Yaris 57
40 Ranger 36
41 Santro 39
42 Tigor 46
43 S-Cross 37
44 BR-V 10
45 T-Roc 43
46 Q7 35
47 X5 55
48 GLA 24
49 Camry 12
50 Venue 49
51 Tiago 45
52 XUV300 56
53 Vento 48
54 A5 5
55 3 Series 0
56 Innova Crysta 29
57 EcoSport 18
FuelType_c = df['Fuel_Type'].unique()  # c: categorical (this cell was missing from the record)
print(FuelType_c)
OUTPUT:
['Petrol' 'Diesel']
ENCODING FUEL_TYPE & TRANSMISSION
df['Fuel_Type']= label_encoder.fit_transform(df['Fuel_Type'])
FuelType_l = df['Fuel_Type'].unique()
print(pd.DataFrame({'FuelType_c':FuelType_c, 'FuelType_l':FuelType_l}))
Transmission_c = df['Transmission'].unique() # c:categorical
print(Transmission_c)
OUTPUT:
['Manual' 'Automatic']
df['Transmission']= label_encoder.fit_transform(df['Transmission'])
Transmission_l = df['Transmission'].unique()
print(pd.DataFrame({'Transmission_c':Transmission_c, 'Transmission_l':Transmission_l}))
OUTPUT:
Transmission_c Transmission_l
0 Manual 1
1 Automatic 0
OwnerType_c = df['Owner_Type'].unique()  # c: categorical (this cell was missing from the record)
print(OwnerType_c)
OUTPUT:
['First' 'Second' 'Third']
df['Owner_Type']= label_encoder.fit_transform(df['Owner_Type'])
OwnerType_l = df['Owner_Type'].unique()
print(pd.DataFrame({'OwnerType_c':OwnerType_c, 'OwnerType_l':OwnerType_l}))
OUTPUT:
OwnerType_c OwnerType_l
0 First 0
1 Second 1
2 Third 2
OUTPUT:
# Class Label ("data" is assumed to be df after label encoding, with the
# encoded columns renamed to the *_l names used below)
y = data["Price"]
# Feature set
X = data[['Car_ID', 'Brand_l', 'Model_l', 'Year', 'Kilometers_Driven', 'FuelType_l',
          'Transmission_l', 'OwnerType_l', 'Mileage', 'Engine', 'Power', 'Seats']]
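The cells that train the base regressors and average their predictions are missing from the record; a minimal sketch of ensemble averaging with three regressors (the choice of models is an assumption):
from sklearn.linear_model import LinearRegression
from sklearn.tree import DecisionTreeRegressor
from sklearn.neighbors import KNeighborsRegressor

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=1)
model_1 = LinearRegression().fit(X_train, y_train)
model_2 = DecisionTreeRegressor(random_state=1).fit(X_train, y_train)
model_3 = KNeighborsRegressor().fit(X_train, y_train)
# Ensemble averaging: the prediction is the mean of the three models' outputs
pred_Avg = (model_1.predict(X_test) + model_2.predict(X_test) + model_3.predict(X_test)) / 3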
# printing the mean squared error between real value and predicted value
print(mean_squared_error(y_test, pred_Avg))
OUTPUT:
y_test y_pred
24 1200000 1220000.0
40 1500000 1660000.0
55 2600000 2630000.0
18 3000000 2570000.0
11 2000000 2100000.0
90 500000 520000.0
27 2800000 2485000.0
69 800000 905000.0
51 550000 530000.0
74 2000000 1865000.0
15 650000 790000.0
22 850000 910000.0
81 700000 765000.0
57 2900000 2600000.0
41 450000 480000.0
87 1800000 1870000.0
3 600000 505000.0
85 3200000 3500000.0
77 650000 680000.0
6 900000 1170000.0
32356250000.0
0.963739134698223
OUTPUT:
y_test y_pred
24 1200000 1220000.0
40 1500000 1660000.0
55 2600000 2630000.0
18 3000000 2570000.0
11 2000000 2100000.0
90 500000 520000.0
27 2800000 2485000.0
69 800000 905000.0
51 550000 530000.0
74 2000000 1865000.0
15 650000 790000.0
22 850000 910000.0
81 700000 765000.0
57 2900000 2600000.0
41 450000 480000.0
87 1800000 1870000.0
3 600000 505000.0
85 3200000 3500000.0
77 650000 680000.0
6 900000 1170000.0
32356250000.0
0.9653902329105001
EX 9:(A)
K-MEANS CLUSTERING
import matplotlib.pyplot as plt  # (import missing from the record)
x=[2,4,6,8,10,12,14,16,18,20]
y=[23,56,44,56,67,76,65,54,34,32]
plt.scatter(x,y)
plt.show()
OUTPUT:
data=list(zip(x,y))
print(data)
OUTPUT:
[(2, 23), (4, 56), (6, 44), (8, 56), (10, 67), (12, 76), (14,
65), (16, 54), (18, 34), (20, 32)]
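The loop that computes `inertias` for the elbow plot is missing from the record; a minimal sketch:
from sklearn.cluster import KMeans
inertias = []
for i in range(1, 11):
    # Fit k-means for each candidate k and record the within-cluster SSE
    kmeans = KMeans(n_clusters=i, n_init=10)
    kmeans.fit(data)
    inertias.append(kmeans.inertia_)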
plt.plot(range(1,11),inertias,marker='.')
plt.title('Elbow method')
plt.xlabel('Number of clusters')
plt.ylabel('Inertias')
plt.show()
OUTPUT:
K-MEANS CLUSTERING RESULTS VISUALIZATION
kmeans=KMeans(n_clusters=2)
kmeans.fit(data)
plt.scatter(x, y, c=kmeans.labels_)
plt.show()
OUTPUT:
EX 9:(B)
KNN CLASSIFICATION
import pandas as pd
data=pd.read_csv("/content/drive/MyDrive/AIML/Social_Network_Ads.csv")
print(data.head())
print(data.shape)
OUTPUT:
User ID Gender Age EstimatedSalary Purchased
0 15624510 Male 19 19000 0
1 15810944 Male 35 20000 0
2 15668575 Female 26 43000 0
3 15603246 Female 27 57000 0
4 15804002 Male 19 76000 0
(400, 5)
import numpy as np
import matplotlib.pyplot as plt
X=data.iloc[:,[2,3]].values  # Age and EstimatedSalary (the record's [1,2,3] would also pull in the Gender strings)
y=data.iloc[:,-1].values
OUTPUT:
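The split, scaling and classifier cells are missing; a minimal sketch consistent with the 80-sample confusion matrix below (parameter choices are assumptions):
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.neighbors import KNeighborsClassifier
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)
# Standardise the features so distances are comparable
sc = StandardScaler()
X_train = sc.fit_transform(X_train)
X_test = sc.transform(X_test)
classifier = KNeighborsClassifier(n_neighbors=5, metric='minkowski', p=2)
classifier.fit(X_train, y_train)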
y_pred=classifier.predict(X_test)
from sklearn.metrics import confusion_matrix,accuracy_score
cm=confusion_matrix(y_test,y_pred)
print(cm)
ac=accuracy_score(y_test,y_pred)
print(ac)
OUTPUT:
[[48 4]
[ 2 26]]
0.925
EX 10:
IMPLEMENT EM FOR BAYESIAN NETWORKS
PROGRAM:
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import norm
from scipy.stats import gaussian_kde
import seaborn as sns
# Generate a dataset with two Gaussian components
mu1, sigma1 = 2, 1
mu2, sigma2 = -1, 0.8
X1 = np.random.normal(mu1, sigma1, size=200)
X2 = np.random.normal(mu2, sigma2, size=600)
X = np.concatenate([X1, X2])
# Plot the density estimation using seaborn
sns.kdeplot(X)
plt.xlabel('X')
plt.ylabel('Density')
plt.title('Density Estimation of X')
plt.show()
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import norm
# Initialize parameters
mu1_hat, sigma1_hat = np.mean(X1), np.std(X1)
mu2_hat, sigma2_hat = np.mean(X2), np.std(X2)
pi1_hat, pi2_hat = len(X1) / len(X), len(X2) / len(X)
# Track the log-likelihood across EM iterations
log_likelihoods = []
# Compute log-likelihood of the initial parameters
log_likelihood = np.sum(np.log(pi1_hat * norm.pdf(X, mu1_hat, sigma1_hat)
                               + pi2_hat * norm.pdf(X, mu2_hat, sigma2_hat)))
log_likelihoods.append(log_likelihood)
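The E and M steps themselves did not survive in the record; a minimal sketch of the two-component Gaussian-mixture EM iteration, continuing from the parameters above (the iteration count is an assumption):
for iteration in range(20):
    # E-step: responsibilities of each component for every point
    p1 = pi1_hat * norm.pdf(X, mu1_hat, sigma1_hat)
    p2 = pi2_hat * norm.pdf(X, mu2_hat, sigma2_hat)
    gamma1 = p1 / (p1 + p2)
    gamma2 = 1 - gamma1
    # M-step: re-estimate mixing weights, means and standard deviations
    pi1_hat, pi2_hat = gamma1.mean(), gamma2.mean()
    mu1_hat = np.sum(gamma1 * X) / np.sum(gamma1)
    mu2_hat = np.sum(gamma2 * X) / np.sum(gamma2)
    sigma1_hat = np.sqrt(np.sum(gamma1 * (X - mu1_hat) ** 2) / np.sum(gamma1))
    sigma2_hat = np.sqrt(np.sum(gamma2 * (X - mu2_hat) ** 2) / np.sum(gamma2))
    log_likelihoods.append(np.sum(np.log(pi1_hat * norm.pdf(X, mu1_hat, sigma1_hat)
                                         + pi2_hat * norm.pdf(X, mu2_hat, sigma2_hat))))
plt.plot(log_likelihoods, marker='.')
plt.xlabel('Iteration')
plt.ylabel('Log-likelihood')
plt.show()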
EX 11:
BUILD SIMPLE NN MODELS
PROGRAM:
import numpy as np
import keras
from keras import layers
# Load the data and split it between train and test sets
(x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data()
# Scale to [0, 1], add a channel axis and set the shapes the model expects
# (this preprocessing was missing from the record)
x_train = x_train.astype("float32") / 255
x_test = x_test.astype("float32") / 255
x_train = np.expand_dims(x_train, -1)
x_test = np.expand_dims(x_test, -1)
num_classes = 10
input_shape = (28, 28, 1)
model = keras.Sequential(
[
keras.Input(shape=input_shape),
layers.Conv2D(32, kernel_size=(3, 3), activation="relu"),
layers.MaxPooling2D(pool_size=(2, 2)),
layers.Conv2D(64, kernel_size=(3, 3), activation="relu"),
layers.MaxPooling2D(pool_size=(2, 2)),
layers.Flatten(),
layers.Dropout(0.5),
layers.Dense(num_classes, activation="softmax"),
]
)
model.summary()
# Compile and train the model (the compile/fit calls were missing from the
# record; validation_split=0.1 matches the 422 steps per epoch logged below)
batch_size = 128
epochs = 15
model.compile(loss="sparse_categorical_crossentropy", optimizer="adam", metrics=["accuracy"])
model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, validation_split=0.1)
Epoch 1/15
422/422 ━━━━━━━━━━━━━━━━━━━━ 48s 109ms/step - accuracy: 0.7681 -
loss: 0.7563 - val_accuracy: 0.9772 - val_loss: 0.0895
Epoch 2/15
422/422 ━━━━━━━━━━━━━━━━━━━━ 78s 102ms/step - accuracy: 0.9606 -
loss: 0.1286 - val_accuracy: 0.9840 - val_loss: 0.0600
Epoch 3/15
422/422 ━━━━━━━━━━━━━━━━━━━━ 88s 116ms/step - accuracy: 0.9731 -
loss: 0.0862 - val_accuracy: 0.9868 - val_loss: 0.0475
Epoch 4/15
422/422 ━━━━━━━━━━━━━━━━━━━━ 76s 102ms/step - accuracy: 0.9773 -
loss: 0.0727 - val_accuracy: 0.9875 - val_loss: 0.0450
Epoch 5/15
422/422 ━━━━━━━━━━━━━━━━━━━━ 80s 98ms/step - accuracy: 0.9809 -
loss: 0.0609 - val_accuracy: 0.9890 - val_loss: 0.0396
Epoch 6/15
422/422 ━━━━━━━━━━━━━━━━━━━━ 83s 100ms/step - accuracy: 0.9830 -
loss: 0.0571 - val_accuracy: 0.9878 - val_loss: 0.0388
Epoch 7/15
422/422 ━━━━━━━━━━━━━━━━━━━━ 81s 99ms/step - accuracy: 0.9823 -
loss: 0.0561 - val_accuracy: 0.9890 - val_loss: 0.0390
Epoch 8/15
422/422 ━━━━━━━━━━━━━━━━━━━━ 82s 100ms/step - accuracy: 0.9853 -
loss: 0.0474 - val_accuracy: 0.9910 - val_loss: 0.0334
Epoch 9/15
422/422 ━━━━━━━━━━━━━━━━━━━━ 83s 102ms/step - accuracy: 0.9856 -
loss: 0.0455 - val_accuracy: 0.9907 - val_loss: 0.0341
Epoch 10/15
422/422 ━━━━━━━━━━━━━━━━━━━━ 82s 103ms/step - accuracy: 0.9867 -
loss: 0.0421 - val_accuracy: 0.9918 - val_loss: 0.0320
Epoch 11/15
422/422 ━━━━━━━━━━━━━━━━━━━━ 80s 100ms/step - accuracy: 0.9880 -
loss: 0.0386 - val_accuracy: 0.9913 - val_loss: 0.0311
Epoch 12/15
422/422 ━━━━━━━━━━━━━━━━━━━━ 82s 100ms/step - accuracy: 0.9879 -
loss: 0.0378 - val_accuracy: 0.9910 - val_loss: 0.0297
Epoch 13/15
422/422 ━━━━━━━━━━━━━━━━━━━━ 82s 99ms/step - accuracy: 0.9889 -
loss: 0.0351 - val_accuracy: 0.9923 - val_loss: 0.0273
Epoch 14/15
422/422 ━━━━━━━━━━━━━━━━━━━━ 82s 99ms/step - accuracy: 0.9889 -
loss: 0.0336 - val_accuracy: 0.9910 - val_loss: 0.0314
Epoch 15/15
422/422 ━━━━━━━━━━━━━━━━━━━━ 82s 99ms/step - accuracy: 0.9887 -
loss: 0.0337 - val_accuracy: 0.9922 - val_loss: 0.0285
<keras.src.callbacks.history.History at 0x79aaffd14510>
import tensorflow as tf
from tensorflow import keras
import matplotlib.pyplot as plt
import numpy as np
# Reload MNIST under the capitalised names used from here on
(X_train, y_train), (X_test, y_test) = keras.datasets.mnist.load_data()
len(X_train)
OUTPUT:
60000
MNIST TEST SAMPLE COUNT
len(X_test)
OUTPUT:
10000
X_train.shape
OUTPUT:
(60000, 28, 28)
print(X_train[0])
OUTPUT:
[[ 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
0 0 0
0 0 0 0 0 0 0 0 0 0]
[ 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
0 0 0
0 0 0 0 0 0 0 0 0 0]
[ 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
0 0 0
0 0 0 0 0 0 0 0 0 0]
[ 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
0 0 0
0 0 0 0 0 0 0 0 0 0]
[ 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
0 0 0
0 0 0 0 0 0 0 0 0 0]
[ 0 0 0 0 0 0 0 0 0 0 0 0 3 18 18
18 126 136
175 26 166 255 247 127 0 0 0 0]
[ 0 0 0 0 0 0 0 0 30 36 94 154 170 253 253
253 253 253
225 172 253 242 195 64 0 0 0 0]
[ 0 0 0 0 0 0 0 49 238 253 253 253 253 253 253
253 253 251
93 82 82 56 39 0 0 0 0 0]
[ 0 0 0 0 0 0 0 18 219 253 253 253 253 253 198
182 247 241
0 0 0 0 0 0 0 0 0 0]
[ 0 0 0 0 0 0 0 0 80 156 107 253 253 205 11
0 43 154
0 0 0 0 0 0 0 0 0 0]
[ 0 0 0 0 0 0 0 0 0 14 1 154 253 90 0
0 0 0
0 0 0 0 0 0 0 0 0 0]
[ 0 0 0 0 0 0 0 0 0 0 0 139 253 190 2
0 0 0
0 0 0 0 0 0 0 0 0 0]
[ 0 0 0 0 0 0 0 0 0 0 0 11 190 253 70
0 0 0
0 0 0 0 0 0 0 0 0 0]
[ 0 0 0 0 0 0 0 0 0 0 0 0 35 241 225
160 108 1
0 0 0 0 0 0 0 0 0 0]
[ 0 0 0 0 0 0 0 0 0 0 0 0 0 81 240
253 253 119
25 0 0 0 0 0 0 0 0 0]
[ 0 0 0 0 0 0 0 0 0 0 0 0 0 0 45
186 253 253
150 27 0 0 0 0 0 0 0 0]
[ 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
16 93 252
253 187 0 0 0 0 0 0 0 0]
[ 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
0 0 249
253 249 64 0 0 0 0 0 0 0]
[ 0 0 0 0 0 0 0 0 0 0 0 0 0 0 46
130 183 253
253 207 2 0 0 0 0 0 0 0]
[ 0 0 0 0 0 0 0 0 0 0 0 0 39 148 229
253 253 253
250 182 0 0 0 0 0 0 0 0]
[ 0 0 0 0 0 0 0 0 0 0 24 114 221 253 253
253 253 201
78 0 0 0 0 0 0 0 0 0]
[ 0 0 0 0 0 0 0 0 23 66 213 253 253 253 253
198 81 2
0 0 0 0 0 0 0 0 0 0]
[ 0 0 0 0 0 0 18 171 219 253 253 253 253 195 80
9 0 0
0 0 0 0 0 0 0 0 0 0]
[ 0 0 0 0 55 172 226 253 253 253 253 244 133 11 0
0 0 0
0 0 0 0 0 0 0 0 0 0]
[ 0 0 0 0 136 253 253 253 212 135 132 16 0 0 0
0 0 0
0 0 0 0 0 0 0 0 0 0]
[ 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
0 0 0
0 0 0 0 0 0 0 0 0 0]
[ 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
0 0 0
0 0 0 0 0 0 0 0 0 0]
[ 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
0 0 0
0 0 0 0 0 0 0 0 0 0]]
plt.matshow(X_train[0])
OUTPUT:
<matplotlib.image.AxesImage at 0x7bb032329610>
print(y_train[0])
OUTPUT:
5
plt.matshow(X_train[2])
OUTPUT:
<matplotlib.image.AxesImage at 0x7bb02ff4bc90>
DIGIT LABEL OF 3RD IMAGE IN TRAINING DATASET
print(y_train[2])
OUTPUT:
4
SHAPE OF X_TRAIN_FLATTENED
X_train_flattened=X_train.reshape(len(X_train),28*28)
X_test_flattened=X_test.reshape(len(X_test),28*28)
X_train_flattened.shape
OUTPUT:
(60000, 784)
X_train_flattened[0]
OUTPUT:
array([ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 18,
18, 18,
126, 136, 175, 26, 166, 255, 247, 127, 0, 0, 0,
0, 0,
0, 0, 0, 0, 0, 0, 0, 30, 36, 94, 154,
170, 253,
253, 253, 253, 253, 225, 172, 253, 242, 195, 64, 0,
0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 49, 238, 253,
253, 253,
253, 253, 253, 253, 253, 251, 93, 82, 82, 56, 39,
0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 18,
219, 253,
253, 253, 253, 253, 198, 182, 247, 241, 0, 0, 0,
0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0,
80, 156, 107, 253, 253, 205, 11, 0, 43, 154, 0,
0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0,
0, 0, 0, 14, 1, 154, 253, 90, 0, 0, 0,
0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0,
0, 0, 0, 0, 0, 0, 0, 139, 253, 190, 2,
0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 11, 190,
253, 70,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 35,
241, 225, 160, 108, 1, 0, 0, 0, 0, 0, 0,
0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0,
0, 0, 81, 240, 253, 253, 119, 25, 0, 0, 0,
0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0,
0, 0, 0, 0, 0, 45, 186, 253, 253, 150, 27,
0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 16, 93, 252,
253, 187,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 249,
253, 249, 64, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
46, 130,
183, 253, 253, 207, 2, 0, 0, 0, 0, 0, 0,
0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
39, 148,
229, 253, 253, 253, 250, 182, 0, 0, 0, 0, 0,
0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
24, 114,
221, 253, 253, 253, 253, 201, 78, 0, 0, 0, 0,
0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
23, 66,
213, 253, 253, 253, 253, 198, 81, 2, 0, 0, 0,
0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
18, 171,
219, 253, 253, 253, 253, 195, 80, 9, 0, 0, 0,
0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
55, 172,
226, 253, 253, 253, 253, 244, 133, 11, 0, 0, 0,
0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0,
136, 253, 253, 253, 212, 135, 132, 16, 0, 0, 0,
0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0,
0, 0, 0, 0], dtype=uint8)
FLATTENED IMAGE AT INDEX 2
X_train_flattened[2]
OUTPUT:
array([ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0,
0, 0, 0, 0, 67, 232, 39, 0, 0, 0, 0,
0, 0,
0, 0, 0, 62, 81, 0, 0, 0, 0, 0, 0,
0, 0,
0, 0, 0, 0, 0, 0, 120, 180, 39, 0, 0,
0, 0,
0, 0, 0, 0, 0, 126, 163, 0, 0, 0, 0,
0, 0,
0, 0, 0, 0, 0, 0, 0, 2, 153, 210, 40,
0, 0,
0, 0, 0, 0, 0, 0, 0, 220, 163, 0, 0,
0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 27, 254,
162, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 222, 163,
0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
183, 254,
125, 0, 0, 0, 0, 0, 0, 0, 0, 0, 46,
245, 163,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0,
198, 254, 56, 0, 0, 0, 0, 0, 0, 0, 0,
0, 120,
254, 163, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0,
0, 23, 231, 254, 29, 0, 0, 0, 0, 0, 0,
0, 0,
0, 159, 254, 120, 0, 0, 0, 0, 0, 0, 0,
0, 0,
0, 0, 0, 163, 254, 216, 16, 0, 0, 0, 0,
0, 0,
0, 0, 0, 159, 254, 67, 0, 0, 0, 0, 0,
0, 0,
0, 0, 14, 86, 178, 248, 254, 91, 0, 0, 0,
0, 0,
0, 0, 0, 0, 0, 159, 254, 85, 0, 0, 0,
47, 49,
116, 144, 150, 241, 243, 234, 179, 241, 252, 40, 0,
0, 0,
0, 0, 0, 0, 0, 0, 0, 150, 253, 237, 207,
207, 207,
253, 254, 250, 240, 198, 143, 91, 28, 5, 233, 250,
0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 119,
177, 177,
177, 177, 177, 98, 56, 0, 0, 0, 0, 0, 102,
254, 220,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 169,
254, 137, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0,
0, 169, 254, 57, 0, 0, 0, 0, 0, 0, 0,
0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0,
0, 0, 0, 169, 254, 57, 0, 0, 0, 0, 0,
0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0,
0, 0, 0, 0, 0, 169, 255, 94, 0, 0, 0,
0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0,
0, 0, 0, 0, 0, 0, 0, 169, 254, 96, 0,
0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 169, 254,
153, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
169, 255,
153, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0,
96, 254, 153, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0,
0, 0, 0, 0], dtype=uint8)
X_train_flattened=X_train_flattened/255
X_test_flattened=X_test_flattened/255
X_train_flattened[0]
OUTPUT:
array([0. , 0. , 0. , 0. ,
0. ,
0. , 0. , 0. , 0. ,
0. ,
0. , 0. , 0. , 0. ,
0. ,
0. , 0. , 0. , 0. ,
0. ,
0. , 0. , 0. , 0. ,
0. ,
0. , 0. , 0. , 0. ,
0. ,
0. , 0. , 0. , 0. ,
0. ,
0. , 0. , 0. , 0. ,
0. ,
0. , 0. , 0. , 0. ,
0. ,
0. , 0. , 0. , 0. ,
0. ,
0. , 0. , 0. , 0. ,
0. ,
0. , 0. , 0. , 0. ,
0. ,
0. , 0. , 0. , 0. ,
0. ,
0. , 0. , 0. , 0. ,
0. ,
0. , 0. , 0. , 0. ,
0. ,
0. , 0. , 0. , 0. ,
0. ,
0. , 0. , 0. , 0. ,
0. ,
0. , 0. , 0. , 0. ,
0. ,
0. , 0. , 0. , 0. ,
0. ,
0. , 0. , 0. , 0. ,
0. ,
0. , 0. , 0. , 0. ,
0. ,
0. , 0. , 0. , 0. ,
0. ,
0. , 0. , 0. , 0. ,
0. ,
0. , 0. , 0. , 0. ,
0. ,
0. , 0. , 0. , 0. ,
0. ,
0. , 0. , 0. , 0. ,
0. ,
0. , 0. , 0. , 0. ,
0. ,
0. , 0. , 0. , 0. ,
0. ,
0. , 0. , 0. , 0. ,
0. ,
0. , 0. , 0. , 0. ,
0. ,
0. , 0. , 0.01176471, 0.07058824,
0.07058824,
0.07058824, 0.49411765, 0.53333333, 0.68627451,
0.10196078,
0.65098039, 1. , 0.96862745, 0.49803922,
0. ,
0. , 0. , 0. , 0. ,
0. ,
0. , 0. , 0. , 0. ,
0. ,
0. , 0.11764706, 0.14117647, 0.36862745,
0.60392157,
0.66666667, 0.99215686, 0.99215686, 0.99215686,
0.99215686,
0.99215686, 0.88235294, 0.6745098 , 0.99215686,
0.94901961,
0.76470588, 0.25098039, 0. , 0. ,
0. ,
0. , 0. , 0. , 0. ,
0. ,
0. , 0. , 0. , 0.19215686,
0.93333333,
0.99215686, 0.99215686, 0.99215686, 0.99215686,
0.99215686,
0.99215686, 0.99215686, 0.99215686, 0.98431373,
0.36470588,
0.32156863, 0.32156863, 0.21960784, 0.15294118,
0. ,
0. , 0. , 0. , 0. ,
0. ,
0. , 0. , 0. , 0. ,
0. ,
0. , 0.07058824, 0.85882353, 0.99215686,
0.99215686,
0.99215686, 0.99215686, 0.99215686, 0.77647059,
0.71372549,
0.96862745, 0.94509804, 0. , 0. ,
0. ,
0. , 0. , 0. , 0. ,
0. ,
0. , 0. , 0. , 0. ,
0. ,
0. , 0. , 0. , 0. ,
0. ,
0.31372549, 0.61176471, 0.41960784, 0.99215686,
0.99215686,
0.80392157, 0.04313725, 0. , 0.16862745,
0.60392157,
0. , 0. , 0. , 0. ,
0. ,
0. , 0. , 0. , 0. ,
0. ,
0. , 0. , 0. , 0. ,
0. ,
0. , 0. , 0. , 0. ,
0.05490196,
0.00392157, 0.60392157, 0.99215686, 0.35294118,
0. ,
0. , 0. , 0. , 0. ,
0. ,
0. , 0. , 0. , 0. ,
0. ,
0. , 0. , 0. , 0. ,
0. ,
0. , 0. , 0. , 0. ,
0. ,
0. , 0. , 0. , 0. ,
0.54509804,
0.99215686, 0.74509804, 0.00784314, 0. ,
0. ,
0. , 0. , 0. , 0. ,
0. ,
0. , 0. , 0. , 0. ,
0. ,
0. , 0. , 0. , 0. ,
0. ,
0. , 0. , 0. , 0. ,
0. ,
0. , 0. , 0.04313725, 0.74509804,
0.99215686,
0.2745098 , 0. , 0. , 0. ,
0. ,
0. , 0. , 0. , 0. ,
0. ,
0. , 0. , 0. , 0. ,
0. ,
0. , 0. , 0. , 0. ,
0. ,
0. , 0. , 0. , 0. ,
0. ,
0. , 0.1372549 , 0.94509804, 0.88235294,
0.62745098,
0.42352941, 0.00392157, 0. , 0. ,
0. ,
0. , 0. , 0. , 0. ,
0. ,
0. , 0. , 0. , 0. ,
0. ,
0. , 0. , 0. , 0. ,
0. ,
0. , 0. , 0. , 0. ,
0. ,
0.31764706, 0.94117647, 0.99215686, 0.99215686,
0.46666667,
0.09803922, 0. , 0. , 0. ,
0. ,
0. , 0. , 0. , 0. ,
0. ,
0. , 0. , 0. , 0. ,
0. ,
0. , 0. , 0. , 0. ,
0. ,
0. , 0. , 0. , 0. ,
0.17647059,
0.72941176, 0.99215686, 0.99215686, 0.58823529,
0.10588235,
0. , 0. , 0. , 0. ,
0. ,
0. , 0. , 0. , 0. ,
0. ,
0. , 0. , 0. , 0. ,
0. ,
0. , 0. , 0. , 0. ,
0. ,
0. , 0. , 0. , 0.0627451 ,
0.36470588,
0.98823529, 0.99215686, 0.73333333, 0. ,
0. ,
0. , 0. , 0. , 0. ,
0. ,
0. , 0. , 0. , 0. ,
0. ,
0. , 0. , 0. , 0. ,
0. ,
0. , 0. , 0. , 0. ,
0. ,
0. , 0. , 0. , 0.97647059,
0.99215686,
0.97647059, 0.25098039, 0. , 0. ,
0. ,
0. , 0. , 0. , 0. ,
0. ,
0. , 0. , 0. , 0. ,
0. ,
0. , 0. , 0. , 0. ,
0. ,
0. , 0. , 0. , 0.18039216,
0.50980392,
0.71764706, 0.99215686, 0.99215686, 0.81176471,
0.00784314,
0. , 0. , 0. , 0. ,
0. ,
0. , 0. , 0. , 0. ,
0. ,
0. , 0. , 0. , 0. ,
0. ,
0. , 0. , 0. , 0. ,
0.15294118,
0.58039216, 0.89803922, 0.99215686, 0.99215686,
0.99215686,
0.98039216, 0.71372549, 0. , 0. ,
0. ,
0. , 0. , 0. , 0. ,
0. ,
0. , 0. , 0. , 0. ,
0. ,
0. , 0. , 0. , 0. ,
0. ,
0.09411765, 0.44705882, 0.86666667, 0.99215686,
0.99215686,
0.99215686, 0.99215686, 0.78823529, 0.30588235,
0. ,
0. , 0. , 0. , 0. ,
0. ,
0. , 0. , 0. , 0. ,
0. ,
0. , 0. , 0. , 0. ,
0. ,
0. , 0.09019608, 0.25882353, 0.83529412,
0.99215686,
0.99215686, 0.99215686, 0.99215686, 0.77647059,
0.31764706,
0.00784314, 0. , 0. , 0. ,
0. ,
0. , 0. , 0. , 0. ,
0. ,
0. , 0. , 0. , 0. ,
0. ,
0. , 0. , 0.07058824, 0.67058824,
0.85882353,
0.99215686, 0.99215686, 0.99215686, 0.99215686,
0.76470588,
0.31372549, 0.03529412, 0. , 0. ,
0. ,
0. , 0. , 0. , 0. ,
0. ,
0. , 0. , 0. , 0. ,
0. ,
0. , 0. , 0. , 0.21568627,
0.6745098 ,
0.88627451, 0.99215686, 0.99215686, 0.99215686,
0.99215686,
0.95686275, 0.52156863, 0.04313725, 0. ,
0. ,
0. , 0. , 0. , 0. ,
0. ,
0. , 0. , 0. , 0. ,
0. ,
0. , 0. , 0. , 0. ,
0. ,
0. , 0.53333333, 0.99215686, 0.99215686,
0.99215686,
0.83137255, 0.52941176, 0.51764706, 0.0627451 ,
0. ,
0. , 0. , 0. , 0. ,
0. ,
0. , 0. , 0. , 0. ,
0. ,
0. , 0. , 0. , 0. ,
0. ,
0. , 0. , 0. , 0. ,
0. ,
0. , 0. , 0. , 0. ,
0. ,
0. , 0. , 0. , 0. ,
0. ,
0. , 0. , 0. , 0. ,
0. ,
0. , 0. , 0. , 0. ,
0. ,
0. , 0. , 0. , 0. ,
0. ,
0. , 0. , 0. , 0. ,
0. ,
0. , 0. , 0. , 0. ,
0. ,
0. , 0. , 0. , 0. ,
0. ,
0. , 0. , 0. , 0. ,
0. ,
0. , 0. , 0. , 0. ,
0. ,
0. , 0. , 0. , 0. ,
0. ,
0. , 0. , 0. , 0. ,
0. ,
0. , 0. , 0. , 0. ,
0. ,
0. , 0. , 0. , 0. ,
0. ,
0. , 0. , 0. , 0. ,
0. ,
0. , 0. , 0. , 0. ])
model=keras.Sequential([
keras.layers.Dense(10,input_shape=(784,),activation='sigmoid')
])
model.compile(
optimizer="adam",
loss="sparse_categorical_crossentropy",
metrics=["accuracy"]
)
model.fit(X_train_flattened,y_train,epochs=5)
OUTPUT:
Epoch 1/5
1875/1875 ━━━━━━━━━━━━━━━━━━━━ 4s 2ms/step - accuracy: 0.8133 -
loss: 0.7189
Epoch 2/5
1875/1875 ━━━━━━━━━━━━━━━━━━━━ 6s 2ms/step - accuracy: 0.9153 -
loss: 0.3040
Epoch 3/5
1875/1875 ━━━━━━━━━━━━━━━━━━━━ 8s 4ms/step - accuracy: 0.9219 -
loss: 0.2839
Epoch 4/5
1875/1875 ━━━━━━━━━━━━━━━━━━━━ 7s 2ms/step - accuracy: 0.9233 -
loss: 0.2702
Epoch 5/5
1875/1875 ━━━━━━━━━━━━━━━━━━━━ 5s 3ms/step - accuracy: 0.9255 -
loss: 0.2680
<keras.src.callbacks.history.History at 0x7bb0326f7a50>
model.evaluate(X_test_flattened,y_test)
OUTPUT:
[0.26684650778770447, 0.925000011920929]
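The two values returned by evaluate() are the test loss and test accuracy, matching the ~92.5% accuracy seen during training.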
y_predict=model.predict(X_test_flattened)
OUTPUT:
plt.matshow(X_test[5])
OUTPUT:
<matplotlib.image.AxesImage at 0x7bb0119fbf90>
PREDICTED LABEL FOR TEST IMAGE AT INDEX 5
y_predict[5]
OUTPUT:
print(np.argmax(y_predict[5]))
OUTPUT:
y_test[:5]
OUTPUT:
array([7, 2, 1, 0, 4], dtype=uint8)
y_predicted_Labels = [np.argmax(i) for i in y_predict]  # (definition missing from the record)
y_predicted_Labels[:5]
OUTPUT:
tf.math.confusion_matrix(labels=y_test,predictions=y_predicted_Labels)
OUTPUT:
import seaborn as sn
cm=tf.math.confusion_matrix(labels=y_test,predictions=y_predicted_Labels)
plt.figure(figsize=(10,10))
sn.heatmap(cm,annot=True,fmt="d")
plt.xlabel("Predicted")
plt.ylabel("Truth")
OUTPUT: