
import numpy as np
import pandas as pd
import random
import tensorflow as tf
import matplotlib.pyplot as plt
from sklearn.metrics import accuracy_score
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Flatten, Conv2D, Dense, MaxPooling2D
from tensorflow.keras.optimizers import SGD
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.datasets import mnist
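Every run samples different digits and shuffles batches differently, so the numbers below will vary slightly between runs. A small optional addition (not part of the original run) would be to seed the random number generators right after the imports:

random.seed(42)        # Python's random module (used later to sample test digits)
np.random.seed(42)     # NumPy
tf.random.set_seed(42) # TensorFlow/Keras weight initialization and shuffling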

WARNING:tensorflow:From C:\Users\rutik\anaconda3\Lib\site-packages\keras\src\losses.py:2976: The name tf.losses.sparse_softmax_cross_entropy is deprecated. Please use tf.compat.v1.losses.sparse_softmax_cross_entropy instead.

(X_train, y_train), (X_test, y_test) = mnist.load_data()

print(X_train.shape)

(60000, 28, 28)

X_train[0].min(), X_train[0].max()

(0, 255)

X_train = (X_train - 0.0) / (255.0 - 0.0)
X_test = (X_test - 0.0) / (255.0 - 0.0)
X_train[0].min(), X_train[0].max()

(0.0, 1.0)
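Subtracting 0.0 and dividing by (255.0 - 0.0) is an explicit min-max scaling using the known pixel range 0-255. A minimal equivalent sketch (assuming the raw uint8 arrays returned by mnist.load_data()):

X_train = X_train.astype("float32") / 255.0   # scale pixel values from [0, 255] to [0.0, 1.0]
X_test = X_test.astype("float32") / 255.0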

def plot_digit(image, digit, plt, i):
    plt.subplot(4, 5, i + 1)
    plt.imshow(image, cmap=plt.get_cmap('gray'))
    plt.title(f"Digit: {digit}")
    plt.xticks([])
    plt.yticks([])

plt.figure(figsize=(16, 10))
for i in range(20):
    plot_digit(X_train[i], y_train[i], plt, i)
plt.show()

X_train = X_train.reshape((X_train.shape + (1,)))
X_test = X_test.reshape((X_test.shape + (1,)))
y_train[0:20]

array([5, 0, 4, 1, 9, 2, 1, 3, 1, 4, 3, 5, 3, 6, 1, 7, 2, 8, 6, 9],
dtype=uint8)

model = Sequential([
    Conv2D(32, (3, 3), activation="relu", input_shape=(28, 28, 1)),
    MaxPooling2D((2, 2)),
    Flatten(),
    Dense(100, activation="relu"),
    Dense(10, activation="softmax")
])

WARNING:tensorflow:From C:\Users\rutik\anaconda3\Lib\site-packages\keras\src\backend.py:873: The name tf.get_default_graph is deprecated. Please use tf.compat.v1.get_default_graph instead.

WARNING:tensorflow:From C:\Users\rutik\anaconda3\Lib\site-packages\keras\src\layers\pooling\max_pooling2d.py:161: The name tf.nn.max_pool is deprecated. Please use tf.nn.max_pool2d instead.

optimizer = SGD(learning_rate=0.01, momentum=0.9)

model.compile(
    optimizer=optimizer,
    loss="sparse_categorical_crossentropy",
    metrics=["accuracy"]
)
model.summary()

Model: "sequential"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
conv2d (Conv2D) (None, 26, 26, 32) 320

max_pooling2d (MaxPooling2 (None, 13, 13, 32) 0


D)

flatten (Flatten) (None, 5408) 0

dense (Dense) (None, 100) 540900

dense_1 (Dense) (None, 10) 1010

=================================================================
Total params: 542230 (2.07 MB)
Trainable params: 542230 (2.07 MB)
Non-trainable params: 0 (0.00 Byte)
_________________________________________________________________
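The shapes and parameter counts reported by model.summary() can be checked by hand; a rough sketch of the arithmetic, using only the layer definitions above:

# Conv2D with a 3x3 kernel and no padding: 28 - 3 + 1 = 26, so its output is (26, 26, 32)
# MaxPooling2D with a 2x2 window halves that to (13, 13, 32); Flatten gives 13*13*32 = 5408
conv_params = 32 * (3 * 3 * 1) + 32      # 32 filters of size 3x3x1 plus 32 biases = 320
dense_params = 5408 * 100 + 100          # 5408 inputs to 100 units plus 100 biases = 540900
dense1_params = 100 * 10 + 10            # 100 inputs to 10 output units plus 10 biases = 1010
print(conv_params + dense_params + dense1_params)   # 542230, matching "Total params" above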

model.fit(X_train, y_train, epochs=10, batch_size=32)

Epoch 1/10
WARNING:tensorflow:From C:\Users\rutik\anaconda3\Lib\site-packages\keras\src\utils\tf_utils.py:492: The name tf.ragged.RaggedTensorValue is deprecated. Please use tf.compat.v1.ragged.RaggedTensorValue instead.

WARNING:tensorflow:From C:\Users\rutik\anaconda3\Lib\site-packages\keras\src\engine\base_layer_utils.py:384: The name tf.executing_eagerly_outside_functions is deprecated. Please use tf.compat.v1.executing_eagerly_outside_functions instead.

1875/1875 [==============================] - 29s 14ms/step - loss: 0.2402 - accuracy: 0.9278
Epoch 2/10
1875/1875 [==============================] - 28s 15ms/step - loss: 0.0799 - accuracy: 0.9765
Epoch 3/10
1875/1875 [==============================] - 26s 14ms/step - loss: 0.0525 - accuracy: 0.9844
Epoch 4/10
1875/1875 [==============================] - 28s 15ms/step - loss: 0.0379 - accuracy: 0.9881
Epoch 5/10
1875/1875 [==============================] - 29s 16ms/step - loss: 0.0294 - accuracy: 0.9909
Epoch 6/10
1875/1875 [==============================] - 27s 14ms/step - loss: 0.0219 - accuracy: 0.9933
Epoch 7/10
1875/1875 [==============================] - 27s 14ms/step - loss: 0.0160 - accuracy: 0.9951
Epoch 8/10
1875/1875 [==============================] - 26s 14ms/step - loss: 0.0121 - accuracy: 0.9963
Epoch 9/10
1875/1875 [==============================] - 27s 15ms/step - loss: 0.0094 - accuracy: 0.9974
Epoch 10/10
1875/1875 [==============================] - 27s 15ms/step - loss: 0.0063 - accuracy: 0.9984
<keras.src.callbacks.History at 0x13f38a25010>
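The per-epoch loss and accuracy printed above can also be plotted if the History object returned by fit is captured; a small optional sketch (assuming fit is called as history = model.fit(...)):

history = model.fit(X_train, y_train, epochs=10, batch_size=32)
plt.plot(history.history["loss"], label="loss")           # training loss per epoch
plt.plot(history.history["accuracy"], label="accuracy")   # training accuracy per epoch
plt.xlabel("Epoch")
plt.legend()
plt.show()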

plt.figure(figsize=(16, 10))
for i in range(20):
    image = random.choice(X_test).squeeze()
    digit = np.argmax(model.predict(image.reshape((1, 28, 28, 1)))[0], axis=-1)
    plot_digit(image, digit, plt, i)
plt.show()

1/1 [==============================] - 0s 270ms/step
1/1 [==============================] - 0s 77ms/step
1/1 [==============================] - 0s 48ms/step
1/1 [==============================] - 0s 67ms/step
1/1 [==============================] - 0s 71ms/step
1/1 [==============================] - 0s 49ms/step
1/1 [==============================] - 0s 51ms/step
1/1 [==============================] - 0s 56ms/step
1/1 [==============================] - 0s 51ms/step
1/1 [==============================] - 0s 51ms/step
1/1 [==============================] - 0s 51ms/step
1/1 [==============================] - 0s 52ms/step
1/1 [==============================] - 0s 55ms/step
1/1 [==============================] - 0s 49ms/step
1/1 [==============================] - 0s 53ms/step
1/1 [==============================] - 0s 56ms/step
1/1 [==============================] - 0s 49ms/step
1/1 [==============================] - 0s 52ms/step
1/1 [==============================] - 0s 50ms/step
1/1 [==============================] - 0s 56ms/step
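Each loop iteration above runs a separate model.predict call on a single image, which is why twenty 1/1 progress lines appear. A minimal alternative sketch that samples twenty test digits but predicts them in one batch (reusing the plot_digit helper defined earlier):

plt.figure(figsize=(16, 10))
samples = np.array([random.choice(X_test) for _ in range(20)])   # shape (20, 28, 28, 1)
digits = np.argmax(model.predict(samples), axis=-1)              # one predict call for all 20 images
for i in range(20):
    plot_digit(samples[i].squeeze(), digits[i], plt, i)
plt.show()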

predictions = np.argmax(model.predict(X_test), axis=-1)
accuracy_score(y_test, predictions) * 100

313/313 [==============================] - 2s 6ms/step
98.69

n = random.randint(0, 9999)
plt.imshow(X_test[n].squeeze(), cmap=plt.get_cmap('gray'))   # drop the trailing channel dimension for imshow
plt.show()
predicted_value = model.predict(X_test)
print("Handwritten number in the image is= %d" % np.argmax(predicted_value[n]))

313/313 [==============================] - 2s 6ms/step


Handwritten number in the image is= 8
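Since predicted_value = model.predict(X_test) recomputes the outputs for the whole test set, the predictions array computed earlier could be reused instead (a small optional simplification):

print("Handwritten number in the image is= %d" % predictions[n])   # same result, no extra predict call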

score = model.evaluate(X_test, y_test, verbose=0)
print('Test loss:', score[0])      # Test loss: 0.0296396646054
print('Test accuracy:', score[1])

Test loss: 0.04847879707813263


Test accuracy: 0.9868999719619751
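With y_test and the predictions array already available, a per-class view of the remaining errors can be obtained from sklearn's confusion matrix (a small optional check, not part of the original run):

from sklearn.metrics import confusion_matrix
cm = confusion_matrix(y_test, predictions)   # rows: true digit, columns: predicted digit
print(cm)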

# The implemented CNN model gives a test loss of about 0.048 and a
# test accuracy of about 0.987 (98.7%) on the MNIST test set.