Assignment 6
Assignment 6
# Hide all CUDA devices so TensorFlow falls back to CPU execution.
# NOTE(review): this must run before the first TensorFlow import for
# the setting to take effect — it does here (cell 1, before cell 2).
import os

os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
Imports
In [2]: import tensorflow_datasets as tfds
import tensorflow as tf
from tensorflow.keras.utils import to_categorical
c:\Users\Gaurav\AppData\Local\Programs\Python\Python39\lib\site-packages\tqdm\auto.p
y:22: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See ht
tps://ipywidgets.readthedocs.io/en/stable/user_install.html
from .autonotebook import tqdm as notebook_tqdm
Load Data
## Load images and labels as full in-memory batches.
## BUG FIX: the original split was ["train[:70%]", "train[:30%]"], which makes
## the "test" set the first 30% of the SAME examples used for training
## (data leakage — test metrics would be inflated). "train[70%:]" takes the
## disjoint remaining 30% instead.
(train_ds, train_labels), (test_ds, test_labels) = tfds.load(
    "tf_flowers",
    split=["train[:70%]", "train[70%:]"],  # disjoint 70/30 train/test split
    batch_size=-1,       # -1 loads the entire split as a single batch (eager tensors)
    as_supervised=True,  # return (image, label) pairs
)
Image Preprocessing
## Quick sanity checks on the loaded data.
train_ds[0].shape   # existing image size of the first sample
train_labels        # full integer label tensor
train_labels[0]     # label of the first sample
train_ds[0].shape   # first image shape, inspected again (original cell In[10])
Model: "vgg16"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_1 (InputLayer) [(None, 150, 150, 3)] 0
=================================================================
Total params: 14,714,688
Trainable params: 0
Non-trainable params: 14,714,688
_________________________________________________________________
Add a custom classification head — two trainable dense layers — on top of the frozen base model
## Assemble the transfer-learning model: frozen VGG16 base + trainable head.
flatten_layer = layers.Flatten()                     # flatten base-model feature maps to a vector
dense_layer_1 = layers.Dense(50, activation='relu')  # first trainable dense layer
dense_layer_2 = layers.Dense(20, activation='relu')  # second trainable dense layer
model = models.Sequential([
base_model,        # pretrained VGG16 (non-trainable: 14,714,688 frozen params per the summary above)
flatten_layer,
dense_layer_1,
dense_layer_2,
prediction_layer   # NOTE(review): defined outside this excerpt — presumably Dense(5, softmax), given the 5-wide one-hot output shown at the end
])
model.compile(
optimizer='adam',
loss='categorical_crossentropy',   # expects one-hot labels (to_categorical is imported above — confirm labels were converted)
metrics=['accuracy'],
)
Epoch 1/50
65/65 [==============================] - 91s 1s/step - loss: 1.6600 - accuracy: 0.45
64 - val_loss: 1.1794 - val_accuracy: 0.5486
Epoch 2/50
65/65 [==============================] - 88s 1s/step - loss: 0.8261 - accuracy: 0.69
10 - val_loss: 1.0397 - val_accuracy: 0.6226
Epoch 3/50
65/65 [==============================] - 90s 1s/step - loss: 0.5542 - accuracy: 0.78
88 - val_loss: 0.9917 - val_accuracy: 0.6479
Epoch 4/50
65/65 [==============================] - 93s 1s/step - loss: 0.3986 - accuracy: 0.85
11 - val_loss: 0.9414 - val_accuracy: 0.7062
Epoch 5/50
65/65 [==============================] - 95s 1s/step - loss: 0.2973 - accuracy: 0.89
39 - val_loss: 0.9425 - val_accuracy: 0.7043
Epoch 6/50
65/65 [==============================] - 95s 1s/step - loss: 0.2008 - accuracy: 0.92
75 - val_loss: 1.0838 - val_accuracy: 0.7004
Epoch 7/50
65/65 [==============================] - 96s 1s/step - loss: 0.1542 - accuracy: 0.94
99 - val_loss: 1.0646 - val_accuracy: 0.7257
Epoch 8/50
65/65 [==============================] - 98s 2s/step - loss: 0.1176 - accuracy: 0.96
59 - val_loss: 1.0945 - val_accuracy: 0.7237
Epoch 9/50
65/65 [==============================] - 99s 2s/step - loss: 0.0899 - accuracy: 0.97
18 - val_loss: 1.1666 - val_accuracy: 0.7062
Epoch 10/50
65/65 [==============================] - 100s 2s/step - loss: 0.0597 - accuracy: 0.9
839 - val_loss: 1.2617 - val_accuracy: 0.7121
Epoch 11/50
65/65 [==============================] - 100s 2s/step - loss: 0.0428 - accuracy: 0.9
883 - val_loss: 1.3025 - val_accuracy: 0.6984
Epoch 12/50
65/65 [==============================] - 100s 2s/step - loss: 0.0380 - accuracy: 0.9
912 - val_loss: 1.3514 - val_accuracy: 0.7121
# Evaluate the trained model on the held-out test set and report metrics.
test_loss, test_accuracy = model.evaluate(test_ds, test_labels)
print("Loss: ", test_loss, "Accuracy: ", test_accuracy)
Test
[[0. 0. 1. 0. 0.]
[0. 0. 0. 1. 0.]
[0. 0. 0. 1. 0.]
[0. 0. 0. 0. 1.]
[0. 0. 0. 1. 0.]
[1. 0. 0. 0. 0.]
[1. 0. 0. 0. 0.]
[1. 0. 0. 0. 0.]
[1. 0. 0. 0. 0.]
[0. 1. 0. 0. 0.]]