0% found this document useful (0 votes)
7 views2 pages

DL 8

Copyright
© © All Rights Reserved
We take content rights seriously. If you suspect this is your content, claim it here.
Available Formats
Download as DOCX, PDF, TXT or read online on Scribd
0% found this document useful (0 votes)
7 views2 pages

DL 8

Copyright
© © All Rights Reserved
We take content rights seriously. If you suspect this is your content, claim it here.
Available Formats
Download as DOCX, PDF, TXT or read online on Scribd
You are on page 1/ 2

8) Build AlexNet using Advanced CNN.

import numpy as np # linear algebra


import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import tensorflow as tf
from tensorflow import keras
import matplotlib.pyplot as plt
import os
import time

# CIFAR-10 class names, indexed by the integer labels (0-9) returned by
# keras.datasets.cifar10.  The original script referenced CLASS_NAMES
# without ever defining it, which raises NameError at L46.
CLASS_NAMES = ['airplane', 'automobile', 'bird', 'cat', 'deer',
               'dog', 'frog', 'horse', 'ship', 'truck']

# Load CIFAR-10: images are (32, 32, 3) uint8, labels are shape (n, 1).
(train_images, train_labels), (test_images, test_labels) = \
    keras.datasets.cifar10.load_data()

# Wrap the raw arrays in tf.data pipelines for preprocessing/batching below.
train_ds = tf.data.Dataset.from_tensor_slices((train_images, train_labels))
test_ds = tf.data.Dataset.from_tensor_slices((test_images, test_labels))

# Preview the first 20 training images with their class names.
plt.figure(figsize=(30, 30))
for i, (image, label) in enumerate(train_ds.take(20)):
    ax = plt.subplot(5, 5, i + 1)
    plt.imshow(image)
    # label is a (1,)-shaped tensor; [0] extracts the scalar class id.
    plt.title(CLASS_NAMES[label.numpy()[0]])
    plt.axis('off')
def process_image(image, label):
    """Preprocess one example for the network.

    The image is standardized to zero mean / unit variance
    (per-image, over all pixels and channels) and resized to the
    64x64 spatial resolution the model's input layer expects.
    The label passes through unchanged.
    """
    standardized = tf.image.per_image_standardization(image)
    resized = tf.image.resize(standardized, (64, 64))
    return resized, label
# Number of examples per split (50,000 train / 10,000 test for CIFAR-10).
train_ds_size = tf.data.experimental.cardinality(train_ds).numpy()
test_ds_size = tf.data.experimental.cardinality(test_ds).numpy()
print('Train size:', train_ds_size)
print('Test size:', test_ds_size)

# Training pipeline: preprocess, reshuffle the full set each epoch
# (buffer covers the whole dataset, so the shuffle is uniform), batch.
train_ds = (train_ds
            .map(process_image)
            .shuffle(buffer_size=train_ds_size)
            .batch(batch_size=32, drop_remainder=True))

# Evaluation pipeline: no shuffle — example order is irrelevant for the
# aggregated metrics, and the original shuffle only made runs
# nondeterministic for no benefit.
# NOTE(review): drop_remainder=True silently discards the final partial
# batch of test examples (up to 31 of them); kept for parity with the
# original setup — confirm this is acceptable for reported accuracy.
test_ds = (test_ds
           .map(process_image)
           .batch(batch_size=32, drop_remainder=True))
# AlexNet-style CNN adapted to 64x64 CIFAR-10 inputs: five conv stages
# (each followed by batch norm), three max-pools, then two dropout-
# regularized dense layers and a 10-way softmax classifier.
model = keras.models.Sequential()

# Stage 1: large-receptive-field conv with aggressive stride, as in AlexNet.
model.add(keras.layers.Conv2D(128, (11, 11), strides=(4, 4),
                              activation='relu', input_shape=(64, 64, 3)))
model.add(keras.layers.BatchNormalization())
model.add(keras.layers.MaxPool2D(pool_size=(2, 2)))

# Stage 2: 5x5 conv, padding preserved so pooling controls the resolution.
model.add(keras.layers.Conv2D(256, (5, 5), strides=(1, 1),
                              activation='relu', padding='same'))
model.add(keras.layers.BatchNormalization())
model.add(keras.layers.MaxPool2D(pool_size=(3, 3)))

# Stages 3-5: stacked 3x3 and 1x1 convs without intermediate pooling.
model.add(keras.layers.Conv2D(256, (3, 3), strides=(1, 1),
                              activation='relu', padding='same'))
model.add(keras.layers.BatchNormalization())
model.add(keras.layers.Conv2D(256, (1, 1), strides=(1, 1),
                              activation='relu', padding='same'))
model.add(keras.layers.BatchNormalization())
model.add(keras.layers.Conv2D(256, (1, 1), strides=(1, 1),
                              activation='relu', padding='same'))
model.add(keras.layers.BatchNormalization())
model.add(keras.layers.MaxPool2D(pool_size=(2, 2)))

# Classifier head.
model.add(keras.layers.Flatten())
model.add(keras.layers.Dense(1024, activation='relu'))
model.add(keras.layers.Dropout(0.5))
model.add(keras.layers.Dense(1024, activation='relu'))
model.add(keras.layers.Dropout(0.5))
model.add(keras.layers.Dense(10, activation='softmax'))
# Labels are integer class ids (not one-hot), hence the sparse loss.
model.compile(
    loss='sparse_categorical_crossentropy',
    # BUG FIX: the 'lr' keyword is deprecated (and removed in recent
    # tf.keras releases); the supported name is 'learning_rate'.
    optimizer=tf.optimizers.SGD(learning_rate=0.001),
    metrics=['accuracy'],
)
model.summary()

# Train for 50 epochs, evaluating on the test pipeline after every epoch
# (validation_freq=1).
history = model.fit(
    train_ds,
    epochs=50,
    validation_data=test_ds,
    validation_freq=1,
)
# history.history contains one list per metric, one entry per epoch:
# dict_keys(['loss', 'accuracy', 'val_loss', 'val_accuracy'])
# (The scraped original had this REPL output pasted as a bare statement,
# and its plot-label string literals were split across lines — both are
# syntax errors; reconstructed here.)

f, ax = plt.subplots(2, 1, figsize=(10, 10))

# First subplot: training vs. validation loss per epoch.
ax[0].plot(history.history['loss'], color='b', label='Training Loss')
ax[0].plot(history.history['val_loss'], color='r', label='Validation Loss')
# BUG FIX: a single trailing plt.legend() only annotates the currently
# active axes; attach a legend to each subplot instead.
ax[0].legend()

# Second subplot: training vs. validation accuracy per epoch.
ax[1].plot(history.history['accuracy'], color='b',
           label='Training Accuracy')
ax[1].plot(history.history['val_accuracy'], color='r',
           label='Validation Accuracy')
ax[1].legend()

# Report the best validation accuracy reached over all epochs.
print('Accuracy Score = ', np.max(history.history['val_accuracy']))

You might also like