DL Practical PROGRAM

BTCS - 705 Deep Learning Lab

Task 6 –
Write a program to develop an optimized ANN model, then print the
accuracy plot of the Mushroom dataset downloaded from Kaggle.
Program –
import pandas as pd
import numpy as np

df = pd.read_csv('mushroom_cleaned.csv')

#REMOVING DUPLICATES
df1 = df.drop_duplicates()

#LABEL ENCODING
from sklearn.preprocessing import LabelEncoder

label_encoder = LabelEncoder()
for column in df1.columns:
    df1.loc[:, column] = label_encoder.fit_transform(df1[column])

#TRAINING TEST SPLIT
x = df1.drop(['class'], axis=1)
y = df1['class']

from sklearn.model_selection import train_test_split

x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.3,
                                                    random_state=42)

#STANDARD SCALING
from sklearn.preprocessing import StandardScaler

sc = StandardScaler()
sc.fit(x_train)
x_train = sc.transform(x_train)
x_test = sc.transform(x_test)


import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import Sequential
from tensorflow.keras.layers import Dense, Dropout

model = Sequential()

# This line initializes an empty neural network model as a Sequential model.
# The Sequential model is a linear stack of layers, meaning you can add
# layers to it one after another using the .add() method.
# Input layer and first hidden layer with 128 neurons and dropout

model.add(Dense(128, activation='relu', input_dim=x_train.shape[1]))
model.add(Dropout(0.3))

# Second hidden layer with 64 neurons
model.add(Dense(64, activation='relu'))

# Third hidden layer with 32 neurons
model.add(Dense(32, activation='relu'))

# Output layer for binary classification
model.add(Dense(1, activation='sigmoid'))

model.summary()

#COMPILING MODEL
from tensorflow.keras.optimizers import Adam

optimizer = Adam(learning_rate=0.001)
model.compile(loss='binary_crossentropy', optimizer=optimizer,
              metrics=['accuracy'])


# Train the model
history = model.fit(x_train, y_train, epochs=50, batch_size=32,
                    validation_data=(x_test, y_test), verbose=0)

#ACCURACY
from sklearn.metrics import accuracy_score

# Evaluate the model on the test data
y_pred = (model.predict(x_test) > 0.5).astype("int32")
accuracy = accuracy_score(y_test, y_pred)
print(f"Test Accuracy: {accuracy:.3f}")

# PLOT TRAINING & VALIDATION ACCURACY VALUES
import matplotlib.pyplot as plt

plt.plot(history.history['accuracy'])
plt.plot(history.history['val_accuracy'])
plt.title('Model Accuracy')
plt.ylabel('Accuracy')
plt.xlabel('Epoch')
plt.legend(['Train', 'Test'], loc='upper left')
plt.show()


Task 7 –
Write a program to develop an ANN model, then print the accuracy
plot of the TITANIC dataset.
Program –
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder, StandardScaler
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout
from tensorflow.keras.optimizers import Adam
import matplotlib.pyplot as plt

# Load and preprocess data
df = pd.read_csv('titanic.csv')

df.drop(columns=["PassengerId", "Name", "Ticket", "Cabin"],


inplace=True)

df["Age"].fillna(df["Age"].median(), inplace=True)
df["Embarked"].fillna(df["Embarked"].mode()[0], inplace=True)

# Convert categorical data to numeric
label_encoder = LabelEncoder()
df["Sex"] = label_encoder.fit_transform(df["Sex"])
df["Embarked"] = label_encoder.fit_transform(df["Embarked"])

# Define features and target (here the target is the port of embarkation,
# a 3-class label, so it is one-hot encoded for a softmax output)
x = df.drop(['Embarked'], axis=1)
y = to_categorical(df['Embarked'])

# Split the dataset
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.3,
                                                    random_state=42)

# Standardize features
sc = StandardScaler()
x_train = sc.fit_transform(x_train)
x_test = sc.transform(x_test)

# Build model
model = Sequential([
    Dense(64, activation='relu', input_dim=x_train.shape[1]),
    Dropout(0.3),
    Dense(32, activation='relu'),
    Dense(y.shape[1], activation='softmax')
])

# Compile model
optimizer = Adam(learning_rate=0.001)
model.compile(loss='categorical_crossentropy',
              optimizer=optimizer, metrics=['accuracy'])

# Train model
history = model.fit(x_train, y_train, epochs=50, batch_size=32,
                    validation_data=(x_test, y_test), verbose=1)

# Evaluate model
test_loss, test_accuracy = model.evaluate(x_test, y_test, verbose=0)
print(f"Test Accuracy: {test_accuracy:.3f}") #1

# Plot accuracy
plt.plot(history.history['accuracy'], label='Training Accuracy')
plt.plot(history.history['val_accuracy'], label='Validation Accuracy')
plt.xlabel('Epochs')
plt.ylabel('Accuracy')
plt.legend()
plt.title('Training and Validation Accuracy')
plt.show()
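
To inspect individual predictions, the softmax outputs can be decoded back
to class indices with argmax; a small optional check, assuming the variables
defined above are still in scope:

# Decode softmax outputs to class indices for the first five test rows
sample_pred = np.argmax(model.predict(x_test[:5]), axis=1)
sample_true = np.argmax(y_test[:5], axis=1)
print("Predicted:", sample_pred, "Actual:", sample_true)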


Task 8 –
Print the confusion matrix and loss plot of the PIMA dataset and report
the metrics derived from the matrix, such as precision, recall, and F1 score.
Program –
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import (confusion_matrix, precision_score,
                             recall_score, f1_score, roc_auc_score)
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.callbacks import EarlyStopping

# Load the dataset
path = 'diabetes.csv'
columns = ['Pregnancies', 'Glucose', 'BloodPressure', 'SkinThickness',
           'Insulin', 'BMI', 'DiabetesPedigreeFunction', 'Age', 'Outcome']
df = pd.read_csv(path, names=columns)

# Data preprocessing: coerce non-numeric entries (e.g. a header row)
# to NaN, then drop them
df[columns] = df[columns].apply(pd.to_numeric, errors='coerce')
df.dropna(inplace=True)

# Split features and target
X = df.drop('Outcome', axis=1)
y = df['Outcome']

# Standardize features

scaler = StandardScaler()
X_scaled = scaler.fit_transform(X)

# Split dataset
X_train, X_test, y_train, y_test = train_test_split(X_scaled, y,
test_size=0.2, random_state=42)

# Build model
model = Sequential([
    Dense(16, input_dim=8, activation='relu'),
    Dropout(0.3),
    Dense(12, activation='relu'),
    Dense(8, activation='relu'),
    Dense(1, activation='sigmoid')
])

# Compile model
optimizer = Adam(learning_rate=0.001)
model.compile(loss='binary_crossentropy', optimizer=optimizer,
metrics=['accuracy'])

# Add early stopping
early_stopping = EarlyStopping(monitor='val_loss', patience=10,
restore_best_weights=True)

# Train model
history = model.fit(X_train, y_train, epochs=150, batch_size=32,
validation_split=0.2, verbose=0, callbacks=[early_stopping])

# Predictions
y_pred = (model.predict(X_test) > 0.5).astype("int32")

# Confusion Matrix
cm = confusion_matrix(y_test, y_pred)
sns.heatmap(cm, annot=True, fmt='d', cmap='Blues')
plt.title('Confusion Matrix')
plt.xlabel('Predicted')


plt.ylabel('True')
plt.show()

# Plot Loss
plt.plot(history.history['loss'], label='Train Loss')
plt.plot(history.history['val_loss'], label='Validation Loss')
plt.title('Loss Plot')
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.legend()
plt.show()

# Evaluation Metrics
precision = precision_score(y_test, y_pred)
recall = recall_score(y_test, y_pred)
f1 = f1_score(y_test, y_pred)
# ROC-AUC should be computed from predicted probabilities,
# not from the thresholded labels
y_pred_prob = model.predict(X_test)
roc_auc = roc_auc_score(y_test, y_pred_prob)

print(f"Precision: {precision:.3f}")
print(f"Recall: {recall:.3f}")
print(f"F1-Score: {f1:.3f}")
print(f"ROC-AUC: {roc_auc:.3f}")


Task 9 –
Print the accuracy of an ANN model on the PIMA dataset with
different optimizers.
List of optimizers: Adam, RMSprop, SGD, Adadelta, Adagrad,
Nadam.
Program –
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import accuracy_score
import matplotlib.pyplot as plt
from tensorflow.keras import Sequential
from tensorflow.keras.layers import Dense, Dropout
from tensorflow.keras.optimizers import (Adam, SGD, RMSprop,
                                         Adadelta, Adagrad, Nadam)

# Load the PIMA Indians Diabetes dataset
url = "https://raw.githubusercontent.com/jbrownlee/Datasets/master/pima-indians-diabetes.data.csv"

columns = ['Pregnancies', 'Glucose', 'BloodPressure', 'SkinThickness',
           'Insulin', 'BMI', 'DiabetesPedigreeFunction', 'Age', 'Outcome']

data = pd.read_csv(url, header=None, names=columns)

# Split features and labels
X = data.iloc[:, :-1]
y = data.iloc[:, -1]


# Train-test split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2,
random_state=42, stratify=y)

# Scale the features
scaler = StandardScaler()
X_train_scaled = scaler.fit_transform(X_train)
X_test_scaled = scaler.transform(X_test)

# Function to create and train the model
def create_and_train_model(optimizer):
    model = Sequential([
        Dense(64, activation='relu', input_dim=X_train_scaled.shape[1]),
        Dropout(0.3),
        Dense(32, activation='relu'),
        Dense(16, activation='relu'),
        Dense(1, activation='sigmoid')
    ])
    model.compile(optimizer=optimizer, loss='binary_crossentropy',
                  metrics=['accuracy'])
    # Train the model
    history = model.fit(X_train_scaled, y_train, epochs=50, batch_size=32,
                        verbose=0, validation_data=(X_test_scaled, y_test))
    # Evaluate the model
    y_pred = (model.predict(X_test_scaled) > 0.5).astype("int32")
    acc = accuracy_score(y_test, y_pred)
    return acc, history

# Test with different optimizers
optimizers = {
    'Adam': Adam(),
    'SGD': SGD(),
    'RMSprop': RMSprop(),
    'Adadelta': Adadelta(),
    'Adagrad': Adagrad(),
    'Nadam': Nadam()
}

results = {}
for name, optimizer in optimizers.items():
    accuracy, history = create_and_train_model(optimizer)
    results[name] = accuracy
    print(f"Optimizer: {name}, Test Accuracy: {accuracy:.3f}")


Task 10 –
Object detection with a pre-trained RetinaNet in Keras.
Program –
# Install dependencies first (shell command, or prefix with ! in a notebook):
# pip install keras-retinanet opencv-python

import numpy as np
import cv2
import matplotlib.pyplot as plt
from keras_retinanet import models
from keras_retinanet.utils.image import preprocess_image, resize_image
from keras_retinanet.utils.visualization import draw_box, draw_caption
from keras_retinanet.utils.colors import label_color

# Load pre-trained RetinaNet model (ResNet50 backbone); the weights file
# is available from the fizyr/keras-retinanet GitHub releases
model_path = 'resnet50_coco_best_v2.1.0.h5'  # pre-trained on the COCO dataset
model = models.load_model(model_path, backbone_name='resnet50')

# Label-to-name mapping for the COCO dataset
labels_to_names = {0: 'person', 1: 'bicycle', 2: 'car', 3: 'motorcycle',
                   4: 'airplane', 5: 'bus', 6: 'train', 7: 'truck',
                   8: 'boat', 9: 'traffic light',
                   # Add remaining COCO classes as needed
                   }

# Load image
image_path = r"K:\market.jpg"  # Path to the image
image = cv2.imread(image_path)

# Keep an unmodified copy for drawing, since preprocessing alters pixel values
draw = image.copy()

# Preprocess the image
image = preprocess_image(image)
image, scale = resize_image(image)

# Run detection
boxes, scores, labels = model.predict_on_batch(np.expand_dims(image, axis=0))

# Correct for image scale
boxes /= scale

# Visualize detections
for box, score, label in zip(boxes[0], scores[0], labels[0]):
    if score < 0.5:  # Adjust threshold as needed
        continue

    # Draw the box on the unmodified copy
    color = label_color(label)
    b = box.astype(int)
    draw_box(draw, b, color=color)

    # Caption for the object
    caption = f"{labels_to_names[label]}: {score:.2f}"
    draw_caption(draw, b, caption)

# Convert to RGB for matplotlib (OpenCV loads images as BGR)
draw = cv2.cvtColor(draw, cv2.COLOR_BGR2RGB)

# Display the image with bounding boxes
plt.figure(figsize=(50, 50))
plt.imshow(draw)
plt.axis('off')
plt.show()


Task 11 –
Neural recommendation system.
Program –
import numpy as np
import tensorflow as tf
from tensorflow.keras.layers import Embedding, Flatten, Input, Dot
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import Adam
import matplotlib.pyplot as plt

# Triplet loss function: hinge on the difference of squared distances
def triplet_loss(y_true, y_pred, margin=1.0):
    anchor, positive, negative = tf.split(y_pred, 3, axis=1)
    positive_dist = tf.reduce_sum(tf.square(anchor - positive), axis=1)
    negative_dist = tf.reduce_sum(tf.square(anchor - negative), axis=1)
    return tf.reduce_mean(tf.maximum(positive_dist - negative_dist + margin, 0.0))

# Neural Recommender System with Implicit Feedback
class NeuralRecommenderWithTripletLoss:
    def __init__(self, num_users, num_items, embedding_dim=32, margin=1.0):
        self.num_users = num_users
        self.num_items = num_items
        self.embedding_dim = embedding_dim
        self.margin = margin
        self.model = self.build_model()

    def build_model(self):
        # Input layers for user, positive item, and negative item
        user_input = Input(shape=(1,), name="user_input")
        pos_item_input = Input(shape=(1,), name="positive_item_input")
        neg_item_input = Input(shape=(1,), name="negative_item_input")

        # Embedding layers for user and items
        user_embedding = Embedding(self.num_users, self.embedding_dim,
                                   name="user_embedding")
        item_embedding = Embedding(self.num_items, self.embedding_dim,
                                   name="item_embedding")

        # User embedding
        user_embedded = Flatten()(user_embedding(user_input))

        # Positive and negative item embeddings
        pos_item_embedded = Flatten()(item_embedding(pos_item_input))
        neg_item_embedded = Flatten()(item_embedding(neg_item_input))

        # Concatenate embeddings to compute triplet loss
        merged_embeddings = tf.concat([user_embedded, pos_item_embedded,
                                       neg_item_embedded], axis=1)

        # Build and compile model
        model = Model(inputs=[user_input, pos_item_input, neg_item_input],
                      outputs=merged_embeddings)
        model.compile(optimizer=Adam(learning_rate=0.001), loss=triplet_loss)
        return model

    def train(self, user_ids, pos_item_ids, neg_item_ids, epochs=10,
              batch_size=64):
        # Train the model with the triplet inputs
        history = self.model.fit(
            [user_ids, pos_item_ids, neg_item_ids],
            np.zeros(len(user_ids)),  # Dummy targets for triplet loss
            epochs=epochs,
            batch_size=batch_size
        )
        return history

# Simulate data
num_users = 1000
num_items = 1000
embedding_dim = 32

# Generate random training data
user_ids = np.random.randint(0, num_users, size=10000)
pos_item_ids = np.random.randint(0, num_items, size=10000)
neg_item_ids = np.random.randint(0, num_items, size=10000)

# Create and train the recommender system
recommender = NeuralRecommenderWithTripletLoss(num_users, num_items,
                                               embedding_dim)
history = recommender.train(user_ids, pos_item_ids, neg_item_ids,
                            epochs=10, batch_size=64)

# Plot the training loss
plt.plot(history.history['loss'])
plt.xlabel('Epochs')
plt.ylabel('Triplet Loss')
plt.title('Training Loss over Epochs')
plt.show()


Task 12 -
Implementation of backpropagation in a neural network using NumPy.
Program -
import numpy as np
import matplotlib.pyplot as plt

# Sigmoid activation function and its derivative
def sigmoid(x):
    return 1 / (1 + np.exp(-x))

# Note: expects the already-activated value, i.e. x = sigmoid(z)
def sigmoid_derivative(x):
    return x * (1 - x)

# Mean Squared Error (MSE) loss function and its derivative
def mse_loss(y_true, y_pred):
    return np.mean((y_true - y_pred) ** 2)

def mse_loss_derivative(y_true, y_pred):
    return y_pred - y_true

# Neural Network class with backpropagation and loss tracking
class NeuralNetwork:
    def __init__(self, input_size, hidden_size, output_size,
                 learning_rate=0.1):
        # Initialize weights and biases
        self.learning_rate = learning_rate
        self.weights_input_hidden = np.random.randn(input_size, hidden_size)
        self.bias_hidden = np.random.randn(1, hidden_size)
        self.weights_hidden_output = np.random.randn(hidden_size, output_size)
        self.bias_output = np.random.randn(1, output_size)
        # Track loss
        self.loss_history = []

    def forward(self, X):
        # Forward pass
        self.hidden_layer_input = np.dot(X, self.weights_input_hidden) + self.bias_hidden
        self.hidden_layer_output = sigmoid(self.hidden_layer_input)
        self.output_layer_input = np.dot(self.hidden_layer_output,
                                         self.weights_hidden_output) + self.bias_output
        self.output = sigmoid(self.output_layer_input)
        return self.output

    def backpropagate(self, X, y):
        # Compute output error
        output_error = mse_loss_derivative(y, self.output)
        output_delta = output_error * sigmoid_derivative(self.output)

        # Compute hidden layer error
        hidden_error = output_delta.dot(self.weights_hidden_output.T)
        hidden_delta = hidden_error * sigmoid_derivative(self.hidden_layer_output)

        # Update weights and biases (gradient descent step)
        self.weights_hidden_output -= self.learning_rate * self.hidden_layer_output.T.dot(output_delta)
        self.bias_output -= self.learning_rate * np.sum(output_delta, axis=0, keepdims=True)
        self.weights_input_hidden -= self.learning_rate * X.T.dot(hidden_delta)
        self.bias_hidden -= self.learning_rate * np.sum(hidden_delta, axis=0, keepdims=True)

    def train(self, X, y, epochs=10000):
        for epoch in range(epochs):
            # Forward pass
            self.forward(X)
            # Backpropagation
            self.backpropagate(X, y)
            # Calculate and store loss
            loss = mse_loss(y, self.output)
            self.loss_history.append(loss)
            # Print loss every 1000 epochs
            if epoch % 1000 == 0:
                print(f"Epoch {epoch}, Loss: {loss}")

    def plot_loss(self):
        plt.plot(self.loss_history)
        plt.xlabel("Epochs")
        plt.ylabel("Mean Squared Error Loss")
        plt.title("Loss Over Epochs")
        plt.show()

# Example usage
if __name__ == "__main__":
    # XOR dataset
    X = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
    y = np.array([[0], [1], [1], [0]])

    # Define and train the neural network
    nn = NeuralNetwork(input_size=2, hidden_size=2, output_size=1,
                       learning_rate=0.1)
    nn.train(X, y, epochs=10000)

    # Test the neural network
    predictions = nn.forward(X)
    print("Predictions:")
    print(predictions)

    # Plot the loss curve
    nn.plot_loss()
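
The raw sigmoid outputs can be read as class labels by thresholding at 0.5;
a small optional check on the XOR data, continuing inside the same
__main__ block with the variables above:

    # Optional: threshold sigmoid outputs to 0/1 and compare with targets
    predicted_labels = (predictions > 0.5).astype(int)
    print("Thresholded:", predicted_labels.ravel())
    print("Targets:    ", y.ravel())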

Task 13 –
Implementation of a neural recommender system with implicit
feedback and the triplet loss.
Program –
import numpy as np
import tensorflow as tf
from tensorflow.keras.layers import Embedding, Flatten, Input, Concatenate
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import Adam
import matplotlib.pyplot as plt

# Triplet loss function (y_true is unused, so it is named `_`)
def triplet_loss(_, y_pred, margin=1.0):
    anchor, positive, negative = tf.split(y_pred, 3, axis=1)
    positive_dist = tf.reduce_sum(tf.square(anchor - positive), axis=1)
    negative_dist = tf.reduce_sum(tf.square(anchor - negative), axis=1)
    return tf.reduce_mean(tf.maximum(positive_dist - negative_dist + margin, 0.0))

# Neural Recommender System with Implicit Feedback
class NeuralRecommenderWithTripletLoss:
    def __init__(self, num_users, num_items, embedding_dim=32, margin=1.0):
        self.num_users = num_users
        self.num_items = num_items
        self.embedding_dim = embedding_dim
        self.margin = margin
        self.model = self.build_model()

    def build_model(self):
        # Input layers for user, positive item, and negative item
        user_input = Input(shape=(1,), name="user_input")
        pos_item_input = Input(shape=(1,), name="positive_item_input")
        neg_item_input = Input(shape=(1,), name="negative_item_input")

        # Embedding layers for user and items
        user_embedding = Embedding(self.num_users, self.embedding_dim,
                                   name="user_embedding")
        item_embedding = Embedding(self.num_items, self.embedding_dim,
                                   name="item_embedding")

        # User embedding
        user_embedded = Flatten()(user_embedding(user_input))

        # Positive and negative item embeddings
        pos_item_embedded = Flatten()(item_embedding(pos_item_input))
        neg_item_embedded = Flatten()(item_embedding(neg_item_input))

        # Concatenate embeddings to compute triplet loss
        merged_embeddings = Concatenate(axis=1)([user_embedded,
                                                 pos_item_embedded,
                                                 neg_item_embedded])

        # Build and compile model
        model = Model(inputs=[user_input, pos_item_input, neg_item_input],
                      outputs=merged_embeddings)
        model.compile(optimizer=Adam(learning_rate=0.001), loss=triplet_loss)
        return model

    def train(self, user_ids, pos_item_ids, neg_item_ids, epochs=10,
              batch_size=64):
        # Train the model with the triplet inputs
        history = self.model.fit(
            [user_ids, pos_item_ids, neg_item_ids],
            np.zeros((len(user_ids), self.embedding_dim * 3)),  # Dummy targets for triplet loss
            epochs=epochs,
            batch_size=batch_size
        )
        return history

# Simulate data
num_users = 1000
num_items = 1000
embedding_dim = 32

# Generate random training data
user_ids = np.random.randint(0, num_users, size=10000)
pos_item_ids = np.random.randint(0, num_items, size=10000)
neg_item_ids = np.random.randint(0, num_items, size=10000)

# Create and train the recommender system
recommender = NeuralRecommenderWithTripletLoss(num_users, num_items,
                                               embedding_dim)
history = recommender.train(user_ids, pos_item_ids, neg_item_ids,
                            epochs=10, batch_size=64)

# Plot the training loss
plt.plot(history.history['loss'])
plt.xlabel('Epochs')
plt.ylabel('Triplet Loss')
plt.title('Training Loss over Epochs')
plt.show()
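
Once trained, the learned embeddings can rank items for a user: the triplet
loss pulls a user's positive items close in embedding space, so smaller
squared distances mean stronger recommendations. A minimal illustrative
sketch using the model built above (layer names as defined in build_model):

# Optional: rank all items for one user by embedding distance
user_vecs = recommender.model.get_layer("user_embedding").get_weights()[0]
item_vecs = recommender.model.get_layer("item_embedding").get_weights()[0]
dists = np.sum((item_vecs - user_vecs[0]) ** 2, axis=1)  # distances to user 0
top_items = np.argsort(dists)[:10]  # ten closest items
print("Top-10 items for user 0:", top_items)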

Task 14 –
Write a program to print the accuracy of a CNN using the max
pooling technique.
Program –
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D, MaxPooling2D, Flatten, Dense
from tensorflow.keras.datasets import cifar10
import matplotlib.pyplot as plt

# Load and preprocess the CIFAR-10 dataset
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
x_train, x_test = x_train / 255.0, x_test / 255.0  # Normalize pixel values

# Convert labels to categorical format
y_train = tf.keras.utils.to_categorical(y_train, 10)
y_test = tf.keras.utils.to_categorical(y_test, 10)

# Build the CNN model with Max Pooling layers
model = Sequential([
    Conv2D(32, (3, 3), activation='relu', input_shape=(32, 32, 3)),
    MaxPooling2D(pool_size=(2, 2)),
    Conv2D(64, (3, 3), activation='relu'),
    MaxPooling2D(pool_size=(2, 2)),
    Conv2D(128, (3, 3), activation='relu'),
    MaxPooling2D(pool_size=(2, 2)),
    Flatten(),
    Dense(128, activation='relu'),
    Dense(10, activation='softmax')
])

# Compile the model
model.compile(optimizer='adam', loss='categorical_crossentropy',
              metrics=['accuracy'])


# Train the model and capture the history
history = model.fit(x_train, y_train, epochs=10, batch_size=64,
                    validation_data=(x_test, y_test))

# Evaluate the model and print test accuracy
test_loss, test_accuracy = model.evaluate(x_test, y_test, verbose=0)
print(f"Test Accuracy: {test_accuracy:.4f}")

# Plot training & validation accuracy values


plt.plot(history.history['accuracy'], label='Training Accuracy')
plt.plot(history.history['val_accuracy'], label='Validation Accuracy')
plt.xlabel('Epochs')
plt.ylabel('Accuracy')
plt.legend(loc='lower right')
plt.title('Training and Validation Accuracy')
plt.show()
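
Each MaxPooling2D layer here halves the spatial size of its feature maps
(the 32x32 input becomes 30x30 after the first convolution, then 15x15
after pooling, and so on down to 2x2); this can be verified from the
layer output shapes:

# Optional: inspect how pooling shrinks the feature maps
model.summary()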


Task 15 –
Write a program to print the accuracy of a CNN using the average
pooling technique.
Program –
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D, AveragePooling2D, Flatten, Dense
from tensorflow.keras.datasets import cifar10
import matplotlib.pyplot as plt

# Load and preprocess the CIFAR-10 dataset
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
x_train, x_test = x_train / 255.0, x_test / 255.0  # Normalize pixel values

# Convert labels to categorical format
y_train = tf.keras.utils.to_categorical(y_train, 10)
y_test = tf.keras.utils.to_categorical(y_test, 10)

# Build the CNN model with Average Pooling layers
model = Sequential([
    Conv2D(32, (3, 3), activation='relu', input_shape=(32, 32, 3)),
    AveragePooling2D(pool_size=(2, 2)),
    Conv2D(64, (3, 3), activation='relu'),
    AveragePooling2D(pool_size=(2, 2)),
    Conv2D(128, (3, 3), activation='relu'),
    AveragePooling2D(pool_size=(2, 2)),
    Flatten(),
    Dense(128, activation='relu'),
    Dense(10, activation='softmax')
])

# Compile the model
model.compile(optimizer='adam', loss='categorical_crossentropy',
              metrics=['accuracy'])


# Train the model and capture the history
history = model.fit(x_train, y_train, epochs=10, batch_size=64,
                    validation_data=(x_test, y_test))

# Evaluate the model and print test accuracy
test_loss, test_accuracy = model.evaluate(x_test, y_test, verbose=0)
print(f"Test Accuracy: {test_accuracy:.4f}")

# Plot training & validation accuracy values


plt.plot(history.history['accuracy'], label='Training Accuracy')
plt.plot(history.history['val_accuracy'], label='Validation Accuracy')
plt.xlabel('Epochs')
plt.ylabel('Accuracy')
plt.legend(loc='lower right')
plt.title('Training and Validation Accuracy')
plt.show()


Task 16 –
Write a program to print the accuracy of a CNN using the global
pooling technique.
Program –
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D, GlobalAveragePooling2D, Dense
from tensorflow.keras.datasets import cifar10
import matplotlib.pyplot as plt

# Load and preprocess the CIFAR-10 dataset
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
x_train, x_test = x_train / 255.0, x_test / 255.0  # Normalize pixel values

# Convert labels to categorical format
y_train = tf.keras.utils.to_categorical(y_train, 10)
y_test = tf.keras.utils.to_categorical(y_test, 10)

# Build the CNN model with Global Average Pooling
model = Sequential([
    Conv2D(32, (3, 3), activation='relu', input_shape=(32, 32, 3)),
    Conv2D(64, (3, 3), activation='relu'),
    Conv2D(128, (3, 3), activation='relu'),
    GlobalAveragePooling2D(),  # Replaces Flatten; averages each feature map globally
    Dense(128, activation='relu'),
    Dense(10, activation='softmax')
])

# Compile the model
model.compile(optimizer='adam', loss='categorical_crossentropy',
              metrics=['accuracy'])


# Train the model and capture the history
history = model.fit(x_train, y_train, epochs=10, batch_size=64,
                    validation_data=(x_test, y_test))

# Evaluate the model and print test accuracy
test_loss, test_accuracy = model.evaluate(x_test, y_test, verbose=0)
print(f"Test Accuracy: {test_accuracy:.4f}")

# Plot training & validation accuracy values


plt.plot(history.history['accuracy'], label='Training Accuracy')
plt.plot(history.history['val_accuracy'], label='Validation Accuracy')
plt.xlabel('Epochs')
plt.ylabel('Accuracy')
plt.legend(loc='lower right')
plt.title('Training and Validation Accuracy')
plt.show()

