
DL Problem

The document outlines multiple statements regarding training Deep Neural Networks (DNNs) on different datasets, including MNIST and Wildfire, using various optimizers such as Adam, SGD, and RMSprop. Each statement details the model architecture, training process, evaluation metrics, and visualization of results like accuracy, ROC curves, and classification reports. The focus is on comparing performance across different configurations and datasets.


Statement 1:

Train a Deep Neural Network on the MNIST dataset using the Adam optimizer with a learning
rate of 0.001, and generate a classification report and ROC AUC plot.

# Import necessary libraries
import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics import classification_report, roc_auc_score, roc_curve
from sklearn.preprocessing import label_binarize
from tensorflow.keras.datasets import mnist
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Flatten
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.utils import to_categorical

# Load the MNIST dataset


(x_train, y_train), (x_test, y_test) = mnist.load_data()

# Normalize the images to range [0, 1]


x_train = x_train / 255.0
x_test = x_test / 255.0

# Convert labels to one-hot encoding


y_train_cat = to_categorical(y_train, num_classes=10)
y_test_cat = to_categorical(y_test, num_classes=10)

# Build a simple deep neural network model


model = Sequential([
    Flatten(input_shape=(28, 28)),
    Dense(128, activation='relu'),
    Dense(64, activation='relu'),
    Dense(10, activation='softmax')  # 10 output classes
])

# Compile the model using the Adam optimizer with learning rate 0.001
optimizer = Adam(learning_rate=0.001)
model.compile(optimizer=optimizer,
              loss='categorical_crossentropy',
              metrics=['accuracy'])

# Train the model


history = model.fit(x_train, y_train_cat, epochs=5, batch_size=128, validation_split=0.2)

# Evaluate the model


test_loss, test_accuracy = model.evaluate(x_test, y_test_cat, verbose=0)
print(f"Test Accuracy: {test_accuracy:.4f}")

# Predict classes
y_pred_probs = model.predict(x_test)
y_pred_classes = np.argmax(y_pred_probs, axis=1)

# Classification report
print("\nClassification Report:")
print(classification_report(y_test, y_pred_classes))

# ROC AUC score (multiclass, one-vs-rest)
y_test_bin = label_binarize(y_test, classes=np.arange(10))
roc_auc = roc_auc_score(y_test_bin, y_pred_probs, average='macro', multi_class='ovr')
print(f"\nMulticlass ROC AUC Score: {roc_auc:.4f}")

# Plot ROC curves for all classes
plt.figure(figsize=(12, 8))
for i in range(10):
    fpr, tpr, _ = roc_curve(y_test_bin[:, i], y_pred_probs[:, i])
    plt.plot(fpr, tpr, label=f'Class {i} (AUC = {roc_auc_score(y_test_bin[:, i], y_pred_probs[:, i]):.2f})')

plt.plot([0, 1], [0, 1], 'k--') # Diagonal line


plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('ROC Curves for MNIST Classification')
plt.legend(loc='lower right')
plt.grid(True)
plt.show()
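
For a single summary curve, a micro-averaged ROC (pooling all one-vs-rest decisions) can complement the per-class plot; a minimal sketch reusing y_test_bin and y_pred_probs from above:

# Micro-averaged ROC: flatten the one-hot labels and scores into one binary problem
fpr_micro, tpr_micro, _ = roc_curve(y_test_bin.ravel(), y_pred_probs.ravel())
plt.plot(fpr_micro, tpr_micro, label='Micro-average ROC')
plt.plot([0, 1], [0, 1], 'k--')
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.legend(loc='lower right')
plt.show()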

Statement 2
Train a DNN using the SGD optimizer with a learning rate of 0.0001 on the MNIST dataset
and analyze the model's performance.

# Import necessary libraries


import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow.keras.datasets import mnist
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Flatten
from tensorflow.keras.optimizers import SGD
from tensorflow.keras.utils import to_categorical

# Load the MNIST dataset


(x_train, y_train), (x_test, y_test) = mnist.load_data()
# Normalize the input data to the range [0, 1]
x_train = x_train.astype('float32') / 255.0
x_test = x_test.astype('float32') / 255.0

# Convert labels to one-hot encoded vectors


y_train = to_categorical(y_train, 10)
y_test = to_categorical(y_test, 10)

# Define the Deep Neural Network model


model = Sequential([
    Flatten(input_shape=(28, 28)),
    Dense(128, activation='relu'),
    Dense(64, activation='relu'),
    Dense(10, activation='softmax')  # Output layer for 10 classes
])

# Compile the model with the SGD optimizer and a low learning rate
optimizer = SGD(learning_rate=0.0001)
model.compile(optimizer=optimizer,
              loss='categorical_crossentropy',
              metrics=['accuracy'])

# Train the model
history = model.fit(x_train, y_train,
                    epochs=20,
                    batch_size=128,
                    validation_split=0.1,
                    verbose=2)

# Evaluate the model on test data


test_loss, test_acc = model.evaluate(x_test, y_test, verbose=0)
print(f"\nTest Accuracy: {test_acc * 100:.2f}%")
print(f"Test Loss: {test_loss:.4f}")

# Plot training and validation accuracy and loss


plt.figure(figsize=(14, 5))

plt.subplot(1, 2, 1)
plt.plot(history.history['accuracy'], label='Train Accuracy')
plt.plot(history.history['val_accuracy'], label='Val Accuracy')
plt.title('Accuracy over Epochs')
plt.xlabel('Epoch')
plt.ylabel('Accuracy')
plt.legend()
plt.grid(True)

plt.subplot(1, 2, 2)
plt.plot(history.history['loss'], label='Train Loss')
plt.plot(history.history['val_loss'], label='Val Loss')
plt.title('Loss over Epochs')
plt.xlabel('Epoch')
plt.ylabel('Loss')
plt.legend()
plt.grid(True)

plt.tight_layout()
plt.show()
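
With a learning rate this small, plain SGD converges slowly. Momentum is the usual remedy; a minimal sketch of that alternative configuration (not the run analyzed above):

# SGD with momentum accumulates a velocity term that speeds up convergence
optimizer_momentum = SGD(learning_rate=0.0001, momentum=0.9)
# To compare, recompile the same architecture with optimizer_momentum and retrain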
Statement 3
Train a Deep Neural Network on the MNIST dataset using RMSprop optimizer with a learning
rate of 0.0001, and compare results using an accuracy table and ROC curve.

# Import required libraries


import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.metrics import classification_report, roc_curve, auc
from sklearn.preprocessing import label_binarize
from tensorflow.keras.datasets import mnist
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Flatten
from tensorflow.keras.optimizers import RMSprop
from tensorflow.keras.utils import to_categorical

# Load MNIST dataset


(x_train, y_train), (x_test, y_test) = mnist.load_data()

# Normalize pixel values


x_train = x_train.astype('float32') / 255.0
x_test = x_test.astype('float32') / 255.0

# One-hot encode the labels


y_train_cat = to_categorical(y_train, 10)
y_test_cat = to_categorical(y_test, 10)

# Build the DNN model


model = Sequential([
    Flatten(input_shape=(28, 28)),
    Dense(128, activation='relu'),
    Dense(64, activation='relu'),
    Dense(10, activation='softmax')
])

# Compile model with RMSprop optimizer
optimizer = RMSprop(learning_rate=0.0001)
model.compile(optimizer=optimizer,
              loss='categorical_crossentropy',
              metrics=['accuracy'])

# Train the model
history = model.fit(x_train, y_train_cat,
                    epochs=15,
                    batch_size=128,
                    validation_split=0.1,
                    verbose=2)

# Evaluate model on test set


test_loss, test_acc = model.evaluate(x_test, y_test_cat, verbose=0)
print(f"\nTest Accuracy: {test_acc * 100:.2f}%")
print(f"Test Loss: {test_loss:.4f}")

# Predict class probabilities


y_pred_prob = model.predict(x_test)
y_pred_classes = np.argmax(y_pred_prob, axis=1)

# ------------------ Accuracy Table (Classification Report) ------------------


report = classification_report(y_test, y_pred_classes, output_dict=True)
report_df = pd.DataFrame(report).transpose()
plt.figure(figsize=(10, 6))
sns.heatmap(report_df.iloc[:-1, :-1], annot=True, fmt=".2f", cmap="Blues")
plt.title("Classification Report (Accuracy Table)")
plt.show()

# ------------------ ROC Curve ------------------


# Binarize labels for multi-class ROC
y_test_bin = label_binarize(y_test, classes=range(10))
fpr = {}
tpr = {}
roc_auc = {}

# Calculate ROC for each class


for i in range(10):
    fpr[i], tpr[i], _ = roc_curve(y_test_bin[:, i], y_pred_prob[:, i])
    roc_auc[i] = auc(fpr[i], tpr[i])

# Plot all ROC curves


plt.figure(figsize=(10, 8))
for i in range(10):
    plt.plot(fpr[i], tpr[i], label=f'Class {i} (AUC = {roc_auc[i]:.2f})')
plt.plot([0, 1], [0, 1], 'k--', label='Random Guess')
plt.title('Multi-Class ROC Curve')
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.legend()
plt.grid(True)
plt.show()
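
A confusion matrix is a natural companion to the accuracy table; a short sketch reusing the predictions above (confusion_matrix comes from sklearn, and seaborn is already imported):

from sklearn.metrics import confusion_matrix

cm = confusion_matrix(y_test, y_pred_classes)
plt.figure(figsize=(8, 6))
sns.heatmap(cm, annot=True, fmt='d', cmap='Blues')
plt.xlabel('Predicted Label')
plt.ylabel('True Label')
plt.title('Confusion Matrix')
plt.show()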
Statement 4
Use SGD optimizer with a learning rate of 0.01 to train a DNN on the Wildfire dataset, then
evaluate precision, recall, and F1-score with supporting bar plots.

# Import libraries
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report
from sklearn.preprocessing import StandardScaler
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from tensorflow.keras.optimizers import SGD
from tensorflow.keras.utils import to_categorical

# Load the Wildfire dataset (adjust file path if needed)


df = pd.read_csv('wildfire.csv')

# Display first few rows


print("Dataset Preview:")
print(df.head())

# Assume last column is the target (binary: 0 = No fire, 1 = Fire)


X = df.iloc[:, :-1].values
y = df.iloc[:, -1].values

# Scale the features


scaler = StandardScaler()
X_scaled = scaler.fit_transform(X)

# One-hot encode the target if binary classification


y_cat = to_categorical(y)

# Split data into training and testing sets


x_train, x_test, y_train, y_test = train_test_split(X_scaled, y_cat, test_size=0.2, random_state=42)

# Build the DNN model


model = Sequential([
    Dense(64, activation='relu', input_shape=(X.shape[1],)),
    Dense(32, activation='relu'),
    Dense(2, activation='softmax')  # Binary classification with 2 output units
])

# Compile model with SGD optimizer
optimizer = SGD(learning_rate=0.01)
model.compile(optimizer=optimizer, loss='categorical_crossentropy', metrics=['accuracy'])

# Train the model


history = model.fit(x_train, y_train, epochs=20, batch_size=32, validation_split=0.1, verbose=2)

# Evaluate on test data


loss, accuracy = model.evaluate(x_test, y_test, verbose=0)
print(f"\nTest Accuracy: {accuracy*100:.2f}%")
print(f"Test Loss: {loss:.4f}")

# Predict labels for test data


y_pred_probs = model.predict(x_test)
y_pred_classes = np.argmax(y_pred_probs, axis=1)
y_true = np.argmax(y_test, axis=1)

# Generate classification report


report = classification_report(y_true, y_pred_classes, output_dict=True)
report_df = pd.DataFrame(report).transpose()

# Display precision, recall, f1-score


metrics_df = report_df[['precision', 'recall', 'f1-score']].iloc[:2] # Only class 0 and 1

# Plotting
metrics_df.plot(kind='bar', figsize=(8, 6), colormap='viridis')
plt.title('Precision, Recall & F1-Score for Wildfire Detection')
plt.xlabel('Class (0 = No Fire, 1 = Fire)')
plt.ylabel('Score')
plt.ylim(0, 1)
plt.grid(True)
plt.xticks(rotation=0)
plt.legend(loc='lower right')
plt.tight_layout()
plt.show()
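
The per-class bars can be supplemented with the aggregate rows of the report; a minimal sketch reading them from report_df:

# Macro and weighted averages summarize performance across both classes
print(report_df.loc[['macro avg', 'weighted avg'], ['precision', 'recall', 'f1-score']].round(3))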
Statement 5
Train a DNN on the Forest Fire dataset using the RMSprop optimizer with a learning rate of 0.01.
Report training and validation accuracy.

# Import libraries
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from tensorflow.keras.optimizers import RMSprop
from tensorflow.keras.utils import to_categorical

# Load the Forest Fire dataset (adjust file path if necessary)


df = pd.read_csv('forestfires.csv')

# Show dataset structure


print("Dataset Preview:")
print(df.head())

# Assuming the last column is the binary class (0: no fire, 1: fire)
X = df.iloc[:, :-1].values
y = df.iloc[:, -1].values

# Feature scaling
scaler = StandardScaler()
X_scaled = scaler.fit_transform(X)

# One-hot encode target for binary classification


y_cat = to_categorical(y)

# Split into training and test sets


x_train, x_test, y_train, y_test = train_test_split(X_scaled, y_cat, test_size=0.2, random_state=42)

# Build the DNN model


model = Sequential([
    Dense(64, activation='relu', input_shape=(X.shape[1],)),
    Dense(32, activation='relu'),
    Dense(2, activation='softmax')  # 2 neurons for binary classification
])

# Compile with RMSprop optimizer and learning rate of 0.01
optimizer = RMSprop(learning_rate=0.01)
model.compile(optimizer=optimizer, loss='categorical_crossentropy', metrics=['accuracy'])

# Train the model and store history


history = model.fit(x_train, y_train,
                    epochs=20,
                    batch_size=32,
                    validation_split=0.2,
                    verbose=2)
# Plot training & validation accuracy
plt.figure(figsize=(8, 6))
plt.plot(history.history['accuracy'], label='Training Accuracy', marker='o')
plt.plot(history.history['val_accuracy'], label='Validation Accuracy', marker='s')
plt.title('Training and Validation Accuracy')
plt.xlabel('Epochs')
plt.ylabel('Accuracy')
plt.grid(True)
plt.legend()
plt.tight_layout()
plt.show()
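
To report the requested numbers directly rather than only plotting them, the final history entries can be printed; a minimal sketch:

# Final-epoch training and validation accuracy
print(f"Final training accuracy:   {history.history['accuracy'][-1]:.4f}")
print(f"Final validation accuracy: {history.history['val_accuracy'][-1]:.4f}")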
Statement 6
Compare DNN training using Adam and SGD optimizers (both with a learning rate of 0.001) on the Wildfire dataset.
# Import necessary libraries
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from tensorflow.keras.optimizers import SGD, Adam
from tensorflow.keras.utils import to_categorical

# Load Wildfire dataset


df = pd.read_csv('wildfire.csv')

# Display first few rows


print("Dataset Sample:")
print(df.head())

# Feature and label split


X = df.iloc[:, :-1].values
y = df.iloc[:, -1].values

# Normalize features
scaler = StandardScaler()
X_scaled = scaler.fit_transform(X)

# One-hot encode target


y_cat = to_categorical(y)

# Train-test split
x_train, x_test, y_train, y_test = train_test_split(X_scaled, y_cat, test_size=0.2, random_state=42)
# Define a function to build the model
def build_model(optimizer):
    model = Sequential([
        Dense(64, activation='relu', input_shape=(X.shape[1],)),
        Dense(32, activation='relu'),
        Dense(2, activation='softmax')  # Binary classification
    ])
    model.compile(optimizer=optimizer, loss='categorical_crossentropy', metrics=['accuracy'])
    return model

# Create and train model with SGD optimizer


sgd_model = build_model(SGD(learning_rate=0.001))
history_sgd = sgd_model.fit(x_train, y_train, epochs=20, batch_size=32,
                            validation_split=0.2, verbose=0)

# Create and train model with Adam optimizer
adam_model = build_model(Adam(learning_rate=0.001))
history_adam = adam_model.fit(x_train, y_train, epochs=20, batch_size=32,
                              validation_split=0.2, verbose=0)

# Plot training and validation accuracy for both


plt.figure(figsize=(10, 6))
plt.plot(history_sgd.history['val_accuracy'], label='SGD - Validation Accuracy',
         linestyle='--', marker='o')
plt.plot(history_adam.history['val_accuracy'], label='Adam - Validation Accuracy',
         linestyle='-', marker='s')
plt.plot(history_sgd.history['accuracy'], label='SGD - Training Accuracy', linestyle='--', alpha=0.7)
plt.plot(history_adam.history['accuracy'], label='Adam - Training Accuracy', linestyle='-', alpha=0.7)

plt.title('Comparison of DNN Training: SGD vs Adam (lr = 0.001)')


plt.xlabel('Epoch')
plt.ylabel('Accuracy')
plt.grid(True)
plt.legend()
plt.tight_layout()
plt.show()
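
A one-line numeric summary makes the optimizer comparison concrete; a short sketch pulling the final validation accuracies:

# Final validation accuracy per optimizer
print(f"SGD  final val accuracy: {history_sgd.history['val_accuracy'][-1]:.4f}")
print(f"Adam final val accuracy: {history_adam.history['val_accuracy'][-1]:.4f}")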
Statement 7
Image Classification on MNIST Using DNN with Learning Rate Variation
● Use the MNIST dataset and build a DNN
● Train the same model using learning rates: 0.01, 0.001
● Use SGD optimizer and track accuracy for each run
● Plot loss and accuracy for comparison
import numpy as np
import matplotlib.pyplot as plt
from tensorflow.keras.datasets import mnist
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Flatten
from tensorflow.keras.optimizers import SGD
from tensorflow.keras.utils import to_categorical

# Load MNIST data


(x_train, y_train), (x_test, y_test) = mnist.load_data()

# Normalize pixel values


x_train = x_train.astype('float32') / 255.
x_test = x_test.astype('float32') / 255.

# One-hot encode labels


y_train_cat = to_categorical(y_train, 10)
y_test_cat = to_categorical(y_test, 10)

# Define function to build model


def build_model():
    model = Sequential([
        Flatten(input_shape=(28, 28)),
        Dense(128, activation='relu'),
        Dense(64, activation='relu'),
        Dense(10, activation='softmax')
    ])
    return model

# Learning rates to compare


learning_rates = [0.01, 0.001]

# Dictionaries to store histories


histories = {}

for lr in learning_rates:
    print(f"\nTraining model with learning rate = {lr}")
    model = build_model()
    optimizer = SGD(learning_rate=lr)
    model.compile(optimizer=optimizer, loss='categorical_crossentropy', metrics=['accuracy'])
    history = model.fit(x_train, y_train_cat, epochs=15, batch_size=128, validation_split=0.1, verbose=2)
    histories[lr] = history

# Plot comparison of loss and accuracy


plt.figure(figsize=(14, 6))

# Plot Loss
plt.subplot(1, 2, 1)
for lr, history in histories.items():
    plt.plot(history.history['loss'], label=f'Train Loss (lr={lr})')
    plt.plot(history.history['val_loss'], linestyle='--', label=f'Val Loss (lr={lr})')
plt.title('Training and Validation Loss')
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.legend()
plt.grid(True)

# Plot Accuracy
plt.subplot(1, 2, 2)
for lr, history in histories.items():
    plt.plot(history.history['accuracy'], label=f'Train Acc (lr={lr})')
    plt.plot(history.history['val_accuracy'], linestyle='--', label=f'Val Acc (lr={lr})')
plt.title('Training and Validation Accuracy')
plt.xlabel('Epochs')
plt.ylabel('Accuracy')
plt.legend()
plt.grid(True)

plt.tight_layout()
plt.show()
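
The loop above keeps only the training histories. To also compare test accuracy per learning rate, the models themselves must be retained; a sketch assuming the loop is extended with a models = {} dict (models[lr] = model):

# Evaluate each stored model on the held-out test set
for lr, m in models.items():
    _, test_acc = m.evaluate(x_test, y_test_cat, verbose=0)
    print(f"lr={lr}: test accuracy = {test_acc:.4f}")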
Statement 8
Evaluating DNN on CIFAR-10 Using Batch Size Variation
● Load CIFAR-10 dataset
● Use a feed-forward network with BatchNormalization
● Train with batch sizes 32 and 64, keeping other parameters constant
● Use Adam optimizer and train for 10 epochs
● Compare accuracy and plot graphs
import numpy as np
import matplotlib.pyplot as plt
from tensorflow.keras.datasets import cifar10
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Flatten, BatchNormalization, Activation
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.utils import to_categorical

# Load CIFAR-10 dataset


(x_train, y_train), (x_test, y_test) = cifar10.load_data()

# Normalize pixel values


x_train = x_train.astype('float32') / 255.
x_test = x_test.astype('float32') / 255.

# One-hot encode labels


y_train_cat = to_categorical(y_train, 10)
y_test_cat = to_categorical(y_test, 10)

# Define model builder with BatchNormalization


def build_model():
    model = Sequential([
        Flatten(input_shape=(32, 32, 3)),
        Dense(512),
        BatchNormalization(),
        Activation('relu'),
        Dense(256),
        BatchNormalization(),
        Activation('relu'),
        Dense(128),
        BatchNormalization(),
        Activation('relu'),
        Dense(10, activation='softmax')
    ])
    return model

# Batch sizes to evaluate


batch_sizes = [32, 64]
histories = {}

for batch_size in batch_sizes:
    print(f"\nTraining with batch size = {batch_size}")
    model = build_model()
    model.compile(optimizer=Adam(), loss='categorical_crossentropy', metrics=['accuracy'])

    history = model.fit(x_train, y_train_cat,
                        epochs=10,
                        batch_size=batch_size,
                        validation_split=0.1,
                        verbose=2)

    histories[batch_size] = history

# Plot accuracy comparison


plt.figure(figsize=(14, 6))

plt.subplot(1, 2, 1)
for bs, history in histories.items():
    plt.plot(history.history['accuracy'], label=f'Train Acc (batch={bs})')
    plt.plot(history.history['val_accuracy'], linestyle='--', label=f'Val Acc (batch={bs})')
plt.title('Training and Validation Accuracy')
plt.xlabel('Epoch')
plt.ylabel('Accuracy')
plt.legend()
plt.grid(True)

# Plot loss comparison


plt.subplot(1, 2, 2)
for bs, history in histories.items():
    plt.plot(history.history['loss'], label=f'Train Loss (batch={bs})')
    plt.plot(history.history['val_loss'], linestyle='--', label=f'Val Loss (batch={bs})')
plt.title('Training and Validation Loss')
plt.xlabel('Epoch')
plt.ylabel('Loss')
plt.legend()
plt.grid(True)

plt.tight_layout()
plt.show()
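
For a numeric accuracy comparison alongside the plots, the stored histories can be summarized directly; a minimal sketch:

# Final validation accuracy per batch size
for bs, history in histories.items():
    print(f"batch size {bs}: final val accuracy = {history.history['val_accuracy'][-1]:.4f}")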
Statement 9
Train a DNN on the UCI dataset using batch size 32 and a learning rate of 0.0001. Evaluate training time and accuracy.

import time
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler, LabelEncoder
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from tensorflow.keras.optimizers import SGD
from tensorflow.keras.utils import to_categorical

# Load UCI Wine Quality Dataset (Red Wine)
url = "https://archive.ics.uci.edu/ml/machine-learning-databases/wine-quality/winequality-red.csv"
data = pd.read_csv(url, sep=';')

# Features and target


X = data.drop('quality', axis=1).values
y = data['quality'].values

# Because quality values are integers from 3-8, we will treat this as a classification problem
# Convert target to categorical classes
# First encode labels to consecutive integers starting from 0
label_encoder = LabelEncoder()
y_encoded = label_encoder.fit_transform(y)
num_classes = len(np.unique(y_encoded))
y_cat = to_categorical(y_encoded, num_classes)

# Train-test split
X_train, X_test, y_train, y_test = train_test_split(X, y_cat, test_size=0.2,
                                                    random_state=42, stratify=y_cat)

# Feature scaling
scaler = StandardScaler()
X_train = scaler.fit_transform(X_train)
X_test = scaler.transform(X_test)
# Build simple DNN model
def build_model():
    model = Sequential([
        Dense(64, input_shape=(X_train.shape[1],), activation='relu'),
        Dense(32, activation='relu'),
        Dense(num_classes, activation='softmax')
    ])
    return model

# Parameters
batch_size = 32
learning_rate = 0.0001

# Compile model
model = build_model()
optimizer = SGD(learning_rate=learning_rate)
model.compile(optimizer=optimizer, loss='categorical_crossentropy', metrics=['accuracy'])

# Train model with timing


start_time = time.time()
history = model.fit(X_train, y_train, epochs=30, batch_size=batch_size,
                    validation_split=0.1, verbose=2)
end_time = time.time()

training_time = end_time - start_time

# Evaluate on test data


test_loss, test_accuracy = model.evaluate(X_test, y_test, verbose=0)

print(f"\nTraining time: {training_time:.2f} seconds")


print(f"Test Accuracy: {test_accuracy*100:.2f}%")

Statement 10
Preprocess the Alphabet CSV dataset using label encoding and standard scaling, then train a simple DNN using batch size 32 and learning rate 0.0001.

import pandas as pd
import numpy as np
from sklearn.preprocessing import LabelEncoder, StandardScaler
from sklearn.model_selection import train_test_split
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from tensorflow.keras.optimizers import SGD
from tensorflow.keras.utils import to_categorical

# Load dataset (replace 'alphabet.csv' with your actual filename/path)


# For demonstration, assuming the last column is the target (alphabet labels)
data = pd.read_csv('alphabet.csv')

# Separate features and target


X = data.iloc[:, :-1]
y = data.iloc[:, -1]

# Label encode target (alphabets to integers)


label_encoder_target = LabelEncoder()
y_encoded = label_encoder_target.fit_transform(y)

# Check for categorical features in X and label encode if any


for col in X.columns:
    if X[col].dtype == 'object':
        le = LabelEncoder()
        X[col] = le.fit_transform(X[col])

# Standard scale features


scaler = StandardScaler()
X_scaled = scaler.fit_transform(X)

# One-hot encode target for classification


num_classes = len(np.unique(y_encoded))
y_cat = to_categorical(y_encoded, num_classes)

# Train-test split
X_train, X_test, y_train, y_test = train_test_split(X_scaled, y_cat, test_size=0.2,
                                                    random_state=42, stratify=y_cat)

# Build simple DNN


def build_model():
    model = Sequential([
        Dense(64, input_shape=(X_train.shape[1],), activation='relu'),
        Dense(32, activation='relu'),
        Dense(num_classes, activation='softmax')
    ])
    return model

# Parameters
batch_size = 32
learning_rate = 0.0001

# Compile model
model = build_model()
optimizer = SGD(learning_rate=learning_rate)
model.compile(optimizer=optimizer, loss='categorical_crossentropy', metrics=['accuracy'])
# Train model
history = model.fit(X_train, y_train, epochs=30, batch_size=batch_size,
                    validation_split=0.1, verbose=2)

# Evaluate on test set


test_loss, test_accuracy = model.evaluate(X_test, y_test, verbose=0)
print(f"\nTest Accuracy: {test_accuracy*100:.2f}%")
Statement 11
Use a batch size of 64 and learning rate of 0.001 to train a DNN on the UCI dataset. Document
training accuracy and loss.

import pandas as pd
import numpy as np
from sklearn.preprocessing import LabelEncoder, StandardScaler
from sklearn.model_selection import train_test_split
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from tensorflow.keras.optimizers import SGD
from tensorflow.keras.utils import to_categorical
import matplotlib.pyplot as plt

# Load UCI Alphabet dataset (Letter Recognition)


# Replace 'alphabet.csv' with your actual path if different
data = pd.read_csv('alphabet.csv')

# Assuming last column is target (letters)


X = data.iloc[:, :-1]
y = data.iloc[:, -1]

# Label encode target (letters to integers)


label_encoder_target = LabelEncoder()
y_encoded = label_encoder_target.fit_transform(y)

# Check for categorical features in X and label encode if any (usually numeric, but just in case)
for col in X.columns:
    if X[col].dtype == 'object':
        le = LabelEncoder()
        X[col] = le.fit_transform(X[col])

# Standard scale features


scaler = StandardScaler()
X_scaled = scaler.fit_transform(X)

# One-hot encode target for classification


num_classes = len(np.unique(y_encoded))
y_cat = to_categorical(y_encoded, num_classes)
# Train-test split
X_train, X_test, y_train, y_test = train_test_split(X_scaled, y_cat, test_size=0.2,
                                                    random_state=42, stratify=y_cat)

# Build DNN model


def build_model():
    model = Sequential([
        Dense(128, input_shape=(X_train.shape[1],), activation='relu'),
        Dense(64, activation='relu'),
        Dense(num_classes, activation='softmax')
    ])
    return model

# Parameters
batch_size = 64
learning_rate = 0.001

# Compile model
model = build_model()
optimizer = SGD(learning_rate=learning_rate)
model.compile(optimizer=optimizer, loss='categorical_crossentropy', metrics=['accuracy'])

# Train model and record history


history = model.fit(X_train, y_train, epochs=30, batch_size=batch_size,
                    validation_split=0.1, verbose=2)

# Plot training accuracy and loss


plt.figure(figsize=(12,5))

plt.subplot(1,2,1)
plt.plot(history.history['accuracy'], label='Train Accuracy')
plt.plot(history.history['val_accuracy'], label='Validation Accuracy')
plt.title('Training and Validation Accuracy')
plt.xlabel('Epoch')
plt.ylabel('Accuracy')
plt.legend()

plt.subplot(1,2,2)
plt.plot(history.history['loss'], label='Train Loss')
plt.plot(history.history['val_loss'], label='Validation Loss')
plt.title('Training and Validation Loss')
plt.xlabel('Epoch')
plt.ylabel('Loss')
plt.legend()

plt.show()
# Evaluate on test set
test_loss, test_accuracy = model.evaluate(X_test, y_test, verbose=0)
print(f"\nTest Accuracy: {test_accuracy*100:.2f}%")
Statement 12
Preprocess the Alphabet dataset and train a CNN using the Adam optimizer, 20 epochs, batch size 64, and a learning rate of 0.001.

import pandas as pd
import numpy as np
from sklearn.preprocessing import LabelEncoder, StandardScaler
from sklearn.model_selection import train_test_split
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D, MaxPooling2D, Flatten, Dense, Dropout
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.utils import to_categorical
import matplotlib.pyplot as plt

# Load Alphabet dataset


data = pd.read_csv('alphabet.csv') # replace with your path

# Features and target


X = data.iloc[:, :-1]
y = data.iloc[:, -1]

# Label encode target


le_target = LabelEncoder()
y_encoded = le_target.fit_transform(y)

# Check for categorical features in X and encode if any (usually numeric)


for col in X.columns:
    if X[col].dtype == 'object':
        le = LabelEncoder()
        X[col] = le.fit_transform(X[col])

# Standard scale features


scaler = StandardScaler()
X_scaled = scaler.fit_transform(X)

# Number of samples and features


n_samples, n_features = X_scaled.shape

# Reshape features to 2D for CNN input:
# e.g. reshape to (samples, 4, 4, 1) if there are 16 features;
# if the feature count is not a perfect square, pad with zeros
import math

def reshape_for_cnn(X):
    n = X.shape[1]
    sq = int(math.ceil(np.sqrt(n)))  # smallest side length whose square is >= n
    padded_size = sq * sq

    # Pad with zeros if needed
    if padded_size > n:
        padding = np.zeros((X.shape[0], padded_size - n))
        X_padded = np.hstack((X, padding))
    else:
        X_padded = X

    # Reshape to (samples, sq, sq, 1)
    return X_padded.reshape(-1, sq, sq, 1)

X_cnn = reshape_for_cnn(X_scaled)
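
# Sanity check (illustrative): e.g. 16 features pad to a 4x4x1 grid
print("CNN input shape:", X_cnn.shape)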

# One-hot encode target


num_classes = len(np.unique(y_encoded))
y_cat = to_categorical(y_encoded, num_classes)

# Train-test split
X_train, X_test, y_train, y_test = train_test_split(X_cnn, y_cat, test_size=0.2,
                                                    random_state=42, stratify=y_cat)

# Build CNN model


model = Sequential([
    Conv2D(32, (3, 3), activation='relu', input_shape=X_train.shape[1:]),
    MaxPooling2D((2, 2)),
    Dropout(0.25),

    Conv2D(64, (3, 3), activation='relu'),
    MaxPooling2D((2, 2)),
    Dropout(0.25),

    Flatten(),
    Dense(128, activation='relu'),
    Dropout(0.5),
    Dense(num_classes, activation='softmax')
])

# Compile
learning_rate = 0.001
optimizer = Adam(learning_rate=learning_rate)
model.compile(optimizer=optimizer, loss='categorical_crossentropy', metrics=['accuracy'])
# Train
history = model.fit(X_train, y_train, epochs=20, batch_size=64, validation_split=0.1, verbose=2)

# Plot training accuracy and loss

plt.figure(figsize=(12,5))

plt.subplot(1,2,1)
plt.plot(history.history['accuracy'], label='Train Accuracy')
plt.plot(history.history['val_accuracy'], label='Validation Accuracy')
plt.title('Accuracy')
plt.xlabel('Epoch')
plt.ylabel('Accuracy')
plt.legend()

plt.subplot(1,2,2)
plt.plot(history.history['loss'], label='Train Loss')
plt.plot(history.history['val_loss'], label='Validation Loss')
plt.title('Loss')
plt.xlabel('Epoch')
plt.ylabel('Loss')
plt.legend()

plt.show()

# Evaluate on test set


test_loss, test_acc = model.evaluate(X_test, y_test, verbose=0)
print(f"\nTest Accuracy: {test_acc*100:.2f}%")
Statement 13
Compare the performance of a CNN and a DNN on the CIFAR-10 dataset. Highlight differences
in accuracy and training time.

import time
import numpy as np
import matplotlib.pyplot as plt
from tensorflow.keras.datasets import cifar10
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Flatten, Conv2D, MaxPooling2D
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.optimizers import Adam

# Load CIFAR-10
(X_train, y_train), (X_test, y_test) = cifar10.load_data()
# Normalize data
X_train = X_train.astype('float32') / 255.0
X_test = X_test.astype('float32') / 255.0

# One-hot encode labels


num_classes = 10
y_train_cat = to_categorical(y_train, num_classes)
y_test_cat = to_categorical(y_test, num_classes)

# 1) Define DNN model (simple feedforward)


def build_dnn():
    model = Sequential([
        Flatten(input_shape=X_train.shape[1:]),
        Dense(512, activation='relu'),
        Dense(256, activation='relu'),
        Dense(128, activation='relu'),
        Dense(num_classes, activation='softmax')
    ])
    return model

# 2) Define CNN model


def build_cnn():
    model = Sequential([
        Conv2D(32, (3, 3), activation='relu', padding='same', input_shape=X_train.shape[1:]),
        MaxPooling2D((2, 2)),
        Conv2D(64, (3, 3), activation='relu', padding='same'),
        MaxPooling2D((2, 2)),
        Conv2D(128, (3, 3), activation='relu', padding='same'),
        MaxPooling2D((2, 2)),
        Flatten(),
        Dense(128, activation='relu'),
        Dense(num_classes, activation='softmax')
    ])
    return model

# Training parameters
batch_size = 64
epochs = 15
learning_rate = 0.001

# Train and time DNN


dnn = build_dnn()
# Give each model its own optimizer instance; sharing one across models can fail in Keras
dnn.compile(optimizer=Adam(learning_rate=learning_rate), loss='categorical_crossentropy', metrics=['accuracy'])
start_time = time.time()
history_dnn = dnn.fit(X_train, y_train_cat, epochs=epochs, batch_size=batch_size,
                      validation_split=0.1, verbose=2)
dnn_time = time.time() - start_time

# Evaluate DNN
dnn_loss, dnn_acc = dnn.evaluate(X_test, y_test_cat, verbose=0)

# Train and time CNN


cnn = build_cnn()
cnn.compile(optimizer=Adam(learning_rate=learning_rate), loss='categorical_crossentropy', metrics=['accuracy'])
start_time = time.time()
history_cnn = cnn.fit(X_train, y_train_cat, epochs=epochs, batch_size=batch_size,
                      validation_split=0.1, verbose=2)
cnn_time = time.time() - start_time

# Evaluate CNN
cnn_loss, cnn_acc = cnn.evaluate(X_test, y_test_cat, verbose=0)

# Print results
print(f"DNN Test Accuracy: {dnn_acc*100:.2f}% | Training time: {dnn_time:.2f} seconds")
print(f"CNN Test Accuracy: {cnn_acc*100:.2f}% | Training time: {cnn_time:.2f} seconds")

# Plot accuracy comparison


plt.figure(figsize=(10,5))
plt.plot(history_dnn.history['val_accuracy'], label='DNN Validation Accuracy')
plt.plot(history_cnn.history['val_accuracy'], label='CNN Validation Accuracy')
plt.title('Validation Accuracy: DNN vs CNN on CIFAR-10')
plt.xlabel('Epoch')
plt.ylabel('Accuracy')
plt.legend()
plt.show()
Statement 14
Implement a Deep Neural Network (DNN) on the MNIST dataset using the Adam optimizer with
a learning rate of 0.001 and plot training accuracy and loss.

import matplotlib.pyplot as plt
from tensorflow.keras.datasets import mnist
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Flatten
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.utils import to_categorical

# Load MNIST data


(X_train, y_train), (X_test, y_test) = mnist.load_data()

# Normalize
X_train = X_train.astype('float32') / 255.0
X_test = X_test.astype('float32') / 255.0

# One-hot encode labels


num_classes = 10
y_train_cat = to_categorical(y_train, num_classes)
y_test_cat = to_categorical(y_test, num_classes)

# Build DNN model


model = Sequential([
    Flatten(input_shape=(28, 28)),
    Dense(512, activation='relu'),
    Dense(256, activation='relu'),
    Dense(num_classes, activation='softmax')
])

# Compile with Adam optimizer


optimizer = Adam(learning_rate=0.001)
model.compile(optimizer=optimizer, loss='categorical_crossentropy', metrics=['accuracy'])

# Train model
history = model.fit(X_train, y_train_cat, epochs=20, batch_size=64, validation_split=0.1, verbose=2)

# Plot training accuracy and loss


plt.figure(figsize=(12, 5))

plt.subplot(1, 2, 1)
plt.plot(history.history['accuracy'], label='Train Accuracy')
plt.plot(history.history['val_accuracy'], label='Validation Accuracy')
plt.title('Accuracy')
plt.xlabel('Epoch')
plt.ylabel('Accuracy')
plt.legend()

plt.subplot(1, 2, 2)
plt.plot(history.history['loss'], label='Train Loss')
plt.plot(history.history['val_loss'], label='Validation Loss')
plt.title('Loss')
plt.xlabel('Epoch')
plt.ylabel('Loss')
plt.legend()

plt.show()
Statement 15
Implement a DNN using RMSprop with learning rates 0.01 and 0.0001 on the Wildfire dataset.
Compare training and validation performance.
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler, LabelEncoder
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from tensorflow.keras.optimizers import RMSprop
from tensorflow.keras.utils import to_categorical

# Load Wildfire dataset (replace 'wildfire.csv' with actual path)


data = pd.read_csv('wildfire.csv')

# Example preprocessing - adjust based on your dataset columns:


# Assume last column is the target variable
X = data.iloc[:, :-1].values
y = data.iloc[:, -1].values

# Encode target if categorical


if y.dtype == object or len(np.unique(y)) < 20:
    le = LabelEncoder()
    y = le.fit_transform(y)

# If classification, convert to categorical


num_classes = len(np.unique(y))
y_cat = to_categorical(y, num_classes)

# Split data
X_train, X_val, y_train, y_val = train_test_split(X, y_cat, test_size=0.2, random_state=42)

# Standard scale features


scaler = StandardScaler()
X_train = scaler.fit_transform(X_train)
X_val = scaler.transform(X_val)

# Define DNN model builder function


def build_model(input_dim, num_classes):
    model = Sequential([
        Dense(128, activation='relu', input_dim=input_dim),
        Dense(64, activation='relu'),
        Dense(num_classes, activation='softmax')
    ])
    return model

# Training params
batch_size = 32
epochs = 30
# Train with RMSprop, lr = 0.01
model_high_lr = build_model(X_train.shape[1], num_classes)
model_high_lr.compile(optimizer=RMSprop(learning_rate=0.01),
                      loss='categorical_crossentropy', metrics=['accuracy'])

history_high_lr = model_high_lr.fit(X_train, y_train,
                                    validation_data=(X_val, y_val),
                                    epochs=epochs, batch_size=batch_size, verbose=2)

# Train with RMSprop, lr = 0.0001
model_low_lr = build_model(X_train.shape[1], num_classes)
model_low_lr.compile(optimizer=RMSprop(learning_rate=0.0001),
                     loss='categorical_crossentropy', metrics=['accuracy'])

history_low_lr = model_low_lr.fit(X_train, y_train,
                                  validation_data=(X_val, y_val),
                                  epochs=epochs, batch_size=batch_size, verbose=2)

# Plot training and validation accuracy and loss for both learning rates
plt.figure(figsize=(14, 6))

plt.subplot(1, 2, 1)
plt.plot(history_high_lr.history['accuracy'], label='Train Acc (lr=0.01)')
plt.plot(history_high_lr.history['val_accuracy'], label='Val Acc (lr=0.01)')
plt.plot(history_low_lr.history['accuracy'], label='Train Acc (lr=0.0001)')
plt.plot(history_low_lr.history['val_accuracy'], label='Val Acc (lr=0.0001)')
plt.title('Training and Validation Accuracy')
plt.xlabel('Epoch')
plt.ylabel('Accuracy')
plt.legend()

plt.subplot(1, 2, 2)
plt.plot(history_high_lr.history['loss'], label='Train Loss (lr=0.01)')
plt.plot(history_high_lr.history['val_loss'], label='Val Loss (lr=0.01)')
plt.plot(history_low_lr.history['loss'], label='Train Loss (lr=0.0001)')
plt.plot(history_low_lr.history['val_loss'], label='Val Loss (lr=0.0001)')
plt.title('Training and Validation Loss')
plt.xlabel('Epoch')
plt.ylabel('Loss')
plt.legend()

plt.show()
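
RMSprop at 0.01 can be unstable on small tabular data; if the high-lr run diverges, early stopping is a common guard. A minimal sketch (an optional addition, not part of the runs above):

from tensorflow.keras.callbacks import EarlyStopping

# Stop when validation loss stalls and restore the best weights seen
early_stop = EarlyStopping(monitor='val_loss', patience=5, restore_best_weights=True)
# Usage: pass callbacks=[early_stop] to model.fit(...)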
Statement 16
Multiclass classification using Deep Neural Networks. Example: use the OCR letter-recognition dataset (Alphabet.csv).
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder, StandardScaler
from sklearn.metrics import classification_report
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from tensorflow.keras.utils import to_categorical

# Load dataset (adjust path as needed)


data = pd.read_csv('Alphabet.csv')

# Inspect columns, usually first column is label, rest are features


print(data.head())

# Separate features and labels


X = data.iloc[:, 1:].values # all columns except first are features
y = data.iloc[:, 0].values # first column is the label (letters)

# Encode labels (letters) to integers


le = LabelEncoder()
y_enc = le.fit_transform(y)

# One-hot encode output labels for multiclass classification


num_classes = len(np.unique(y_enc))
y_cat = to_categorical(y_enc, num_classes)

# Train/test split
X_train, X_test, y_train, y_test = train_test_split(X, y_cat, test_size=0.2, random_state=42)

# Standardize features
scaler = StandardScaler()
X_train = scaler.fit_transform(X_train)
X_test = scaler.transform(X_test)

# Build DNN model


model = Sequential([
    Dense(128, activation='relu', input_shape=(X_train.shape[1],)),
    Dense(64, activation='relu'),
    Dense(num_classes, activation='softmax')
])

# Compile model
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])

# Train model
history = model.fit(X_train, y_train, epochs=30, batch_size=64, validation_split=0.1, verbose=2)

# Evaluate on test data


loss, accuracy = model.evaluate(X_test, y_test, verbose=0)
print(f"Test Accuracy: {accuracy*100:.2f}%")

# Predict classes for test set


y_pred_prob = model.predict(X_test)
y_pred = np.argmax(y_pred_prob, axis=1)
y_true = np.argmax(y_test, axis=1)

# Classification report
print("\nClassification Report:\n")
print(classification_report(y_true, y_pred, target_names=le.classes_))

# Plot accuracy and loss


plt.figure(figsize=(12, 5))

plt.subplot(1, 2, 1)
plt.plot(history.history['accuracy'], label='Train Accuracy')
plt.plot(history.history['val_accuracy'], label='Validation Accuracy')
plt.title('Accuracy')
plt.xlabel('Epoch')
plt.ylabel('Accuracy')
plt.legend()

plt.subplot(1, 2, 2)
plt.plot(history.history['loss'], label='Train Loss')
plt.plot(history.history['val_loss'], label='Validation Loss')
plt.title('Loss')
plt.xlabel('Epoch')
plt.ylabel('Loss')
plt.legend()
plt.show()
Statement 17
Implement the training of a DNN using Adam and SGD optimizers with a learning rate of
0.001 on the Wildfire dataset. Provide comparative plots.

import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler, LabelEncoder
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from tensorflow.keras.optimizers import Adam, SGD
from tensorflow.keras.utils import to_categorical

# Load Wildfire dataset - replace with your actual file path


data = pd.read_csv('wildfire.csv')

# Example preprocessing - adjust based on your dataset structure


X = data.iloc[:, :-1].values
y = data.iloc[:, -1].values

# Encode target if categorical


if y.dtype == object or len(np.unique(y)) < 20:
    le = LabelEncoder()
    y = le.fit_transform(y)

num_classes = len(np.unique(y))
y_cat = to_categorical(y, num_classes)

# Split data
X_train, X_val, y_train, y_val = train_test_split(X, y_cat, test_size=0.2, random_state=42)

# Standardize features
scaler = StandardScaler()
X_train = scaler.fit_transform(X_train)
X_val = scaler.transform(X_val)

# Build DNN model function


def build_model(input_dim, num_classes):
    model = Sequential([
        Dense(128, activation='relu', input_dim=input_dim),
        Dense(64, activation='relu'),
        Dense(num_classes, activation='softmax')
    ])
    return model

batch_size = 32
epochs = 30
learning_rate = 0.001

# Train with Adam optimizer


model_adam = build_model(X_train.shape[1], num_classes)
model_adam.compile(optimizer=Adam(learning_rate=learning_rate),
                   loss='categorical_crossentropy', metrics=['accuracy'])
history_adam = model_adam.fit(X_train, y_train,
                              validation_data=(X_val, y_val),
                              epochs=epochs, batch_size=batch_size, verbose=2)

# Train with SGD optimizer


model_sgd = build_model(X_train.shape[1], num_classes)
model_sgd.compile(optimizer=SGD(learning_rate=learning_rate),
                  loss='categorical_crossentropy', metrics=['accuracy'])
history_sgd = model_sgd.fit(X_train, y_train,
                            validation_data=(X_val, y_val),
                            epochs=epochs, batch_size=batch_size, verbose=2)

# Plot comparison graphs


plt.figure(figsize=(14, 6))

# Accuracy plot
plt.subplot(1, 2, 1)
plt.plot(history_adam.history['accuracy'], label='Adam Train Acc')
plt.plot(history_adam.history['val_accuracy'], label='Adam Val Acc')
plt.plot(history_sgd.history['accuracy'], label='SGD Train Acc')
plt.plot(history_sgd.history['val_accuracy'], label='SGD Val Acc')
plt.title('Training and Validation Accuracy')
plt.xlabel('Epoch')
plt.ylabel('Accuracy')
plt.legend()

# Loss plot
plt.subplot(1, 2, 2)
plt.plot(history_adam.history['loss'], label='Adam Train Loss')
plt.plot(history_adam.history['val_loss'], label='Adam Val Loss')
plt.plot(history_sgd.history['loss'], label='SGD Train Loss')
plt.plot(history_sgd.history['val_loss'], label='SGD Val Loss')
plt.title('Training and Validation Loss')
plt.xlabel('Epoch')
plt.ylabel('Loss')
plt.legend()

plt.show()
Statement 18
Implement a DNN using batch sizes 32 and 64 with a fixed learning rate of 0.001 on the UCI
dataset. Compare model loss and performance.

import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder, StandardScaler
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.utils import to_categorical

# Load UCI dataset - replace path accordingly


data = pd.read_csv('uci_dataset.csv')

# Example preprocessing (adjust based on dataset specifics)


X = data.iloc[:, :-1].values
y = data.iloc[:, -1].values

# Encode labels if categorical


if y.dtype == object or len(np.unique(y)) < 20:
    le = LabelEncoder()
    y = le.fit_transform(y)

num_classes = len(np.unique(y))
y_cat = to_categorical(y, num_classes)

# Train/test split
X_train, X_val, y_train, y_val = train_test_split(X, y_cat, test_size=0.2, random_state=42)

# Standardize features
scaler = StandardScaler()
X_train = scaler.fit_transform(X_train)
X_val = scaler.transform(X_val)

# Build model function


def build_model(input_dim, num_classes):
    model = Sequential([
        Dense(128, activation='relu', input_dim=input_dim),
        Dense(64, activation='relu'),
        Dense(num_classes, activation='softmax')
    ])
    return model

learning_rate = 0.001
epochs = 30

# Train with batch size 32


model_32 = build_model(X_train.shape[1], num_classes)
model_32.compile(optimizer=Adam(learning_rate=learning_rate),
                 loss='categorical_crossentropy', metrics=['accuracy'])
history_32 = model_32.fit(X_train, y_train,
                          validation_data=(X_val, y_val),
                          epochs=epochs, batch_size=32, verbose=2)

# Train with batch size 64


model_64 = build_model(X_train.shape[1], num_classes)
model_64.compile(optimizer=Adam(learning_rate=learning_rate),
                 loss='categorical_crossentropy', metrics=['accuracy'])
history_64 = model_64.fit(X_train, y_train,
                          validation_data=(X_val, y_val),
                          epochs=epochs, batch_size=64, verbose=2)

# Plot Loss comparison


plt.figure(figsize=(14, 6))

plt.subplot(1, 2, 1)
plt.plot(history_32.history['loss'], label='Batch Size 32 - Train Loss')
plt.plot(history_32.history['val_loss'], label='Batch Size 32 - Val Loss')
plt.plot(history_64.history['loss'], label='Batch Size 64 - Train Loss')
plt.plot(history_64.history['val_loss'], label='Batch Size 64 - Val Loss')
plt.title('Loss Comparison')
plt.xlabel('Epoch')
plt.ylabel('Loss')
plt.legend()

# Plot Accuracy comparison


plt.subplot(1, 2, 2)
plt.plot(history_32.history['accuracy'], label='Batch Size 32 - Train Acc')
plt.plot(history_32.history['val_accuracy'], label='Batch Size 32 - Val Acc')
plt.plot(history_64.history['accuracy'], label='Batch Size 64 - Train Acc')
plt.plot(history_64.history['val_accuracy'], label='Batch Size 64 - Val Acc')
plt.title('Accuracy Comparison')
plt.xlabel('Epoch')
plt.ylabel('Accuracy')
plt.legend()

plt.show()
Statement 19
Preprocess the Alphabet dataset and train both a DNN and a CNN. Use Adam optimizer with a
batch size of 64. Compare accuracy across 20 epochs.

import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder, StandardScaler
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Conv2D, MaxPooling2D, Flatten, Dropout
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.utils import to_categorical

# Load Alphabet dataset (adjust path as needed)


data = pd.read_csv('Alphabet.csv')

# Separate features and target (assuming last column is label)


X = data.iloc[:, :-1].values
y = data.iloc[:, -1].values

# Encode labels
le = LabelEncoder()
y_encoded = le.fit_transform(y)
num_classes = len(np.unique(y_encoded))
y_cat = to_categorical(y_encoded, num_classes)

# Train-test split
X_train, X_val, y_train, y_val = train_test_split(X, y_cat, test_size=0.2, random_state=42)

# Standardize features for DNN and CNN


scaler = StandardScaler()
X_train_scaled = scaler.fit_transform(X_train)
X_val_scaled = scaler.transform(X_val)

# For the CNN: reshape flat feature vectors into image format.
# This assumes 256 features forming 16x16 images; update img_dim if your file differs.
img_dim = 16
X_train_cnn = X_train_scaled.reshape(-1, img_dim, img_dim, 1)
X_val_cnn = X_val_scaled.reshape(-1, img_dim, img_dim, 1)

# Parameters
batch_size = 64
epochs = 20
learning_rate = 0.001

# Build DNN model


def build_dnn(input_dim, num_classes):
    model = Sequential([
        Dense(256, activation='relu', input_dim=input_dim),
        Dense(128, activation='relu'),
        Dense(num_classes, activation='softmax')
    ])
    return model

# Build CNN model


def build_cnn(input_shape, num_classes):
    model = Sequential([
        Conv2D(32, (3, 3), activation='relu', input_shape=input_shape),
        MaxPooling2D((2, 2)),
        Conv2D(64, (3, 3), activation='relu'),
        MaxPooling2D((2, 2)),
        Flatten(),
        Dense(128, activation='relu'),
        Dropout(0.5),
        Dense(num_classes, activation='softmax')
    ])
    return model

# Train DNN
dnn = build_dnn(X_train_scaled.shape[1], num_classes)
dnn.compile(optimizer=Adam(learning_rate=learning_rate),
            loss='categorical_crossentropy', metrics=['accuracy'])
history_dnn = dnn.fit(X_train_scaled, y_train,
                      validation_data=(X_val_scaled, y_val),
                      epochs=epochs, batch_size=batch_size, verbose=2)

# Train CNN
cnn = build_cnn(X_train_cnn.shape[1:], num_classes)
cnn.compile(optimizer=Adam(learning_rate=learning_rate),
            loss='categorical_crossentropy', metrics=['accuracy'])
history_cnn = cnn.fit(X_train_cnn, y_train,
                      validation_data=(X_val_cnn, y_val),
                      epochs=epochs, batch_size=batch_size, verbose=2)

# Plot Accuracy Comparison


plt.figure(figsize=(12,5))
plt.plot(history_dnn.history['val_accuracy'], label='DNN Validation Accuracy')
plt.plot(history_cnn.history['val_accuracy'], label='CNN Validation Accuracy')
plt.title('Validation Accuracy Comparison')
plt.xlabel('Epoch')
plt.ylabel('Accuracy')
plt.legend()
plt.show()
Statement 20
Classify Apple leaf images using a CNN without data augmentation for 10 epochs.

dataset/
    train/
        class1/
        class2/
        ...
    validation/
        class1/
        class2/
        ...

import tensorflow as tf
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D, MaxPooling2D, Flatten, Dense, Dropout
from tensorflow.keras.optimizers import Adam
import matplotlib.pyplot as plt

# Paths to dataset directories (update these paths)


train_dir = 'dataset/train'
val_dir = 'dataset/validation'

# Image parameters
img_height, img_width = 150, 150
batch_size = 32
epochs = 10
learning_rate = 0.001

# Use ImageDataGenerator for loading images WITHOUT augmentation


train_datagen = ImageDataGenerator(rescale=1./255)
val_datagen = ImageDataGenerator(rescale=1./255)

train_generator = train_datagen.flow_from_directory(
    train_dir,
    target_size=(img_height, img_width),
    batch_size=batch_size,
    class_mode='categorical'
)

val_generator = val_datagen.flow_from_directory(
    val_dir,
    target_size=(img_height, img_width),
    batch_size=batch_size,
    class_mode='categorical'
)

num_classes = len(train_generator.class_indices)

# Build CNN model


model = Sequential([
    Conv2D(32, (3, 3), activation='relu', input_shape=(img_height, img_width, 3)),
    MaxPooling2D((2, 2)),

    Conv2D(64, (3, 3), activation='relu'),
    MaxPooling2D((2, 2)),

    Conv2D(128, (3, 3), activation='relu'),
    MaxPooling2D((2, 2)),

    Flatten(),
    Dense(128, activation='relu'),
    Dropout(0.5),
    Dense(num_classes, activation='softmax')
])

model.compile(optimizer=Adam(learning_rate=learning_rate),
              loss='categorical_crossentropy',
              metrics=['accuracy'])

# Train the model
history = model.fit(
    train_generator,
    validation_data=val_generator,
    epochs=epochs,
    verbose=2
)

# Plot training & validation accuracy and loss


plt.figure(figsize=(12,5))

plt.subplot(1,2,1)
plt.plot(history.history['accuracy'], label='Train Accuracy')
plt.plot(history.history['val_accuracy'], label='Validation Accuracy')
plt.title('Accuracy')
plt.xlabel('Epoch')
plt.ylabel('Accuracy')
plt.legend()

plt.subplot(1,2,2)
plt.plot(history.history['loss'], label='Train Loss')
plt.plot(history.history['val_loss'], label='Validation Loss')
plt.title('Loss')
plt.xlabel('Epoch')
plt.ylabel('Loss')
plt.legend()

plt.show()
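
ImageDataGenerator is the legacy loading path; recent TensorFlow releases favor tf.data pipelines. A minimal sketch of the equivalent loading step (an alternative, not what the code above uses):

# tf.data alternative (TF 2.x); labels come one-hot encoded via label_mode
train_ds = tf.keras.utils.image_dataset_from_directory(
    train_dir,
    image_size=(img_height, img_width),
    batch_size=batch_size,
    label_mode='categorical'
)
# Rescaling then happens in the model (a Rescaling layer) or via train_ds.map(...)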
Statement 21
Implement a CNN on the Tomato dataset using batch sizes of 32 and 64 separately. Keep the learning rate fixed at 0.0001 and compare results.

tomato_dataset/
    train/
        class1/
        class2/
    validation/
        class1/
        class2/

import tensorflow as tf
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D, MaxPooling2D, Flatten, Dense, Dropout
from tensorflow.keras.optimizers import Adam
import matplotlib.pyplot as plt

# Update these paths to your dataset location


train_dir = 'tomato_dataset/train'
val_dir = 'tomato_dataset/validation'

img_height, img_width = 150, 150


learning_rate = 0.0001
epochs = 10

def create_data_generators(batch_size):
    train_datagen = ImageDataGenerator(rescale=1./255)
    val_datagen = ImageDataGenerator(rescale=1./255)

    train_generator = train_datagen.flow_from_directory(
        train_dir,
        target_size=(img_height, img_width),
        batch_size=batch_size,
        class_mode='categorical',
        shuffle=True
    )

    val_generator = val_datagen.flow_from_directory(
        val_dir,
        target_size=(img_height, img_width),
        batch_size=batch_size,
        class_mode='categorical',
        shuffle=False
    )
    return train_generator, val_generator

def build_cnn_model(input_shape, num_classes):
    model = Sequential([
        Conv2D(32, (3, 3), activation='relu', input_shape=input_shape),
        MaxPooling2D(2, 2),

        Conv2D(64, (3, 3), activation='relu'),
        MaxPooling2D(2, 2),

        Conv2D(128, (3, 3), activation='relu'),
        MaxPooling2D(2, 2),

        Flatten(),
        Dense(128, activation='relu'),
        Dropout(0.5),
        Dense(num_classes, activation='softmax')
    ])
    return model

# Train with batch size 32


batch_size_32 = 32
train_gen_32, val_gen_32 = create_data_generators(batch_size_32)
num_classes = len(train_gen_32.class_indices)
input_shape = (img_height, img_width, 3)

model_32 = build_cnn_model(input_shape, num_classes)
model_32.compile(optimizer=Adam(learning_rate=learning_rate),
                 loss='categorical_crossentropy',
                 metrics=['accuracy'])

history_32 = model_32.fit(
    train_gen_32,
    validation_data=val_gen_32,
    epochs=epochs,
    verbose=2
)

# Train with batch size 64


batch_size_64 = 64
train_gen_64, val_gen_64 = create_data_generators(batch_size_64)

model_64 = build_cnn_model(input_shape, num_classes)
model_64.compile(optimizer=Adam(learning_rate=learning_rate),
                 loss='categorical_crossentropy',
                 metrics=['accuracy'])

history_64 = model_64.fit(
    train_gen_64,
    validation_data=val_gen_64,
    epochs=epochs,
    verbose=2
)

# Plot accuracy and loss comparison


plt.figure(figsize=(14,6))

plt.subplot(1,2,1)
plt.plot(history_32.history['val_accuracy'], label='Batch size 32')
plt.plot(history_64.history['val_accuracy'], label='Batch size 64')
plt.title('Validation Accuracy Comparison')
plt.xlabel('Epoch')
plt.ylabel('Accuracy')
plt.legend()

plt.subplot(1,2,2)
plt.plot(history_32.history['val_loss'], label='Batch size 32')
plt.plot(history_64.history['val_loss'], label='Batch size 64')
plt.title('Validation Loss Comparison')
plt.xlabel('Epoch')
plt.ylabel('Loss')
plt.legend()

plt.show()

Statement 22
Implement CNNs using Adam and RMSprop optimizers with a learning rate of 0.001 on Peach
images. Record validation loss and accuracy.
peach_dataset/
    train/
        ripe/
            img_001.jpg
            img_002.jpg
            ...
        unripe/
            img_001.jpg
            img_002.jpg
            ...
    validation/
        ripe/
            img_101.jpg
            img_102.jpg
            ...
        unripe/
            img_101.jpg
            img_102.jpg
            ...

import tensorflow as tf
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D, MaxPooling2D, Flatten, Dense, Dropout
from tensorflow.keras.optimizers import Adam, RMSprop
import matplotlib.pyplot as plt

# Update these paths to your Peach dataset locations


train_dir = 'peach_dataset/train'
val_dir = 'peach_dataset/validation'

img_height, img_width = 150, 150
learning_rate = 0.001
epochs = 10
batch_size = 32

def create_data_generators(batch_size):
    train_datagen = ImageDataGenerator(rescale=1./255)
    val_datagen = ImageDataGenerator(rescale=1./255)

    train_generator = train_datagen.flow_from_directory(
        train_dir,
        target_size=(img_height, img_width),
        batch_size=batch_size,
        class_mode='categorical',
        shuffle=True
    )

    val_generator = val_datagen.flow_from_directory(
        val_dir,
        target_size=(img_height, img_width),
        batch_size=batch_size,
        class_mode='categorical',
        shuffle=False
    )
    return train_generator, val_generator

def build_cnn_model(input_shape, num_classes):


model = Sequential([
Conv2D(32, (3,3), activation='relu', input_shape=input_shape),
MaxPooling2D(2,2),

Conv2D(64, (3,3), activation='relu'),


MaxPooling2D(2,2),

Conv2D(128, (3,3), activation='relu'),


MaxPooling2D(2,2),

Flatten(),
Dense(128, activation='relu'),
Dropout(0.5),
Dense(num_classes, activation='softmax')
])
return model

# Prepare data generators


train_gen, val_gen = create_data_generators(batch_size)
num_classes = len(train_gen.class_indices)
input_shape = (img_height, img_width, 3)

# Model with Adam optimizer


model_adam = build_cnn_model(input_shape, num_classes)
model_adam.compile(optimizer=Adam(learning_rate=learning_rate),
loss='categorical_crossentropy',
metrics=['accuracy'])

history_adam = model_adam.fit(
train_gen,
validation_data=val_gen,
epochs=epochs,
verbose=2
)

# Model with RMSprop optimizer


model_rmsprop = build_cnn_model(input_shape, num_classes)
model_rmsprop.compile(optimizer=RMSprop(learning_rate=learning_rate),
loss='categorical_crossentropy',
metrics=['accuracy'])

history_rmsprop = model_rmsprop.fit(
train_gen,
validation_data=val_gen,
epochs=epochs,
verbose=2
)

# Plot validation accuracy and loss for comparison


plt.figure(figsize=(14,6))

plt.subplot(1,2,1)
plt.plot(history_adam.history['val_accuracy'], label='Adam')
plt.plot(history_rmsprop.history['val_accuracy'], label='RMSprop')
plt.title('Validation Accuracy')
plt.xlabel('Epoch')
plt.ylabel('Accuracy')
plt.legend()

plt.subplot(1,2,2)
plt.plot(history_adam.history['val_loss'], label='Adam')
plt.plot(history_rmsprop.history['val_loss'], label='RMSprop')
plt.title('Validation Loss')
plt.xlabel('Epoch')
plt.ylabel('Loss')
plt.legend()

plt.show()
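
The statement asks to record validation loss and accuracy, but the script above only plots them. A short sketch that tabulates the final and best values from the two history objects:

# Record final and best validation metrics for each optimizer
for name, hist in [('Adam', history_adam), ('RMSprop', history_rmsprop)]:
    print(f"{name}: final val_acc={hist.history['val_accuracy'][-1]:.4f}, "
          f"best val_acc={max(hist.history['val_accuracy']):.4f}, "
          f"final val_loss={hist.history['val_loss'][-1]:.4f}")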

Statement 23
Build and train a CNN model for Apple image classification that includes Dropout layers. Train
using 15 epochs and evaluate performance.
import tensorflow as tf
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D, MaxPooling2D, Flatten, Dense, Dropout
import matplotlib.pyplot as plt

# Set paths to your Apple image dataset folders


train_dir = 'apple_dataset/train'
val_dir = 'apple_dataset/validation'

img_height, img_width = 150, 150


batch_size = 32
epochs = 15

# Data generators with rescaling


train_datagen = ImageDataGenerator(rescale=1./255)
val_datagen = ImageDataGenerator(rescale=1./255)

train_generator = train_datagen.flow_from_directory(
train_dir,
target_size=(img_height, img_width),
batch_size=batch_size,
class_mode='categorical',
shuffle=True
)

val_generator = val_datagen.flow_from_directory(
val_dir,
target_size=(img_height, img_width),
batch_size=batch_size,
class_mode='categorical',
shuffle=False
)

num_classes = len(train_generator.class_indices)

# Build CNN model with Dropout


model = Sequential([
Conv2D(32, (3,3), activation='relu', input_shape=(img_height, img_width, 3)),
MaxPooling2D(2,2),

Conv2D(64, (3,3), activation='relu'),


MaxPooling2D(2,2),

Conv2D(128, (3,3), activation='relu'),


MaxPooling2D(2,2),

Flatten(),
Dense(128, activation='relu'),
Dropout(0.5), # Dropout layer to reduce overfitting
Dense(num_classes, activation='softmax')
])

model.compile(optimizer='adam',
loss='categorical_crossentropy',
metrics=['accuracy'])

# Train the model


history = model.fit(
train_generator,
validation_data=val_generator,
epochs=epochs,
verbose=2
)

# Evaluate performance on validation set


val_loss, val_accuracy = model.evaluate(val_generator)
print(f'Validation Loss: {val_loss:.4f}')
print(f'Validation Accuracy: {val_accuracy:.4f}')

# Plot training & validation accuracy and loss


plt.figure(figsize=(12,5))

plt.subplot(1,2,1)
plt.plot(history.history['accuracy'], label='Train Accuracy')
plt.plot(history.history['val_accuracy'], label='Val Accuracy')
plt.title('Training & Validation Accuracy')
plt.xlabel('Epoch')
plt.ylabel('Accuracy')
plt.legend()

plt.subplot(1,2,2)
plt.plot(history.history['loss'], label='Train Loss')
plt.plot(history.history['val_loss'], label='Val Loss')
plt.title('Training & Validation Loss')
plt.xlabel('Epoch')
plt.ylabel('Loss')
plt.legend()

plt.show()
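
For a per-class view of performance, a classification report can be added after training. This sketch assumes scikit-learn is installed; it relies on val_generator having shuffle=False (as set above) so that predictions align with val_generator.classes:

import numpy as np
from sklearn.metrics import classification_report

val_generator.reset()
preds = model.predict(val_generator, verbose=0)
pred_classes = np.argmax(preds, axis=1)
print(classification_report(val_generator.classes, pred_classes,
                            target_names=list(val_generator.class_indices.keys())))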
Statement 24
Split Grape image data into 70% train, 15% validation, and 15% test. Train a CNN for 10
epochs using a fixed learning rate of 0.001.

path_to_dataset/
class1/
img1.jpg
img2.jpg
class2/
img3.jpg
...

import tensorflow as tf
from tensorflow.keras import layers, models
from tensorflow.keras.optimizers import Adam
import matplotlib.pyplot as plt
import os

# Set dataset path


dataset_dir = 'path_to_dataset' # e.g. 'Grape_images/'

# Parameters
img_height, img_width = 150, 150
batch_size = 32
learning_rate = 0.001
epochs = 10 # the statement specifies 10 epochs
validation_split = 0.15 # for validation set
test_split = 0.15 # test set will be created separately below

# Load full dataset with validation split using image_dataset_from_directory


full_dataset = tf.keras.utils.image_dataset_from_directory(
dataset_dir,
shuffle=True,
image_size=(img_height, img_width),
batch_size=batch_size,
validation_split=validation_split + test_split,
subset="training",
seed=123
)

val_test_dataset = tf.keras.utils.image_dataset_from_directory(
dataset_dir,
shuffle=True,
image_size=(img_height, img_width),
batch_size=batch_size,
validation_split=validation_split + test_split,
subset="validation",
seed=123
)
# Split val_test_dataset into validation and test sets manually
val_batches = int(len(val_test_dataset)*validation_split/(validation_split + test_split))
val_dataset = val_test_dataset.take(val_batches)
test_dataset = val_test_dataset.skip(val_batches)

# Capture class names before mapping (the .map() call below drops the class_names attribute)
class_names = full_dataset.class_names
num_classes = len(class_names)

# Normalize pixel values to [0,1]
normalization_layer = layers.Rescaling(1./255)

full_dataset = full_dataset.map(lambda x, y: (normalization_layer(x), y))
val_dataset = val_dataset.map(lambda x, y: (normalization_layer(x), y))
test_dataset = test_dataset.map(lambda x, y: (normalization_layer(x), y))

# Build CNN model with Dropout

model = models.Sequential([
layers.Conv2D(32, (3,3), activation='relu', input_shape=(img_height, img_width, 3)),
layers.MaxPooling2D(2,2),
layers.Conv2D(64, (3,3), activation='relu'),
layers.MaxPooling2D(2,2),
layers.Conv2D(128, (3,3), activation='relu'),
layers.MaxPooling2D(2,2),
layers.Flatten(),
layers.Dropout(0.5),
layers.Dense(128, activation='relu'),
layers.Dense(num_classes, activation='softmax')
])

# Compile model
model.compile(
optimizer=Adam(learning_rate=learning_rate),
loss='sparse_categorical_crossentropy',
metrics=['accuracy']
)

# Train model
history = model.fit(
full_dataset,
validation_data=val_dataset,
epochs=epochs
)

# Evaluate on test data


test_loss, test_acc = model.evaluate(test_dataset)
print(f'Test accuracy: {test_acc:.4f}')
print(f'Test loss: {test_loss:.4f}')
# Plot training & validation accuracy and loss
plt.figure(figsize=(12,4))

plt.subplot(1,2,1)
plt.plot(history.history['accuracy'], label='Train Acc')
plt.plot(history.history['val_accuracy'], label='Val Acc')
plt.title('Accuracy')
plt.xlabel('Epoch')
plt.ylabel('Accuracy')
plt.legend()

plt.subplot(1,2,2)
plt.plot(history.history['loss'], label='Train Loss')
plt.plot(history.history['val_loss'], label='Val Loss')
plt.title('Loss')
plt.xlabel('Epoch')
plt.ylabel('Loss')
plt.legend()

plt.show()
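
Because the validation/test split of val_test_dataset is done at batch granularity with take() and skip(), it is worth confirming the resulting split sizes. A quick check, assuming the datasets above have known cardinality (true for image_dataset_from_directory pipelines):

# Report split sizes in batches (approximately 70/15/15 of the data)
print(f"Train batches: {len(full_dataset)}")
print(f"Validation batches: {len(val_dataset)}")
print(f"Test batches: {len(test_dataset)}")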
Statement 25
Use the LeNet architecture to classify the Cats and Dogs dataset, and plot training loss and
accuracy curves.

import tensorflow as tf
from tensorflow.keras import layers, models
import matplotlib.pyplot as plt
import os

# Set dataset path (assumes directory structure like:


# cats_and_dogs/
# cats/
# cat1.jpg
# dogs/
# dog1.jpg
dataset_dir = 'path_to_cats_and_dogs' # Change this path accordingly

# Parameters
img_height, img_width = 32, 32 # LeNet expects 32x32 inputs; we keep RGB channels and resize to 32x32
batch_size = 32
epochs = 15
learning_rate = 0.001

# Load dataset with 80/20 train-validation split


train_ds = tf.keras.utils.image_dataset_from_directory(
dataset_dir,
validation_split=0.2,
subset="training",
seed=123,
image_size=(img_height, img_width),
batch_size=batch_size
)

val_ds = tf.keras.utils.image_dataset_from_directory(
dataset_dir,
validation_split=0.2,
subset="validation",
seed=123,
image_size=(img_height, img_width),
batch_size=batch_size
)

# Normalize pixel values to [0,1]


normalization_layer = layers.Rescaling(1./255)

train_ds = train_ds.map(lambda x, y: (normalization_layer(x), y))


val_ds = val_ds.map(lambda x, y: (normalization_layer(x), y))

# Define LeNet architecture


def LeNet():
model = models.Sequential()
model.add(layers.Conv2D(6, kernel_size=(5,5), activation='tanh', input_shape=(img_height,
img_width, 3), padding='same'))
model.add(layers.AveragePooling2D())
model.add(layers.Conv2D(16, kernel_size=(5,5), activation='tanh'))
model.add(layers.AveragePooling2D())
model.add(layers.Flatten())
model.add(layers.Dense(120, activation='tanh'))
model.add(layers.Dense(84, activation='tanh'))
model.add(layers.Dense(2, activation='softmax')) # 2 classes: cat, dog
return model

model = LeNet()

# Compile model
model.compile(
optimizer=tf.keras.optimizers.Adam(learning_rate=learning_rate),
loss='sparse_categorical_crossentropy',
metrics=['accuracy']
)

# Train model
history = model.fit(
train_ds,
validation_data=val_ds,
epochs=epochs
)

# Plot training & validation accuracy and loss


plt.figure(figsize=(12,5))

plt.subplot(1,2,1)
plt.plot(history.history['accuracy'], label='Train Acc')
plt.plot(history.history['val_accuracy'], label='Val Acc')
plt.title('Training and Validation Accuracy')
plt.xlabel('Epoch')
plt.ylabel('Accuracy')
plt.legend()

plt.subplot(1,2,2)
plt.plot(history.history['loss'], label='Train Loss')
plt.plot(history.history['val_loss'], label='Val Loss')
plt.title('Training and Validation Loss')
plt.xlabel('Epoch')
plt.ylabel('Loss')
plt.legend()

plt.show()
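
To quantify LeNet's performance beyond the curves, a confusion matrix over the validation set can be computed. A minimal sketch, assuming scikit-learn is installed; each batch's labels and predictions are collected in the same pass so they stay aligned:

import numpy as np
from sklearn.metrics import confusion_matrix

y_true, y_pred = [], []
for images, labels in val_ds:
    probs = model.predict(images, verbose=0)
    y_true.extend(labels.numpy())
    y_pred.extend(np.argmax(probs, axis=1))

print("Confusion Matrix:")
print(confusion_matrix(y_true, y_pred))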
Statement 26
Use the MobileNet architecture to perform transfer learning on the Cats and Dogs dataset, and
evaluate model performance using a classification report.

import tensorflow as tf
from tensorflow.keras import layers, models
from tensorflow.keras.applications import MobileNet
from tensorflow.keras.applications.mobilenet import preprocess_input
from tensorflow.keras.preprocessing import image_dataset_from_directory
from sklearn.metrics import classification_report
import numpy as np
import os

# Dataset directory structure (example):


# cats_and_dogs/
# cats/
# cat1.jpg
# ...
# dogs/
# dog1.jpg
# ...

dataset_dir = 'path_to_cats_and_dogs' # Change to your actual path

# Parameters
img_height, img_width = 224, 224 # MobileNet default input size
batch_size = 32
epochs = 10
learning_rate = 0.0001

# Load datasets with 80/20 split


train_ds = image_dataset_from_directory(
dataset_dir,
validation_split=0.2,
subset="training",
seed=123,
image_size=(img_height, img_width),
batch_size=batch_size
)

val_ds = image_dataset_from_directory(
dataset_dir,
validation_split=0.2,
subset="validation",
seed=123,
image_size=(img_height, img_width),
batch_size=batch_size
)

# Capture class names before mapping (the .map() call below drops the class_names attribute)
class_names = train_ds.class_names

# Preprocess input for MobileNet
train_ds = train_ds.map(lambda x, y: (preprocess_input(x), y))
val_ds = val_ds.map(lambda x, y: (preprocess_input(x), y))

# Cache and prefetch for performance optimization


AUTOTUNE = tf.data.AUTOTUNE
train_ds = train_ds.cache().prefetch(buffer_size=AUTOTUNE)
val_ds = val_ds.cache().prefetch(buffer_size=AUTOTUNE)

# Load MobileNet base model with pretrained weights, exclude top layers
base_model = MobileNet(input_shape=(img_height, img_width, 3),
include_top=False,
weights='imagenet')

base_model.trainable = False # Freeze base model layers initially

# Add classification head


model = models.Sequential([
base_model,
layers.GlobalAveragePooling2D(),
layers.Dense(128, activation='relu'),
layers.Dropout(0.3),
layers.Dense(2, activation='softmax') # 2 classes: cats and dogs
])

# Compile model
model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=learning_rate),
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])

# Train the top layers first


history = model.fit(train_ds, validation_data=val_ds, epochs=epochs)

# Optional: Fine-tune some base model layers


base_model.trainable = True

# Fine-tune from this layer onwards


fine_tune_at = 100
for layer in base_model.layers[:fine_tune_at]:
layer.trainable = False

# Recompile with lower learning rate for fine-tuning


model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=learning_rate/10),
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])

# Continue training
fine_tune_epochs = 5
total_epochs = epochs + fine_tune_epochs

history_fine = model.fit(train_ds,
validation_data=val_ds,
epochs=total_epochs,
initial_epoch=history.epoch[-1])

# Evaluate on validation set and print classification report


# Extract true labels and predictions
y_true = []
y_pred = []

for images, labels in val_ds:


preds = model.predict(images)
y_true.extend(labels.numpy())
y_pred.extend(np.argmax(preds, axis=1))
print("Classification Report on Validation Set:")
print(classification_report(y_true, y_pred, target_names=class_names))
Statement 27
Build both CNN and DNN models for the CIFAR-10 dataset, and compare their accuracy and loss.

import tensorflow as tf
from tensorflow.keras import layers, models
import matplotlib.pyplot as plt

# Load CIFAR-10 dataset


(x_train, y_train), (x_test, y_test) = tf.keras.datasets.cifar10.load_data()

# Normalize pixel values to [0,1]


x_train, x_test = x_train / 255.0, x_test / 255.0

# Flatten labels
y_train = y_train.flatten()
y_test = y_test.flatten()

num_classes = 10

# Build CNN model


def build_cnn():
model = models.Sequential([
layers.Conv2D(32, (3,3), activation='relu', input_shape=(32,32,3)),
layers.MaxPooling2D(2,2),
layers.Conv2D(64, (3,3), activation='relu'),
layers.MaxPooling2D(2,2),
layers.Conv2D(128, (3,3), activation='relu'),
layers.Flatten(),
layers.Dense(128, activation='relu'),
layers.Dense(num_classes, activation='softmax')
])
return model

# Build DNN model


def build_dnn():
model = models.Sequential([
layers.Flatten(input_shape=(32,32,3)),
layers.Dense(512, activation='relu'),
layers.Dense(256, activation='relu'),
layers.Dense(128, activation='relu'),
layers.Dense(num_classes, activation='softmax')
])
return model

# Compile and train model helper


def compile_and_train(model, epochs=15):
model.compile(optimizer='adam',
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
history = model.fit(x_train, y_train,
validation_data=(x_test, y_test),
epochs=epochs,
batch_size=64,
verbose=2)
return history

# Train CNN
cnn_model = build_cnn()
print("Training CNN model...")
cnn_history = compile_and_train(cnn_model)

# Train DNN
dnn_model = build_dnn()
print("\nTraining DNN model...")
dnn_history = compile_and_train(dnn_model)

# Plot accuracy and loss comparison


plt.figure(figsize=(12,5))

# Accuracy plot
plt.subplot(1,2,1)
plt.plot(cnn_history.history['accuracy'], label='CNN Train Acc')
plt.plot(cnn_history.history['val_accuracy'], label='CNN Val Acc')
plt.plot(dnn_history.history['accuracy'], label='DNN Train Acc')
plt.plot(dnn_history.history['val_accuracy'], label='DNN Val Acc')
plt.title('Training and Validation Accuracy')
plt.xlabel('Epochs')
plt.ylabel('Accuracy')
plt.legend()

# Loss plot
plt.subplot(1,2,2)
plt.plot(cnn_history.history['loss'], label='CNN Train Loss')
plt.plot(cnn_history.history['val_loss'], label='CNN Val Loss')
plt.plot(dnn_history.history['loss'], label='DNN Train Loss')
plt.plot(dnn_history.history['val_loss'], label='DNN Val Loss')
plt.title('Training and Validation Loss')
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.legend()

plt.show()
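
A direct numeric comparison is useful alongside the plots. The sketch below evaluates both trained models on the test set and also reports parameter counts, since the DNN's dense layers over raw pixels typically use far more weights than the CNN's shared convolutional filters:

cnn_loss, cnn_acc = cnn_model.evaluate(x_test, y_test, verbose=0)
dnn_loss, dnn_acc = dnn_model.evaluate(x_test, y_test, verbose=0)
print(f"CNN: test_acc={cnn_acc:.4f}, test_loss={cnn_loss:.4f}, params={cnn_model.count_params():,}")
print(f"DNN: test_acc={dnn_acc:.4f}, test_loss={dnn_loss:.4f}, params={dnn_model.count_params():,}")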
Statement 28
Implement an RNN on the GOOGL.csv dataset and compare its training time and loss curve
with an LSTM model.

import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import time
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import SimpleRNN, LSTM, Dense
from tensorflow.keras.optimizers import Adam

# Load the GOOGL.csv dataset (make sure it's in the working directory)
data = pd.read_csv('GOOGL.csv')

# Assuming dataset has 'Date' and 'Close' columns; focus on 'Close' prices
close_prices = data['Close'].values.reshape(-1, 1)

# Normalize prices between 0 and 1


scaler = MinMaxScaler()
scaled_close = scaler.fit_transform(close_prices)

# Prepare time series sequences


def create_sequences(data, seq_length=20):
X, y = [], []
for i in range(len(data) - seq_length):
X.append(data[i:i+seq_length])
y.append(data[i+seq_length])
return np.array(X), np.array(y)

SEQ_LENGTH = 20
X, y = create_sequences(scaled_close, SEQ_LENGTH)

# Split into train and test sets (e.g., 80%-20%)


split = int(0.8 * len(X))
X_train, X_test = X[:split], X[split:]
y_train, y_test = y[:split], y[split:]

# Build Simple RNN model


def build_rnn():
model = Sequential([
SimpleRNN(50, activation='tanh', input_shape=(SEQ_LENGTH, 1)),
Dense(1)
])
model.compile(optimizer=Adam(), loss='mse')
return model

# Build LSTM model


def build_lstm():
model = Sequential([
LSTM(50, activation='tanh', input_shape=(SEQ_LENGTH, 1)),
Dense(1)
])
model.compile(optimizer=Adam(), loss='mse')
return model

# Train and record time + loss history


def train_model(model, epochs=30, batch_size=32):
start_time = time.time()
history = model.fit(X_train, y_train, epochs=epochs, batch_size=batch_size,
validation_data=(X_test, y_test), verbose=2)
end_time = time.time()
training_time = end_time - start_time
return history, training_time

# Prepare data shape (samples, seq_length, features)


X_train = X_train.reshape((X_train.shape[0], SEQ_LENGTH, 1))
X_test = X_test.reshape((X_test.shape[0], SEQ_LENGTH, 1))

# Train RNN
print("Training Simple RNN model...")
rnn_model = build_rnn()
rnn_history, rnn_time = train_model(rnn_model)

# Train LSTM
print("\nTraining LSTM model...")
lstm_model = build_lstm()
lstm_history, lstm_time = train_model(lstm_model)

# Plot loss curves


plt.figure(figsize=(10,5))
plt.plot(rnn_history.history['loss'], label='RNN Train Loss')
plt.plot(rnn_history.history['val_loss'], label='RNN Val Loss')
plt.plot(lstm_history.history['loss'], label='LSTM Train Loss')
plt.plot(lstm_history.history['val_loss'], label='LSTM Val Loss')
plt.title('Training and Validation Loss')
plt.xlabel('Epoch')
plt.ylabel('Loss (MSE)')
plt.legend()
plt.show()

# Print training time comparison


print(f"Simple RNN Training Time: {rnn_time:.2f} seconds")
print(f"LSTM Training Time: {lstm_time:.2f} seconds")
Statement 29
Use transfer learning with VGG16 on the Cats and Dogs dataset, freezing the first 4 layers;
train the classifier and evaluate model performance using a classification report.

cats_and_dogs/
├── train/
│ ├── cats/
│ └── dogs/
├── validation/
│ ├── cats/
│ └── dogs/

import tensorflow as tf
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.applications import VGG16
from tensorflow.keras.layers import Flatten, Dense, Dropout
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import Adam
from sklearn.metrics import classification_report, confusion_matrix
import numpy as np
import os

# Define paths (adjust to your dataset folder structure)


base_dir = 'cats_and_dogs' # folder containing 'train' and 'validation' subfolders
train_dir = os.path.join(base_dir, 'train')
val_dir = os.path.join(base_dir, 'validation')

# Parameters
IMG_SIZE = (224, 224)
BATCH_SIZE = 32
EPOCHS = 10
LEARNING_RATE = 0.0001

# Data Generators
train_datagen = ImageDataGenerator(
rescale=1./255,
horizontal_flip=True,
rotation_range=15,
zoom_range=0.1
)
val_datagen = ImageDataGenerator(rescale=1./255)

train_generator = train_datagen.flow_from_directory(
train_dir,
target_size=IMG_SIZE,
batch_size=BATCH_SIZE,
class_mode='binary'
)

val_generator = val_datagen.flow_from_directory(
val_dir,
target_size=IMG_SIZE,
batch_size=BATCH_SIZE,
class_mode='binary',
shuffle=False
)

# Load VGG16 base model without top layers


base_model = VGG16(weights='imagenet', include_top=False, input_shape=(*IMG_SIZE, 3))

# Freeze first 4 layers


for layer in base_model.layers[:4]:
layer.trainable = False
for layer in base_model.layers[4:]:
layer.trainable = True

# Add custom classification head


x = base_model.output
x = Flatten()(x)
x = Dense(256, activation='relu')(x)
x = Dropout(0.5)(x)
output = Dense(1, activation='sigmoid')(x) # Binary classification

model = Model(inputs=base_model.input, outputs=output)

# Compile model
model.compile(optimizer=Adam(learning_rate=LEARNING_RATE),
loss='binary_crossentropy',
metrics=['accuracy'])

# Train model
history = model.fit(
train_generator,
epochs=EPOCHS,
validation_data=val_generator
)
# Predict on validation data
val_generator.reset()
preds = model.predict(val_generator)
predicted_classes = (preds > 0.5).astype(int).reshape(-1)

# True classes
true_classes = val_generator.classes

# Classification report
target_names = list(train_generator.class_indices.keys())
print(classification_report(true_classes, predicted_classes, target_names=target_names))

# Optionally print confusion matrix


print("Confusion Matrix:")
print(confusion_matrix(true_classes, predicted_classes))
Statement 30
Load and visualize sample images from the Potato dataset, and train a CNN for 5 epochs.

potato_dataset/
├── class1/
├── class2/
└── ...

import matplotlib.pyplot as plt


import tensorflow as tf
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.layers import Conv2D, MaxPooling2D, Flatten, Dense, Dropout
from tensorflow.keras.models import Sequential
import os
import numpy as np

# Define dataset path - update this to your local Potato dataset directory
dataset_dir = 'potato_dataset' # should contain subfolders for each class

# Parameters
IMG_SIZE = (128, 128)
BATCH_SIZE = 32
EPOCHS = 5

# Single generator with rescaling only, using an 80/20 train/validation split
train_datagen = ImageDataGenerator(
rescale=1./255,
validation_split=0.2
)
train_generator = train_datagen.flow_from_directory(
dataset_dir,
target_size=IMG_SIZE,
batch_size=BATCH_SIZE,
class_mode='categorical',
subset='training',
shuffle=True
)

val_generator = train_datagen.flow_from_directory(
dataset_dir,
target_size=IMG_SIZE,
batch_size=BATCH_SIZE,
class_mode='categorical',
subset='validation',
shuffle=False
)

# Visualize some sample images


def plot_sample_images(generator):
images, labels = next(generator) # batch of images and labels
class_indices = {v: k for k, v in generator.class_indices.items()}
plt.figure(figsize=(10, 10))
for i in range(9):
plt.subplot(3, 3, i + 1)
plt.imshow(images[i])
label_index = np.argmax(labels[i])
plt.title(class_indices[label_index])
plt.axis('off')
plt.show()

plot_sample_images(train_generator)

# Build simple CNN model


model = Sequential([
Conv2D(32, (3,3), activation='relu', input_shape=(*IMG_SIZE, 3)),
MaxPooling2D(2, 2),

Conv2D(64, (3,3), activation='relu'),


MaxPooling2D(2, 2),

Conv2D(128, (3,3), activation='relu'),


MaxPooling2D(2, 2),

Flatten(),
Dense(128, activation='relu'),
Dropout(0.5),
Dense(train_generator.num_classes, activation='softmax')
])

model.compile(optimizer='adam',
loss='categorical_crossentropy',
metrics=['accuracy'])

# Train model
history = model.fit(
train_generator,
epochs=EPOCHS,
validation_data=val_generator
)
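
The script above trains for 5 epochs but never visualizes the result. A short plot of the recorded history (matplotlib is already imported at the top of this script):

plt.figure(figsize=(10, 4))
plt.subplot(1, 2, 1)
plt.plot(history.history['accuracy'], label='Train Acc')
plt.plot(history.history['val_accuracy'], label='Val Acc')
plt.title('Accuracy')
plt.xlabel('Epoch')
plt.legend()

plt.subplot(1, 2, 2)
plt.plot(history.history['loss'], label='Train Loss')
plt.plot(history.history['val_loss'], label='Val Loss')
plt.title('Loss')
plt.xlabel('Epoch')
plt.legend()

plt.show()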
Statement 31
Implement LSTM models on GOOGL.csv with learning rates 0.001 and 0.0001 for 20 and 50
epochs. Compare accuracy and convergence.

your_project_folder/
├── GOOGL.csv
├── lstm_googl.py # (your code file, if saving separately)

import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import LSTM, Dense, Dropout
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import mean_squared_error

# Load dataset
df = pd.read_csv('GOOGL.csv') # Ensure file is in your working directory
df = df[['Date', 'Close']]
df['Date'] = pd.to_datetime(df['Date'])
df.sort_values('Date', inplace=True)
df.dropna(inplace=True)

# Normalize closing prices


scaler = MinMaxScaler()
scaled_data = scaler.fit_transform(df[['Close']])

# Prepare sequences for LSTM


def create_sequences(data, time_steps=60):
X, y = [], []
for i in range(time_steps, len(data)):
X.append(data[i - time_steps:i])
y.append(data[i])
return np.array(X), np.array(y)

time_steps = 60
X, y = create_sequences(scaled_data, time_steps)

# Split into train/test


train_size = int(len(X) * 0.8)
X_train, X_test = X[:train_size], X[train_size:]
y_train, y_test = y[:train_size], y[train_size:]

# Define function to build LSTM model


def build_model(learning_rate):
model = Sequential([
LSTM(50, return_sequences=True, input_shape=(X_train.shape[1], 1)),
Dropout(0.2),
LSTM(50),
Dropout(0.2),
Dense(1)
])
optimizer = tf.keras.optimizers.Adam(learning_rate=learning_rate)
model.compile(loss='mean_squared_error', optimizer=optimizer, metrics=['mae'])
return model

# Train models with different settings


configs = [
{'lr': 0.001, 'epochs': 20},
{'lr': 0.0001, 'epochs': 50}
]

histories = []

for cfg in configs:


print(f"\nTraining with LR={cfg['lr']} for {cfg['epochs']} epochs")
model = build_model(cfg['lr'])
history = model.fit(
X_train, y_train,
epochs=cfg['epochs'],
batch_size=32,
validation_data=(X_test, y_test),
verbose=1
)
histories.append((cfg, history))
# Plot training and validation loss
for cfg, history in histories:
plt.plot(history.history['val_loss'], label=f"Val Loss (LR={cfg['lr']}, E={cfg['epochs']})")
plt.plot(history.history['loss'], linestyle='--', label=f"Train Loss (LR={cfg['lr']}, E={cfg['epochs']})")

plt.title("LSTM Loss Comparison")


plt.xlabel("Epochs")
plt.ylabel("Loss (MSE)")
plt.legend()
plt.grid(True)
plt.show()
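
For a numeric view of convergence alongside the curves, the final validation metrics of each configuration can be printed. A small sketch over the histories collected above (the 'val_mae' key comes from compiling with metrics=['mae']):

for cfg, history in histories:
    print(f"LR={cfg['lr']}, epochs={cfg['epochs']}: "
          f"final val MSE={history.history['val_loss'][-1]:.6f}, "
          f"final val MAE={history.history['val_mae'][-1]:.6f}")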
Statement 32
Implement a CNN on the Tomato dataset using batch sizes of 32 and 64 separately. Keep the
learning rate fixed at 0.0001 and compare results.

project_folder/
├── tomato_data/
│ ├── train/
│ │ ├── class1/
│ │ └── class2/
│ ├── val/
│ │ ├── class1/
│ │ └── class2/

import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D, MaxPooling2D, Flatten, Dense, Dropout
from tensorflow.keras.preprocessing.image import ImageDataGenerator
import matplotlib.pyplot as plt

# Constants
IMG_SIZE = (128, 128)
EPOCHS = 10
LEARNING_RATE = 0.0001
DATA_DIR = 'tomato_data' # Adjust path if needed

# Data preparation
datagen = ImageDataGenerator(rescale=1./255)

train_batches = {}
val_batches = {}

for batch_size in [32, 64]:


train_batches[batch_size] = datagen.flow_from_directory(
DATA_DIR + '/train',
target_size=IMG_SIZE,
batch_size=batch_size,
class_mode='categorical',
shuffle=True
)

val_batches[batch_size] = datagen.flow_from_directory(
DATA_DIR + '/val',
target_size=IMG_SIZE,
batch_size=batch_size,
class_mode='categorical',
shuffle=False
)

# Define CNN model builder


def build_model(input_shape, num_classes):
model = Sequential([
Conv2D(32, (3, 3), activation='relu', input_shape=input_shape),
MaxPooling2D(2, 2),
Conv2D(64, (3, 3), activation='relu'),
MaxPooling2D(2, 2),
Flatten(),
Dense(128, activation='relu'),
Dropout(0.5),
Dense(num_classes, activation='softmax')
])
optimizer = tf.keras.optimizers.Adam(learning_rate=LEARNING_RATE)
model.compile(optimizer=optimizer, loss='categorical_crossentropy', metrics=['accuracy'])
return model

# Train and evaluate for both batch sizes


histories = {}

for batch_size in [32, 64]:


print(f"\nTraining with batch size {batch_size}")
model = build_model((IMG_SIZE[0], IMG_SIZE[1], 3), train_batches[batch_size].num_classes)
history = model.fit(
train_batches[batch_size],
validation_data=val_batches[batch_size],
epochs=EPOCHS,
verbose=1
)
histories[batch_size] = history

# Plot results
for metric in ['loss', 'accuracy']:
plt.figure(figsize=(8, 5))
for batch_size in [32, 64]:
plt.plot(histories[batch_size].history[metric], label=f'Train {metric} (BS={batch_size})')
plt.plot(histories[batch_size].history['val_' + metric], linestyle='--', label=f'Val {metric} (BS={batch_size})')
plt.title(f'CNN {metric.capitalize()} Comparison')
plt.xlabel('Epochs')
plt.ylabel(metric.capitalize())
plt.legend()
plt.grid(True)
plt.show()
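
Beyond the curves, one compact way to compare the two runs is the best validation accuracy each batch size reached and the epoch at which it occurred. A minimal sketch over the histories dictionary above:

import numpy as np

for batch_size in [32, 64]:
    val_acc = histories[batch_size].history['val_accuracy']
    best_epoch = int(np.argmax(val_acc))
    print(f"Batch size {batch_size}: best val_accuracy={val_acc[best_epoch]:.4f} "
          f"at epoch {best_epoch + 1}")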
Statement 34
Implement a CNN model on Potato leaf images using the Adam optimizer with a learning rate of
0.01, and evaluate model performance.

project_folder/
├── potato_data/
│ ├── train/
│ │ ├── class1/
│ │ └── class2/
│ └── val/
│ ├── class1/
│ └── class2/

import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D, MaxPooling2D, Flatten, Dense, Dropout
from tensorflow.keras.preprocessing.image import ImageDataGenerator
import matplotlib.pyplot as plt
from sklearn.metrics import classification_report, confusion_matrix
import numpy as np

# Constants
IMG_SIZE = (128, 128)
BATCH_SIZE = 32
EPOCHS = 10
LEARNING_RATE = 0.01 # the statement specifies a learning rate of 0.01
DATA_DIR = 'potato_data' # change to your dataset path

# Image preprocessing
datagen = ImageDataGenerator(rescale=1./255)

train_gen = datagen.flow_from_directory(
DATA_DIR + '/train',
target_size=IMG_SIZE,
batch_size=BATCH_SIZE,
class_mode='categorical',
shuffle=True
)

val_gen = datagen.flow_from_directory(
DATA_DIR + '/val',
target_size=IMG_SIZE,
batch_size=BATCH_SIZE,
class_mode='categorical',
shuffle=False
)

# Build CNN model


model = Sequential([
Conv2D(32, (3,3), activation='relu', input_shape=(IMG_SIZE[0], IMG_SIZE[1], 3)),
MaxPooling2D(2,2),
Conv2D(64, (3,3), activation='relu'),
MaxPooling2D(2,2),
Flatten(),
Dense(128, activation='relu'),
Dropout(0.5),
Dense(train_gen.num_classes, activation='softmax')
])
# Compile model with Adam optimizer
model.compile(
optimizer=tf.keras.optimizers.Adam(learning_rate=LEARNING_RATE),
loss='categorical_crossentropy',
metrics=['accuracy']
)

# Train the model


history = model.fit(
train_gen,
validation_data=val_gen,
epochs=EPOCHS,
verbose=1
)

# Evaluate model performance


val_loss, val_acc = model.evaluate(val_gen)
print(f"\nValidation Loss: {val_loss:.4f}")
print(f"Validation Accuracy: {val_acc:.4f}")

# Classification Report
val_gen.reset()
y_pred = model.predict(val_gen)
y_pred_classes = np.argmax(y_pred, axis=1)
y_true = val_gen.classes
class_labels = list(val_gen.class_indices.keys())

print("\nClassification Report:")
print(classification_report(y_true, y_pred_classes, target_names=class_labels))

# Plotting training curves


plt.figure(figsize=(12,5))
plt.subplot(1,2,1)
plt.plot(history.history['accuracy'], label='Train Acc')
plt.plot(history.history['val_accuracy'], label='Val Acc')
plt.title('Accuracy')
plt.xlabel('Epochs')
plt.ylabel('Accuracy')
plt.legend()

plt.subplot(1,2,2)
plt.plot(history.history['loss'], label='Train Loss')
plt.plot(history.history['val_loss'], label='Val Loss')
plt.title('Loss')
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.legend()

plt.tight_layout()
plt.show()
Statement 35
Build a Deep Neural Network for Fashion MNIST Classification

● Load Fashion MNIST dataset
● Preprocess the data using standardization
● Define a feed-forward neural network with 3 Dense layers
● Use RMSprop optimizer and categorical crossentropy loss
● Train the model for 15 epochs and evaluate performance
● Plot the training and validation curves

import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Flatten
from tensorflow.keras.optimizers import RMSprop
from tensorflow.keras.utils import to_categorical
import matplotlib.pyplot as plt

# Load Fashion MNIST dataset


(X_train, y_train), (X_test, y_test) = tf.keras.datasets.fashion_mnist.load_data()

# Standardization (mean=0, std=1), using statistics computed on the training set
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
mean, std = X_train.mean(), X_train.std()
X_train = (X_train - mean) / std
X_test = (X_test - mean) / std
# One-hot encoding of labels
y_train_cat = to_categorical(y_train, 10)
y_test_cat = to_categorical(y_test, 10)

# Define the model


model = Sequential([
Flatten(input_shape=(28, 28)),
Dense(128, activation='relu'),
Dense(64, activation='relu'),
Dense(10, activation='softmax')
])

# Compile the model


model.compile(
optimizer=RMSprop(learning_rate=0.001),
loss='categorical_crossentropy',
metrics=['accuracy']
)

# Train the model


history = model.fit(
X_train, y_train_cat,
validation_data=(X_test, y_test_cat),
epochs=15,
batch_size=32,
verbose=1
)

# Evaluate the model


loss, accuracy = model.evaluate(X_test, y_test_cat)
print(f"\nTest Accuracy: {accuracy:.4f}")
print(f"Test Loss: {loss:.4f}")

# Plot training and validation curves


plt.figure(figsize=(12,5))

# Accuracy plot
plt.subplot(1,2,1)
plt.plot(history.history['accuracy'], label='Train Acc')
plt.plot(history.history['val_accuracy'], label='Val Acc')
plt.title('Training and Validation Accuracy')
plt.xlabel('Epochs')
plt.ylabel('Accuracy')
plt.legend()
# Loss plot
plt.subplot(1,2,2)
plt.plot(history.history['loss'], label='Train Loss')
plt.plot(history.history['val_loss'], label='Val Loss')
plt.title('Training and Validation Loss')
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.legend()

plt.tight_layout()
plt.show()
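
For per-class performance on Fashion MNIST, a classification report can be appended; the class-name list below follows the standard Fashion MNIST label order, and y_test holds the original integer labels:

import numpy as np
from sklearn.metrics import classification_report

class_names = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat',
               'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']

y_pred = np.argmax(model.predict(X_test, verbose=0), axis=1)
print(classification_report(y_test, y_pred, target_names=class_names))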
