
TA1_U1_DL.ipynb - Colab

import numpy as np
import tensorflow as tf
from tensorflow.keras import layers, models
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler, LabelEncoder
from sklearn.datasets import load_iris
import matplotlib.pyplot as plt

# Load Iris dataset
iris = load_iris()
X = iris.data
y = iris.target

# Encode the target variable. iris.target is already integer-encoded (0, 1, 2),
# so LabelEncoder is a no-op here but is kept for generality; to_categorical
# then one-hot encodes the three classes.
encoder = LabelEncoder()
y = encoder.fit_transform(y)
y = tf.keras.utils.to_categorical(y, 3)
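A quick shape check (an optional sketch, relying on y from the cell above) confirms the one-hot encoding:

# Sanity check: y is now one-hot with shape (150, 3).
print(y.shape)  # (150, 3)
print(y[0])     # [1. 0. 0.] -- the first sample belongs to class 0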

# Split dataset into training and testing sets (80/20)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)

# Standardize the input data (fit the scaler on the training set only,
# then apply the same transform to the test set to avoid leakage)
scaler = StandardScaler()
X_train = scaler.fit_transform(X_train)
X_test = scaler.transform(X_test)
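As a sanity check (optional; uses X_train from the cell above), the standardized training features should have near-zero mean and unit variance per column:

# Each column of X_train should now be approximately N(0, 1).
print(X_train.mean(axis=0).round(3))  # values close to 0
print(X_train.std(axis=0).round(3))   # values close to 1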

def build_model(activation_function='relu'):
    model = models.Sequential()
    model.add(layers.Dense(64, input_shape=(4,), activation=activation_function,
                           kernel_initializer='he_normal'))  # He initialization
    model.add(layers.BatchNormalization())  # Batch normalization
    model.add(layers.Dense(32, activation=activation_function))
    model.add(layers.BatchNormalization())  # Batch normalization
    model.add(layers.Dense(3, activation='softmax'))  # Output layer for 3-class classification

    model.compile(optimizer='adam',
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])
    return model
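To inspect the resulting architecture before training, a quick usage sketch; model.summary() prints the layer stack and parameter counts for the Dense/BatchNormalization layers defined above:

model = build_model('relu')
model.summary()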

# Experimenting with different batch sizes
batch_sizes = [8, 16, 32, 64]

history_dict = {}
for batch_size in batch_sizes:
    print(f"Training with batch size: {batch_size}")
    model = build_model(activation_function='relu')
    history = model.fit(X_train, y_train,
                        epochs=10,
                        batch_size=batch_size,
                        validation_data=(X_test, y_test),
                        verbose=1)
    history_dict[batch_size] = history

(training output for the smaller batch sizes truncated in the page capture; the log resumes mid-run)
Epoch 5/10
4/4 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - accuracy: 0.7652 - loss: 0.5695 - val_accuracy: 0.6000 - val_loss: 0.7729
Epoch 6/10
4/4 ━━━━━━━━━━━━━━━━━━━━ 0s 13ms/step - accuracy: 0.8240 - loss: 0.4026 - val_accuracy: 0.6333 - val_loss: 0.6450
Epoch 7/10
4/4 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - accuracy: 0.8475 - loss: 0.3988 - val_accuracy: 0.7000 - val_loss: 0.5472
Epoch 8/10
4/4 ━━━━━━━━━━━━━━━━━━━━ 0s 11ms/step - accuracy: 0.8913 - loss: 0.2886 - val_accuracy: 0.9000 - val_loss: 0.4759
Epoch 9/10
4/4 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - accuracy: 0.9544 - loss: 0.2337 - val_accuracy: 0.9000 - val_loss: 0.4278
Epoch 10/10
4/4 ━━━━━━━━━━━━━━━━━━━━ 0s 11ms/step - accuracy: 0.9300 - loss: 0.2688 - val_accuracy: 0.9000 - val_loss: 0.3947
Training with batch size: 64
Epoch 1/10
2/2 ━━━━━━━━━━━━━━━━━━━━ 2s 242ms/step - accuracy: 0.2514 - loss: 1.3962 - val_accuracy: 0.4000 - val_loss: 1.0408
Epoch 2/10
2/2 ━━━━━━━━━━━━━━━━━━━━ 0s 36ms/step - accuracy: 0.3597 - loss: 1.1184 - val_accuracy: 0.5667 - val_loss: 0.9246
Epoch 3/10
2/2 ━━━━━━━━━━━━━━━━━━━━ 0s 31ms/step - accuracy: 0.5809 - loss: 0.8581 - val_accuracy: 0.6333 - val_loss: 0.8295
Epoch 4/10
2/2 ━━━━━━━━━━━━━━━━━━━━ 0s 46ms/step - accuracy: 0.6368 - loss: 0.7838 - val_accuracy: 0.7000 - val_loss: 0.7524
Epoch 5/10
2/2 ━━━━━━━━━━━━━━━━━━━━ 0s 30ms/step - accuracy: 0.7774 - loss: 0.5761 - val_accuracy: 0.8000 - val_loss: 0.6902
Epoch 6/10
2/2 ━━━━━━━━━━━━━━━━━━━━ 0s 35ms/step - accuracy: 0.8156 - loss: 0.4862 - val_accuracy: 0.8667 - val_loss: 0.6404
Epoch 7/10
2/2 ━━━━━━━━━━━━━━━━━━━━ 0s 30ms/step - accuracy: 0.8701 - loss: 0.3987 - val_accuracy: 0.8667 - val_loss: 0.5990
Epoch 8/10
2/2 ━━━━━━━━━━━━━━━━━━━━ 0s 33ms/step - accuracy: 0.9132 - loss: 0.3316 - val_accuracy: 0.8667 - val_loss: 0.5647
Epoch 9/10
2/2 ━━━━━━━━━━━━━━━━━━━━ 0s 31ms/step - accuracy: 0.9399 - loss: 0.2672 - val_accuracy: 0.8333 - val_loss: 0.5376
Epoch 10/10
2/2 ━━━━━━━━━━━━━━━━━━━━ 0s 32ms/step - accuracy: 0.9458 - loss: 0.2328 - val_accuracy: 0.8333 - val_loss: 0.5160
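Beyond eyeballing the logs, the runs can be summarized numerically; this sketch (assuming history_dict was populated by the training loop above) prints each run's final-epoch validation accuracy:

# Final validation accuracy per batch size (sketch; assumes the
# training loop above has populated history_dict).
for batch_size, history in history_dict.items():
    val_acc = history.history['val_accuracy'][-1]
    print(f"Batch size {batch_size}: final val_accuracy = {val_acc:.3f}")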

# Plot training accuracy and validation accuracy for different batch sizes
plt.figure(figsize=(14, 7))

# Accuracy Plot
plt.subplot(1, 2, 1)
for batch_size in batch_sizes:
    plt.plot(history_dict[batch_size].history['accuracy'], label=f'Batch Size {batch_size}')
plt.title('Training Accuracy vs Batch Size')
plt.xlabel('Epochs')
plt.ylabel('Accuracy')
plt.legend()

# Loss Plot
plt.subplot(1, 2, 2)
for batch_size in batch_sizes:
    plt.plot(history_dict[batch_size].history['loss'], label=f'Batch Size {batch_size}')
plt.title('Training Loss vs Batch Size')
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.legend()

plt.show()
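The figure above plots training metrics only; a companion sketch using the same history_dict makes the batch-size comparison on held-out data explicit:

# Companion plot (sketch): validation metrics per batch size.
plt.figure(figsize=(14, 7))

# Validation accuracy
plt.subplot(1, 2, 1)
for batch_size in batch_sizes:
    plt.plot(history_dict[batch_size].history['val_accuracy'], label=f'Batch Size {batch_size}')
plt.title('Validation Accuracy vs Batch Size')
plt.xlabel('Epochs')
plt.ylabel('Accuracy')
plt.legend()

# Validation loss
plt.subplot(1, 2, 2)
for batch_size in batch_sizes:
    plt.plot(history_dict[batch_size].history['val_loss'], label=f'Batch Size {batch_size}')
plt.title('Validation Loss vs Batch Size')
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.legend()

plt.show()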
