Final DL
1. Using NumPy
import numpy as np
# Sigmoid activation function
def sigmoid(x):
    return 1 / (1 + np.exp(-x))
# Derivative of sigmoid
def sigmoid_derivative(x):
    return x * (1 - x)
# Input and output
X = np.array([[0, 0],
              [0, 1],
              [1, 0],
              [1, 1]])
y = np.array([[0],
              [1],
              [1],
              [0]])  # XOR problem
# Initialize weights randomly
np.random.seed(1)
weights_1 = np.random.rand(2, 4)
weights_2 = np.random.rand(4, 1)
# Backpropagation
error = y - output
d_output = error * sigmoid_derivative(output)
d_layer1 = d_output.dot(weights_2.T) * sigmoid_derivative(layer1)
# Update weights
weights_2 += layer1.T.dot(d_output)
weights_1 += X.T.dot(d_layer1)
print("Final output:")
print(output)
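Note: the NumPy listing above never computes layer1 or output, and it applies only a single weight update. A minimal completion, assuming the standard single-hidden-layer forward pass implied by the backpropagation code and an arbitrary epoch count, could look like this (the loop and epoch count are assumptions, not part of the original listing):

# Assumed completion: forward pass plus repeated updates, reusing the backpropagation lines above
for epoch in range(10000):
    # Forward pass
    layer1 = sigmoid(X.dot(weights_1))
    output = sigmoid(layer1.dot(weights_2))
    # Backpropagation
    error = y - output
    d_output = error * sigmoid_derivative(output)
    d_layer1 = d_output.dot(weights_2.T) * sigmoid_derivative(layer1)
    # Update weights
    weights_2 += layer1.T.dot(d_output)
    weights_1 += X.T.dot(d_layer1)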
2. Using PyTorch
import torch
import torch.nn as nn
import torch.optim as optim
criterion = nn.MSELoss()
optimizer = optim.SGD(model.parameters(), lr=0.1)
# Training loop
for epoch in range(10000):
    # Forward pass
    outputs = model(X)
    loss = criterion(outputs, y)
    # Backward pass
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    if epoch % 1000 == 0:
        print(f'Epoch {epoch}, Loss: {loss.item():.4f}')
# Final prediction
with torch.no_grad():
    predicted = model(X).round()
print("\nPredicted outputs:")
print(predicted)
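Note: the PyTorch listing uses model, X, and y that are not defined in the excerpt above. A minimal sketch of those definitions, assuming the same XOR task and a single hidden layer of four units (the architecture is an assumption), is:

# Assumed data and model (not shown in the original listing)
X = torch.tensor([[0., 0.], [0., 1.], [1., 0.], [1., 1.]])
y = torch.tensor([[0.], [1.], [1.], [0.]])
model = nn.Sequential(
    nn.Linear(2, 4),
    nn.Sigmoid(),
    nn.Linear(4, 1),
    nn.Sigmoid()
)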
Output:
Marks
Practical – 2
Aim: To design a neural network (NN) model with a single hidden layer for
addressing classification problems.
Program:
import numpy as np
# Sigmoid activation
def sigmoid(x):
    return 1 / (1 + np.exp(-x))
# Derivative of sigmoid
def sigmoid_derivative(x):
    return x * (1 - x)
np.random.seed(42)
input_size = 2
hidden_size = 4
output_size = 1
# Backpropagation
error = y - output
d_output = error * sigmoid_derivative(output)
if epoch % 1000 == 0:
    loss = np.mean(np.square(error))
    print(f"Epoch {epoch}, Loss: {loss:.4f}")
print("\nFinal predictions:")
print(np.round(output, 2))
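Note: the program above defines the layer sizes and the output-layer error, but omits the training data, weight initialization, forward pass, hidden-layer gradients, weight updates, and the surrounding training loop. A minimal sketch of those missing pieces (the toy XOR-style dataset, the weight names W1/W2, and the epoch count are assumptions) is:

# Assumed completion of the listing above
X = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
y = np.array([[0], [1], [1], [0]])
W1 = np.random.rand(input_size, hidden_size)
W2 = np.random.rand(hidden_size, output_size)
for epoch in range(10000):
    # Forward pass
    hidden = sigmoid(X.dot(W1))
    output = sigmoid(hidden.dot(W2))
    # Backpropagation (error and d_output repeated here from the listing)
    error = y - output
    d_output = error * sigmoid_derivative(output)
    d_hidden = d_output.dot(W2.T) * sigmoid_derivative(hidden)
    # Weight updates
    W2 += hidden.T.dot(d_output)
    W1 += X.T.dot(d_hidden)
    # (the loss-printing block from the listing also belongs inside this loop)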
Output:
Conclusion:
Rubrics: Problem Recognition (2) | Knowledge (2) | Logic Building (2) | Completeness and accuracy (2) | Ethics (2) | Total
Each criterion is graded Good (2) or Avg. (1).
Practical – 3
Aim: Design a neural network for classifying movie reviews (Binary
Classification) using the IMDB dataset.
Program:
import torch
import torch.nn as nn
import torch.optim as optim
from torchtext.datasets import IMDB
from torchtext.data.utils import get_tokenizer
from torchtext.vocab import build_vocab_from_iterator
from torch.utils.data import DataLoader
from torch.nn.utils.rnn import pad_sequence
# Tokenizer
tokenizer = get_tokenizer("basic_english")
# Build vocabulary
def yield_tokens(data_iter):
    for label, text in data_iter:
        yield tokenizer(text)
train_data = list(IMDB(split='train'))
vocab = build_vocab_from_iterator(yield_tokens(train_data), specials=["<pad>"])
vocab.set_default_index(vocab["<pad>"])
# Pipelines
def encode(text):
    return torch.tensor(vocab(tokenizer(text)), dtype=torch.long)
def label_to_num(label):
    return 1 if label == 'pos' else 0
# Collate function
def collate(batch):
    texts = [encode(text) for label, text in batch]
# DataLoader
train_loader = DataLoader(train_data, batch_size=16,
                          shuffle=True, collate_fn=collate)
# Simple model
class SentimentNet(nn.Module):
    def __init__(self, vocab_size, embed_dim):
        super().__init__()
        self.embedding = nn.Embedding(vocab_size, embed_dim)
        self.fc = nn.Linear(embed_dim, 1)
        self.sigmoid = nn.Sigmoid()
# Initialize
model = SentimentNet(len(vocab), embed_dim=64)
loss_fn = nn.BCELoss()
optimizer = optim.Adam(model.parameters(), lr=0.001)
# Training loop
for epoch in range(3):
    total_loss = 0
    for texts, labels in train_loader:
        preds = model(texts)
        loss = loss_fn(preds, labels)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        total_loss += loss.item()
    print(f"Epoch {epoch+1}, Loss: {total_loss:.4f}")
Output:
Conclusion:
We built a binary classifier for IMDB reviews (positive or negative). The
network learned word representations through an embedding layer, showing
how deep learning handles text classification.
Marks
Practical – 4
Aim: To implement a multilayer perceptron (MLP) model
for prediction tasks, such as house prices (Boston Housing Price
Dataset).
Program:
import torch
import torch.nn as nn
import torch.optim as optim
from sklearn.datasets import load_boston  # note: load_boston was removed in scikit-learn 1.2; requires an older version
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
# Load dataset
boston = load_boston()
X = boston.data
y = boston.target
# Train-test split
X_train, X_test, y_train, y_test = train_test_split(X, y,
                                                    test_size=0.2, random_state=42)
# Standardize features
scaler = StandardScaler()
X_train = scaler.fit_transform(X_train)
X_test = scaler.transform(X_test)
# Convert to tensors
X_train = torch.tensor(X_train, dtype=torch.float32)
X_test = torch.tensor(X_test, dtype=torch.float32)
y_train = torch.tensor(y_train, dtype=torch.float32).view(-1, 1)
y_test = torch.tensor(y_test, dtype=torch.float32).view(-1, 1)
# Testing
model.eval()
predictions = model(X_test).detach().numpy()
print("\nSample Predictions (first 5):")
print(predictions[:5].flatten())
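Note: the listing above evaluates a model that is never defined, and the training loop is missing. A minimal sketch of those pieces (the layer sizes, optimizer, learning rate, and epoch count are assumptions) is:

# Assumed MLP model and training loop (not shown in the original listing)
model = nn.Sequential(
    nn.Linear(X_train.shape[1], 64),
    nn.ReLU(),
    nn.Linear(64, 1)
)
criterion = nn.MSELoss()
optimizer = optim.Adam(model.parameters(), lr=0.01)
for epoch in range(100):
    preds = model(X_train)
    loss = criterion(preds, y_train)
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()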
Output:
Conclusion:
Marks
Practical – 5
Aim: To develop a conventional feed-forward neural network on the MNIST
dataset using (a) the Sequential class and (b) the Model class API.
Program:
import torch
import torch.nn as nn
import torch.optim as optim
from torchvision import datasets, transforms
# MNIST data
transform = transforms.ToTensor()
train_data = datasets.MNIST(root='./data', train=True,
                            download=True, transform=transform)
train_loader = torch.utils.data.DataLoader(train_data,
                                           batch_size=64, shuffle=True)
# Training
for epoch in range(3):
    total_loss = 0
    for images, labels in train_loader:
        preds = model(images)
        loss = criterion(preds, labels)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        total_loss += loss.item()
    print(f"Epoch {epoch+1}, Loss: {total_loss:.4f}")
# Define model using custom class
class FFNN(nn.Module):
    def __init__(self):
        super(FFNN, self).__init__()
        self.flatten = nn.Flatten()
        self.fc1 = nn.Linear(28*28, 128)
        self.fc2 = nn.Linear(128, 64)
        self.fc3 = nn.Linear(64, 10)
        self.relu = nn.ReLU()
model = FFNN()
# Training
for epoch in range(3):
    total_loss = 0
    for images, labels in train_loader:
        preds = model(images)
        loss = criterion(preds, labels)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        total_loss += loss.item()
    print(f"Epoch {epoch+1}, Loss: {total_loss:.4f}")
Output:
Conclusion:
Rubrics: Problem Recognition (2) | Knowledge (2) | Logic Building (2) | Completeness and accuracy (2) | Ethics (2) | Total
Each criterion is graded Good (2) or Avg. (1).
Marks
Practical – 6
Aim: To implement Autoencoders for Dimensionality Reduction
Program:
import torch
import torch.nn as nn
import torch.optim as optim
from torchvision import datasets, transforms
from torch.utils.data import DataLoader
# Define Autoencoder
class Autoencoder(nn.Module):
    def __init__(self):
        super(Autoencoder, self).__init__()
        # Encoder: compress from 784 -> 32
        self.encoder = nn.Sequential(
            nn.Flatten(),
            nn.Linear(28*28, 128),
            nn.ReLU(),
            nn.Linear(128, 32)
        )
        # Decoder: decompress from 32 -> 784
        self.decoder = nn.Sequential(
            nn.Linear(32, 128),
            nn.ReLU(),
            nn.Linear(128, 28*28),
            nn.Sigmoid()  # Output between 0 and 1
        )
# Train Autoencoder
for epoch in range(5):
    total_loss = 0
    for images, _ in train_loader:  # labels are not needed
        outputs = model(images)
        loss = criterion(outputs, images.view(-1, 28*28))
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        total_loss += loss.item()
    print(f"Epoch {epoch+1}, Loss: {total_loss:.4f}")
Output:
Conclusion:
Marks
Practical – 7
Aim: Build a Convolutional Neural Network (CNN) for MNIST
Handwritten Digit Classification.
Program:
import torch
import torch.nn as nn
import torch.optim as optim
from torchvision import datasets, transforms
from torch.utils.data import DataLoader
model = SimpleCNN()
loss_fn = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=0.001)
loss = loss_fn(outputs, labels)
optimizer.zero_grad()
loss.backward()
optimizer.step()
print(f"Epoch {epoch+1}, Loss: {loss.item():.4f}")
# Testing
correct = 0
total = 0
with torch.no_grad():
    for images, labels in test_loader:
        outputs = model(images)
        _, predicted = torch.max(outputs.data, 1)
        total += labels.size(0)
        correct += (predicted == labels).sum().item()
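Note: the listing instantiates SimpleCNN and runs the loss/optimizer steps and a test loop, but the network class, the data loaders, the surrounding training loop, and the final accuracy report are not shown. A minimal sketch of those pieces (the two-conv-layer architecture, batch size, and epoch count are assumptions) is:

# Assumed data loaders
transform = transforms.ToTensor()
train_data = datasets.MNIST(root='./data', train=True, download=True, transform=transform)
test_data = datasets.MNIST(root='./data', train=False, download=True, transform=transform)
train_loader = DataLoader(train_data, batch_size=64, shuffle=True)
test_loader = DataLoader(test_data, batch_size=64)

# Assumed CNN: two conv/pool stages followed by a linear classifier
class SimpleCNN(nn.Module):
    def __init__(self):
        super().__init__()
        self.conv = nn.Sequential(
            nn.Conv2d(1, 16, kernel_size=3, padding=1), nn.ReLU(), nn.MaxPool2d(2),
            nn.Conv2d(16, 32, kernel_size=3, padding=1), nn.ReLU(), nn.MaxPool2d(2)
        )
        self.fc = nn.Linear(32 * 7 * 7, 10)
    def forward(self, x):
        x = self.conv(x)
        return self.fc(x.view(x.size(0), -1))

# Assumed training loop wrapping the loss/optimizer lines from the listing
for epoch in range(3):
    for images, labels in train_loader:
        outputs = model(images)
        loss = loss_fn(outputs, labels)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
    print(f"Epoch {epoch+1}, Loss: {loss.item():.4f}")

# Accuracy report for the test loop in the listing
print(f"Test Accuracy: {100 * correct / total:.2f}%")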
Output:
Conclusion:
Marks
Practical – 8
Aim: To perform hyperparameter tuning with Long Short-Term Memory
(LSTM) networks using time series data.
Program:
import torch
import torch.nn as nn
import numpy as np
import matplotlib.pyplot as plt
# Create sequences
def create_seq(data, seq_len):
    xs, ys = [], []
    for i in range(len(data) - seq_len):
        xs.append(data[i:i+seq_len])
        ys.append(data[i+seq_len])
    return np.array(xs), np.array(ys)
seq_len = 20
X, Y = create_seq(y, seq_len)
# Train/test split
train_size = int(len(X) * 0.8)
X_train, X_test = X[:train_size], X[train_size:]
Y_train, Y_test = Y[:train_size], Y[train_size:]
# Convert to tensors
X_train = torch.tensor(X_train, dtype=torch.float32).unsqueeze(-1)
Y_train = torch.tensor(Y_train, dtype=torch.float32).unsqueeze(-1)
X_test = torch.tensor(X_test, dtype=torch.float32).unsqueeze(-1)
Y_test = torch.tensor(Y_test, dtype=torch.float32).unsqueeze(-1)
# Hyperparameter tuning
hidden_sizes = [8, 16]
learning_rates = [0.01, 0.001]
# Train
for epoch in range(20):
    model.train()
    out = model(X_train)
    loss = criterion(out, Y_train)
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
print(f"Final Train Loss: {loss.item():.4f}")
# Test
model.eval()
with torch.no_grad():
    pred = model(X_test)
    test_loss = criterion(pred, Y_test)
print(f"Test Loss: {test_loss.item():.4f}")
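Note: the listing calls create_seq(y, seq_len) on a series y that is never created, and the LSTM model, criterion, optimizer, and the loops over hidden_sizes and learning_rates are not shown. A minimal sketch of those pieces (the sine-wave data and the model architecture are assumptions) is:

# Assumed synthetic time series
t = np.linspace(0, 100, 1000)
y = np.sin(t)

# Assumed LSTM model: one LSTM layer followed by a linear head on the last time step
class LSTMModel(nn.Module):
    def __init__(self, hidden_size):
        super().__init__()
        self.lstm = nn.LSTM(input_size=1, hidden_size=hidden_size, batch_first=True)
        self.fc = nn.Linear(hidden_size, 1)
    def forward(self, x):
        out, _ = self.lstm(x)
        return self.fc(out[:, -1, :])

# Assumed tuning loop: build and train one model per hyperparameter combination
for hidden_size in hidden_sizes:
    for lr in learning_rates:
        model = LSTMModel(hidden_size)
        criterion = nn.MSELoss()
        optimizer = torch.optim.Adam(model.parameters(), lr=lr)
        # ...then run the training and test code from the listing for this setting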
Output:
Conclusion:
We built an LSTM model for time series prediction and tuned its settings. We
tested different hidden sizes and learning rates. This helped find the best
model configuration to predict future values.
Marks
Practical – 9
Aim: To train a Neural Network on the MNIST dataset and employ
SHAP (SHapley Additive exPlanations) to interpret its predictions.
Program:
import torch
import torch.nn as nn
import torch.optim as optim
from torchvision import datasets, transforms
import shap
import matplotlib.pyplot as plt
train_loader = torch.utils.data.DataLoader(train_data,
batch_size=128, shuffle=True)
test_loader = torch.utils.data.DataLoader(test_data, batch_size=1, shuffle=True)
model.train()
for images, labels in train_loader:
    outputs = model(images)
    loss = criterion(outputs, labels)
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
print("Training done")
# 5. Plot
shap.image_plot(shap_values, image.numpy())
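Note: the listing loads data into loaders, trains a model, and plots SHAP values, but train_data/test_data, the model, the loss and optimizer, and the SHAP explainer itself (steps 1-4 implied by the "# 5. Plot" comment) are not shown. A minimal sketch of those pieces (the architecture, the background sample, and any reshaping that shap.image_plot may require are assumptions) is:

# Assumed datasets, model, and optimizer
transform = transforms.ToTensor()
train_data = datasets.MNIST(root='./data', train=True, download=True, transform=transform)
test_data = datasets.MNIST(root='./data', train=False, download=True, transform=transform)
model = nn.Sequential(nn.Flatten(), nn.Linear(28*28, 128), nn.ReLU(), nn.Linear(128, 10))
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=0.001)

# Assumed SHAP setup: explain one test image against a background batch of training images
background, _ = next(iter(train_loader))
image, _ = next(iter(test_loader))
explainer = shap.DeepExplainer(model, background)
shap_values = explainer.shap_values(image)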
Output:
Conclusion:
After training a simple MNIST classifier, we used SHAP to explain predictions.
SHAP showed which pixels influenced the model's decision, making the
network's decision process easier to interpret.
Marks
Practical – 10
Aim: Implement reinforcement learning on any data set.
Program:
import gym
import numpy as np
# 1. Create environment
env = gym.make('FrozenLake-v1', is_slippery=False)  # Non-slippery for simplicity
# 2. Initialize Q-Table
q_table = np.zeros([env.observation_space.n, env.action_space.n])
# 3. Set hyperparameters
learning_rate = 0.8
discount_factor = 0.95
episodes = 1000
# 4. Training loop
for episode in range(episodes):
    state = env.reset()[0]
    done = False
    # Update Q-Table
    q_table[state, action] = q_table[state, action] + learning_rate * (
        reward + discount_factor * np.max(q_table[new_state, :]) - q_table[state, action])
    state = new_state
print("\n Training
done = False
while not done:
action = np.argmax(q_table[state, :])
state, reward, done, truncated, _ = env.step(action)
env.render()
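Note: the training loop above resets the environment and applies the Q-update, but the action selection and environment step that produce action, reward, and new_state are not shown. A minimal sketch of that inner loop (the epsilon-greedy exploration rate is an assumption) is:

# Assumed inner loop for each training episode
epsilon = 0.1
while not done:
    if np.random.rand() < epsilon:
        action = env.action_space.sample()      # explore
    else:
        action = np.argmax(q_table[state, :])   # exploit
    new_state, reward, done, truncated, _ = env.step(action)
    # ...apply the Q-update from the listing, then set state = new_state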
Output:
Conclusion:
We trained a simple agent using Q-learning in the FrozenLake environment. The
agent learned to reach the goal by trial and error. It showed how
reinforcement learning works without needing a dataset.
Marks