0% found this document useful (0 votes)
28 views7 pages

Aai 04

The document describes a generative adversarial network (GAN) model for generating images based on a fashion MNIST dataset. It defines generator and discriminator models, loss functions, and training procedures. The GAN is trained over multiple epochs to generate new images and improve the real/fake classification accuracy of the discriminator model.

Uploaded by

ahmed.412052.cs
Copyright
© All Rights Reserved
We take content rights seriously. If you suspect this is your content, claim it here.
Available Formats
Download as PDF, TXT or read online on Scribd
0% found this document useful (0 votes)
28 views7 pages

Aai 04

The document describes a generative adversarial network (GAN) model for generating images based on a fashion MNIST dataset. It defines generator and discriminator models, loss functions, and training procedures. The GAN is trained over multiple epochs to generate new images and improve the real/fake classification accuracy of the discriminator model.

Uploaded by

ahmed.412052.cs
Copyright
© All Rights Reserved
We take content rights seriously. If you suspect this is your content, claim it here.
Available Formats
Download as PDF, TXT or read online on Scribd
You are on page 1/ 7

3/5/24, 12:23 PM Copy of AAI_exp4.

ipynb - Colaboratory

# %matplotlib inline  -- IPython magic from the original notebook; not valid in a plain .py file
import torch
import torch.nn as nn
import pandas as pd
import numpy as np
from torchvision import transforms
from torch.utils.data import Dataset, DataLoader
from PIL import Image
from torch import autograd
from torch.autograd import Variable
from torchvision.utils import make_grid
import matplotlib.pyplot as plt

# Select GPU when available; all models/tensors below are moved with .to(device).
device = 'cuda' if torch.cuda.is_available() else 'cpu'
print('torch version:', torch.__version__)
print('device:', device)

torch version: 2.1.0+cu121 device:


cuda

# --- Data configuration ---
train_data_path = '/content/drive/MyDrive/fashion-mnist_train.csv'  # training CSV
valid_data_path = '/content/drive/MyDrive/fashion-mnist_test.csv'   # validation CSV
print('Train data path:', train_data_path)
print('Valid data path:', valid_data_path)
img_size = 28    # images are 28x28 grayscale
batch_size = 64  # samples per training batch

# --- Model configuration ---
z_size = 100                               # latent vector dimensionality
generator_layer_size = [256, 512, 1024]    # generator hidden-layer widths
discriminator_layer_size = [1024, 512, 256]  # discriminator hidden-layer widths

# --- Training configuration ---
epochs = 10           # number of training epochs
learning_rate = 1e-4  # Adam learning rate for both networks

Train data path: /content/drive/MyDrive/fashion-mnist_train.csv Valid data path:


/content/drive/MyDrive/fashion-mnist_test.csv

from google.colab import drive

# Mount Google Drive so the Fashion-MNIST CSVs are reachable under /content/drive.
drive.mount('/content/drive')

Mounted at /content/drive

# The ten Fashion-MNIST class names, index-aligned with the CSV 'label' column.
# NOTE(review): the source line was truncated after 'Ba'; the tail ('Bag',
# 'Ankle Boot') is reconstructed from the standard Fashion-MNIST label set.
class_list = ['T-Shirt', 'Trouser', 'Pullover', 'Dress', 'Coat',
              'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle Boot']
class_num = len(class_list)

class FashionMNIST(Dataset):
    """Fashion-MNIST dataset backed by a CSV file.

    Each CSV row is: label, pixel_0, ..., pixel_783. Pixels are reshaped to
    (img_size, img_size) uint8 images; an optional transform is applied on
    access (in this notebook: ToTensor + Normalize to [-1, 1]).
    """

    def __init__(self, path, img_size, transform=None):
        self.transform = transform
        fashion_df = pd.read_csv(path)
        # Columns 1: are the 784 pixel values; column 0 is the class label.
        self.images = fashion_df.iloc[:, 1:].values.astype('uint8').reshape(-1, img_size, img_size)
        self.labels = fashion_df.label.values
        print('Image size:', self.images.shape)
        print('--- Label ---')
        print(fashion_df.label.value_counts())

    def __len__(self):
        return len(self.images)

    def __getitem__(self, idx):
        label = self.labels[idx]
        # Convert to a PIL image so torchvision transforms can be applied.
        img = Image.fromarray(self.images[idx])
        if self.transform:
            img = self.transform(img)
        return img, label

# --- Notebook sanity-check cells; bare strings below are the captured cell outputs ---
# NOTE(review): `dataset` is built in a notebook cell lost in this export —
# presumably dataset = FashionMNIST(train_data_path, img_size, transform); confirm.
# Class name for sample 2's label (output: 'Shirt').
class_list[dataset[2][1]]

'Shirt'

# Image of sample 10 (displayed by the notebook).
dataset[10][0]

# Class name for sample 10's label (output: 'T-Shirt').
class_list[dataset[10][1]]

'T-Shirt'

# PIL image -> tensor in [0, 1], then shift/scale to [-1, 1]
# (matches the generator's Tanh output range).
transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize(mean=(0.5,), std=(0.5,)),
])

# Render one batch of training images as a 16-wide grid, then stop.
# NOTE(review): `data_loader` is built in a notebook cell lost in this export —
# presumably DataLoader(dataset, batch_size=batch_size, shuffle=True); confirm.
for images, labels in data_loader:
    fig, ax = plt.subplots(figsize=(18, 10))
    ax.set_xticks([])
    ax.set_yticks([])
    ax.imshow(make_grid(images, nrow=16).permute(1, 2, 0))
    break

WARNING:matplotlib.image:Clipping input data to the valid ra

https://colab.research.google.com/drive/1K9sn2rQ0TrpWbL5sHupdqsG4Pg1c_hRx#scrollTo=UnX5eZ4soQPx&printMode=true 2/7
3/5/24, 12:23 PM Copy of AAI_exp4.ipynb - Colaboratory

class Generator(nn.Module):
    """Conditional GAN generator.

    Maps a latent vector z plus a class label to a (img_size, img_size)
    image with values in [-1, 1] (Tanh output). The label is embedded into a
    class_num-dimensional vector and concatenated to z.
    """

    def __init__(self, generator_layer_size, z_size, img_size, class_num):
        super().__init__()
        self.z_size = z_size
        self.img_size = img_size
        # One learnable class_num-dim embedding per class.
        self.label_emb = nn.Embedding(class_num, class_num)

        self.model = nn.Sequential(
            nn.Linear(self.z_size + class_num, generator_layer_size[0]),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Linear(generator_layer_size[0], generator_layer_size[1]),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Linear(generator_layer_size[1], generator_layer_size[2]),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Linear(generator_layer_size[2], self.img_size * self.img_size),
            nn.Tanh())

    def forward(self, z, labels):
        # Flatten noise to (batch, z_size)
        z = z.view(-1, self.z_size)
        # Embed the class labels
        c = self.label_emb(labels)
        # Concat noise & label embedding
        x = torch.cat([z, c], 1)
        # Generator output, reshaped from flat pixels to square images
        out = self.model(x)
        return out.view(-1, self.img_size, self.img_size)

class Discriminator(nn.Module):
    """Conditional GAN discriminator.

    Scores an (img_size, img_size) image plus its class label with a single
    sigmoid probability of being real. The label is embedded into a
    class_num-dimensional vector and concatenated to the flattened image.
    """

    def __init__(self, discriminator_layer_size, img_size, class_num):
        super().__init__()
        # One learnable class_num-dim embedding per class.
        self.label_emb = nn.Embedding(class_num, class_num)
        self.img_size = img_size

        self.model = nn.Sequential(
            nn.Linear(self.img_size * self.img_size + class_num, discriminator_layer_size[0]),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Dropout(0.3),
            nn.Linear(discriminator_layer_size[0], discriminator_layer_size[1]),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Dropout(0.3),
            nn.Linear(discriminator_layer_size[1], discriminator_layer_size[2]),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Dropout(0.3),
            nn.Linear(discriminator_layer_size[2], 1),
            nn.Sigmoid())

    def forward(self, x, labels):
        # Flatten the (possibly fake) image to (batch, img_size*img_size)
        x = x.view(-1, self.img_size * self.img_size)
        # Embed the class labels
        c = self.label_emb(labels)
        # Concat image & label embedding
        x = torch.cat([x, c], 1)
        # Per-sample real/fake probability, squeezed to shape (batch,)
        out = self.model(x)
        return out.squeeze()

https://colab.research.google.com/drive/1K9sn2rQ0TrpWbL5sHupdqsG4Pg1c_hRx#scrollTo=UnX5eZ4soQPx&printMode=true 3/7
3/5/24, 12:23 PM Copy of AAI_exp4.ipynb - Colaboratory

# Instantiate the conditional GAN pair on the selected device.
generator = Generator(generator_layer_size, z_size, img_size, class_num).to(device)
discriminator = Discriminator(discriminator_layer_size, img_size, class_num).to(device)

# Binary cross-entropy over the discriminator's sigmoid output.
criterion = nn.BCELoss()

# One Adam optimizer per network, shared learning rate.
g_optimizer = torch.optim.Adam(generator.parameters(), lr=learning_rate)
d_optimizer = torch.optim.Adam(discriminator.parameters(), lr=learning_rate)

def generator_train_step(batch_size, discriminator, generator, g_optimizer, criterion):
    """Run one generator update and return its loss.

    Samples a fresh latent batch with random class labels, scores the
    generated images with the discriminator, and pushes the generator toward
    making the discriminator output 1 ("real") on them.

    Relies on module-level globals: z_size, class_num, device.
    """
    # Reset accumulated gradients
    g_optimizer.zero_grad()
    # Sample latent vectors z
    z = Variable(torch.randn(batch_size, z_size)).to(device)
    # Sample random class labels for the fakes
    fake_labels = Variable(torch.LongTensor(np.random.randint(0, class_num, batch_size))).to(device)
    # Generate fake images
    fake_images = generator(z, fake_labels)
    # Discriminate fake images
    validity = discriminator(fake_images, fake_labels)
    # Generator loss: target label 1 (discriminator fooled)
    g_loss = criterion(validity, Variable(torch.ones(batch_size)).to(device))
    # Backward propagation
    g_loss.backward()
    # Optimizing generator
    g_optimizer.step()
    return g_loss.data

def discriminator_train_step(batch_size, discriminator, generator, d_optimizer, criterion, real_images, labels):
    """Run one discriminator update and return its loss.

    Scores the given real batch against target 1 and a freshly generated
    fake batch against target 0, then steps the optimizer on the summed loss.
    NOTE(review): the parameter list was truncated in the source export; the
    tail (real_images, labels) is reconstructed from the call site and body.

    Relies on module-level globals: z_size, class_num, device.
    """
    # Reset accumulated gradients
    d_optimizer.zero_grad()
    # Discriminate real images; target label 1
    real_validity = discriminator(real_images, labels)
    real_loss = criterion(real_validity, Variable(torch.ones(batch_size)).to(device))
    # Sample latent vectors z and random fake labels
    z = Variable(torch.randn(batch_size, z_size)).to(device)
    fake_labels = Variable(torch.LongTensor(np.random.randint(0, class_num, batch_size))).to(device)
    # Generate fake images
    fake_images = generator(z, fake_labels)
    # Discriminate fake images; target label 0
    fake_validity = discriminator(fake_images, fake_labels)
    fake_loss = criterion(fake_validity, Variable(torch.zeros(batch_size)).to(device))
    # Sum the two losses
    d_loss = real_loss + fake_loss
    # Backward propagation
    d_loss.backward()
    # Optimizing discriminator
    d_optimizer.step()
    return d_loss.data

https://colab.research.google.com/drive/1K9sn2rQ0TrpWbL5sHupdqsG4Pg1c_hRx#scrollTo=UnX5eZ4soQPx&printMode=true 4/7
3/5/24, 12:23 PM Copy of AAI_exp4.ipynb - Colaboratory

# Visualize generated samples: z_size latent vectors, labels 0..class_num-1
# repeated class_num times so each grid row cycles through every class.
z = Variable(torch.randn(z_size, z_size)).to(device)
# Labels 0 ~ 9
labels = Variable(torch.LongTensor([i for _ in range(class_num) for i in range(class_num)])).to(device)
# Generating images; unsqueeze adds the channel dim make_grid expects
sample_images = generator(z, labels).unsqueeze(1).data.cpu()
grid = make_grid(sample_images, nrow=class_num, normalize=True).permute(1, 2, 0).numpy()
fig, ax = plt.subplots(figsize=(15, 15))
ax.imshow(grid)
_ = plt.yticks([])
# One class-name tick per column (cells are ~30 px wide; centers at 15, 45, ...)
_ = plt.xticks(np.arange(15, 300, 30), class_list, rotation=45, fontsize=20)

https://colab.research.google.com/drive/1K9sn2rQ0TrpWbL5sHupdqsG4Pg1c_hRx#scrollTo=UnX5eZ4soQPx&printMode=true 5/7
3/5/24, 12:25 PM Copy of AAI_exp4.ipynb - Colaboratory

from sklearn.metrics import confusion_matrix, accuracy_score

conf_matrix = None
# Accuracy of the discriminator's real/fake calls, one entry per epoch.
accuracy_list = []

# Training loop: alternate discriminator and generator steps per batch, then
# report the discriminator's real-vs-fake classification quality per epoch.
for epoch in range(epochs):
    print('Starting epoch {}...'.format(epoch + 1))
    # True (1=real, 0=fake) and predicted labels accumulated for the epoch.
    true_labels_list = []
    predicted_labels_list = []
    for i, (images, labels) in enumerate(data_loader):
        real_images = Variable(images).to(device)
        labels = Variable(labels).to(device)
        # Put generator in train mode
        generator.train()
        # Train discriminator on this real batch plus a fresh fake batch
        d_loss = discriminator_train_step(len(real_images), discriminator,
                                          generator, d_optimizer, criterion, real_images, labels)
        # Train generator
        g_loss = generator_train_step(batch_size, discriminator, generator,
                                      g_optimizer, criterion)
        # Generate fake images and obtain discriminator predictions.
        # NOTE(review): generate_fake_images / discriminator_predictions are
        # defined in notebook cells missing from this export — confirm their
        # signatures against the original notebook.
        fake_images, fake_labels = generate_fake_images(generator, batch_size,
                                                        z_size, class_num, device)
        fake_predictions = discriminator_predictions(discriminator, fake_images, fake_labels)
        real_predictions = discriminator_predictions(discriminator, real_images, labels)
        # Reals are ground-truth 1, fakes ground-truth 0.
        # NOTE(review): the next line was truncated in the source; reconstructed
        # as np.zeros(len(fake_predictions)) to mirror the real side.
        true_labels_list.extend(np.concatenate((np.ones(len(real_predictions)),
                                                np.zeros(len(fake_predictions)))).tolist())
        predicted_labels_list.extend(np.concatenate((real_predictions, fake_predictions)).tolist())

    # Epoch-level metrics over all real/fake decisions.
    true_labels_array = np.array(true_labels_list)
    predicted_labels_array = np.array(predicted_labels_list)
    conf_matrix = confusion_matrix(true_labels_array, predicted_labels_array)
    accuracy = accuracy_score(true_labels_array, predicted_labels_array)
    accuracy_list.append(accuracy)
    print('Epoch {}: Generator Loss: {:.4f}, Discriminator Loss: {:.4f}'.format(epoch + 1, g_loss, d_loss))
    print('Accuracy: {:.4f}'.format(accuracy))

# Confusion matrix from the final epoch.
print('Confusion Matrix:')
print(conf_matrix)

Starting epoch 1...


Epoch 1: Generator Loss: 0.9854, Discriminator Loss: 1.2401
Accuracy: 0.7026
Starting epoch 2...
Epoch 2: Generator Loss: 1.1582, Discriminator Loss: 1.0301
Accuracy: 0.7037
Starting epoch 3...
Epoch 3: Generator Loss: 0.9832, Discriminator Loss: 1.1211
Accuracy: 0.6939
Starting epoch 4...
Epoch 4: Generator Loss: 1.0537, Discriminator Loss: 1.0936
Accuracy: 0.6843
Starting epoch 5...

https://colab.research.google.com/drive/1K9sn2rQ0TrpWbL5sHupdqsG4Pg1c_hRx#scrollTo=o1cbJX1toQPx&printMode=true 1/2
3/5/24, 12:25 PM Copy of AAI_exp4.ipynb - Colaboratory
Epoch 5: Generator Loss: 0.9625, Discriminator Loss: 0.9370
Accuracy: 0.6865
Starting epoch 6...
Epoch 6: Generator Loss: 0.9221, Discriminator Loss: 1.2869
Accuracy: 0.6759
Starting epoch 7...
Epoch 7: Generator Loss: 0.8081, Discriminator Loss: 1.0706
Accuracy: 0.6757
Starting epoch 8...
Epoch 8: Generator Loss: 0.9652, Discriminator Loss: 1.2018
Accuracy: 0.6864
Starting epoch 9...
Epoch 9: Generator Loss: 0.9724, Discriminator Loss: 1.2329
Accuracy: 0.6888
Starting epoch 10...
Epoch 10: Generator Loss: 0.8915, Discriminator Loss: 1.2083
Accuracy: 0.6915
Confusion Matrix:
[[45083 14949]
[22083 37917]]

https://colab.research.google.com/drive/1K9sn2rQ0TrpWbL5sHupdqsG4Pg1c_hRx#scrollTo=o1cbJX1toQPx&printMode=true 2/2

You might also like