AAI 04: AAI_exp4.ipynb - Colaboratory
# Data
train_data_path = '/content/drive/MyDrive/fashion-mnist_train.csv'  # Path of train data
valid_data_path = '/content/drive/MyDrive/fashion-mnist_test.csv'   # Path of validation data
print('Train data path:', train_data_path)
print('Valid data path:', valid_data_path)

img_size = 28    # Image size
batch_size = 64  # Batch size

# Model
z_size = 100
generator_layer_size = [256, 512, 1024]
discriminator_layer_size = [1024, 512, 256]

# Training
epochs = 10  # Train epochs
learning_rate = 1e-4
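The import and Drive-mount cell did not survive the export (only its 'Mounted at /content/drive' output did). A reconstruction covering the Colab mount plus the names the cells below rely on; the exact original imports are an assumption.

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
from torch.autograd import Variable
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms

from google.colab import drive
drive.mount('/content/drive')

# `device` is used below but never defined in the surviving cells
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')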
Mounted at /content/drive
class_list = ['T-Shirt', 'Trouser', 'Pullover', 'Dress', 'Coat', 'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle Boot']
class_num = len(class_list)
class FashionMNIST(Dataset):
    def __init__(self, path, img_size, transform=None):
        self.transform = transform
        fashion_df = pd.read_csv(path)
        self.images = fashion_df.iloc[:, 1:].values.astype('uint8').reshape(-1, img_size, img_size)
        self.labels = fashion_df.label.values
        print('Image size:', self.images.shape)
        print('--- Label ---')
        print(fashion_df.label.value_counts())

    def __len__(self):  # restored (lost in the export); needed by DataLoader
        return len(self.labels)

    def __getitem__(self, idx):  # restored; the indexing cells below rely on it
        image = self.images[idx]
        if self.transform:
            image = self.transform(image)
        return image, int(self.labels[idx])
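The cell that builds `dataset` and `data_loader` was also lost, and both are used below. A minimal sketch, assuming the usual ToTensor + Normalize((0.5,), (0.5,)) pipeline that matches the generator's Tanh output:

transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize(mean=(0.5,), std=(0.5,))  # map pixels to [-1, 1] for the Tanh generator
])

dataset = FashionMNIST(train_data_path, img_size, transform=transform)
data_loader = DataLoader(dataset, batch_size=batch_size, shuffle=True)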
class_list[dataset[2][1]]
'Shirt'
dataset[10][0]
class_list[dataset[10][1]]
'T-Shirt'
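The rendered output of dataset[10][0] was dropped by the export. A hypothetical display cell for viewing a sample (the un-normalization assumes the transform sketched above):

img, label = dataset[10]
plt.imshow(img.squeeze() * 0.5 + 0.5, cmap='gray')  # undo Normalize((0.5,), (0.5,))
plt.title(class_list[label])
plt.show()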
class Generator(nn.Module):
    def __init__(self, generator_layer_size, z_size, img_size, class_num):
        super().__init__()
        self.z_size = z_size
        self.img_size = img_size
        self.label_emb = nn.Embedding(class_num, class_num)
        self.model = nn.Sequential(
            nn.Linear(self.z_size + class_num, generator_layer_size[0]),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Linear(generator_layer_size[0], generator_layer_size[1]),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Linear(generator_layer_size[1], generator_layer_size[2]),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Linear(generator_layer_size[2], self.img_size * self.img_size),
            nn.Tanh())

    def forward(self, z, labels):
        # forward restored (lost in the export): the standard cGAN step of
        # concatenating the noise vector with the label embedding
        z = z.view(-1, self.z_size)
        c = self.label_emb(labels)
        x = torch.cat([z, c], 1)
        out = self.model(x)
        return out.view(x.size(0), self.img_size, self.img_size)
class Discriminator(nn.Module):
    def __init__(self, discriminator_layer_size, img_size, class_num):
        super().__init__()
        self.label_emb = nn.Embedding(class_num, class_num)
        self.img_size = img_size
        self.model = nn.Sequential(
            nn.Linear(self.img_size * self.img_size + class_num, discriminator_layer_size[0]),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Dropout(0.3),
            nn.Linear(discriminator_layer_size[0], discriminator_layer_size[1]),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Dropout(0.3),
            nn.Linear(discriminator_layer_size[1], discriminator_layer_size[2]),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Dropout(0.3),
            nn.Linear(discriminator_layer_size[2], 1),
            nn.Sigmoid())

    def forward(self, x, labels):
        # forward restored (lost in the export): flatten the image and
        # concatenate it with the label embedding before scoring
        x = x.view(-1, self.img_size * self.img_size)
        c = self.label_emb(labels)
        x = torch.cat([x, c], 1)
        return self.model(x).squeeze()
# Define generator
generator = Generator(generator_layer_size, z_size, img_size, class_num).to(device)
# Define discriminator
discriminator = Discriminator(discriminator_layer_size, img_size, class_num).to(device)

# Loss function
criterion = nn.BCELoss()

# Optimizers
g_optimizer = torch.optim.Adam(generator.parameters(), lr=learning_rate)
d_optimizer = torch.optim.Adam(discriminator.parameters(), lr=learning_rate)
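Pages 4-5 of the printout, which defined discriminator_train_step, generator_train_step, generate_fake_images, and discriminator_predictions, came through blank. The training loop below calls all four, so here is one plausible reconstruction following the standard conditional-GAN recipe; the bodies are assumptions, only the signatures are fixed by the calls below.

def generator_train_step(batch_size, discriminator, generator, g_optimizer, criterion):
    # One generator update: push the discriminator's score on fresh fakes toward 1.
    # Relies on the notebook-level globals z_size, class_num, device.
    g_optimizer.zero_grad()
    z = torch.randn(batch_size, z_size).to(device)
    fake_labels = torch.randint(0, class_num, (batch_size,)).to(device)
    fake_images = generator(z, fake_labels)
    validity = discriminator(fake_images, fake_labels)
    g_loss = criterion(validity, torch.ones(batch_size).to(device))
    g_loss.backward()
    g_optimizer.step()
    return g_loss.item()

def discriminator_train_step(batch_size, discriminator, generator, d_optimizer,
                             criterion, real_images, labels):
    # One discriminator update: real images toward 1, generated images toward 0.
    d_optimizer.zero_grad()
    real_validity = discriminator(real_images, labels)
    real_loss = criterion(real_validity, torch.ones(batch_size).to(device))
    z = torch.randn(batch_size, z_size).to(device)
    fake_labels = torch.randint(0, class_num, (batch_size,)).to(device)
    fake_images = generator(z, fake_labels)
    fake_validity = discriminator(fake_images.detach(), fake_labels)  # detach: don't backprop into G
    fake_loss = criterion(fake_validity, torch.zeros(batch_size).to(device))
    d_loss = real_loss + fake_loss
    d_loss.backward()
    d_optimizer.step()
    return d_loss.item()

def generate_fake_images(generator, batch_size, z_size, class_num, device):
    # Sample a batch of fakes with random class labels, without tracking gradients.
    with torch.no_grad():
        z = torch.randn(batch_size, z_size).to(device)
        fake_labels = torch.randint(0, class_num, (batch_size,)).to(device)
        fake_images = generator(z, fake_labels)
    return fake_images, fake_labels

def discriminator_predictions(discriminator, images, labels):
    # Threshold the sigmoid output into hard real(1)/fake(0) decisions.
    with torch.no_grad():
        preds = discriminator(images, labels)
    return (preds > 0.5).long().cpu().numpy()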
# Training loop
for epoch in range(epochs):
    print('Starting epoch {}...'.format(epoch + 1))

    # Lists to store true and predicted labels for the confusion matrix
    true_labels_list = []
    predicted_labels_list = []

    for i, (images, labels) in enumerate(data_loader):
        # Train data
        real_images = Variable(images).to(device)
        labels = Variable(labels).to(device)

        # Set generator to train mode
        generator.train()

        # Train discriminator
        d_loss = discriminator_train_step(len(real_images), discriminator,
                                          generator, d_optimizer, criterion, real_images, labels)

        # Train generator
        g_loss = generator_train_step(batch_size, discriminator, generator,
                                      g_optimizer, criterion)

        # Generate fake images and obtain discriminator predictions
        fake_images, fake_labels = generate_fake_images(generator, batch_size,
                                                        z_size, class_num, device)
        fake_predictions = discriminator_predictions(discriminator, fake_images, fake_labels)
        real_predictions = discriminator_predictions(discriminator, real_images, labels)

        # Append true and predicted labels to the lists (real = 1, fake = 0)
        true_labels_list.extend(np.concatenate((np.ones(len(real_predictions)),
                                                np.zeros(len(fake_predictions)))).tolist())
        predicted_labels_list.extend(np.concatenate((real_predictions, fake_predictions)).tolist())

    # Epoch summary (restored: the prints producing the output below were lost in the export)
    accuracy = np.mean(np.array(true_labels_list) == np.array(predicted_labels_list))
    print('Epoch {}: Generator Loss: {:.4f}, Discriminator Loss: {:.4f}'.format(epoch + 1, g_loss, d_loss))
    print('Accuracy: {:.4f}'.format(accuracy))
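The cell that printed the confusion matrix shown below was also lost; a minimal sketch, assuming sklearn and the final epoch's accumulated lists:

from sklearn.metrics import confusion_matrix  # assumed; the original metric code did not survive

# Rows are the true class (0 = fake, 1 = real); columns are the discriminator's call
print('Confusion Matrix:')
print(confusion_matrix(true_labels_list, predicted_labels_list))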
Epoch 5: Generator Loss: 0.9625, Discriminator Loss: 0.9370
Accuracy: 0.6865
Starting epoch 6...
Epoch 6: Generator Loss: 0.9221, Discriminator Loss: 1.2869
Accuracy: 0.6759
Starting epoch 7...
Epoch 7: Generator Loss: 0.8081, Discriminator Loss: 1.0706
Accuracy: 0.6757
Starting epoch 8...
Epoch 8: Generator Loss: 0.9652, Discriminator Loss: 1.2018
Accuracy: 0.6864
Starting epoch 9...
Epoch 9: Generator Loss: 0.9724, Discriminator Loss: 1.2329
Accuracy: 0.6888
Starting epoch 10...
Epoch 10: Generator Loss: 0.8915, Discriminator Loss: 1.2083
Accuracy: 0.6915
Confusion Matrix:
[[45083 14949]
[22083 37917]]