Experiment No 13 Final
CODE
# ===============================
# 📊 TENSORBOARD SETUP
# ===============================
%load_ext tensorboard
# ===============================
# 🧠 IMPORTS
# ===============================
import torch
import torch.nn as nn
import torch.optim as optim
import torchvision
import torchvision.datasets as datasets
import torchvision.transforms as transforms
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
import matplotlib.pyplot as plt
import os
# ===============================
# 🧠 MODEL DEFINITIONS
# ===============================
class Discriminator(nn.Module):
    def __init__(self, img_dim):
        super().__init__()
        self.model = nn.Sequential(
            nn.Linear(img_dim, 512),
            nn.LeakyReLU(0.2),
            nn.Linear(512, 256),
            nn.LeakyReLU(0.2),
            nn.Linear(256, 1),
            nn.Sigmoid()  # probability that the input image is real
        )

    def forward(self, x):
        return self.model(x)
class Generator(nn.Module):
    def __init__(self, z_dim, img_dim):
        super().__init__()
        self.model = nn.Sequential(
            nn.Linear(z_dim, 256),
            nn.BatchNorm1d(256),
            nn.LeakyReLU(0.2),
            nn.Linear(256, 512),
            nn.BatchNorm1d(512),
            nn.LeakyReLU(0.2),
            nn.Linear(512, img_dim),
            nn.Tanh()  # outputs in [-1, 1] to match the normalized images
        )

    def forward(self, x):
        return self.model(x)
# ===============================
# ⚙️ HYPERPARAMETERS
# ===============================
device = "cuda" if torch.cuda.is_available() else "cpu"
lr = 1e-4
z_dim = 64            # latent (noise) dimension
image_dim = 28 * 28   # flattened 28x28 MNIST image
batch_size = 64
num_epochs = 100
# ===============================
# 📦 DATA LOADING
# ===============================
transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.5,), (0.5,))  # scale pixels to [-1, 1] to match Tanh
])
dataset = datasets.MNIST(root="dataset/", transform=transform, download=True)
loader = DataLoader(dataset, batch_size=batch_size, shuffle=True)
# ===============================
# 🏗️ MODELS, OPTIMIZERS & LOGGING
# ===============================
disc = Discriminator(image_dim).to(device)
gen = Generator(z_dim, image_dim).to(device)
opt_disc = optim.Adam(disc.parameters(), lr=lr)
opt_gen = optim.Adam(gen.parameters(), lr=lr)
criterion = nn.BCELoss()
fixed_noise = torch.randn((batch_size, z_dim)).to(device)  # fixed latent batch for visualization
writer_fake = SummaryWriter("runs/GAN_MNIST/fake")
writer_real = SummaryWriter("runs/GAN_MNIST/real")
step = 0
G_losses = []
D_losses = []
os.makedirs("samples", exist_ok=True)
os.makedirs("models", exist_ok=True)
# ===============================
# 🔁 TRAINING LOOP
# ===============================
for epoch in range(num_epochs):
    for batch_idx, (real, _) in enumerate(loader):
        real = real.view(-1, image_dim).to(device)  # flatten 28x28 images to 784
        cur_batch_size = real.size(0)

        # ========== Train Discriminator: max log(D(real)) + log(1 - D(G(z))) ==========
        noise = torch.randn(cur_batch_size, z_dim).to(device)
        fake = gen(noise)
        disc_real = disc(real).view(-1)
        disc_fake = disc(fake.detach()).view(-1)  # detach so this pass leaves G untouched
        lossD = (criterion(disc_real, torch.ones_like(disc_real))
                 + criterion(disc_fake, torch.zeros_like(disc_fake))) / 2
        disc.zero_grad()
        lossD.backward()
        opt_disc.step()

        # ========== Train Generator: max log(D(G(z))) ==========
        output = disc(fake).view(-1)
        lossG = criterion(output, torch.ones_like(output))
        gen.zero_grad()
        lossG.backward()
        opt_gen.step()

        # Save losses
        G_losses.append(lossG.item())
        D_losses.append(lossD.item())

        # ========== Logging & Visualization ==========
        if batch_idx % 100 == 0:
            print(f"Epoch [{epoch+1}/{num_epochs}] Batch {batch_idx}/{len(loader)} "
                  f"Loss D: {lossD:.4f}, Loss G: {lossG:.4f}")
            with torch.no_grad():
                img_grid_fake = torchvision.utils.make_grid(
                    gen(fixed_noise).reshape(-1, 1, 28, 28), normalize=True)
                img_grid_real = torchvision.utils.make_grid(
                    real.reshape(-1, 1, 28, 28), normalize=True)
                writer_fake.add_image("MNIST Fake Images", img_grid_fake, global_step=step)
                writer_real.add_image("MNIST Real Images", img_grid_real, global_step=step)
            step += 1

    # Save a generator checkpoint every 10 epochs
    if (epoch + 1) % 10 == 0:
        torch.save(gen.state_dict(), f"models/generator_epoch_{epoch+1}.pth")
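With the tensorboard extension already loaded at the top of the notebook, the image grids written by writer_fake and writer_real can be viewed inline. A minimal sketch; the log directory matches the SummaryWriter paths above:

# View the logged real/fake image grids inline
%tensorboard --logdir runs/GAN_MNIST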
# ===============================
# 📈 LOSS CURVES
# ===============================
plt.figure(figsize=(10, 5))
plt.plot(G_losses, label="G")
plt.plot(D_losses, label="D")
plt.xlabel("Iterations")
plt.ylabel("Loss")
plt.legend()
plt.savefig("samples/loss_curve.png")
plt.show()
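As an optional follow-up, a saved checkpoint can be reloaded for sampling. This is a minimal sketch, not part of the original experiment: it assumes training ran to completion, so a file matching the save pattern above (e.g. models/generator_epoch_100.pth) exists.

# ===============================
# 🎨 SAMPLING FROM A CHECKPOINT (sketch)
# ===============================
# Rebuild the generator, load the saved weights, and write a grid of samples.
gen_loaded = Generator(z_dim, image_dim).to(device)
gen_loaded.load_state_dict(torch.load("models/generator_epoch_100.pth", map_location=device))
gen_loaded.eval()  # put BatchNorm layers into inference mode
with torch.no_grad():
    samples = gen_loaded(torch.randn(64, z_dim).to(device)).reshape(-1, 1, 28, 28)
torchvision.utils.save_image(samples, "samples/generated_digits.png", normalize=True)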