
import os
import zipfile

local_zip = '/content/test_set.zip'

# Check if the file exists before attempting extraction
if not os.path.isfile(local_zip):
    print(f"File {local_zip} does not exist.")
else:
    try:
        with zipfile.ZipFile(local_zip, 'r') as zip_ref:
            zip_ref.extractall('/content/test_set')
        print("Extraction completed successfully.")
    except zipfile.BadZipFile:
        print(f"Error: {local_zip} is not a valid zip file or is corrupted.")
    except Exception as e:
        print(f"An unexpected error occurred: {e}")

Extraction completed successfully.
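If extraction succeeds, a quick listing confirms the directory layout before any paths are hard-coded below. A minimal sketch, assuming the Colab paths used above:

# Optional sanity check: list the top-level contents of the extracted archive
print(os.listdir('/content/test_set'))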


train_cats_dir = os.path.join('/content/test_set/test_set', 'cats')
train_dogs_dir = os.path.join('/content/test_set/test_set', 'dogs')

# Check if directories exist
if not os.path.exists(train_cats_dir):
    print(f"The directory {train_cats_dir} does not exist.")
if not os.path.exists(train_dogs_dir):
    print(f"The directory {train_dogs_dir} does not exist.")

# Print the number of images in each directory (guarded so os.listdir()
# is never called on a missing path)
if os.path.exists(train_cats_dir) and os.path.exists(train_dogs_dir):
    print('Total training cat images:', len(os.listdir(train_cats_dir)))
    print('Total training dog images:', len(os.listdir(train_dogs_dir)))

Found 1619 images belonging to 2 classes.


Found 404 images belonging to 2 classes.
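The two "Found ... images belonging to 2 classes" lines are the characteristic output of Keras' ImageDataGenerator.flow_from_directory, but the cell that produced them is not shown in the document. A minimal sketch that would print output of this form, assuming the cats/dogs layout above and an 80/20 train/validation split (the split ratio and target size are assumptions, consistent with 1619 + 404 = 2023 total images, not taken from the document):

from tensorflow.keras.preprocessing.image import ImageDataGenerator

# Assumed reconstruction: one generator with a held-out validation subset
datagen = ImageDataGenerator(rescale=1./255, validation_split=0.2)
train_gen = datagen.flow_from_directory('/content/test_set/test_set',
                                        target_size=(150, 150),
                                        class_mode='binary',
                                        subset='training')
val_gen = datagen.flow_from_directory('/content/test_set/test_set',
                                      target_size=(150, 150),
                                      class_mode='binary',
                                      subset='validation')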

import tensorflow as tf
from tensorflow.keras import layers
import matplotlib.pyplot as plt
from IPython.display import clear_output  # Make sure clear_output is imported correctly
import time

# Load dataset
(train_images, train_labels), (_, _) = tf.keras.datasets.mnist.load_data()
train_images = train_images.reshape(train_images.shape[0], 28, 28, 1).astype('float32')
train_images = (train_images - 127.5) / 127.5  # Normalize the images to [-1, 1]

BUFFER_SIZE = 60000
BATCH_SIZE = 256

train_dataset = tf.data.Dataset.from_tensor_slices(train_images).shuffle(BUFFER_SIZE).batch(BATCH_SIZE)

# Generator model: upsample a 100-dim noise vector into a 28x28x1 image
def make_generator_model():
    model = tf.keras.Sequential()
    model.add(layers.Dense(7*7*256, use_bias=False, input_shape=(100,)))
    model.add(layers.BatchNormalization())
    model.add(layers.LeakyReLU())

    model.add(layers.Reshape((7, 7, 256)))
    assert model.output_shape == (None, 7, 7, 256)  # Note: None is the batch size

    model.add(layers.Conv2DTranspose(128, (5, 5), strides=(1, 1),
                                     padding='same', use_bias=False))
    assert model.output_shape == (None, 7, 7, 128)
    model.add(layers.BatchNormalization())
    model.add(layers.LeakyReLU())

    model.add(layers.Conv2DTranspose(64, (5, 5), strides=(2, 2),
                                     padding='same', use_bias=False))
    assert model.output_shape == (None, 14, 14, 64)
    model.add(layers.BatchNormalization())
    model.add(layers.LeakyReLU())

    model.add(layers.Conv2DTranspose(1, (5, 5), strides=(2, 2),
                                     padding='same', use_bias=False, activation='tanh'))
    assert model.output_shape == (None, 28, 28, 1)

    return model

generator = make_generator_model()
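Before training, the untrained generator can be exercised on a single noise vector to confirm the output shape. A minimal sketch (the variable names here are illustrative, not from the document):

# Generate one image from random noise with the as-yet-untrained generator
noise = tf.random.normal([1, 100])
generated_image = generator(noise, training=False)
print(generated_image.shape)  # expected: (1, 28, 28, 1)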

# Discriminator model: map a 28x28x1 image to a single real/fake logit
def make_discriminator_model():
    model = tf.keras.Sequential()
    model.add(layers.Conv2D(64, (5, 5), strides=(2, 2), padding='same',
                            input_shape=[28, 28, 1]))
    model.add(layers.LeakyReLU())
    model.add(layers.Dropout(0.3))

    model.add(layers.Conv2D(128, (5, 5), strides=(2, 2), padding='same'))
    model.add(layers.LeakyReLU())
    model.add(layers.Dropout(0.3))

    model.add(layers.Flatten())
    model.add(layers.Dense(1))

    return model

discriminator = make_discriminator_model()
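The untrained discriminator can be sanity-checked the same way; it emits one unbounded logit per image (positive leaning "real", negative leaning "fake"). A minimal sketch, reusing generated_image from the snippet above:

# Score the generated image with the untrained discriminator
decision = discriminator(generated_image, training=False)
print(decision)  # a single logit, shape (1, 1)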

# Loss and optimizers
cross_entropy = tf.keras.losses.BinaryCrossentropy(from_logits=True)

def discriminator_loss(real_output, fake_output):
    # Real images should be classified as 1, generated images as 0
    real_loss = cross_entropy(tf.ones_like(real_output), real_output)
    fake_loss = cross_entropy(tf.zeros_like(fake_output), fake_output)
    total_loss = real_loss + fake_loss
    return total_loss

def generator_loss(fake_output):
    # The generator succeeds when the discriminator labels its output as 1
    return cross_entropy(tf.ones_like(fake_output), fake_output)

generator_optimizer = tf.keras.optimizers.Adam(1e-4)
discriminator_optimizer = tf.keras.optimizers.Adam(1e-4)
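As a quick illustration of how these two losses pull against each other (the logit values below are made up for the example, not from the document): a discriminator that confidently separates real from fake gets a near-zero loss, while the generator's loss on those same fake logits is large.

# Illustrative check with hand-picked logits
real_logits = tf.constant([[10.0]])   # discriminator is confident "real"
fake_logits = tf.constant([[-10.0]])  # discriminator is confident "fake"
print(discriminator_loss(real_logits, fake_logits).numpy())  # ~0.0
print(generator_loss(fake_logits).numpy())                   # large, ~10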

# Training
checkpoint_dir = './training_checkpoints'
checkpoint_prefix = os.path.join(checkpoint_dir, "ckpt")
checkpoint = tf.train.Checkpoint(generator_optimizer=generator_optimizer,
                                 discriminator_optimizer=discriminator_optimizer,
                                 generator=generator,
                                 discriminator=discriminator)

EPOCHS = 15
noise_dim = 100
num_examples_to_generate = 16
seed = tf.random.normal([num_examples_to_generate, noise_dim])

@tf.function
def train_step(images):
    noise = tf.random.normal([BATCH_SIZE, noise_dim])

    with tf.GradientTape() as gen_tape, tf.GradientTape() as disc_tape:
        generated_images = generator(noise, training=True)

        real_output = discriminator(images, training=True)
        fake_output = discriminator(generated_images, training=True)

        gen_loss = generator_loss(fake_output)
        disc_loss = discriminator_loss(real_output, fake_output)

    gradients_of_generator = gen_tape.gradient(gen_loss, generator.trainable_variables)
    gradients_of_discriminator = disc_tape.gradient(disc_loss, discriminator.trainable_variables)

    generator_optimizer.apply_gradients(zip(gradients_of_generator, generator.trainable_variables))
    discriminator_optimizer.apply_gradients(zip(gradients_of_discriminator, discriminator.trainable_variables))
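A single-batch smoke test (optional, not in the original) is a cheap way to confirm the step traces and compiles under @tf.function before committing to a full training run:

# Optional smoke test: run one training step on one batch
for image_batch in train_dataset.take(1):
    train_step(image_batch)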

def train(dataset, epochs):
    for epoch in range(epochs):
        start = time.time()

        for image_batch in dataset:
            train_step(image_batch)

        clear_output(wait=True)
        generate_and_save_images(generator, epoch + 1, seed)

        if (epoch + 1) % 15 == 0:
            checkpoint.save(file_prefix=checkpoint_prefix)

        print('Time for epoch {} is {} sec'.format(epoch + 1, time.time() - start))

    clear_output(wait=True)
    generate_and_save_images(generator, epochs, seed)

def generate_and_save_images(model, epoch, test_input):
    predictions = model(test_input, training=False)
    fig = plt.figure(figsize=(4, 4))

    for i in range(predictions.shape[0]):
        plt.subplot(4, 4, i + 1)
        plt.imshow(predictions[i, :, :, 0] * 127.5 + 127.5, cmap='gray')
        plt.axis('off')

    plt.savefig('image_at_epoch_{:04d}.png'.format(epoch))
    plt.show()

# Train the model
train(train_dataset, EPOCHS)
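After training, the saved weights can be reloaded from the newest checkpoint, e.g. to resume training or to generate more samples later. A minimal sketch using the checkpoint object defined above:

# Restore the most recent checkpoint, if one was saved
checkpoint.restore(tf.train.latest_checkpoint(checkpoint_dir))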
