AIprog12
AIprog12
!wget https://download.microsoft.com/download/3/e/1/3e1c3f21-ecdb-4869-8368-6deba77b919f/kagglecatsanddogs_5340.zip
!unzip kagglecatsanddogs_5340.zip
https://zenodo.org/records/5226945
!wget https://zenodo.org/records/5226945/files/cats_dogs_light.zip?
download=1
!unzip cats_dogs_light.zip?download=1
!wget --no-check-certificate \
https://storage.googleapis.com/mledu-datasets/cats_and_dogs_filtered.zip \
-O /tmp/cats_and_dogs_filtered.zip
!unzip /tmp/cats_and_dogs_filtered.zip
Step 3: Code
import tensorflow as tf
from tensorflow.keras.layers import Conv2D, Flatten, Dense
from tensorflow.keras.models import Sequential
from tensorflow.keras.preprocessing.image import ImageDataGenerator

# Training constants
batch_size = 32
img_height = 150
img_width = 150
epochs = 10

# Rescale pixel values from [0, 255] to [0, 1] for both splits.
train_datagen = ImageDataGenerator(rescale=1./255)
validation_datagen = ImageDataGenerator(rescale=1./255)

# Stream (image, label) batches from the extracted dataset directories;
# class_mode='binary' yields 0/1 labels for the two class subfolders.
train_generator = train_datagen.flow_from_directory(
    '/content/cats_and_dogs_filtered/train',
    target_size=(img_height, img_width),
    batch_size=batch_size,
    class_mode='binary'
)
validation_generator = validation_datagen.flow_from_directory(
    '/content/cats_and_dogs_filtered/validation',
    target_size=(img_height, img_width),
    batch_size=batch_size,
    class_mode='binary'
)

# Simple fully-connected baseline: flatten the RGB image, one hidden ReLU
# layer, and a single sigmoid unit for binary classification.
# NOTE(review): Conv2D is imported above but never used — presumably a CNN
# was intended; a convolutional model would suit this image task far better
# than flattening 150x150x3 pixels into a dense layer.
model = Sequential([
    Flatten(input_shape=(img_height, img_width, 3)),
    Dense(128, activation='relu'),
    Dense(1, activation='sigmoid')
])

# Binary cross-entropy matches the single sigmoid output unit.
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])

# Train; integer division for the step counts drops any final partial batch
# from each epoch.
history = model.fit(
    train_generator,
    steps_per_epoch=train_generator.samples // batch_size,
    epochs=epochs,
    validation_data=validation_generator,
    validation_steps=validation_generator.samples // batch_size
)
Output:
Found 2000 images belonging to 2 classes.
Found 1000 images belonging to 2 classes.
Epoch 1/10
62/62 ━━━━━━━━━━━━━━━━━━━━ 6s 78ms/step - accuracy: 0.5477 - loss: 13.6614 -
val_accuracy: 0.5746 - val_loss: 1.8352
Epoch 2/10
62/62 ━━━━━━━━━━━━━━━━━━━━ 1s 23ms/step - accuracy: 0.4375 - loss: 2.2653 -
val_accuracy: 0.5706 - val_loss: 1.7689
Epoch 3/10
62/62 ━━━━━━━━━━━━━━━━━━━━ 5s 83ms/step - accuracy: 0.5540 - loss: 2.9765 -
val_accuracy: 0.5575 - val_loss: 1.2186
Epoch 4/10
62/62 ━━━━━━━━━━━━━━━━━━━━ 1s 23ms/step - accuracy: 0.5000 - loss: 1.3509 -
val_accuracy: 0.5111 - val_loss: 2.9195
Epoch 5/10
62/62 ━━━━━━━━━━━━━━━━━━━━ 5s 83ms/step - accuracy: 0.5651 - loss: 2.6707 -
val_accuracy: 0.5423 - val_loss: 3.1215
Epoch 6/10
62/62 ━━━━━━━━━━━━━━━━━━━━ 2s 27ms/step - accuracy: 0.2812 - loss: 4.3689 -
val_accuracy: 0.5716 - val_loss: 1.5851
Epoch 7/10
62/62 ━━━━━━━━━━━━━━━━━━━━ 5s 76ms/step - accuracy: 0.6277 - loss: 1.6029 -
val_accuracy: 0.5171 - val_loss: 2.4660
Epoch 8/10
62/62 ━━━━━━━━━━━━━━━━━━━━ 1s 23ms/step - accuracy: 0.6875 - loss: 1.1648 -
val_accuracy: 0.5030 - val_loss: 3.6614
Epoch 9/10
62/62 ━━━━━━━━━━━━━━━━━━━━ 4s 67ms/step - accuracy: 0.5722 - loss: 2.0467 -
val_accuracy: 0.5514 - val_loss: 1.3998
Epoch 10/10
62/62 ━━━━━━━━━━━━━━━━━━━━ 1s 24ms/step - accuracy: 0.5000 - loss: 1.2462 -
val_accuracy: 0.5212 - val_loss: 2.4486