DL - Project 2.ipynb Colab
DL - Project 2.ipynb Colab
ipynb - Colab
keyboard_arrow_down RESNET-50
# Mount Google Drive so the dataset stored there is reachable under /content/drive.
from google.colab import drive
drive.mount('/content/drive')
Mounted at /content/drive
import os
import cv2
import numpy as np
def load_images_from_folder(folder):
    """Load every image found in *folder*'s class sub-directories.

    Each immediate sub-directory of *folder* is treated as one class; its
    name becomes the label for every image file inside it.

    Args:
        folder: Path to a directory laid out as ``folder/<class>/<image>``.

    Returns:
        Tuple ``(images, labels)``: ``images`` is an ndarray of 224x224
        images as read by cv2 (BGR channel order), ``labels`` an ndarray
        of the corresponding class-folder names.
    """
    images = []
    labels = []
    for class_folder in os.listdir(folder):
        class_path = os.path.join(folder, class_folder)
        if not os.path.isdir(class_path):
            continue  # skip stray files sitting next to the class folders
        for filename in os.listdir(class_path):
            img_path = os.path.join(class_path, filename)
            image = cv2.imread(img_path)
            if image is None:
                # cv2.imread returns None for unreadable/non-image files
                # (e.g. .DS_Store, corrupt downloads); the original code
                # would crash inside cv2.resize on such entries.
                continue
            # Resize image to the ResNet/ViT input size.
            image = cv2.resize(image, (224, 224))
            images.append(image)
            labels.append(class_folder)  # Assign label based on folder name
    return np.array(images), np.array(labels)
img.shape
import tensorflow as tf
from tensorflow.keras.applications import ResNet50V2
from tensorflow.keras.layers import Dense, GlobalAveragePooling2D, Dropout
from tensorflow.keras.models import Model
from tensorflow.keras.utils import to_categorical
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import train_test_split
from tensorflow.keras.optimizers import Adam
# Encode labels as integer class ids for the softmax head.
# NOTE(review): the loader above returns (images, labels); the original cell
# referenced an undefined name `label`, which would raise NameError.
# Fixed to `labels` — confirm the actual assignment in the hidden cell.
label_encoder = LabelEncoder()
label_encoded = label_encoder.fit_transform(labels)
num_classes = len(label_encoder.classes_)
https://colab.research.google.com/drive/14_oaNkbOYMNPK7Q0zpR7n715elYBTwUx#scrollTo=MjeNQmYiVqre&printMode=true 1/16
2/12/25, 9:42 PM DL_PROJECT-2.ipynb - Colab
predictions = Dense(num_classes, activation='softmax')(x)
https://colab.research.google.com/drive/14_oaNkbOYMNPK7Q0zpR7n715elYBTwUx#scrollTo=MjeNQmYiVqre&printMode=true 2/16
2/12/25, 9:42 PM DL_PROJECT-2.ipynb - Colab
https://colab.research.google.com/drive/14_oaNkbOYMNPK7Q0zpR7n715elYBTwUx#scrollTo=MjeNQmYiVqre&printMode=true 8/16
2/12/25, 9:42 PM DL_PROJECT-2.ipynb - Colab
# Train the model; `history` records per-epoch loss/accuracy for both the
# training split and the held-out validation split.
history = model.fit(
X_train, y_train,
batch_size=batch_size,
epochs=epochs,
validation_data=(X_val, y_val),
verbose=1  # one progress line per epoch
)
Epoch 1/10
81/81 ━━━━━━━━━━━━━━━━━━━━ 41s 314ms/step - accuracy: 0.7126 - loss: 0.7079 - val_accuracy: 0.9536 - val_loss: 0.1265
Epoch 2/10
81/81 ━━━━━━━━━━━━━━━━━━━━ 7s 84ms/step - accuracy: 0.9552 - loss: 0.1394 - val_accuracy: 0.9706 - val_loss: 0.0879
Epoch 3/10
81/81 ━━━━━━━━━━━━━━━━━━━━ 8s 99ms/step - accuracy: 0.9690 - loss: 0.0935 - val_accuracy: 0.9753 - val_loss: 0.0754
Epoch 4/10
81/81 ━━━━━━━━━━━━━━━━━━━━ 9s 85ms/step - accuracy: 0.9782 - loss: 0.0655 - val_accuracy: 0.9784 - val_loss: 0.0667
Epoch 5/10
81/81 ━━━━━━━━━━━━━━━━━━━━ 11s 101ms/step - accuracy: 0.9865 - loss: 0.0452 - val_accuracy: 0.9815 - val_loss: 0.0581
Epoch 6/10
81/81 ━━━━━━━━━━━━━━━━━━━━ 7s 84ms/step - accuracy: 0.9904 - loss: 0.0393 - val_accuracy: 0.9799 - val_loss: 0.0551
Epoch 7/10
81/81 ━━━━━━━━━━━━━━━━━━━━ 7s 85ms/step - accuracy: 0.9940 - loss: 0.0278 - val_accuracy: 0.9799 - val_loss: 0.0548
Epoch 8/10
81/81 ━━━━━━━━━━━━━━━━━━━━ 11s 100ms/step - accuracy: 0.9945 - loss: 0.0239 - val_accuracy: 0.9830 - val_loss: 0.0570
Epoch 9/10
81/81 ━━━━━━━━━━━━━━━━━━━━ 10s 101ms/step - accuracy: 0.9953 - loss: 0.0219 - val_accuracy: 0.9845 - val_loss: 0.0510
Epoch 10/10
81/81 ━━━━━━━━━━━━━━━━━━━━ 8s 102ms/step - accuracy: 0.9968 - loss: 0.0173 - val_accuracy: 0.9845 - val_loss: 0.0493
https://colab.research.google.com/drive/14_oaNkbOYMNPK7Q0zpR7n715elYBTwUx#scrollTo=MjeNQmYiVqre&printMode=true 9/16
2/12/25, 9:42 PM DL_PROJECT-2.ipynb - Colab
https://colab.research.google.com/drive/14_oaNkbOYMNPK7Q0zpR7n715elYBTwUx#scrollTo=MjeNQmYiVqre&printMode=true 10/16
2/12/25, 9:42 PM DL_PROJECT-2.ipynb - Colab
import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np
WARNING:absl:You are saving your model as an HDF5 file via `model.save()` or `keras.saving.save_model(model)`. This file format is c
Model saved!
https://colab.research.google.com/drive/14_oaNkbOYMNPK7Q0zpR7n715elYBTwUx#scrollTo=MjeNQmYiVqre&printMode=true 11/16
2/12/25, 9:42 PM DL_PROJECT-2.ipynb - Colab
import tensorflow as tf
from tensorflow.keras.applications import ResNet50, EfficientNetB0
from tensorflow.keras.layers import Dense, GlobalAveragePooling2D, Concatenate
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import Adam
from sklearn.preprocessing import LabelEncoder
Epoch 1/15
41/41 ━━━━━━━━━━━━━━━━━━━━ 88s 1s/step - accuracy: 0.8703 - loss: 0.3547 - val_accuracy: 0.9969 - val_loss: 0.0095
Epoch 2/15
41/41 ━━━━━━━━━━━━━━━━━━━━ 28s 285ms/step - accuracy: 0.9995 - loss: 0.0039 - val_accuracy: 0.9985 - val_loss: 0.0063
Epoch 3/15
41/41 ━━━━━━━━━━━━━━━━━━━━ 21s 288ms/step - accuracy: 1.0000 - loss: 0.0012 - val_accuracy: 0.9985 - val_loss: 0.0062
Epoch 4/15
41/41 ━━━━━━━━━━━━━━━━━━━━ 20s 273ms/step - accuracy: 1.0000 - loss: 5.9944e-04 - val_accuracy: 0.9985 - val_loss: 0.0044
Epoch 5/15
41/41 ━━━━━━━━━━━━━━━━━━━━ 21s 284ms/step - accuracy: 1.0000 - loss: 4.2384e-04 - val_accuracy: 0.9985 - val_loss: 0.0038
Epoch 6/15
41/41 ━━━━━━━━━━━━━━━━━━━━ 20s 276ms/step - accuracy: 1.0000 - loss: 3.1228e-04 - val_accuracy: 0.9985 - val_loss: 0.0035
Epoch 7/15
41/41 ━━━━━━━━━━━━━━━━━━━━ 21s 287ms/step - accuracy: 1.0000 - loss: 2.5735e-04 - val_accuracy: 0.9985 - val_loss: 0.0034
Epoch 8/15
41/41 ━━━━━━━━━━━━━━━━━━━━ 12s 287ms/step - accuracy: 1.0000 - loss: 2.2545e-04 - val_accuracy: 0.9985 - val_loss: 0.0032
Epoch 9/15
41/41 ━━━━━━━━━━━━━━━━━━━━ 20s 272ms/step - accuracy: 1.0000 - loss: 2.1402e-04 - val_accuracy: 0.9985 - val_loss: 0.0031
Epoch 10/15
41/41 ━━━━━━━━━━━━━━━━━━━━ 21s 286ms/step - accuracy: 1.0000 - loss: 1.7322e-04 - val_accuracy: 0.9985 - val_loss: 0.0030
https://colab.research.google.com/drive/14_oaNkbOYMNPK7Q0zpR7n715elYBTwUx#scrollTo=MjeNQmYiVqre&printMode=true 12/16
2/12/25, 9:42 PM DL_PROJECT-2.ipynb - Colab
Epoch 11/15
41/41 ━━━━━━━━━━━━━━━━━━━━ 20s 276ms/step - accuracy: 1.0000 - loss: 1.6765e-04 - val_accuracy: 0.9985 - val_loss: 0.0028
Epoch 12/15
41/41 ━━━━━━━━━━━━━━━━━━━━ 20s 275ms/step - accuracy: 1.0000 - loss: 8.6853e-05 - val_accuracy: 0.9985 - val_loss: 0.0028
Epoch 13/15
41/41 ━━━━━━━━━━━━━━━━━━━━ 12s 287ms/step - accuracy: 1.0000 - loss: 9.8932e-05 - val_accuracy: 0.9985 - val_loss: 0.0026
Epoch 14/15
41/41 ━━━━━━━━━━━━━━━━━━━━ 20s 272ms/step - accuracy: 1.0000 - loss: 9.2357e-05 - val_accuracy: 0.9985 - val_loss: 0.0026
Epoch 15/15
41/41 ━━━━━━━━━━━━━━━━━━━━ 20s 272ms/step - accuracy: 1.0000 - loss: 8.0390e-05 - val_accuracy: 0.9985 - val_loss: 0.0024
WARNING:absl:You are saving your model as an HDF5 file via `model.save()` or `keras.saving.save_model(model)`. This file format is c
Model saved!
keyboard_arrow_down VIT
# Re-mount Google Drive for the ViT section (each notebook run needs the mount).
from google.colab import drive
drive.mount('/content/drive')
Mounted at /content/drive
import os
import cv2
import numpy as np
def load_images_from_folder(folder):
    """Load every image found in *folder*'s class sub-directories.

    Each immediate sub-directory of *folder* is treated as one class; its
    name becomes the label for every image file inside it.

    Args:
        folder: Path to a directory laid out as ``folder/<class>/<image>``.

    Returns:
        Tuple ``(images, labels)``: ``images`` is an ndarray of 224x224
        images as read by cv2 (BGR channel order), ``labels`` an ndarray
        of the corresponding class-folder names.
    """
    images = []
    labels = []
    for class_folder in os.listdir(folder):
        class_path = os.path.join(folder, class_folder)
        if not os.path.isdir(class_path):
            continue  # skip stray files sitting next to the class folders
        for filename in os.listdir(class_path):
            img_path = os.path.join(class_path, filename)
            image = cv2.imread(img_path)
            if image is None:
                # cv2.imread returns None for unreadable/non-image files
                # (e.g. .DS_Store, corrupt downloads); the original code
                # would crash inside cv2.resize on such entries.
                continue
            # Resize image to the ViT input size.
            image = cv2.resize(image, (224, 224))
            images.append(image)
            labels.append(class_folder)  # Assign label based on folder name
    return np.array(images), np.array(labels)
https://colab.research.google.com/drive/14_oaNkbOYMNPK7Q0zpR7n715elYBTwUx#scrollTo=MjeNQmYiVqre&printMode=true 13/16
2/12/25, 9:42 PM DL_PROJECT-2.ipynb - Colab
Found existing installation: nvidia curand cu12 10.3.6.82
Uninstalling nvidia-curand-cu12-10.3.6.82:
Successfully uninstalled nvidia-curand-cu12-10.3.6.82
Attempting uninstall: nvidia-cufft-cu12
Found existing installation: nvidia-cufft-cu12 11.2.3.61
Uninstalling nvidia-cufft-cu12-11.2.3.61:
Successfully uninstalled nvidia-cufft-cu12-11.2.3.61
Attempting uninstall: nvidia-cuda-runtime-cu12
Found existing installation: nvidia-cuda-runtime-cu12 12.5.82
Uninstalling nvidia-cuda-runtime-cu12-12.5.82:
Successfully uninstalled nvidia-cuda-runtime-cu12-12.5.82
Attempting uninstall: nvidia-cuda-nvrtc-cu12
Found existing installation: nvidia-cuda-nvrtc-cu12 12.5.82
Uninstalling nvidia-cuda-nvrtc-cu12-12.5.82:
Successfully uninstalled nvidia-cuda-nvrtc-cu12-12.5.82
Attempting uninstall: nvidia-cuda-cupti-cu12
Found existing installation: nvidia-cuda-cupti-cu12 12.5.82
Uninstalling nvidia-cuda-cupti-cu12-12.5.82:
Successfully uninstalled nvidia-cuda-cupti-cu12-12.5.82
Attempting uninstall: nvidia-cublas-cu12
Found existing installation: nvidia-cublas-cu12 12.5.3.2
Uninstalling nvidia-cublas-cu12-12.5.3.2:
Successfully uninstalled nvidia-cublas-cu12-12.5.3.2
Attempting uninstall: nvidia-cusparse-cu12
Found existing installation: nvidia-cusparse-cu12 12.5.1.3
Uninstalling nvidia-cusparse-cu12-12.5.1.3:
Successfully uninstalled nvidia-cusparse-cu12-12.5.1.3
Attempting uninstall: nvidia-cudnn-cu12
Found existing installation: nvidia-cudnn-cu12 9.3.0.75
Uninstalling nvidia-cudnn-cu12-9.3.0.75:
Successfully uninstalled nvidia-cudnn-cu12-9.3.0.75
Attempting uninstall: nvidia-cusolver-cu12
Found existing installation: nvidia-cusolver-cu12 11.6.3.83
Uninstalling nvidia-cusolver-cu12-11.6.3.83:
Successfully uninstalled nvidia-cusolver-cu12-11.6.3.83
Successfully installed nvidia-cublas-cu12-12.4.5.8 nvidia-cuda-cupti-cu12-12.4.127 nvidia-cuda-nvrtc-cu12-12.4.127 nvidia-cuda-ru
import torch
from torch.utils.data import DataLoader, Dataset
from transformers import ViTForImageClassification, ViTFeatureExtractor, AdamW
from sklearn.metrics import accuracy_score
def __len__(self):
return len(self.images)
# Preprocess Data
def preprocess_data(images, labels):
    """Convert raw images and string labels into model-ready tensors.

    Runs the global ``feature_extractor`` over the images to obtain ViT
    pixel tensors, and maps each distinct label string to an integer id
    (ids follow np.unique's sorted order).

    Returns:
        Tuple ``(pixel_values, encoded_labels, label_mapping)``.
    """
    pixel_values = feature_extractor(images=list(images), return_tensors="pt")["pixel_values"]
    label_mapping = {name: idx for idx, name in enumerate(np.unique(labels))}
    encoded_labels = np.array([label_mapping[name] for name in labels])
    return pixel_values, encoded_labels, label_mapping
https://colab.research.google.com/drive/14_oaNkbOYMNPK7Q0zpR7n715elYBTwUx#scrollTo=MjeNQmYiVqre&printMode=true 14/16
2/12/25, 9:42 PM DL_PROJECT-2.ipynb - Colab
# Set Device — prefer GPU when Colab provides one.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)

# Training Loop
epochs = 5
for epoch in range(epochs):
    model.train()
    train_loss = 0
    correct = 0
    total = 0
    for batch in train_loader:
        optimizer.zero_grad()
        pixel_values = batch["pixel_values"].to(device)
        labels = batch["labels"].to(device)
        outputs = model(pixel_values)
        loss = criterion(outputs.logits, labels)
        loss.backward()
        optimizer.step()
        train_loss += loss.item()
        # Running training accuracy for this epoch.
        _, preds = torch.max(outputs.logits, dim=1)
        correct += (preds == labels).sum().item()
        total += labels.size(0)

# Evaluation Loop
# NOTE(review): indentation was lost in extraction; this loop is placed after
# training (a single final evaluation), matching the single "Test Accuracy"
# line in the captured output — confirm against the original notebook.
model.eval()
correct = 0
total = 0
with torch.no_grad():
    for batch in test_loader:
        pixel_values = batch["pixel_values"].to(device)
        labels = batch["labels"].to(device)
        outputs = model(pixel_values)
        _, preds = torch.max(outputs.logits, dim=1)
        correct += (preds == labels).sum().item()
        total += labels.size(0)

# Save Model (Hugging Face format: config + weights + preprocessor config).
model.save_pretrained("./vit_model")
feature_extractor.save_pretrained("./vit_model")
https://colab.research.google.com/drive/14_oaNkbOYMNPK7Q0zpR7n715elYBTwUx#scrollTo=MjeNQmYiVqre&printMode=true 15/16
2/12/25, 9:42 PM DL_PROJECT-2.ipynb - Colab
/usr/local/lib/python3.11/dist-packages/huggingface_hub/utils/_auth.py:94: UserWarning:
The secret `HF_TOKEN` does not exist in your Colab secrets.
To authenticate with the Hugging Face Hub, create a token in your settings tab (https://huggingface.co/settings/tokens), set it as s
You will be able to reuse this secret in all of your notebooks.
Please note that authentication is recommended but still optional to access public models or datasets.
warnings.warn(
from sklearn.metrics import classification_report, confusion_matrix, roc_curve, auc
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
preprocessor_config.json: 100% 160/160 [00:00<00:00, 11.6kB/s]
/usr/local/lib/python3.11/dist-packages/transformers/models/vit/feature_extraction_vit.py:28: FutureWarning: The class ViTFeatureExt
warnings.warn(
config.json: 100% 502/502 [00:00<00:00, 30.9kB/s]
model.safetensors: 100% 346M/346M [00:01<00:00, 239MB/s]
Some weights of ViTForImageClassification were not initialized from the model checkpoint at google/vit-base-patch16-224-in21k and ar
You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.
/usr/local/lib/python3.11/dist-packages/transformers/optimization.py:591: FutureWarning: This implementation of AdamW is deprecated
warnings.warn(
<ipython-input-4-8708d9259d53>:17: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.clone().detac
"pixel_values": torch.tensor(self.images[idx], dtype=torch.float32),
Epoch 1/5, Loss: 40.7884, Accuracy: 97.10%
Epoch 2/5, Loss: 6.0971, Accuracy: 100.00%
Epoch 3/5, Loss: 3.5657, Accuracy: 100.00%
Epoch 4/5, Loss: 2.3541, Accuracy: 100.00%
Epoch 5/5, Loss: 1.6590, Accuracy: 100.00%
Test Accuracy: 100.00%
['./vit_model/preprocessor_config.json']

# Get Predictions and True Labels
true_labels = []
pred_labels = []
pred_probs = []
model.eval()
with torch.no_grad():
    for batch in test_loader:
        pixel_values = batch["pixel_values"].to(device)
        labels = batch["labels"].to(device)
        outputs = model(pixel_values)
        probs = torch.nn.functional.softmax(outputs.logits, dim=1)
        _, preds = torch.max(probs, dim=1)
        true_labels.extend(labels.cpu().numpy())
        pred_labels.extend(preds.cpu().numpy())
        pred_probs.extend(probs.cpu().numpy())
# Classification Report
# Per-class precision/recall/F1 using the human-readable class names;
# label_mapping maps name -> integer id, so its keys line up with the ids
# (assumes dict insertion order matches the id order — TODO confirm).
print("Classification Report:")
print(classification_report(true_labels, pred_labels, target_names=label_mapping.keys()))
# Confusion Matrix
cm = confusion_matrix(true_labels, pred_labels)
plt.figure(figsize=(8,6))
sns.heatmap(cm, annot=True, fmt='d', cmap='Blues', xticklabels=label_mapping.keys(), yticklabels=label_mapping.keys())
plt.xlabel("Predicted")
plt.ylabel("True")
plt.title("Confusion Matrix")
plt.show()
https://colab.research.google.com/drive/14_oaNkbOYMNPK7Q0zpR7n715elYBTwUx#scrollTo=MjeNQmYiVqre&printMode=true 16/16