Skin Disease Detection Using Transformers
Skin Disease Detection Using Transformers
# Libraries installed
# It is defined by the kaggle/python Docker image:
# https://github.com/kaggle/docker-python
# For example, here's several helpful packages to load
# Walk the Kaggle input directory and print every file path so the
# attached datasets can be inspected from the notebook output.
# (Indentation of the nested loop was lost in extraction; restored here.)
import os

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
import os
import random
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from tqdm import tqdm
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.utils.data import Dataset, DataLoader
from torch.optim.lr_scheduler import CosineAnnealingLR
# List of directories — list order fixes the numeric class label (index i).
# NOTE(review): the *_dir variables are defined in an earlier, unexported
# notebook cell — confirm they exist before running.
directories = [Eczema_dir, Warts_Molluscum_dir, Atopic_Dermatitis_dir,
               Melanocytic_Nevi_dir, Psoriasis_dir,
               Seborrheic_Keratoses_dir,
               Tinea_Ringworm_dir]

# Create dataframe with file paths and labels.
# (Loop indentation was lost in extraction; restored here.)
filepaths, labels = [], []
for i, directory in enumerate(directories):
    for filename in os.listdir(directory):
        filepaths.append(os.path.join(directory, filename))
        labels.append(i)  # use numerical labels: index of the class directory
# Define transforms.
# Normalisation constants shared by both pipelines (ImageNet statistics,
# as used by pretrained backbones).
_NORM_MEAN = [0.485, 0.456, 0.406]
_NORM_STD = [0.229, 0.224, 0.225]

# Training pipeline: geometric and photometric augmentation, then tensor
# conversion, normalisation and random erasing.
train_transforms = transforms.Compose([
    transforms.Resize((IMG_SIZE, IMG_SIZE)),
    transforms.RandomHorizontalFlip(p=0.5),
    transforms.RandomVerticalFlip(p=0.2),
    transforms.RandomRotation(30),
    transforms.ColorJitter(brightness=0.2, contrast=0.2,
                           saturation=0.2, hue=0.1),
    transforms.RandomAffine(degrees=0, translate=(0.1, 0.1),
                            scale=(0.9, 1.1)),
    transforms.ToTensor(),
    transforms.Normalize(mean=_NORM_MEAN, std=_NORM_STD),
    transforms.RandomErasing(p=0.2),
])

# Validation/test pipeline: deterministic resize + normalise only.
val_test_transforms = transforms.Compose([
    transforms.Resize((IMG_SIZE, IMG_SIZE)),
    transforms.ToTensor(),
    transforms.Normalize(mean=_NORM_MEAN, std=_NORM_STD),
])
# Create custom dataset class.
# (Class-body indentation was lost in extraction; restored here.)
class SkinDiseaseDataset(Dataset):
    """Dataset over a dataframe of image file paths and class labels.

    NOTE(review): ``__getitem__`` appears to have been lost in extraction;
    a ``torch.utils.data.Dataset`` subclass must implement it for the
    DataLoaders below to work — recover it from the original notebook.
    """

    def __init__(self, dataframe, transform=None):
        # dataframe: presumably holds filepath/label columns — TODO confirm
        # against the construction cell that builds `filepaths`/`labels`.
        self.dataframe = dataframe
        self.transform = transform

    def __len__(self):
        # One sample per dataframe row.
        return len(self.dataframe)
# Wrap each dataframe split in a SkinDiseaseDataset with its pipeline:
# augmentation for training, deterministic transforms for val/test.
train_dataset = SkinDiseaseDataset(train_df, transform=train_transforms)
val_dataset = SkinDiseaseDataset(val_df, transform=val_test_transforms)
test_dataset = SkinDiseaseDataset(test_df, transform=val_test_transforms)

num_workers = 4


def _make_loader(dataset, shuffle, persistent):
    """Build a DataLoader with the shared worker/pin-memory settings."""
    return DataLoader(dataset, batch_size=BATCH_SIZE, shuffle=shuffle,
                      num_workers=num_workers, pin_memory=True,
                      persistent_workers=persistent)


# Only training data is shuffled; the test loader does not keep
# persistent workers (it is iterated once at evaluation time).
train_loader = _make_loader(train_dataset, shuffle=True, persistent=True)
val_loader = _make_loader(val_dataset, shuffle=False, persistent=True)
test_loader = _make_loader(test_dataset, shuffle=False, persistent=False)

# Set device: prefer the GPU when available.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(f"Using device: {device}")
# Training function
# NOTE(review): this block is an extraction fragment — the per-epoch loop
# and the entire training phase are missing (``epoch``, ``epoch_train_loss``,
# ``epoch_train_acc``, ``epoch_val_loss`` and ``epoch_val_acc`` are read but
# never assigned here), indentation was lost, and two f-strings are split
# across lines. Code is kept byte-identical; comments describe visible
# intent only. Recover the full function from the original notebook.
def train_model(model, train_loader, val_loader, criterion, optimizer,
scheduler, num_epochs):
# Per-epoch history accumulators returned to the caller.
train_losses, val_losses = [], []
train_accs, val_accs = [], []
best_val_acc = 0.0
# Checkpoint path for the best model (DeiT variant of this notebook).
best_model_path = 'best_deit_skin_model.pth'
# Validation phase
model.eval()
running_loss, correct, total = 0.0, 0, 0
# No gradients needed during validation.
with torch.no_grad():
val_bar = tqdm(val_loader, desc=f'Epoch
{epoch+1}/{num_epochs} [Val]')
for inputs, labels in val_bar:
inputs, labels = inputs.to(device), labels.to(device)
outputs = model(inputs)
loss = criterion(outputs, labels)
# Weight batch loss by batch size for a dataset-level average.
running_loss += loss.item() * inputs.size(0)
# Predicted class = argmax over logits.
_, predicted = outputs.max(1)
total += labels.size(0)
correct += predicted.eq(labels).sum().item()
val_bar.set_postfix(loss=loss.item(),
acc=100.*correct/total)
# Advance the LR schedule once per epoch.
scheduler.step()
train_losses.append(epoch_train_loss)
val_losses.append(epoch_val_loss)
train_accs.append(epoch_train_acc)
val_accs.append(epoch_val_acc)
print(f'\nEpoch {epoch+1}/{num_epochs}:')
print(f'Train Loss: {epoch_train_loss:.4f}, Train Acc:
{epoch_train_acc:.2f}%')
print(f'Val Loss: {epoch_val_loss:.4f}, Val Acc:
{epoch_val_acc:.2f}%')
print(f'Learning rate: {scheduler.get_last_lr()[0]:.6f}')
# Reload the best checkpoint before returning the trained model.
model.load_state_dict(torch.load(best_model_path))
return model, train_losses, val_losses, train_accs, val_accs
# NOTE(review): extraction fragment — ``train_accs``/``val_accs``,
# ``all_labels``/``all_preds``, ``class_labels``, ``images``, ``labels``,
# ``preds``, ``probs`` and ``num_images`` are defined in unexported cells;
# ``confusion_matrix`` presumably comes from sklearn — confirm the import.
# Loop indentation and one f-string were broken in extraction; code is
# kept byte-identical.
# Accuracy curves (right-hand panel of a 1x2 figure).
plt.subplot(1, 2, 2)
plt.plot(train_accs, label='Train Acc')
plt.plot(val_accs, label='Val Acc')
plt.title('Accuracy')
plt.legend()
plt.tight_layout()
plt.show()
# Confusion matrix over the test-set predictions.
cm = confusion_matrix(all_labels, all_preds)
plt.figure(figsize=(14, 10))
sns.heatmap(cm, annot=True, fmt='d', cmap='Blues',
xticklabels=class_labels, yticklabels=class_labels)
plt.xlabel('Predicted')
plt.ylabel('True')
plt.title('Confusion Matrix')
plt.tight_layout()
plt.show()
# Undo ImageNet normalisation so sample images display with true colours.
images_cpu = images.cpu()
mean = torch.tensor([0.485, 0.456, 0.406]).view(3, 1, 1)
std = torch.tensor([0.229, 0.224, 0.225]).view(3, 1, 1)
images_display = images_cpu * std + mean
images_display = torch.clamp(images_display, 0, 1)
# 5x2 grid of sample predictions; title is green if correct, red if wrong.
plt.figure(figsize=(20, 20))
for i in range(num_images):
plt.subplot(5, 2, i+1)
plt.imshow(images_display[i].permute(1, 2, 0))
true_label = class_labels[labels[i]]
pred_label = class_labels[preds[i]]
confidence = probs[i, preds[i]] * 100
title_color = "green" if preds[i] == labels[i] else "red"
plt.title(f"True: {true_label}\nPred: {pred_label}\nConf:
{confidence:.1f}%",
color=title_color, fontsize=12)
plt.axis('off')
plt.tight_layout()
plt.show()
{"model_id":"2167c72085894f90af8700ee09574af5","version_major":2,"vers
ion_minor":0}
Epoch 1/25:
Train Loss: 1.1584, Train Acc: 62.96%
Val Loss: 1.0298, Val Acc: 71.85%
Learning rate: 0.000010
New best model! Val Acc: 71.85%
Validation loss decreased (inf --> 1.029822). Saving model...
Epoch 2/25:
Train Loss: 0.9978, Train Acc: 72.54%
Val Loss: 0.9596, Val Acc: 74.63%
Learning rate: 0.000009
New best model! Val Acc: 74.63%
Validation loss decreased (1.029822 --> 0.959599). Saving model...
Epoch 3/25:
Train Loss: 0.9285, Train Acc: 76.45%
Val Loss: 0.9297, Val Acc: 77.29%
Learning rate: 0.000008
New best model! Val Acc: 77.29%
Validation loss decreased (0.959599 --> 0.929710). Saving model...
Epoch 4/25:
Train Loss: 0.8770, Train Acc: 79.21%
Val Loss: 0.8981, Val Acc: 78.09%
Learning rate: 0.000007
New best model! Val Acc: 78.09%
Validation loss decreased (0.929710 --> 0.898150). Saving model...
Epoch 5/25:
Train Loss: 0.8433, Train Acc: 81.21%
Val Loss: 0.8846, Val Acc: 79.73%
Learning rate: 0.000005
New best model! Val Acc: 79.73%
Validation loss decreased (0.898150 --> 0.884619). Saving model...
Epoch 6/25:
Train Loss: 0.8084, Train Acc: 83.07%
Val Loss: 0.8753, Val Acc: 80.03%
Learning rate: 0.000004
New best model! Val Acc: 80.03%
Validation loss decreased (0.884619 --> 0.875315). Saving model...
Epoch 7/25:
Train Loss: 0.7830, Train Acc: 84.47%
Val Loss: 0.8654, Val Acc: 80.24%
Learning rate: 0.000003
New best model! Val Acc: 80.24%
Validation loss decreased (0.875315 --> 0.865395). Saving model...
Epoch 8/25 [Train]: 100%|██████████| 421/421 [02:39<00:00, 2.63it/s,
acc=85.5, loss=0.988]
Epoch 8/25 [Val]: 100%|██████████| 75/75 [00:10<00:00, 7.12it/s,
acc=80.9, loss=1.43]
Epoch 8/25:
Train Loss: 0.7629, Train Acc: 85.54%
Val Loss: 0.8615, Val Acc: 80.91%
Learning rate: 0.000002
New best model! Val Acc: 80.91%
Validation loss decreased (0.865395 --> 0.861548). Saving model...
Epoch 9/25:
Train Loss: 0.7460, Train Acc: 86.48%
Val Loss: 0.8501, Val Acc: 81.42%
Learning rate: 0.000001
New best model! Val Acc: 81.42%
Validation loss decreased (0.861548 --> 0.850144). Saving model...
Epoch 10/25:
Train Loss: 0.7394, Train Acc: 86.77%
Val Loss: 0.8483, Val Acc: 81.75%
Learning rate: 0.000001
New best model! Val Acc: 81.75%
Validation loss decreased (0.850144 --> 0.848337). Saving model...
Epoch 11/25:
Train Loss: 0.7318, Train Acc: 87.10%
Val Loss: 0.8461, Val Acc: 81.80%
Learning rate: 0.000001
New best model! Val Acc: 81.80%
Validation loss decreased (0.848337 --> 0.846072). Saving model...
Epoch 12/25 [Train]: 100%|██████████| 421/421 [02:40<00:00, 2.63it/s,
acc=87.4, loss=0.528]
Epoch 12/25 [Val]: 100%|██████████| 75/75 [00:11<00:00, 6.53it/s,
acc=81.5, loss=1.39]
Epoch 12/25:
Train Loss: 0.7276, Train Acc: 87.38%
Val Loss: 0.8438, Val Acc: 81.50%
Learning rate: 0.000002
Validation loss decreased (0.846072 --> 0.843824). Saving model...
Epoch 13/25:
Train Loss: 0.7273, Train Acc: 86.92%
Val Loss: 0.8407, Val Acc: 82.05%
Learning rate: 0.000003
New best model! Val Acc: 82.05%
Validation loss decreased (0.843824 --> 0.840723). Saving model...
Epoch 14/25:
Train Loss: 0.7229, Train Acc: 87.30%
Val Loss: 0.8413, Val Acc: 82.22%
Learning rate: 0.000004
New best model! Val Acc: 82.22%
EarlyStopping counter: 1 out of 8
Epoch 15/25:
Train Loss: 0.7169, Train Acc: 87.86%
Val Loss: 0.8452, Val Acc: 81.67%
Learning rate: 0.000006
EarlyStopping counter: 2 out of 8
Epoch 16/25 [Train]: 100%|██████████| 421/421 [02:40<00:00, 2.63it/s,
acc=88.4, loss=0.611]
Epoch 16/25 [Val]: 100%|██████████| 75/75 [00:10<00:00, 7.19it/s,
acc=82.4, loss=1.6]
Epoch 16/25:
Train Loss: 0.7036, Train Acc: 88.38%
Val Loss: 0.8389, Val Acc: 82.43%
Learning rate: 0.000007
New best model! Val Acc: 82.43%
Validation loss decreased (0.840723 --> 0.838917). Saving model...
Epoch 17/25:
Train Loss: 0.6980, Train Acc: 88.92%
Val Loss: 0.8315, Val Acc: 82.81%
Learning rate: 0.000008
New best model! Val Acc: 82.81%
Validation loss decreased (0.838917 --> 0.831526). Saving model...
Epoch 18/25:
Train Loss: 0.6882, Train Acc: 89.39%
Val Loss: 0.8377, Val Acc: 82.68%
Learning rate: 0.000009
EarlyStopping counter: 1 out of 8
Epoch 19/25:
Train Loss: 0.6723, Train Acc: 89.98%
Val Loss: 0.8220, Val Acc: 83.14%
Learning rate: 0.000010
New best model! Val Acc: 83.14%
Validation loss decreased (0.831526 --> 0.822037). Saving model...
Epoch 20/25 [Train]: 100%|██████████| 421/421 [02:39<00:00, 2.63it/s,
acc=90.7, loss=0.489]
Epoch 20/25 [Val]: 100%|██████████| 75/75 [00:10<00:00, 6.99it/s,
acc=83.7, loss=1.24]
Epoch 20/25:
Train Loss: 0.6603, Train Acc: 90.65%
Val Loss: 0.8140, Val Acc: 83.73%
Learning rate: 0.000010
New best model! Val Acc: 83.73%
Validation loss decreased (0.822037 --> 0.814007). Saving model...
Epoch 21/25:
Train Loss: 0.6399, Train Acc: 91.83%
Val Loss: 0.8170, Val Acc: 83.99%
Learning rate: 0.000010
New best model! Val Acc: 83.99%
EarlyStopping counter: 1 out of 8
Epoch 22/25:
Train Loss: 0.6229, Train Acc: 92.86%
Val Loss: 0.8256, Val Acc: 83.94%
Learning rate: 0.000009
EarlyStopping counter: 2 out of 8
Epoch 23/25:
Train Loss: 0.6058, Train Acc: 93.53%
Val Loss: 0.8290, Val Acc: 83.82%
Learning rate: 0.000008
EarlyStopping counter: 3 out of 8
Epoch 24/25 [Train]: 100%|██████████| 421/421 [02:40<00:00, 2.62it/s,
acc=94, loss=0.453]
Epoch 24/25 [Val]: 100%|██████████| 75/75 [00:11<00:00, 6.37it/s,
acc=84.1, loss=1.74]
Epoch 24/25:
Train Loss: 0.5942, Train Acc: 93.98%
Val Loss: 0.8192, Val Acc: 84.07%
Learning rate: 0.000007
New best model! Val Acc: 84.07%
EarlyStopping counter: 4 out of 8
Epoch 25/25:
Train Loss: 0.5767, Train Acc: 95.28%
Val Loss: 0.8198, Val Acc: 84.75%
Learning rate: 0.000006
New best model! Val Acc: 84.75%
EarlyStopping counter: 5 out of 8
Evaluating: 100%|██████████| 88/88 [00:14<00:00, 5.96it/s]
Classification Report:
precision
recall f1-score support
Eczema 0.75
0.64 0.69 252
Warts Molluscum and other Viral Infections 0.68
0.83 0.75 315
Atopic Dermatitis 0.64
0.66 0.65 189
Melanocytic Nevi 0.99
1.00 1.00 1196
Psoriasis pictures Lichen Planus and related diseases 0.69
0.70 0.70 308
Seborrheic Keratoses and other Benign Tumors 0.90
0.71 0.79 277
Tinea Ringworm Candidiasis and other Fungal Infections 0.76
0.78 0.77 255
accuracy
0.84 2792
macro avg 0.77
0.76 0.76 2792
weighted avg 0.85
0.84 0.84 2792
Eczema: 64.29%
Warts Molluscum and other Viral Infections: 83.17%
Atopic Dermatitis: 66.14%
Melanocytic Nevi: 99.75%
Psoriasis pictures Lichen Planus and related diseases: 69.81%
Seborrheic Keratoses and other Benign Tumors: 71.12%
Tinea Ringworm Candidiasis and other Fungal Infections: 78.43%
Model saved as 'deit_skin_disease_classifier.pth'
import os
import random
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from tqdm import tqdm
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.utils.data import Dataset, DataLoader
from torch.optim.lr_scheduler import CosineAnnealingLR
# List of directories
# NOTE(review): duplicate of an earlier cell (ViT run). The *_dir variables
# are defined in an unexported cell; list order fixes the numeric labels.
directories = [Eczema_dir, Warts_Molluscum_dir, Atopic_Dermatitis_dir,
Melanocytic_Nevi_dir, Psoriasis_dir,
Seborrheic_Keratoses_dir,
Tinea_Ringworm_dir]
# Define transforms
# NOTE(review): ``transforms`` (presumably torchvision) and ``IMG_SIZE``
# are imported/defined in unexported cells — confirm before running.
# Training pipeline: flips, rotation, colour jitter and affine jitter,
# then tensor conversion, ImageNet normalisation and random erasing.
train_transforms = transforms.Compose([
transforms.Resize((IMG_SIZE, IMG_SIZE)),
transforms.RandomHorizontalFlip(p=0.5),
transforms.RandomVerticalFlip(p=0.2),
transforms.RandomRotation(30),
transforms.ColorJitter(brightness=0.2, contrast=0.2,
saturation=0.2, hue=0.1),
transforms.RandomAffine(degrees=0, translate=(0.1, 0.1),
scale=(0.9, 1.1)),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229,
0.224, 0.225]),
transforms.RandomErasing(p=0.2)
])
# Validation/test pipeline: deterministic resize + normalise only.
val_test_transforms = transforms.Compose([
transforms.Resize((IMG_SIZE, IMG_SIZE)),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229,
0.224, 0.225])
])
# NOTE(review): orphaned method fragment — this ``__len__`` belongs to the
# SkinDiseaseDataset class whose header was lost in extraction; it is not
# valid at top level as written.
def __len__(self):
return len(self.dataframe)
# Create datasets
# NOTE(review): train_df/val_df/test_df and BATCH_SIZE come from
# unexported cells.
train_dataset = SkinDiseaseDataset(train_df,
transform=train_transforms)
val_dataset = SkinDiseaseDataset(val_df,
transform=val_test_transforms)
test_dataset = SkinDiseaseDataset(test_df,
transform=val_test_transforms)
# Create dataloaders
num_workers = 4
# Only training data is shuffled; train/val keep persistent workers
# because they are re-iterated every epoch.
train_loader = DataLoader(train_dataset, batch_size=BATCH_SIZE,
shuffle=True,
num_workers=num_workers, pin_memory=True,
persistent_workers=True)
val_loader = DataLoader(val_dataset, batch_size=BATCH_SIZE,
shuffle=False,
num_workers=num_workers, pin_memory=True,
persistent_workers=True)
test_loader = DataLoader(test_dataset, batch_size=BATCH_SIZE,
shuffle=False,
num_workers=num_workers, pin_memory=True)
# Set device
# Prefer the GPU when available.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(f"Using device: {device}")
# Define optimizer.
# Note: optionally you can adjust differential learning rates for
# transformer layers if needed.
# (The '#' on the comment continuation line was lost in extraction,
# which made the original a syntax error; restored here.)
# NOTE(review): ``model`` is created in an unexported cell.
optimizer = optim.AdamW(model.parameters(), lr=1e-4,
                        weight_decay=0.01)
# Training function
# NOTE(review): extraction fragment (ViT variant) — the per-epoch loop and
# the entire training phase are missing (``epoch``, ``epoch_train_loss``,
# ``epoch_train_acc``, ``epoch_val_loss`` and ``epoch_val_acc`` are read
# but never assigned here), indentation was lost, and two f-strings are
# split across lines. Code kept byte-identical; comments describe visible
# intent only. Recover the full function from the original notebook.
def train_model(model, train_loader, val_loader, criterion, optimizer,
scheduler, num_epochs):
# Per-epoch history accumulators returned to the caller.
train_losses, val_losses = [], []
train_accs, val_accs = [], []
best_val_acc = 0.0
# Checkpoint path for the best model (ViT variant of this notebook).
best_model_path = 'best_vit_skin_model.pth'
# Validation phase
model.eval()
running_loss, correct, total = 0.0, 0, 0
# No gradients needed during validation.
with torch.no_grad():
val_bar = tqdm(val_loader, desc=f'Epoch
{epoch+1}/{num_epochs} [Val]')
for inputs, labels in val_bar:
inputs, labels = inputs.to(device), labels.to(device)
outputs = model(inputs)
loss = criterion(outputs, labels)
# Weight batch loss by batch size for a dataset-level average.
running_loss += loss.item() * inputs.size(0)
# Predicted class = argmax over logits.
_, predicted = outputs.max(1)
total += labels.size(0)
correct += predicted.eq(labels).sum().item()
val_bar.set_postfix(loss=loss.item(),
acc=100.*correct/total)
# Advance the LR schedule once per epoch.
scheduler.step()
train_losses.append(epoch_train_loss)
val_losses.append(epoch_val_loss)
train_accs.append(epoch_train_acc)
val_accs.append(epoch_val_acc)
print(f'\nEpoch {epoch+1}/{num_epochs}:')
print(f'Train Loss: {epoch_train_loss:.4f}, Train Acc:
{epoch_train_acc:.2f}%')
print(f'Val Loss: {epoch_val_loss:.4f}, Val Acc:
{epoch_val_acc:.2f}%')
print(f'Learning rate: {scheduler.get_last_lr()[0]:.6f}')
# Reload the best checkpoint before returning the trained model.
model.load_state_dict(torch.load(best_model_path))
return model, train_losses, val_losses, train_accs, val_accs
# NOTE(review): extraction fragment (ViT variant) — ``train_accs``/
# ``val_accs``, ``all_labels``/``all_preds``, ``class_labels``, ``images``,
# ``labels``, ``preds``, ``probs`` and ``num_images`` are defined in
# unexported cells; ``confusion_matrix`` presumably comes from sklearn —
# confirm the import. Loop indentation and one f-string were broken in
# extraction; code is kept byte-identical.
# Accuracy curves (right-hand panel of a 1x2 figure).
plt.subplot(1, 2, 2)
plt.plot(train_accs, label='Train Acc')
plt.plot(val_accs, label='Val Acc')
plt.title('Accuracy')
plt.legend()
plt.tight_layout()
plt.show()
# Confusion matrix over the test-set predictions.
cm = confusion_matrix(all_labels, all_preds)
plt.figure(figsize=(14, 10))
sns.heatmap(cm, annot=True, fmt='d', cmap='Blues',
xticklabels=class_labels, yticklabels=class_labels)
plt.xlabel('Predicted')
plt.ylabel('True')
plt.title('Confusion Matrix')
plt.tight_layout()
plt.show()
# Undo ImageNet normalisation so sample images display with true colours.
images_cpu = images.cpu()
mean = torch.tensor([0.485, 0.456, 0.406]).view(3, 1, 1)
std = torch.tensor([0.229, 0.224, 0.225]).view(3, 1, 1)
images_display = images_cpu * std + mean
images_display = torch.clamp(images_display, 0, 1)
# 5x2 grid of sample predictions; title is green if correct, red if wrong.
plt.figure(figsize=(20, 20))
for i in range(num_images):
plt.subplot(5, 2, i+1)
plt.imshow(images_display[i].permute(1, 2, 0))
true_label = class_labels[labels[i]]
pred_label = class_labels[preds[i]]
confidence = probs[i, preds[i]] * 100
title_color = "green" if preds[i] == labels[i] else "red"
plt.title(f"True: {true_label}\nPred: {pred_label}\nConf:
{confidence:.1f}%",
color=title_color, fontsize=12)
plt.axis('off')
plt.tight_layout()
plt.show()
{"model_id":"b39a8a16e43f4e4089fd491258aa2fb3","version_major":2,"vers
ion_minor":0}
Epoch 1/25:
Train Loss: 1.1789, Train Acc: 62.90%
Val Loss: 1.1543, Val Acc: 63.51%
Learning rate: 0.000098
New best model! Val Acc: 63.51%
Validation loss decreased (inf --> 1.154347). Saving model...
Epoch 2/25:
Train Loss: 1.0619, Train Acc: 68.71%
Val Loss: 1.0125, Val Acc: 71.85%
Learning rate: 0.000091
New best model! Val Acc: 71.85%
Validation loss decreased (1.154347 --> 1.012527). Saving model...
Epoch 3/25:
Train Loss: 0.9868, Train Acc: 73.02%
Val Loss: 0.9710, Val Acc: 74.55%
Learning rate: 0.000080
New best model! Val Acc: 74.55%
Validation loss decreased (1.012527 --> 0.970966). Saving model...
Epoch 4/25:
Train Loss: 0.9065, Train Acc: 77.42%
Val Loss: 0.9046, Val Acc: 78.38%
Learning rate: 0.000066
New best model! Val Acc: 78.38%
Validation loss decreased (0.970966 --> 0.904570). Saving model...
Epoch 5/25:
Train Loss: 0.8360, Train Acc: 81.00%
Val Loss: 0.8912, Val Acc: 79.48%
Learning rate: 0.000051
New best model! Val Acc: 79.48%
Validation loss decreased (0.904570 --> 0.891229). Saving model...
Epoch 6/25:
Train Loss: 0.7546, Train Acc: 85.39%
Val Loss: 0.8336, Val Acc: 81.63%
Learning rate: 0.000035
New best model! Val Acc: 81.63%
Validation loss decreased (0.891229 --> 0.833577). Saving model...
Epoch 7/25:
Train Loss: 0.6717, Train Acc: 89.63%
Val Loss: 0.8331, Val Acc: 83.57%
Learning rate: 0.000021
New best model! Val Acc: 83.57%
Validation loss decreased (0.833577 --> 0.833127). Saving model...
Epoch 8/25:
Train Loss: 0.6061, Train Acc: 92.81%
Val Loss: 0.8093, Val Acc: 85.12%
Learning rate: 0.000010
New best model! Val Acc: 85.12%
Validation loss decreased (0.833127 --> 0.809258). Saving model...
Epoch 9/25:
Train Loss: 0.5584, Train Acc: 95.23%
Val Loss: 0.8282, Val Acc: 85.04%
Learning rate: 0.000003
EarlyStopping counter: 1 out of 8
Epoch 10/25:
Train Loss: 0.5340, Train Acc: 96.31%
Val Loss: 0.8276, Val Acc: 85.21%
Learning rate: 0.000001
New best model! Val Acc: 85.21%
EarlyStopping counter: 2 out of 8
Epoch 11/25:
Train Loss: 0.5208, Train Acc: 96.88%
Val Loss: 0.8307, Val Acc: 85.76%
Learning rate: 0.000003
New best model! Val Acc: 85.76%
EarlyStopping counter: 3 out of 8
Epoch 12/25:
Train Loss: 0.5234, Train Acc: 96.67%
Val Loss: 0.8347, Val Acc: 85.21%
Learning rate: 0.000010
EarlyStopping counter: 4 out of 8
Epoch 13/25:
Train Loss: 0.5251, Train Acc: 96.59%
Val Loss: 0.8276, Val Acc: 85.63%
Learning rate: 0.000021
EarlyStopping counter: 5 out of 8
Epoch 14/25:
Train Loss: 0.5366, Train Acc: 96.05%
Val Loss: 0.8540, Val Acc: 85.25%
Learning rate: 0.000035
EarlyStopping counter: 6 out of 8
Epoch 15/25:
Train Loss: 0.5663, Train Acc: 94.65%
Val Loss: 0.8607, Val Acc: 83.99%
Learning rate: 0.000051
EarlyStopping counter: 7 out of 8
Epoch 16/25:
Train Loss: 0.5916, Train Acc: 93.33%
Val Loss: 0.8797, Val Acc: 83.23%
Learning rate: 0.000066
EarlyStopping counter: 8 out of 8
Early stopping triggered
Evaluating: 100%|██████████| 88/88 [00:30<00:00, 2.92it/s]
Classification Report:
precision
recall f1-score support
Eczema 0.76
0.72 0.74 252
Warts Molluscum and other Viral Infections 0.77
0.80 0.78 315
Atopic Dermatitis 0.66
0.68 0.67 189
Melanocytic Nevi 0.99
1.00 0.99 1196
Psoriasis pictures Lichen Planus and related diseases 0.71
0.71 0.71 308
Seborrheic Keratoses and other Benign Tumors 0.84
0.79 0.81 277
Tinea Ringworm Candidiasis and other Fungal Infections 0.76
0.76 0.76 255
accuracy
0.86 2792
macro avg 0.78
0.78 0.78 2792
weighted avg 0.85
0.86 0.85 2792
Eczema: 71.83%
Warts Molluscum and other Viral Infections: 79.68%
Atopic Dermatitis: 67.72%
Melanocytic Nevi: 100.00%
Psoriasis pictures Lichen Planus and related diseases: 71.43%
Seborrheic Keratoses and other Benign Tumors: 78.70%
Tinea Ringworm Candidiasis and other Fungal Infections: 76.08%
Model saved as 'vit_skin_disease_classifier.pth'