Notebook - Agave Plant Maturation Model Inference and Testing
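The import cell is not visible in this excerpt; a set of imports consistent with the calls used below would be:

In [ ]: # Imports (reconstructed to match the calls below; the original import cell is not shown)
import os
import torch
import torch.nn as nn
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms, models
from PIL import Image
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.metrics import confusion_matrix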
class AgaveImageDataset(Dataset):  # expects a root_dir/<class_name>/<image> folder layout
    def __init__(self, root_dir, transform=None):
        self.root_dir = root_dir
        self.transform = transform
        self.classes = sorted(os.listdir(root_dir))
        self.class_to_idx = {cls: idx for idx, cls in enumerate(self.classes)}
        self.images = self._load_images()
    def _load_images(self):
        images = []
        for cls_name in self.classes:
            class_dir = os.path.join(self.root_dir, cls_name)
            for img_name in os.listdir(class_dir):
                images.append((os.path.join(class_dir, img_name), self.class_to_idx[cls_name]))
        return images
    def __len__(self):
        return len(self.images)
    def __getitem__(self, idx):
        img_path, label = self.images[idx]
        image = Image.open(img_path).convert('RGB')
        if self.transform:
            image = self.transform(image)
        return image, label
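A quick way to sanity-check the dataset class is to index it directly; the directory below is a placeholder for wherever the class folders actually live:

In [ ]: # Sanity check of the dataset (hypothetical path)
dataset = AgaveImageDataset('data/test', transform=None)
print(len(dataset), dataset.classes)
image, label = dataset[0]  # PIL image and integer class index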
# Evaluation: collect predictions and ground-truth labels over the test set
def evaluate_model(model, test_loader):
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model.to(device)
    model.eval()
    all_preds = []
    all_labels = []
    with torch.no_grad():
        for inputs, labels in test_loader:
            inputs, labels = inputs.to(device), labels.to(device)
            outputs = model(inputs)
            _, predicted = outputs.max(1)
            all_preds.extend(predicted.cpu().numpy())
            all_labels.extend(labels.cpu().numpy())
    return all_preds, all_labels
In [ ]: # Perform evaluation
test_preds, test_labels = evaluate_model(model, test_loader)
# Download videos
video_dir = 'videos'
os.makedirs(video_dir, exist_ok=True)
video_paths = [download_video(url, video_dir) for url in video_urls]
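The download_video helper and the video_urls list are defined elsewhere in the notebook; a minimal sketch of a helper with this signature, assuming the URLs are direct file links, might look like:

In [ ]: # Hypothetical download_video sketch (not the notebook's actual implementation)
from urllib.request import urlretrieve

def download_video(url, out_dir):
    # Save the file into out_dir, reusing the last URL path component as the filename
    path = os.path.join(out_dir, url.split('/')[-1])
    if not os.path.exists(path):
        urlretrieve(url, path)
    return path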
val_test_transform = transforms.Compose([
    transforms.Resize((224, 224)),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
])
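The loader construction is not shown in this excerpt; the test_loader used by evaluate_model above would be built along these lines (directory and batch size are assumptions):

In [ ]: # Test dataset and loader using the evaluation transform (path and batch size assumed)
test_dataset = AgaveImageDataset('data/test', transform=val_test_transform)
test_loader = DataLoader(test_dataset, batch_size=32, shuffle=False)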
In [ ]: # Training function
def train_model(model, criterion, optimizer, train_loader, val_loader, num_epochs):
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model.to(device)
    train_losses, train_accs = [], []
    val_losses, val_accs = [], []
    for epoch in range(num_epochs):
        # Training phase
        model.train()
        running_loss = 0.0
        correct = 0
        total = 0
        for inputs, labels in train_loader:
            inputs, labels = inputs.to(device), labels.to(device)
            optimizer.zero_grad()
            outputs = model(inputs)
            loss = criterion(outputs, labels)
            loss.backward()
            optimizer.step()
            running_loss += loss.item()
            _, predicted = outputs.max(1)
            total += labels.size(0)
            correct += predicted.eq(labels).sum().item()
        train_loss = running_loss / len(train_loader)
        train_acc = correct / total
        train_losses.append(train_loss)
        train_accs.append(train_acc)
        # Validation phase
        model.eval()
        val_loss = 0.0
        correct = 0
        total = 0
        with torch.no_grad():
            for inputs, labels in val_loader:
                inputs, labels = inputs.to(device), labels.to(device)
                outputs = model(inputs)
                loss = criterion(outputs, labels)
                val_loss += loss.item()
                _, predicted = outputs.max(1)
                total += labels.size(0)
                correct += predicted.eq(labels).sum().item()
        val_loss /= len(val_loader)
        val_acc = correct / total
        val_losses.append(val_loss)
        val_accs.append(val_acc)
        print(f'Epoch {epoch+1}/{num_epochs}:')
        print(f'Train Loss: {train_loss:.4f}, Train Acc: {train_acc:.4f}')
        print(f'Val Loss: {val_loss:.4f}, Val Acc: {val_acc:.4f}')
    return train_losses, val_losses, train_accs, val_accs
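The model setup itself is not shown in this excerpt; a configuration consistent with the training loop, assuming an ImageNet-pretrained ResNet-18 backbone, an Adam optimizer, and train_loader/val_loader built like the test loader above, would be:

In [ ]: # Model, loss, optimizer, and training run (backbone and hyperparameters assumed)
num_classes = len(test_dataset.classes)  # number of maturation stages
model = models.resnet18(weights=models.ResNet18_Weights.DEFAULT)
model.fc = nn.Linear(model.fc.in_features, num_classes)
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=1e-4)
train_losses, val_losses, train_accs, val_accs = train_model(
    model, criterion, optimizer, train_loader, val_loader, num_epochs=10)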
plt.figure(figsize=(12, 5))
plt.subplot(1, 2, 1)
plt.plot(train_losses, label='Train Loss')
plt.plot(val_losses, label='Val Loss')
plt.xlabel('Epoch')
plt.ylabel('Loss')
plt.legend()
plt.title('Training and Validation Loss')
plt.subplot(1, 2, 2)
plt.plot(train_accs, label='Train Acc')
plt.plot(val_accs, label='Val Acc')
plt.xlabel('Epoch')
plt.ylabel('Accuracy')
plt.legend()
plt.title('Training and Validation Accuracy')
plt.show()
cm = confusion_matrix(test_labels, test_preds)
plt.figure(figsize=(8, 6))
sns.heatmap(cm, annot=True, fmt='d', cmap='Blues')
plt.xlabel('Predicted')
plt.ylabel('True')
plt.title('Confusion Matrix')
plt.show()
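A per-class precision/recall/F1 summary can be printed from the same predictions; the target_names argument assumes the class folder names from the dataset sketch above:

In [ ]: # Per-class metrics from the same predictions
from sklearn.metrics import classification_report
print(classification_report(test_labels, test_preds, target_names=test_dataset.classes))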