The document outlines a project on programming for artificial intelligence, specifically focusing on Parkinson's disease detection using convolutional neural networks (CNN). It includes tasks such as image processing, model building, training, and evaluation, along with a detailed description of the code used for data preparation and model training. Additionally, it covers MRI image preprocessing techniques for deep learning applications.


Programming for Artificial Intelligence.

Submitted by
M. Huzaifa (2022-CS-438)

Under the Guidance of

Sir Abdul Jaleel.

Department of Computer Science.

In Partial Fulfillment of the
Requirements for the Award of the
Degree of

Bachelor of Technology

DEPARTMENT OF COMPUTER SCIENCE.


UNIVERSITY OF ENGINEERING AND
TECHNOLOGY, LAHORE (Jan-Apr 2025)
Task 1

# Importing libraries for image processing and plotting
from keras.preprocessing.image import load_img, img_to_array  # load_img loads an image file; img_to_array converts it into a numerical array
import os                        # Functions to interact with the operating system (like reading file directories)
import matplotlib.pyplot as plt  # Library for creating visualizations and plots

# Set the style for all plots to a dark background for better contrast
plt.style.use('dark_background')

# Visualizing Healthy Spiral Images
# ----------------------------
plt.figure(figsize=(12, 12))  # Create a new figure, 12 x 12 inches

for i in range(1, 10):  # Loop over image indices 1 to 9 (subplot positions are 1-based)
    plt.subplot(3, 3, i)  # Select the i-th subplot in a 3x3 grid
    # Build the path for the i-th healthy spiral image from the directory listing
    img = load_img(
        "../input/parkinsons-drawings/spiral/training/healthy/" +
        os.listdir("../input/parkinsons-drawings/spiral/training/healthy")[i]
    )
    plt.imshow(img)  # Display the loaded image in the current subplot

plt.show()  # Render and display the entire figure with all 9 images
# ----------------------------
# Visualizing Parkinson Spiral Images
# ----------------------------
plt.figure(figsize=(12, 12))  # Create a new figure for spiral images of Parkinson patients

for i in range(1, 10):  # Loop over the first 9 images
    plt.subplot(3, 3, i)  # Position each image in a 3x3 subplot grid
    # Build the path for the i-th Parkinson spiral image from the appropriate folder
    img = load_img(
        "../input/parkinsons-drawings/spiral/training/parkinson/" +
        os.listdir("../input/parkinsons-drawings/spiral/training/parkinson")[i]
    )
    plt.imshow(img)  # Plot the image in the subplot

plt.show()  # Show the figure containing the Parkinson spiral images

# ----------------------------
# Visualizing Healthy Wave Images
# ----------------------------
plt.figure(figsize=(12, 12))  # Create a figure for healthy wave images

for i in range(1, 10):  # Loop through indices 1 to 9 for healthy wave images
    plt.subplot(3, 3, i)  # Set up a 3x3 subplot grid
    # Build the file path for each healthy wave image from the base directory and file name
    img = load_img(
        "../input/parkinsons-drawings/wave/training/healthy/" +
        os.listdir("../input/parkinsons-drawings/wave/training/healthy")[i]
    )
    plt.imshow(img)  # Display the image

plt.show()  # Display the overall figure for healthy wave images

# ----------------------------
# Visualizing Parkinson Wave Images
# ----------------------------
plt.figure(figsize=(12, 12))  # Create a figure for Parkinson wave images

for i in range(1, 10):  # Loop over the first 9 images from the Parkinson wave folder
    plt.subplot(3, 3, i)  # Prepare the subplot in a 3x3 grid
    # Build the full file path for the image from the Parkinson wave training directory
    img = load_img(
        "../input/parkinsons-drawings/wave/training/parkinson/" +
        os.listdir("../input/parkinsons-drawings/wave/training/parkinson")[i]
    )
    plt.imshow(img)  # Display the image on the subplot

plt.show()  # Show all the subplots in one figure

# ----------------------------

# Importing CNN Layers and Model Classes from Keras
# ----------------------------
from keras.models import Sequential  # Sequential model type for stacking layers linearly
from keras.layers import Conv2D, MaxPooling2D, Flatten, Dense  # Conv2D for convolutions, MaxPooling2D for pooling, Flatten to convert feature maps to a vector, Dense for fully connected layers

# ----------------------------
# Building the Classifier Model
# ----------------------------
classifier = Sequential()  # Initialize a sequential model for the CNN

# First convolutional layer: 32 filters, 3x3 kernel, ReLU activation; specify the input shape
classifier.add(Conv2D(32, (3, 3), input_shape=(128, 128, 3), activation='relu'))

# Max pooling layer with a 2x2 pool size to reduce spatial dimensions
classifier.add(MaxPooling2D(pool_size=(2, 2)))

# Second convolutional layer: 32 filters, 3x3 kernel, ReLU activation
classifier.add(Conv2D(32, (3, 3), activation='relu'))

# Another max pooling layer with a 2x2 pool size
classifier.add(MaxPooling2D(pool_size=(2, 2)))

# Flatten the 2D feature maps into a 1D feature vector
classifier.add(Flatten())

# Fully connected (dense) layer with 128 units and ReLU activation
classifier.add(Dense(units=128, activation='relu'))

# Output layer with 1 unit (binary classification) and sigmoid activation
classifier.add(Dense(units=1, activation='sigmoid'))
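
As a quick sanity check on the architecture (a minimal sketch added here, not part of the original listing), printing the model summary confirms how each layer shrinks the input before the dense head:

# With the 128x128x3 input above, each 3x3 convolution (valid padding) trims
# 2 pixels per side and each 2x2 pooling halves the size:
# 128 -> 126 -> 63 -> 61 -> 30, so Flatten yields 30*30*32 = 28800 features.
classifier.summary()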

# ----------------------------
# Preparing Image Data Generation for Training and Testing
# ----------------------------
from keras.preprocessing.image import ImageDataGenerator  # For real-time data augmentation

# ImageDataGenerator for training: rescales pixel values and applies augmentation transformations
train_datagen = ImageDataGenerator(
    rescale=1./255,        # Scale image pixel values to be between 0 and 1
    shear_range=0.2,       # Randomly shear images
    zoom_range=0.2,        # Randomly zoom images
    horizontal_flip=True   # Randomly flip images horizontally
)

# ImageDataGenerator for testing: only rescales the images
test_datagen = ImageDataGenerator(rescale=1./255)
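
To verify the augmentation settings visually, one batch can be pulled from a throwaway generator (a small sketch added here; it assumes the same spiral training directory used below):

# Preview one augmented, rescaled batch of up to 9 images
preview = train_datagen.flow_from_directory(
    '../input/parkinsons-drawings/spiral/training',
    target_size=(128, 128), batch_size=9, class_mode='binary')
batch_x, batch_y = next(preview)
plt.figure(figsize=(6, 6))
for j in range(len(batch_x)):
    plt.subplot(3, 3, j + 1)
    plt.imshow(batch_x[j])
    plt.axis('off')
plt.show()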

# Set up the generator for spiral training images
spiral_train_generator = train_datagen.flow_from_directory(
    '../input/parkinsons-drawings/spiral/training',  # Directory for spiral training images
    target_size=(128, 128),  # Resize all images to 128x128 pixels
    batch_size=32,           # Use batches of 32 images
    class_mode='binary'      # Binary classification (healthy vs. parkinson)
)

# Set up the generator for spiral testing images
spiral_test_generator = test_datagen.flow_from_directory(
    '../input/parkinsons-drawings/spiral/testing',  # Directory for spiral testing images
    target_size=(128, 128),
    batch_size=32,
    class_mode='binary'
)

# Set up the generator for wave training images
wave_train_generator = train_datagen.flow_from_directory(
    '../input/parkinsons-drawings/wave/training',  # Directory for wave training images
    target_size=(128, 128),
    batch_size=32,
    class_mode='binary'
)

# Set up the generator for wave testing images
wave_test_generator = test_datagen.flow_from_directory(
    '../input/parkinsons-drawings/wave/testing',  # Directory for wave testing images
    target_size=(128, 128),
    batch_size=32,
    class_mode='binary'
)

# ----------------------------
# Fitting the Model using the Training Data
# ----------------------------
from keras.optimizers import Adam  # The Adam optimizer
from keras.callbacks import EarlyStopping, ReduceLROnPlateau  # Callbacks for early stopping and learning rate reduction

# EarlyStopping callback: halt training when validation loss stops improving
early_stopping = EarlyStopping(
    monitor='val_loss',         # Monitor validation loss
    min_delta=0,                # Minimum change to qualify as an improvement
    patience=3,                 # Epochs with no improvement after which training stops
    verbose=1,                  # Verbose output for the callback
    restore_best_weights=True   # Restore weights from the epoch with the best monitored value
)

# ReduceLROnPlateau callback: reduce the learning rate when the metric has stopped improving
reduce_learningrate = ReduceLROnPlateau(
    monitor='val_loss',  # Monitor validation loss
    factor=0.2,          # Factor by which the learning rate is reduced (new_lr = lr * factor)
    patience=3,          # Epochs with no improvement before reducing the learning rate
    verbose=1,           # Verbose output for the callback
    min_delta=0.0001     # Threshold for measuring the new optimum
)

# Group callbacks into a list to pass them together
callbacks_list = [early_stopping, reduce_learningrate]

# Define the number of training epochs
epochs = 48

# Compile the classifier with binary cross-entropy loss and the Adam optimizer
classifier.compile(
    loss='binary_crossentropy',           # Loss function for binary classification
    optimizer=Adam(learning_rate=0.001),  # Adam optimizer with a specified learning rate
    metrics=['accuracy']                  # Metric to monitor during training
)

# Fit the model on the spiral training images, validating on the spiral testing images
# (classifier.fit accepts generators directly; fit_generator is deprecated in recent Keras)
history = classifier.fit(
    spiral_train_generator,  # Training data generator
    steps_per_epoch=spiral_train_generator.n // spiral_train_generator.batch_size,  # Steps per epoch
    epochs=epochs,  # Total number of epochs
    validation_data=spiral_test_generator,  # Validation data generator
    validation_steps=spiral_test_generator.n // spiral_test_generator.batch_size,  # Validation steps
    callbacks=callbacks_list  # Early stopping and learning rate reduction
)
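
After training, a short evaluation pass (a sketch added here, not part of the original listing) reports accuracy on the held-out spiral test set; the wave generators defined above could be evaluated the same way:

# Evaluate on the spiral test split used for validation during training
spiral_loss, spiral_acc = classifier.evaluate(spiral_test_generator)
print("Spiral test accuracy: {:.2f}%".format(spiral_acc * 100))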

# ----------------------------
# Plotting the Training Accuracy and Loss Curves
# ----------------------------
plt.style.use('dark_background')  # Set the plot style again for consistency

plt.figure(figsize=(12, 6))  # Create a figure for plotting training history

# Plot the Training Accuracy curve
plt.subplot(1, 2, 1)  # Subplot for accuracy on the left side
plt.ylabel('Accuracy', fontsize=16)  # Label the y-axis
plt.plot(history.history['accuracy'], label='Training Accuracy', color='green')  # Plot accuracy with a green line
plt.legend(loc='lower right')  # Place the legend in the lower-right corner

# Plot the Training Loss curve
plt.subplot(1, 2, 2)  # Subplot for loss on the right side
plt.ylabel('Loss', fontsize=16)  # Label the y-axis
plt.plot(history.history['loss'], label='Training Loss', color='red')  # Plot loss with a red line
plt.legend(loc='lower right')  # Place the legend

plt.show()  # Show the complete figure with both subplots

Task 2 (GitHub):

Parkinson’s Disease Detection Using CNN - Fully Commented Code

# Importing necessary libraries
import numpy as np  # For numerical operations
import matplotlib.pyplot as plt  # For plotting graphs and images
import os  # For interacting with the operating system and file directories
import cv2  # OpenCV for image processing
from keras.utils import to_categorical  # To convert labels to one-hot encoding
from keras.models import Sequential  # For building the CNN model
from keras.layers import Conv2D, MaxPooling2D, Flatten, Dense, Dropout  # CNN layers

# Defining the data directory containing the dataset
data_path = '/content/drive/MyDrive/parkinsons_dataset'

# Creating two lists to store images and their labels
images = []
labels = []

# Loop through each folder in the dataset directory
for folder in os.listdir(data_path):
    folder_path = os.path.join(data_path, folder)  # Full path to current folder
    label = int(folder.split('_')[0])  # Extract label from folder name (0 or 1)

    # Loop through each image file in the current folder
    for file in os.listdir(folder_path):
        file_path = os.path.join(folder_path, file)  # Full path to image file
        image = cv2.imread(file_path)  # Read the image using OpenCV
        image = cv2.resize(image, (100, 100))  # Resize image to 100x100 pixels
        images.append(image)  # Add image to list
        labels.append(label)  # Add corresponding label
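
The folder.split('_')[0] parsing above assumes every class folder name begins with its numeric label. A hypothetical layout consistent with that convention (the actual folder names on Drive are not shown in this document) would be:

# parkinsons_dataset/
#     0_healthy/    -> images of healthy drawings, label 0
#     1_parkinson/  -> images of Parkinson drawings, label 1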

# Convert lists to numpy arrays for processing
images = np.array(images)
labels = np.array(labels)

# Normalize image pixel values to the range 0 to 1
images = images / 255.0

# Convert labels to categorical (one-hot encoded) for classification
labels = to_categorical(labels)

# Splitting dataset into training and testing sets
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(images, labels, test_size=0.2)
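
An optional refinement (my addition, not in the original code): stratifying the split on the integer class ids keeps the class balance identical in both splits, and a fixed seed makes runs reproducible:

x_train, x_test, y_train, y_test = train_test_split(
    images, labels, test_size=0.2,
    stratify=labels.argmax(axis=1),  # Stratify on integer class ids (undo one-hot)
    random_state=42)                 # Fixed seed for reproducibility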

# Building the CNN model using Keras Sequential API
model = Sequential()

# First convolutional layer with 32 filters and ReLU activation
model.add(Conv2D(32, (3, 3), activation='relu', input_shape=(100, 100, 3)))

# Max pooling layer to reduce spatial dimensions
model.add(MaxPooling2D(pool_size=(2, 2)))

# Second convolutional layer with 64 filters
model.add(Conv2D(64, (3, 3), activation='relu'))

# Another max pooling layer
model.add(MaxPooling2D(pool_size=(2, 2)))

# Flatten the 2D features into 1D for the fully connected layer
model.add(Flatten())

# Fully connected (dense) layer with 128 neurons
model.add(Dense(128, activation='relu'))

# Dropout layer to reduce overfitting
model.add(Dropout(0.5))

# Output layer with 2 neurons (since we have 2 classes) and softmax activation
model.add(Dense(2, activation='softmax'))

# Compiling the model with loss function, optimizer, and evaluation metric
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])

# Training the CNN model on training data
model.fit(x_train, y_train, epochs=10, validation_data=(x_test, y_test))

# Evaluate model performance on test set
loss, accuracy = model.evaluate(x_test, y_test)
print("Test Accuracy: {:.2f}%".format(accuracy * 100))
Task 3:

"MRI Image Preprocessing Pipeline for Deep Learning"

# MRI Image Preprocessing and Augmentation Pipeline
# Author: Huzaifa
# Description: This script handles DICOM-to-PNG conversion, normalization, resizing, and augmentation for MRI image datasets.

import os
import pydicom
import numpy as np
from PIL import Image
import random

# Function to convert DICOM files to PNG format
def convert_dicom_to_png(dicom_dir, png_dir):
    if not os.path.exists(png_dir):
        os.makedirs(png_dir)
    for filename in os.listdir(dicom_dir):
        if filename.lower().endswith(".dcm"):
            dicom_path = os.path.join(dicom_dir, filename)
            dicom_image = pydicom.dcmread(dicom_path)
            image_array = dicom_image.pixel_array.astype(float)

            # Normalize pixel values to 0-255 and convert to uint8
            normalized_image = ((image_array - np.min(image_array)) /
                                (np.max(image_array) - np.min(image_array))) * 255.0
            image_uint8 = normalized_image.astype(np.uint8)

            # Convert array to PIL image and save as PNG
            image = Image.fromarray(image_uint8)
            output_path = os.path.join(png_dir, os.path.splitext(filename)[0] + ".png")
            image.save(output_path)
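
One caveat worth noting (my addition, not in the original script): pydicom's pixel_array returns raw stored values, so scans that carry rescale tags can be mapped to physical units before the min-max normalization above. A sketch that would slot in right after dcmread:

# Apply DICOM rescale slope/intercept when the tags are present
slope = float(getattr(dicom_image, 'RescaleSlope', 1.0))
intercept = float(getattr(dicom_image, 'RescaleIntercept', 0.0))
image_array = image_array * slope + intercept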

# Function to normalize images to pixel range [0, 1]
def normalize_images(image_dir):
    for filename in os.listdir(image_dir):
        if filename.lower().endswith(('.png', '.jpg', '.jpeg')):
            path = os.path.join(image_dir, filename)
            image = Image.open(path).convert("L")  # Load as grayscale
            array = np.asarray(image).astype(np.float32) / 255.0  # Scale to [0, 1]
            # PNG stores 8-bit integers, so the array is mapped back to 0-255 on save
            image = Image.fromarray((array * 255).astype(np.uint8))
            image.save(path)
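
Since the saved PNGs remain 8-bit, the actual [0, 1] scaling is normally re-applied when the images are loaded for training; a minimal sketch (the file name is hypothetical):

# Load a preprocessed PNG and scale it to [0, 1] for model input
img = Image.open("PNG_PD/example.png").convert("L")
array = np.asarray(img, dtype=np.float32) / 255.0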

# Function to resize images to a fixed dimension
def resize_images(image_dir, output_size=(224, 224)):
    for filename in os.listdir(image_dir):
        if filename.lower().endswith(('.png', '.jpg', '.jpeg')):
            path = os.path.join(image_dir, filename)
            image = Image.open(path).convert("L")
            image = image.resize(output_size)
            image.save(path)
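
A small optional tweak (an assumption on my part, not from the original): passing an explicit high-quality resampling filter to resize, as the zoom step in augment_images below already does, can better preserve fine structure when downscaling MRI slices:

image = image.resize(output_size, Image.LANCZOS)  # Drop-in replacement for the resize call above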

# Function to apply data augmentation techniques to the dataset
def augment_images(input_dir, output_dir, augmentations, num_augmented=3):
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
    for filename in os.listdir(input_dir):
        if filename.lower().endswith(('.png', '.jpg', '.jpeg')):
            img_path = os.path.join(input_dir, filename)
            image = Image.open(img_path).convert("L")
            for i in range(num_augmented):
                aug_img = image.copy()

                # Random rotation
                if random.random() < augmentations.get("rotation_probability", 0.5):
                    angle = random.randint(
                        -augmentations.get("rotation_angle", 30),
                        augmentations.get("rotation_angle", 30)
                    )
                    aug_img = aug_img.rotate(angle)

                # Random horizontal flip
                if random.random() < augmentations.get("flip_probability", 0.5):
                    aug_img = aug_img.transpose(Image.FLIP_LEFT_RIGHT)

                # Random vertical flip
                if random.random() < augmentations.get("flip_probability", 0.5):
                    aug_img = aug_img.transpose(Image.FLIP_TOP_BOTTOM)

                # Optional: Zoom augmentation (crop the centre, then resize back up)
                if random.random() < augmentations.get("zoom_probability", 0.3):
                    zoom_factor = augmentations.get("zoom_factor", 1.2)
                    w, h = aug_img.size
                    zoom_w, zoom_h = int(w / zoom_factor), int(h / zoom_factor)
                    cropped = aug_img.crop(((w - zoom_w) // 2, (h - zoom_h) // 2,
                                            (w + zoom_w) // 2, (h + zoom_h) // 2))
                    aug_img = cropped.resize((w, h), Image.LANCZOS)

                # Save the augmented image
                aug_img.save(os.path.join(output_dir, f"aug_{i}_{filename}"))

# Example usage:

# Step 1: Convert DICOM images to PNG format
# convert_dicom_to_png("DICOM_PD", "PNG_PD")

# Step 2: Normalize PNG images to range [0, 1]
# normalize_images("PNG_PD")

# Step 3: Resize images to 224x224 resolution
# resize_images("PNG_PD")

# Step 4: Augment the dataset
# augment_images(
#     input_dir="PNG_PD",
#     output_dir="Augmented_PD",
#     augmentations={
#         "rotation_probability": 0.7,
#         "rotation_angle": 20,
#         "flip_probability": 0.5,
#         "zoom_probability": 0.3,
#         "zoom_factor": 1.1
#     },
#     num_augmented=5
# )

Output Images:
