Exp. No: 06

COLOR IMAGE PROCESSING

AIM: Implement color image processing to differentiate objects in a digital image.

SOURCE CODE:

import cv2
import numpy as np
import matplotlib.pyplot as plt

# Load the image
image = cv2.imread('glassPic.jpeg')

# Convert the image to the HSV color space
hsv_image = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)

# Define the color ranges for segmentation

# Red color range (two ranges needed to cover the red hue wrap-around in HSV)
lower_red1 = np.array([0, 120, 70])
upper_red1 = np.array([10, 255, 255])
lower_red2 = np.array([170, 120, 70])
upper_red2 = np.array([180, 255, 255])

# Blue color range
lower_blue = np.array([100, 150, 70])
upper_blue = np.array([140, 255, 255])

# Green color range
lower_green = np.array([40, 70, 70])
upper_green = np.array([80, 255, 255])

# Create masks for the color ranges
mask_red1 = cv2.inRange(hsv_image, lower_red1, upper_red1)
mask_red2 = cv2.inRange(hsv_image, lower_red2, upper_red2)
mask_red = cv2.add(mask_red1, mask_red2)  # Combine both masks for red

mask_blue = cv2.inRange(hsv_image, lower_blue, upper_blue)
mask_green = cv2.inRange(hsv_image, lower_green, upper_green)

# Extract the colored objects from the image
red_objects = cv2.bitwise_and(image, image, mask=mask_red)
blue_objects = cv2.bitwise_and(image, image, mask=mask_blue)
green_objects = cv2.bitwise_and(image, image, mask=mask_green)

# Convert BGR to RGB for displaying with matplotlib
image_rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
red_objects_rgb = cv2.cvtColor(red_objects, cv2.COLOR_BGR2RGB)
blue_objects_rgb = cv2.cvtColor(blue_objects, cv2.COLOR_BGR2RGB)
green_objects_rgb = cv2.cvtColor(green_objects, cv2.COLOR_BGR2RGB)

# Display the original and segmented images using matplotlib
plt.figure(figsize=(10, 10))

plt.subplot(2, 2, 1)
plt.imshow(image_rgb)
plt.title('Original Image')
plt.axis('off')

plt.subplot(2, 2, 2)
plt.imshow(red_objects_rgb)
plt.title('Red Objects')
plt.axis('off')

plt.subplot(2, 2, 3)
plt.imshow(blue_objects_rgb)
plt.title('Blue Objects')
plt.axis('off')

plt.subplot(2, 2, 4)
plt.imshow(green_objects_rgb)
plt.title('Green Objects')
plt.axis('off')

plt.tight_layout()
plt.show()
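
To go beyond displaying the masks and actually count the objects of each colour, contours can be extracted from each mask. The following is a minimal sketch assuming the masks computed above; the count_objects helper and the 500-pixel area threshold are illustrative choices, not part of the recorded program:

# Illustrative sketch: count objects per colour mask (area threshold is an assumption)
def count_objects(mask, min_area=500):
    contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    return sum(1 for c in contours if cv2.contourArea(c) > min_area)

for name, m in [('red', mask_red), ('blue', mask_blue), ('green', mask_green)]:
    print(f"{name} objects: {count_objects(m)}")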

OUTPUT:

RESULTS: The above program implementing color image processing to differentiate objects in a digital image has been executed successfully.

Exp. No: 07

IMAGE SEGMENTATION USING K-MEANS CLUSTERING

AIM: Implement image segmentation using the K-means clustering method in a fruit basket application.

SOURCE CODE:

import cv2
import numpy as np
import matplotlib.pyplot as plt

def segment_image_kmeans(image_path, K):
    # Read the image using OpenCV
    image = cv2.imread(image_path)
    # Convert the image from BGR (OpenCV default) to RGB
    image_rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)

    # Reshape the image into a 2D array of pixels
    pixel_values = image_rgb.reshape((-1, 3))
    # Convert to float type
    pixel_values = np.float32(pixel_values)

    # Define criteria for the K-means algorithm
    # (type, max_iter, epsilon)
    criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 100, 0.2)

    # Apply K-means clustering to segment the image
    _, labels, centers = cv2.kmeans(pixel_values, K, None, criteria, 10,
                                    cv2.KMEANS_RANDOM_CENTERS)

    # Convert the centers to uint8 (as image pixels are integers)
    centers = np.uint8(centers)

    # Map the labels to the corresponding cluster centers
    segmented_image = centers[labels.flatten()]

    # Reshape the segmented image to the original image shape
    segmented_image = segmented_image.reshape(image_rgb.shape)

    # Display the original and segmented images
    plt.figure(figsize=(10, 5))
    plt.subplot(1, 2, 1)
    plt.imshow(image_rgb)
    plt.title('Original Image')
    plt.axis('off')

    plt.subplot(1, 2, 2)
    plt.imshow(segmented_image)
    plt.title('Segmented Image with K = {}'.format(K))
    plt.axis('off')

    plt.show()

# Path to the image of the fruit basket
image_path = '/content/fruit-basket2.jpg'  # Replace with the path to your image file
K = 10  # Number of clusters (tune to the number of distinct fruits/regions)

# Call the function to perform segmentation
segment_image_kmeans(image_path, K)
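
The first value returned by cv2.kmeans is the compactness (sum of squared distances from each pixel to its assigned centre), which can guide the choice of K: an "elbow" in the curve suggests a good cluster count. A minimal sketch using the same pixel preparation as above; the range of K values tried here is an arbitrary illustration:

# Sketch: compare K-means compactness for several K values
image = cv2.imread(image_path)
pixels = np.float32(cv2.cvtColor(image, cv2.COLOR_BGR2RGB).reshape((-1, 3)))
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 100, 0.2)
for k in range(2, 11):
    compactness, _, _ = cv2.kmeans(pixels, k, None, criteria, 10, cv2.KMEANS_RANDOM_CENTERS)
    print(k, compactness)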

OUTPUT:

RESULTS: The above program to implement image segmentation using the K-means clustering method in a fruit basket application has been executed successfully.

Exp. No: 08

IMAGE SEGMENTATION USING WATERSHED TRANSFORM

AIM: Implement Image segmentation using Watershed transform in any Scenery photo.

SOURCE CODE:

import cv2
import numpy as np
from matplotlib import pyplot as plt

# Function to display an image using matplotlib
def imshow(img, ax=None, title=None, cmap=None):
    if ax is None:
        plt.imshow(img, cmap=cmap)
        plt.axis('off')
        if title:
            plt.title(title)
        plt.show()
    else:
        ax.imshow(img, cmap=cmap)
        ax.axis('off')
        if title:
            ax.set_title(title)

# Image loading
img = cv2.imread("/content/coins.png")
imshow(cv2.cvtColor(img, cv2.COLOR_BGR2RGB), title="Original Image")

# Image grayscale conversion
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
imshow(gray, cmap='gray', title="Grayscale Image")

# Threshold processing (Otsu, inverted binary)
ret, bin_img = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)
imshow(bin_img, cmap='gray', title="Threshold Image")

# Noise removal
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))
bin_img = cv2.morphologyEx(bin_img, cv2.MORPH_OPEN, kernel, iterations=2)
imshow(bin_img, cmap='gray', title="Noise Removed")

# Create subplots with 2x2 layout
fig, axes = plt.subplots(nrows=2, ncols=2, figsize=(8, 8))

# Sure background area
sure_bg = cv2.dilate(bin_img, kernel, iterations=3)
imshow(sure_bg, ax=axes[0, 0], cmap='gray', title='Sure Background')

# Distance transform
dist = cv2.distanceTransform(bin_img, cv2.DIST_L2, 5)
imshow(dist, ax=axes[0, 1], cmap='gray', title='Distance Transform')

# Foreground area
ret, sure_fg = cv2.threshold(dist, 0.5 * dist.max(), 255, cv2.THRESH_BINARY)
sure_fg = sure_fg.astype(np.uint8)
imshow(sure_fg, ax=axes[1, 0], cmap='gray', title='Sure Foreground')

# Unknown area
unknown = cv2.subtract(sure_bg, sure_fg)
imshow(unknown, ax=axes[1, 1], cmap='gray', title='Unknown')


plt.show()

# Marker labelling
ret, markers = cv2.connectedComponents(sure_fg)

# Add one to all labels so that the background is not 0, but 1
markers += 1
# Mark the region of unknown with zero
markers[unknown == 255] = 0

fig, ax = plt.subplots(figsize=(6, 6))
ax.imshow(markers, cmap="tab20b")
ax.axis('off')
plt.title('Markers')
plt.show()

# Watershed Algorithm
markers = cv2.watershed(img, markers)

fig, ax = plt.subplots(figsize=(5, 5))
ax.imshow(markers, cmap="tab20b")
ax.axis('off')
plt.title('Watershed Markers')
plt.show()

labels = np.unique(markers)

coins = []
for label in labels[2:]:
    # Create a binary image where only the area of the label is in the foreground
    target = np.where(markers == label, 255, 0).astype(np.uint8)

    # Perform contour extraction on the created binary image
    contours, _ = cv2.findContours(target, cv2.RETR_EXTERNAL,
                                   cv2.CHAIN_APPROX_SIMPLE)
    coins.append(contours[0])

# Draw the outlines of the detected coins
img = cv2.drawContours(img, coins, -1, color=(0, 23, 223), thickness=2)
imshow(cv2.cvtColor(img, cv2.COLOR_BGR2RGB), title="Detected Coins")
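
Since each entry in coins is the outline of one segmented region, the number of detected objects can be reported directly:

# Report the number of regions found by the watershed step
print(f"Coins detected: {len(coins)}")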

OUTPUT:


RESULTS: The above implementation of image segmentation using the watershed transform on the given photo has been completed successfully.

Exp. No: 09

FEATURE IDENTIFICATION IN FINGERPRINT RECOGNITION SYSTEM

AIM: Implement a program for Features Identification in the Fingerprint Recognition System.

SOURCE CODE:

!pip install opencv-python
!pip install opencv-python-headless
!pip install opencv-contrib-python

from os import path

if not path.exists('utils.py'):  # If running on Colab, download and unzip the additional files on the first run
    !wget https://biolab.csr.unibo.it/samples/fr/files.zip
    !unzip files.zip

import math
import numpy as np
import cv2 as cv
import matplotlib.pyplot as plt
from utils import *
from ipywidgets import interact

fingerprint = cv.imread('samples/sample_1_1.png', cv.IMREAD_GRAYSCALE)
show(fingerprint, f'Fingerprint with size (w,h): {fingerprint.shape[::-1]}')

# Calculate the local gradient (using Sobel filters)
gx, gy = cv.Sobel(fingerprint, cv.CV_32F, 1, 0), cv.Sobel(fingerprint, cv.CV_32F, 0, 1)
show((gx, 'Gx'), (gy, 'Gy'))

# Calculate the magnitude of the gradient for each pixel
gx2, gy2 = gx**2, gy**2
gm = np.sqrt(gx2 + gy2)
show((gx2, 'Gx**2'), (gy2, 'Gy**2'), (gm, 'Gradient magnitude'))

# Integral over a square window
sum_gm = cv.boxFilter(gm, -1, (25, 25), normalize=False)
show(sum_gm, 'Integral of the gradient magnitude')

# Use a simple threshold for segmenting the fingerprint pattern
thr = sum_gm.max() * 0.2
mask = cv.threshold(sum_gm, thr, 255, cv.THRESH_BINARY)[1].astype(np.uint8)
show(fingerprint, mask, cv.merge((mask, fingerprint, fingerprint)))

W = (23, 23)
gxx = cv.boxFilter(gx2, -1, W, normalize = False)
gyy = cv.boxFilter(gy2, -1, W, normalize = False)
gxy = cv.boxFilter(gx * gy, -1, W, normalize = False)
gxx_gyy = gxx - gyy
gxy2 = 2 * gxy

orientations = (cv.phase(gxx_gyy, -gxy2) + np.pi) / 2  # '-' to adjust for y axis direction
sum_gxx_gyy = gxx + gyy
strengths = np.divide(cv.sqrt(gxx_gyy**2 + gxy2**2), sum_gxx_gyy,
                      out=np.zeros_like(gxx), where=sum_gxx_gyy != 0)
show(draw_orientations(fingerprint, orientations, strengths, mask, 1, 16), 'Orientation image')
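
For reference, cv.phase(x, y) computes atan2(y, x), so the orientation estimated above is the standard structure-tensor formula θ = (atan2(−2·Gxy, Gxx − Gyy) + π) / 2, which maps each ridge direction into a half-turn range of width π, as ridge orientations are undirected.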

OUTPUT:


RESULTS: The above program for feature identification in the fingerprint recognition system has been executed successfully.

Exp. No: 10

TEXTURE-BASED IMAGE CLASSIFICATION USING RECEIVER OPERATING CURVE

AIM: Implement a program using texture-based image classification and find the accuracy level using a receiver operating characteristic (ROC) curve.

SOURCE CODE:

!pip install opencv-python
!pip install opencv-python-headless
!pip install opencv-contrib-python

from os import path

if not path.exists('utils.py'):  # If running on Colab, download and unzip the additional files on the first run
    !wget https://biolab.csr.unibo.it/samples/fr/files.zip
    !unzip files.zip

import math
import numpy as np
import cv2 as cv
import matplotlib.pyplot as plt
from utils import *
from ipywidgets import interact

fingerprint = cv.imread('samples/sample_1_1.png', cv.IMREAD_GRAYSCALE)
show(fingerprint, f'Fingerprint with size (w,h): {fingerprint.shape[::-1]}')

# Calculate the local gradient (using Sobel filters)
gx, gy = cv.Sobel(fingerprint, cv.CV_32F, 1, 0), cv.Sobel(fingerprint, cv.CV_32F, 0, 1)
show((gx, 'Gx'), (gy, 'Gy'))

# Calculate the magnitude of the gradient for each pixel
gx2, gy2 = gx**2, gy**2
gm = np.sqrt(gx2 + gy2)
show((gx2, 'Gx**2'), (gy2, 'Gy**2'), (gm, 'Gradient magnitude'))

# Integral over a square window
sum_gm = cv.boxFilter(gm, -1, (25, 25), normalize=False)
show(sum_gm, 'Integral of the gradient magnitude')

# Use a simple threshold for segmenting the fingerprint pattern
thr = sum_gm.max() * 0.2
mask = cv.threshold(sum_gm, thr, 255, cv.THRESH_BINARY)[1].astype(np.uint8)
show(fingerprint, mask, cv.merge((mask, fingerprint, fingerprint)))

W = (23, 23)
gxx = cv.boxFilter(gx2, -1, W, normalize = False)
gyy = cv.boxFilter(gy2, -1, W, normalize = False)
gxy = cv.boxFilter(gx * gy, -1, W, normalize = False)
gxx_gyy = gxx - gyy
gxy2 = 2 * gxy

orientations = (cv.phase(gxx_gyy, -gxy2) + np.pi) / 2  # '-' to adjust for y axis direction
sum_gxx_gyy = gxx + gyy
strengths = np.divide(cv.sqrt(gxx_gyy**2 + gxy2**2), sum_gxx_gyy,
                      out=np.zeros_like(gxx), where=sum_gxx_gyy != 0)
show(draw_orientations(fingerprint, orientations, strengths, mask, 1, 16), 'Orientation image')

region = fingerprint[10:90,80:130]
show(region)

# Before computing the x-signature, the region is smoothed to reduce noise
smoothed = cv.blur(region, (5, 5), -1)
xs = np.sum(smoothed, 1)  # the x-signature of the region
print(xs)

x = np.arange(region.shape[0])
f, axarr = plt.subplots(1,2, sharey = True)
axarr[0].imshow(region,cmap='gray')
axarr[1].plot(xs, x)
axarr[1].set_ylim(region.shape[0]-1,0)
plt.show()

# Find the indices of the x-signature local maxima
local_maxima = np.nonzero(np.r_[False, xs[1:] > xs[:-1]] & np.r_[xs[:-1] >= xs[1:], False])[0]

x = np.arange(region.shape[0])
plt.plot(x, xs)
plt.xticks(local_maxima)
plt.grid(True, axis='x')
plt.show()

# Calculate all the distances between consecutive peaks
distances = local_maxima[1:] - local_maxima[:-1]
print(distances)

# Estimate the ridge line period as the average of the above distances
ridge_period = np.average(distances)
print(ridge_period)

# Create the filter bank
or_count = 8
gabor_bank = [gabor_kernel(ridge_period, o) for o in np.arange(0, np.pi, np.pi / or_count)]

# Filter the whole image with each filter
# Note that the negative image is actually used, to have white ridges on a black background as a result
nf = 255 - fingerprint
all_filtered = np.array([cv.filter2D(nf, cv.CV_32F, f) for f in gabor_bank])
show(nf, *all_filtered)

y_coords, x_coords = np.indices(fingerprint.shape)

# For each pixel, find the index of the closest orientation in the gabor bank
orientation_idx = np.round(((orientations % np.pi) / np.pi) * or_count).astype(np.int32) % or_count
# Take the corresponding convolution result for each pixel, to assemble the final result
filtered = all_filtered[orientation_idx, y_coords, x_coords]
# Convert to gray scale and apply the mask
enhanced = mask & np.clip(filtered, 0, 255).astype(np.uint8)
show(fingerprint, enhanced)

# Binarization
_, ridge_lines = cv.threshold(enhanced, 32, 255, cv.THRESH_BINARY)
show(fingerprint, ridge_lines, cv.merge((ridge_lines, fingerprint, fingerprint)))

# Thinning
skeleton = cv.ximgproc.thinning(ridge_lines, thinningType=cv.ximgproc.THINNING_GUOHALL)
show(skeleton, cv.merge((fingerprint, fingerprint, skeleton)))

def compute_crossing_number(values):
    return np.count_nonzero(values < np.roll(values, -1))

# Create a filter that converts any 8-neighborhood into the corresponding byte value [0,255]
cn_filter = np.array([[  1,  2,  4],
                      [128,  0,  8],
                      [ 64, 32, 16]])

# Create a lookup table that maps each byte value to the corresponding crossing number
all_8_neighborhoods = [np.array([int(d) for d in f'{x:08b}'])[::-1] for x in range(256)]
cn_lut = np.array([compute_crossing_number(x) for x in all_8_neighborhoods]).astype(np.uint8)

# Skeleton: from 0/255 to 0/1 values
skeleton01 = np.where(skeleton != 0, 1, 0).astype(np.uint8)
# Apply the filter to encode the 8-neighborhood of each pixel into a byte [0,255]
neighborhood_values = cv.filter2D(skeleton01, -1, cn_filter, borderType=cv.BORDER_CONSTANT)
# Apply the lookup table to obtain the crossing number of each pixel from the byte value of its neighborhood
cn = cv.LUT(neighborhood_values, cn_lut)
# Keep only crossing numbers on the skeleton
cn[skeleton == 0] = 0

# crossing number == 1 --> termination, crossing number == 3 --> bifurcation
minutiae = [(x, y, cn[y, x] == 1) for y, x in zip(*np.where(np.isin(cn, [1, 3])))]

show(draw_minutiae(fingerprint, minutiae), skeleton, draw_minutiae(skeleton, minutiae))
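
The AIM asks for an accuracy assessment via an ROC curve, but the source above stops at minutiae extraction. The following is only a sketch of how an ROC could be plotted once genuine and impostor match scores are available; the score arrays are hypothetical placeholders, and scikit-learn is assumed to be installed:

# Hypothetical sketch: ROC from match scores (the score arrays below are placeholders)
from sklearn.metrics import roc_curve, auc

genuine_scores = np.array([0.9, 0.8, 0.85, 0.7, 0.95])   # hypothetical same-finger scores
impostor_scores = np.array([0.2, 0.35, 0.1, 0.4, 0.25])  # hypothetical different-finger scores
y_true = np.r_[np.ones_like(genuine_scores), np.zeros_like(impostor_scores)]
y_score = np.r_[genuine_scores, impostor_scores]

fpr, tpr, _ = roc_curve(y_true, y_score)
plt.plot(fpr, tpr, label=f'AUC = {auc(fpr, tpr):.2f}')
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.legend()
plt.show()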

OUTPUT:


RESULTS: The above program using texture-based image classification, with the accuracy level assessed via the receiver operating characteristic curve, has been executed successfully.

Exp. No: 11

SLIDING WINDOW METHOD TO DETECT OBJECTS IN VIDEO

AIM: Implement a sliding-window method and detect the objects present in a video surveillance system.

SOURCE CODE:

import datetime
import tkinter as tk
from tkinter import filedialog
import cv2
import os

debug = True

# Load the pre-trained Haar cascade classifier for face detection
face_cascade = cv2.CascadeClassifier(
    cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')

def detect_face_from_video():
    file_path = filedialog.askopenfilename()

    if not file_path:
        return

    cap = cv2.VideoCapture(file_path)

    # Define the codec and create a VideoWriter object to save the output video
    fourcc = cv2.VideoWriter_fourcc(*'XVID')
    output_file = os.path.join(
        "output/video",
        f"output_video_{datetime.datetime.now().strftime('%Y%m%d%H%M%S')}.avi")
    out = cv2.VideoWriter(output_file, fourcc, 20.0, (640, 480))

    if debug:
        print("Video capture started")

    while True:
        ret, frame = cap.read()
        if not ret:
            break

        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

        faces = face_cascade.detectMultiScale(
            gray, scaleFactor=1.1, minNeighbors=5, minSize=(30, 30))

        for (x, y, w, h) in faces:
            cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 3)

        out.write(frame)

        cv2.imshow('Face Detection from Video', frame)

        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    cap.release()
    out.release()
    cv2.destroyAllWindows()

    if debug:
        print("Video capture completed")

detect_face_from_video()
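
Note that detectMultiScale already scans each frame with a sliding window internally at multiple scales. For illustration only, an explicit sliding-window loop over a single frame could look like the sketch below; the window size, step, and the classify_window placeholder are assumptions, not part of the program above:

# Illustrative sketch of an explicit sliding window over one frame
# (win, step, and classify_window are hypothetical placeholders)
def sliding_window(frame, win=(64, 64), step=32):
    h, w = frame.shape[:2]
    for y in range(0, h - win[1] + 1, step):
        for x in range(0, w - win[0] + 1, step):
            yield x, y, frame[y:y + win[1], x:x + win[0]]

# for x, y, patch in sliding_window(frame):
#     if classify_window(patch):  # any detector/classifier applied per window
#         cv2.rectangle(frame, (x, y), (x + 64, y + 64), (255, 0, 0), 2)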

OUTPUT:

RESULTS: The above program implementing a sliding-window method to detect the objects present in the video surveillance system has been executed successfully.
