Date:
Exp. No: 06
AIM: Implement color image processing to differentiate objects of different colors in an image.
SOURCE CODE:
import cv2
import numpy as np
import matplotlib.pyplot as plt
# Red color range (two ranges needed to cover the red hue wrap-around in HSV)
lower_red1 = np.array([0, 120, 70])
upper_red1 = np.array([10, 255, 255])
lower_red2 = np.array([170, 120, 70])
upper_red2 = np.array([180, 255, 255])
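# NOTE: the printed record omits the image-loading and masking steps between the color
# ranges above and the plots below; the lines that follow are a reconstructed sketch, and
# the input file name 'objects.jpg' and the blue/green HSV ranges are assumptions.
image = cv2.imread('objects.jpg')
image_rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
# Blue and green color ranges in HSV (a single range each; only red wraps around)
lower_blue = np.array([100, 150, 70])
upper_blue = np.array([130, 255, 255])
lower_green = np.array([40, 70, 70])
upper_green = np.array([80, 255, 255])
# Build one binary mask per color and keep only the matching pixels
red_mask = cv2.inRange(hsv, lower_red1, upper_red1) | cv2.inRange(hsv, lower_red2, upper_red2)
blue_mask = cv2.inRange(hsv, lower_blue, upper_blue)
green_mask = cv2.inRange(hsv, lower_green, upper_green)
red_objects_rgb = cv2.cvtColor(cv2.bitwise_and(image, image, mask=red_mask), cv2.COLOR_BGR2RGB)
blue_objects_rgb = cv2.cvtColor(cv2.bitwise_and(image, image, mask=blue_mask), cv2.COLOR_BGR2RGB)
green_objects_rgb = cv2.cvtColor(cv2.bitwise_and(image, image, mask=green_mask), cv2.COLOR_BGR2RGB)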
plt.subplot(2, 2, 1)
plt.imshow(image_rgb)
plt.title('Original Image')
plt.axis('off')
plt.subplot(2, 2, 2)
plt.imshow(red_objects_rgb)
plt.title('Red Objects')
plt.axis('off')
plt.subplot(2, 2, 3)
plt.imshow(blue_objects_rgb)
plt.title('Blue Objects')
plt.axis('off')
plt.subplot(2, 2, 4)
plt.imshow(green_objects_rgb)
plt.title('Green Objects')
plt.axis('off')
plt.tight_layout()
plt.show()
OUTPUT:
RESULTS: The above program to implement color image processing to differentiate objects of different colors has been executed successfully.
AIM: Implement Image segmentation using the K-means clustering method in a Fruit basket application.
SOURCE CODE:
import cv2
import numpy as np
import matplotlib.pyplot as plt
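# NOTE: the K-means steps themselves are missing from the printed record; the lines below
# are a reconstructed sketch, with the input file name 'fruit_basket.jpg' and K = 3 assumed.
image = cv2.imread('fruit_basket.jpg')
image_rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
# Flatten the image into an (N, 3) array of float32 pixel values for cv2.kmeans
pixels = image_rgb.reshape((-1, 3)).astype(np.float32)
K = 3
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 100, 0.2)
_, labels, centers = cv2.kmeans(pixels, K, None, criteria, 10, cv2.KMEANS_RANDOM_CENTERS)
# Replace every pixel by the center of its cluster and restore the original image shape
centers = np.uint8(centers)
segmented_image = centers[labels.flatten()].reshape(image_rgb.shape)
plt.subplot(1, 2, 1)
plt.imshow(image_rgb)
plt.title('Original Image')
plt.axis('off')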
plt.subplot(1, 2, 2)
plt.imshow(segmented_image)
plt.title('Segmented Image with K = {}'.format(K))
plt.axis('off')
plt.show()
OUTPUT:
RESULTS: The above program to implement image segmentation using the K-means clustering method in a
fruit basket application has been successful.
AIM: Implement Image segmentation using Watershed transform in any Scenery photo.
SOURCE CODE:
import cv2
import numpy as np
from matplotlib import pyplot as plt
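# NOTE: the record relies on a plotting helper `imshow(...)` that is not defined in the
# printout; the minimal definition below is an assumed reconstruction of that helper.
def imshow(img, ax=None, cmap=None, title=None):
    # Draw on the supplied axes if given, otherwise on a fresh figure
    if ax is None:
        _, ax = plt.subplots()
    ax.imshow(img, cmap=cmap)
    ax.set_title(title)
    ax.axis('off')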
# Image loading
img = cv2.imread("/content/coins.png")
imshow(cv2.cvtColor(img, cv2.COLOR_BGR2RGB), title="Original Image")
# Threshold Processing (Otsu's threshold on the grayscale image)
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
ret, bin_img = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)
imshow(bin_img, cmap='gray', title="Threshold Image")
# Noise removal
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))
bin_img = cv2.morphologyEx(bin_img, cv2.MORPH_OPEN, kernel, iterations=2)
imshow(bin_img, cmap='gray', title="Noise Removed")
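# NOTE: the printout jumps from noise removal to the distance transform; the sure-background
# step and the 2x2 plotting grid (`axes`) used below are assumed reconstructions.
sure_bg = cv2.dilate(bin_img, kernel, iterations=3)
fig, axes = plt.subplots(2, 2, figsize=(10, 10))
imshow(sure_bg, ax=axes[0, 0], cmap='gray', title='Sure Background')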
# Distance transform
dist = cv2.distanceTransform(bin_img, cv2.DIST_L2, 5)
imshow(dist, ax=axes[0, 1], cmap='gray', title='Distance Transform')
# Foreground area
ret, sure_fg = cv2.threshold(dist, 0.5 * dist.max(), 255, cv2.THRESH_BINARY)
sure_fg = sure_fg.astype(np.uint8)
imshow(sure_fg, ax=axes[1, 0], cmap='gray', title='Sure Foreground')
# Unknown area
unknown = cv2.subtract(sure_bg, sure_fg)
imshow(unknown, ax=axes[1, 1], cmap='gray', title='Unknown')
plt.show()
# Marker labelling
ret, markers = cv2.connectedComponents(sure_fg)
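# NOTE: the two standard marker adjustments below are missing from the printout: shift all
# labels up by one so the background is not 0, then mark the unknown region with 0.
markers += 1
markers[unknown == 255] = 0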
# Watershed Algorithm
markers = cv2.watershed(img, markers)
labels = np.unique(markers)
coins = []
for label in labels[2:]:
    # Create a binary image where only the area of this label is in the foreground
    target = np.where(markers == label, 255, 0).astype(np.uint8)
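    # NOTE: the rest of the loop and the final display are missing from the printout; the
    # contour extraction and drawing below are an assumed completion of the example.
    contours, _ = cv2.findContours(target, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    coins.append(contours[0])
# Draw the detected object boundaries on a copy of the original image
result = cv2.drawContours(img.copy(), coins, -1, color=(0, 23, 223), thickness=2)
imshow(cv2.cvtColor(result, cv2.COLOR_BGR2RGB), title="Segmented Objects")
plt.show()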
OUTPUT:
RESULTS: The above program to implement image segmentation using the watershed transform on a scenery photo has been executed successfully.
AIM: Implement a program for Features Identification in the Fingerprint Recognition System.
SOURCE CODE:
import math
import numpy as np
import cv2 as cv
import matplotlib.pyplot as plt
from utils import *
from ipywidgets import interact
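# NOTE: the printout omits the image loading and the gradient computation that the box
# filters below depend on; the lines that follow are a reconstructed sketch, and the file
# name 'fingerprint.png' is an assumption.
fingerprint = cv.imread('fingerprint.png', cv.IMREAD_GRAYSCALE)
# Sobel gradients and their products, used to estimate the local ridge orientation
gx, gy = cv.Sobel(fingerprint, cv.CV_32F, 1, 0), cv.Sobel(fingerprint, cv.CV_32F, 0, 1)
gx2, gy2 = gx**2, gy**2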
W = (23, 23)
gxx = cv.boxFilter(gx2, -1, W, normalize = False)
gyy = cv.boxFilter(gy2, -1, W, normalize = False)
gxy = cv.boxFilter(gx * gy, -1, W, normalize = False)
gxx_gyy = gxx - gyy
gxy2 = 2 * gxy
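# NOTE: the printout ends here; the ridge-orientation field that these sums are normally
# used for is sketched below as an assumed continuation (half the angle of the
# (gxx - gyy, 2*gxy) vector, with the sign flipped to account for the image y axis).
orientations = (cv.phase(gxx_gyy, -gxy2) + np.pi) / 2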
OUTPUT:
RESULTS: The above program for feature identification in the fingerprint recognition system has been executed successfully.
AIM: Implement a program using Texture-based Image Classification and find the accuracy level using the Receiver Operating Characteristic (ROC) curve.
SOURCE CODE:
import math
import numpy as np
import cv2 as cv
import matplotlib.pyplot as plt
from utils import *
from ipywidgets import interact
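# NOTE: as in the previous experiment, the image loading and gradient computation are
# missing from the printout; they are reconstructed here, with the file name
# 'fingerprint.png' assumed.
fingerprint = cv.imread('fingerprint.png', cv.IMREAD_GRAYSCALE)
# Sobel gradients and their products, used to estimate the local ridge orientation
gx, gy = cv.Sobel(fingerprint, cv.CV_32F, 1, 0), cv.Sobel(fingerprint, cv.CV_32F, 0, 1)
gx2, gy2 = gx**2, gy**2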
W = (23, 23)
gxx = cv.boxFilter(gx2, -1, W, normalize = False)
gyy = cv.boxFilter(gy2, -1, W, normalize = False)
gxy = cv.boxFilter(gx * gy, -1, W, normalize = False)
gxx_gyy = gxx - gyy
gxy2 = 2 * gxy
region = fingerprint[10:90,80:130]
show(region)
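# NOTE: `xs` is plotted below but never defined in the printout; it is reconstructed here as
# the row-wise sum of gray levels in the region (the x-signature used to find ridge peaks).
xs = np.sum(region, 1)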
x = np.arange(region.shape[0])
f, axarr = plt.subplots(1,2, sharey = True)
axarr[0].imshow(region,cmap='gray')
axarr[1].plot(xs, x)
axarr[1].set_ylim(region.shape[0]-1,0)
plt.show()
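# NOTE: `local_maxima` and `distances` are used below but not defined in the printout; they
# are reconstructed here as the peak positions of the x-signature and the gaps between them.
local_maxima = np.nonzero(np.r_[False, xs[1:] > xs[:-1]] & np.r_[xs[:-1] >= xs[1:], False])[0]
distances = local_maxima[1:] - local_maxima[:-1]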
x = np.arange(region.shape[0])
plt.plot(x, xs)
plt.xticks(local_maxima)
plt.grid(True, axis='x')
plt.show()
# Estimate the ridge line period as the average of the above distances
ridge_period = np.average(distances)
print(ridge_period)
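# NOTE: the enhancement step that produces `enhanced` is absent from the printout; in the
# original notebook it is an orientation-steered Gabor filter bank. As a simplified,
# assumed stand-in, a single Gabor filter tuned to the estimated ridge period is applied
# to the negated fingerprint so that ridges come out bright.
gabor = cv.getGaborKernel((69, 69), 5.0, np.pi / 2, ridge_period, 1.0, 0)
enhanced = np.clip(cv.filter2D(255 - fingerprint, cv.CV_32F, gabor), 0, 255).astype(np.uint8)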
# Binarization
_, ridge_lines = cv.threshold(enhanced, 32, 255, cv.THRESH_BINARY)
show(fingerprint, ridge_lines, cv.merge((ridge_lines, fingerprint, fingerprint)))
# Thinning
skeleton = cv.ximgproc.thinning(ridge_lines, thinningType=cv.ximgproc.THINNING_GUOHALL)
show(skeleton, cv.merge((fingerprint, fingerprint, skeleton)))
def compute_crossing_number(values):
    return np.count_nonzero(values < np.roll(values, -1))
# Create a filter that converts any 8-neighborhood into the corresponding byte value [0,255]
cn_filter = np.array([[  1,  2,  4],
                      [128,  0,  8],
                      [ 64, 32, 16]])
# Create a lookup table that maps each byte value to the corresponding crossing number
all_8_neighborhoods = [np.array([int(d) for d in f'{x:08b}'])[::-1] for x in range(256)]
cn_lut = np.array([compute_crossing_number(x) for x in all_8_neighborhoods]).astype(np.uint8)
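# NOTE: the printout stops at the lookup table; applying it to the skeleton, as sketched
# below, gives the crossing number of every ridge pixel, from which terminations (cn == 1)
# and bifurcations (cn == 3) can be read off.
skeleton01 = np.where(skeleton != 0, 1, 0).astype(np.uint8)
# Encode the 8-neighborhood of each pixel into a byte, then look up its crossing number
cn_values = cv.filter2D(skeleton01, -1, cn_filter, borderType=cv.BORDER_CONSTANT)
cn = cv.LUT(cn_values, cn_lut)
cn[skeleton == 0] = 0
# Minutiae as (x, y, is_termination) tuples
minutiae = [(x, y, cn[y, x] == 1) for y, x in zip(*np.where(np.isin(cn, [1, 3])))]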
OUTPUT:
RESULTS: The above program for texture-based image classification, with the accuracy level evaluated using the Receiver Operating Characteristic (ROC) curve, has been executed successfully.
AIM: Implement a Sliding window method and detect the objects present in the Video Surveillance system.
SOURCE CODE:
import datetime
import tkinter as tk
from tkinter import filedialog
import cv2
import os
debug = True
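# NOTE: no detector is defined in the printed record; the Haar cascade below is an assumed
# choice, loaded from the cascade files bundled with OpenCV.
face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + "haarcascade_frontalface_default.xml")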
def detect_face_from_video():
    # Hide the empty Tk root window that the file dialog would otherwise create
    root = tk.Tk()
    root.withdraw()
    file_path = filedialog.askopenfilename()
    if not file_path:
        return
    cap = cv2.VideoCapture(file_path)
    # define the codec and create a VideoWriter object to save the output video
    fourcc = cv2.VideoWriter_fourcc(*'XVID')
    os.makedirs("output/video", exist_ok=True)
    output_file = os.path.join(
        "output/video",
        f"output_video_{datetime.datetime.now().strftime('%Y%m%d%H%M%S')}.avi")
    out = cv2.VideoWriter(output_file, fourcc, 20.0, (640, 480))
    if debug:
        print("Video capture started")
    while True:
        ret, frame = cap.read()
        if not ret:
            break
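        # NOTE: the printed record writes frames out without any detection step; the
        # Haar-cascade detection below (which internally scans each frame with a sliding
        # window) is an assumed sketch of the missing object-detection stage.
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        faces = face_cascade.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=5)
        for (x, y, w, h) in faces:
            cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)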
        # Resize so the frame matches the size passed to the VideoWriter above
        frame = cv2.resize(frame, (640, 480))
        out.write(frame)
    cap.release()
    out.release()
    cv2.destroyAllWindows()
    if debug:
        print("Video capture completed")

detect_face_from_video()
OUTPUT:
RESULTS: The above program to implement the sliding window method and detect the objects present in a video surveillance system has been executed successfully.