22AM602 – Computer Vision and Digital Imaging Laboratory
Lab Manual
S6 (AIML)

Prepared by:
Dr. Rajasekar S S, AP/AIML
Ms. Karthika S, AP/AIML

Approved by:
Dr. A. Kodieswari, Head/AIML
Exp. No. : 1
AIM/OBJECTIVE:
To detect and label basic geometric shapes (triangle, square, rectangle, circle) in a surveillance image using contour approximation in a Python program.
PROCEDURE:
PROGRAM:
import cv2
import numpy as np

def detect_shapes(image_path):
    image = cv2.imread(image_path)
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    blurred = cv2.GaussianBlur(gray, (5, 5), 0)
    edges = cv2.Canny(blurred, 50, 150)
    contours, _ = cv2.findContours(edges, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

    for contour in contours:
        # Approximate each contour with a simpler polygon
        epsilon = 0.04 * cv2.arcLength(contour, True)
        approx = cv2.approxPolyDP(contour, epsilon, True)
        x, y, w, h = cv2.boundingRect(approx)

        # Classify the shape by the number of polygon vertices
        shape_name = "Unknown"
        if len(approx) == 3:
            shape_name = "Triangle"
        elif len(approx) == 4:
            aspect_ratio = float(w) / h
            shape_name = "Square" if 0.9 <= aspect_ratio <= 1.1 else "Rectangle"
        elif len(approx) > 4:
            shape_name = "Circle"

        # Draw the contour and label the detected shape
        cv2.drawContours(image, [approx], -1, (0, 255, 0), 2)
        cv2.putText(image, shape_name, (x, y - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)

    cv2.imshow('Detected Shapes', image)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
    cv2.imwrite('output_detected_shapes.jpg', image)

detect_shapes('surveillance_image.jpg')
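If a real surveillance image is not at hand, the function can be exercised on a synthetic test image. The following is a minimal, illustrative test harness; the drawn shapes and the file name test_shapes.jpg are assumptions, not part of the original program:

import cv2
import numpy as np

# Create a blank white canvas and draw one shape of each expected class
canvas = np.full((300, 500, 3), 255, dtype=np.uint8)
cv2.rectangle(canvas, (30, 30), (130, 130), (0, 0, 0), -1)    # square
cv2.rectangle(canvas, (180, 40), (330, 120), (0, 0, 0), -1)   # rectangle
cv2.circle(canvas, (410, 80), 50, (0, 0, 0), -1)              # circle
cv2.fillPoly(canvas, [np.array([[80, 180], [30, 270], [130, 270]], dtype=np.int32)], (0, 0, 0))  # triangle

cv2.imwrite('test_shapes.jpg', canvas)
detect_shapes('test_shapes.jpg')   # expected to label the four drawn shapes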
OUTPUT:
RESULT:
Thus the Python program to detect and label basic geometric shapes in a surveillance image was executed and the output was verified successfully.
Exp. No. : 2
AIM/OBJECTIVE:
To design and implement Automated Image Noise Removal for Medical Imaging using a Python program.
MATERIALS REQUIRED/SOFTWARE REQUIRED:
Python
Online compiler
PROCEDURE:
PROGRAM:
import numpy as np
import cv2
from matplotlib import pyplot as plt
# Read the noisy input image and apply Non-Local Means denoising
img = cv2.imread('bear.png')
dst = cv2.fastNlMeansDenoisingColored(img, None, 10, 10, 7, 15)

# Show the original and denoised images side by side (convert BGR to RGB for matplotlib)
plt.subplot(121), plt.imshow(cv2.cvtColor(img, cv2.COLOR_BGR2RGB)), plt.title('Noisy')
plt.subplot(122), plt.imshow(cv2.cvtColor(dst, cv2.COLOR_BGR2RGB)), plt.title('Denoised')
plt.show()
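For a quantitative check of the denoising, one option is to add synthetic noise to a clean image and compare the peak signal-to-noise ratio (PSNR) before and after filtering. A minimal sketch, assuming a clean reference image named clean.png (not part of the original program):

import cv2
import numpy as np

clean = cv2.imread('clean.png')                   # clean reference image (assumed file name)
noise = np.random.normal(0, 15, clean.shape)      # synthetic Gaussian noise, sigma = 15
noisy = np.clip(clean.astype(np.float64) + noise, 0, 255).astype(np.uint8)

denoised = cv2.fastNlMeansDenoisingColored(noisy, None, 10, 10, 7, 15)

# A higher PSNR against the clean reference indicates better restoration
print("PSNR noisy   :", cv2.PSNR(clean, noisy))
print("PSNR denoised:", cv2.PSNR(clean, denoised))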
OUTPUT:
RESULT:
Thus the Python program for Automated Image Noise Removal for Medical Imaging was executed and the output was verified successfully.
Exp. No. : 3
AIM/OBJECTIVE:
To design an app for Automated Shape Validation through Edge and Corner Detection using a Python program.
MATERIALS REQUIRED/SOFTWARE REQUIRED:
Python
Online compiler
PROCEDURE:
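PROGRAM (illustrative sketch):
One possible implementation combines Canny edge detection with Shi-Tomasi corner detection and validates a shape by its corner count. The input file name shape.jpg, the expected corner count, and the detector parameters below are assumptions for illustration:

import cv2
import numpy as np

def validate_shape(image_path, expected_corners=4):
    image = cv2.imread(image_path)
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

    # Edge detection outlines the object boundary
    edges = cv2.Canny(cv2.GaussianBlur(gray, (5, 5), 0), 50, 150)

    # Shi-Tomasi corner detection counts the dominant corners
    corners = cv2.goodFeaturesToTrack(gray, maxCorners=25, qualityLevel=0.1, minDistance=10)
    corner_count = 0 if corners is None else len(corners)

    # Mark detected corners on the image for visual inspection
    if corners is not None:
        for c in corners:
            x, y = c.ravel().astype(int)
            cv2.circle(image, (x, y), 4, (0, 0, 255), -1)

    status = "VALID" if corner_count == expected_corners else "INVALID"
    print(f"Corners detected: {corner_count} -> shape is {status}")
    cv2.imwrite('validated_shape.jpg', image)
    cv2.imwrite('shape_edges.jpg', edges)

validate_shape('shape.jpg', expected_corners=4)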
OUTPUT:
RESULT:
Thus the Python program for Automated Shape Validation through Edge and Corner Detection was executed and the output was verified successfully.
Exp. No. : 4
AIM/OBJECTIVE:
To design a lane detection system using perspective projection to improve road navigation using a Python program.
MATERIALS REQUIRED/SOFTWARE REQUIRED:
Python
Online compiler
PROCEDURE:
PROGRAM:
import cv2
import numpy as np
import matplotlib.pyplot as plt

def perspective_transform(image):
    """
    Apply a perspective transformation to get a bird's-eye view of the road.
    """
    height, width = image.shape[:2]
    # Trapezoidal region of the road in the original view
    src_points = np.float32([
        [width * 0.45, height * 0.65],  # Top-left
        [width * 0.55, height * 0.65],  # Top-right
        [width * 0.85, height * 0.90],  # Bottom-right
        [width * 0.15, height * 0.90]   # Bottom-left
    ])
    # Rectangle the trapezoid is mapped onto in the bird's-eye view
    dst_points = np.float32([
        [width * 0.2, 0],       # Top-left
        [width * 0.8, 0],       # Top-right
        [width * 0.8, height],  # Bottom-right
        [width * 0.2, height]   # Bottom-left
    ])
    matrix = cv2.getPerspectiveTransform(src_points, dst_points)
    transformed = cv2.warpPerspective(image, matrix, (width, height))
    return transformed

def detect_edges(image):
    """
    Apply Canny edge detection to extract lane edges.
    """
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    blurred = cv2.GaussianBlur(gray, (5, 5), 0)
    edges = cv2.Canny(blurred, 50, 150)
    return edges

def detect_lanes(image):
    """
    Detect lane lines using the probabilistic Hough Transform.
    """
    edges = detect_edges(image)
    lines = cv2.HoughLinesP(edges, 1, np.pi / 180, 50, minLineLength=50, maxLineGap=150)
    lane_image = np.zeros_like(image)
    if lines is not None:
        for line in lines:
            x1, y1, x2, y2 = line[0]
            cv2.line(lane_image, (x1, y1), (x2, y2), (0, 255, 0), 5)
    lane_overlay = cv2.addWeighted(image, 0.8, lane_image, 1, 1)
    return lane_overlay

image = cv2.imread('road_image.jpg')
if image is None:
    print("Error: Image not found!")
    exit()

bird_eye_view = perspective_transform(image)
lane_detected = detect_lanes(bird_eye_view)

fig, axes = plt.subplots(1, 3, figsize=(15, 5))
titles = ['Original Image', 'Bird-Eye View (Transformed)', 'Lane Detection']
images = [image, bird_eye_view, lane_detected]
for ax, img, title in zip(axes, images, titles):
    ax.imshow(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
    ax.set_title(title)
    ax.axis('off')
plt.show()

cv2.imwrite('bird_eye_view.jpg', bird_eye_view)
cv2.imwrite('lane_detected.jpg', lane_detected)
print("Lane detection completed! Images saved as 'bird_eye_view.jpg' and 'lane_detected.jpg'")
OUTPUT:
RESULT:
Thus the Python program to design a lane detection system using perspective projection to improve road navigation was executed and the output was verified successfully.
Exp. No.: 5
AIM/OBJECTIVE:
To detect the corners of an object using a corner detection algorithm and to design a home automation system in which users control smart home devices such as lights, fans, or appliances through hand gestures, using a Python program.
MATERIALS REQUIRED/SOFTWARE REQUIRED:
Python
Online compiler
PROCEDURE:
PROGRAM:
import cv2
import numpy as np
import mediapipe as mp

# Initialise MediaPipe hand tracking
mp_hands = mp.solutions.hands
mp_draw = mp.solutions.drawing_utils
hands = mp_hands.Hands(min_detection_confidence=0.7, min_tracking_confidence=0.7)
cap = cv2.VideoCapture(0)

def detect_corners(image):
    # Harris corner detection; detected corners are marked in red
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    gray = np.float32(gray)
    corners = cv2.cornerHarris(gray, 2, 3, 0.04)
    image[corners > 0.01 * corners.max()] = [0, 0, 255]
    return image

def recognize_gesture(landmarks):
    # A raised index finger (tip above its PIP joint) switches the lights on;
    # this gesture rule is a simple placeholder assumption
    if landmarks[8].y < landmarks[6].y:
        return "Lights ON"
    else:
        return "No Action"

while cap.isOpened():
    ret, frame = cap.read()
    if not ret:
        break
    frame = cv2.flip(frame, 1)
    rgb_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    results = hands.process(rgb_frame)
    if results.multi_hand_landmarks:
        for hand_landmarks in results.multi_hand_landmarks:
            mp_draw.draw_landmarks(frame, hand_landmarks, mp_hands.HAND_CONNECTIONS)
            gesture = recognize_gesture(hand_landmarks.landmark)
            cv2.putText(frame, f"Action: {gesture}", (10, 50), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)
            if gesture == "Lights ON":
                print("Turning Lights ON")
    # Mark corners on each frame to satisfy the corner-detection objective
    frame = detect_corners(frame)
    cv2.imshow('Gesture Controlled Home Automation', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):  # press 'q' to quit
        break

cap.release()
cv2.destroyAllWindows()
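Mapping recognised gestures to device commands can be kept separate from the vision code. A minimal sketch of such a mapping; the gesture names and device actions are assumptions, and a real deployment would call a relay, GPIO, or smart-home API in place of the print statement:

# Hypothetical gesture-to-action table; extend with more gestures as needed
GESTURE_ACTIONS = {
    "Lights ON": ("light", True),
    "Lights OFF": ("light", False),
    "Fan ON": ("fan", True),
}

def dispatch(gesture):
    # Look up the device and desired state for the recognised gesture
    action = GESTURE_ACTIONS.get(gesture)
    if action is None:
        return
    device, state = action
    # Placeholder for the real control call (relay, GPIO, MQTT, etc.)
    print(f"Setting {device} to {'ON' if state else 'OFF'}")

dispatch("Lights ON")   # -> Setting light to ON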
OUTPUT:
RESULT:
Thus the Python program to detect the corners of an object using a corner detection algorithm and to control smart home devices such as lights, fans, or appliances using hand gestures was executed and the output was verified successfully.
DESIGN THE SYSTEM THAT CLASSIFY THE VEHICLES ON THE ROAD AND
COUNT THE NUMBER OF VEHICLES THAT TRAVEL THROUGH A ROAD USING
THE FASTER R-CNN ALGORITHM.
Exp. No. : 6
AIM/OBJECTIVE:
To design a system that classifies the vehicles on the road and counts the number of vehicles that travel through a road using the Faster R-CNN algorithm in a Python program.
MATERIALS REQUIRED/SOFTWARE REQUIRED:
Python
Online compiler
PROCEDURE:
PROGRAM:
import cv2
import torch
import torchvision
import numpy as np
from torchvision import transforms
from matplotlib import pyplot as plt

# Load a Faster R-CNN model pre-trained on the COCO dataset
model = torchvision.models.detection.fasterrcnn_resnet50_fpn(pretrained=True)
model.eval()

# COCO class indices for the vehicle categories of interest
# (Faster R-CNN is trained on COCO; only the vehicle classes are listed here)
VEHICLE_LABELS = {3: "Car", 4: "Motorcycle", 6: "Bus", 8: "Truck"}

def detect_vehicles(image):
    # Convert the BGR image to an RGB tensor and run the detector
    transform = transforms.Compose([transforms.ToTensor()])
    image_tensor = transform(cv2.cvtColor(image, cv2.COLOR_BGR2RGB)).unsqueeze(0)
    with torch.no_grad():
        predictions = model(image_tensor)

    # Extract results
    boxes = predictions[0]['boxes'].cpu().numpy()
    labels = predictions[0]['labels'].cpu().numpy()
    scores = predictions[0]['scores'].cpu().numpy()

    # Keep only confident detections that belong to a vehicle class
    confidence_threshold = 0.6
    filtered_boxes = []
    detected_vehicles = []
    vehicle_count = 0
    for i in range(len(scores)):
        if scores[i] >= confidence_threshold and int(labels[i]) in VEHICLE_LABELS:
            vehicle_count += 1
            filtered_boxes.append(boxes[i])
            detected_vehicles.append(VEHICLE_LABELS[int(labels[i])])
    return filtered_boxes, detected_vehicles, vehicle_count

image_path = 'traffic_image.jpg'   # example input file name (assumed)
image = cv2.imread(image_path)
if image is None:
    print("Error: Image not found!")
    exit()

boxes, vehicles, count = detect_vehicles(image)

# Draw the detections and their class labels on the image
for i, box in enumerate(boxes):
    x1, y1, x2, y2 = [int(v) for v in box]
    label = vehicles[i]
    cv2.rectangle(image, (x1, y1), (x2, y2), (0, 255, 0), 2)
    cv2.putText(image, label, (x1, y1 - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 255), 2)
print(f"Total vehicles detected: {count}")

# Display results
plt.figure(figsize=(10, 5))
plt.imshow(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))
plt.axis('off')
plt.show()
cv2.imwrite("detected_vehicles.jpg", image)
OUTPUT:
RESULT:
Thus the system to classify the vehicles on the road and count the number of vehicles that travel through a road using the Faster R-CNN algorithm in a Python program was executed and the output was verified successfully.