The document details a series of image processing operations using Python libraries such as NumPy, Matplotlib, and PIL. It covers reading, displaying, saving images in various formats, manipulating color planes, converting images to grayscale and binary, resizing and rotating images, and performing arithmetic operations on images. Additionally, it includes intensity transformations like log transformation, power law transformation, contrast stretching, and gray level slicing, with visual outputs for each operation.

merged-notebooks

April 27, 2025

[2]: # Import necessary libraries


import numpy as np
import matplotlib.pyplot as plt
from PIL import Image
import io

# a. Read, Display and write color image in other formats


# Replace 'your_image.jpg' with your actual image filename
image_path = r'C:\Users\dkg19\OneDrive\Desktop\DIP LAB\AKSHAT PHOTO.jpg'
img = Image.open(image_path)

# Display the original image


plt.figure(figsize=(10, 10))
plt.subplot(2, 3, 1)
plt.imshow(img)
plt.title('Original Image')
plt.axis('off')

# Save image in different formats


img.save('image_png.png')
img.save('image_bmp.bmp')
img.save('image_tiff.tiff')
print("Image saved in PNG, BMP, and TIFF formats.")

# b. Find RED, GREEN and BLUE planes of the color image


# Convert image to numpy array
img_array = np.array(img)

# Create RED plane (keep red, set others to 0)


red_plane = img_array.copy()
red_plane[:, :, 1] = 0 # Set green channel to 0
red_plane[:, :, 2] = 0 # Set blue channel to 0

# Create GREEN plane (keep green, set others to 0)


green_plane = img_array.copy()
green_plane[:, :, 0] = 0 # Set red channel to 0
green_plane[:, :, 2] = 0 # Set blue channel to 0

# Create BLUE plane (keep blue, set others to 0)
blue_plane = img_array.copy()
blue_plane[:, :, 0] = 0 # Set red channel to 0
blue_plane[:, :, 1] = 0 # Set green channel to 0

# Display the RGB planes


plt.subplot(2, 3, 2)
plt.imshow(red_plane)
plt.title('Red Plane')
plt.axis('off')

plt.subplot(2, 3, 3)
plt.imshow(green_plane)
plt.title('Green Plane')
plt.axis('off')

plt.subplot(2, 3, 4)
plt.imshow(blue_plane)
plt.title('Blue Plane')
plt.axis('off')

# c. Convert color image to grayscale and binary image


# Convert to grayscale
img_gray = img.convert('L')
img_gray_array = np.array(img_gray)

# Convert to binary (threshold at 128)


threshold = 128
img_binary = img_gray_array > threshold
img_binary = img_binary.astype(np.uint8) * 255

# Display grayscale and binary images


plt.subplot(2, 3, 5)
plt.imshow(img_gray, cmap='gray')
plt.title('Grayscale Image')
plt.axis('off')

plt.subplot(2, 3, 6)
plt.imshow(img_binary, cmap='binary')
plt.title('Binary Image')
plt.axis('off')

plt.tight_layout()
plt.show()

# Save grayscale and binary images

img_gray.save('image_gray.png')
Image.fromarray(img_binary).save('image_binary.png')

# d. Resize and rotate the image


# Resize to half size
half_size = (img.width // 2, img.height // 2)
img_half = img.resize(half_size)

# Resize to quarter size


quarter_size = (img.width // 4, img.height // 4)
img_quarter = img.resize(quarter_size)

# Rotate the image


img_45 = img.rotate(45)
img_90 = img.rotate(90)
img_180 = img.rotate(180)

# Display resized and rotated images


plt.figure(figsize=(15, 10))

plt.subplot(2, 3, 1)
plt.imshow(img_half)
plt.title('Half Size')
plt.axis('off')

plt.subplot(2, 3, 2)
plt.imshow(img_quarter)
plt.title('Quarter Size')
plt.axis('off')

plt.subplot(2, 3, 3)
plt.imshow(img_45)
plt.title('Rotated 45°')
plt.axis('off')

plt.subplot(2, 3, 4)
plt.imshow(img_90)
plt.title('Rotated 90°')
plt.axis('off')

plt.subplot(2, 3, 5)
plt.imshow(img_180)
plt.title('Rotated 180°')
plt.axis('off')

plt.tight_layout()
plt.show()

print("All operations completed successfully!")

Image saved in PNG, BMP, and TIFF formats.

All operations completed successfully!
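
A note on the grayscale step above: PIL's convert('L') applies the ITU-R 601 luma weights. The following is a minimal manual equivalent, shown only for reference (it assumes the img_array created in this cell and is not part of the original lab code):

[ ]: # Hypothetical manual grayscale conversion with ITU-R 601 weights,
# equivalent to img.convert('L') up to rounding
gray_manual = (0.299 * img_array[:, :, 0] + 0.587 * img_array[:, :, 1] + 0.114 * img_array[:, :, 2]).astype(np.uint8)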

[ ]:

[1]: import numpy as np


import matplotlib.pyplot as plt
from PIL import Image, ImageDraw

# Create image A with horizontal stripes


def create_horizontal_stripes(width, height, stripe_width):
img_array = np.zeros((height, width), dtype=np.uint8)
for i in range(0, height, stripe_width * 2):
if i + stripe_width <= height:
img_array[i:i+stripe_width, :] = 255
return Image.fromarray(img_array)

# Create image B with vertical stripes


def create_vertical_stripes(width, height, stripe_width):
img_array = np.zeros((height, width), dtype=np.uint8)
for i in range(0, width, stripe_width * 2):
if i + stripe_width <= width:
img_array[:, i:i+stripe_width] = 255
return Image.fromarray(img_array)

# Create the images


A = create_horizontal_stripes(1024, 1024, 128)
B = create_vertical_stripes(1024, 1024, 128)

# Display images A and B


plt.figure(figsize=(12, 6))
plt.subplot(1, 2, 1)
plt.imshow(A, cmap='gray')
plt.title('Image A - Horizontal Stripes')
plt.axis('off')

plt.subplot(1, 2, 2)
plt.imshow(B, cmap='gray')
plt.title('Image B - Vertical Stripes')
plt.axis('off')

plt.tight_layout()
plt.show()

# a. Image addition of A and B


A_array = np.array(A)
B_array = np.array(B)

# Addition (with clipping to 255)


addition = np.clip(A_array.astype(np.int16) + B_array.astype(np.int16), 0, 255).astype(np.uint8) # cast first so uint8 addition does not wrap
addition_img = Image.fromarray(addition)

# b. Subtraction of A and B
subtraction = np.clip(A_array.astype(np.int16) - B_array.astype(np.int16), 0, 255).astype(np.uint8) # cast first so uint8 subtraction does not wrap
subtraction_img = Image.fromarray(subtraction)

# c. Multiplying Images of A and B


# Normalize to [0,1] for multiplication
A_norm = A_array / 255.0
B_norm = B_array / 255.0
multiplication = (A_norm * B_norm * 255).astype(np.uint8)
multiplication_img = Image.fromarray(multiplication)

# Display the results of operations


plt.figure(figsize=(15, 5))
plt.subplot(1, 3, 1)
plt.imshow(addition_img, cmap='gray')
plt.title('A + B')
plt.axis('off')

plt.subplot(1, 3, 2)
plt.imshow(subtraction_img, cmap='gray')
plt.title('A - B')
plt.axis('off')

plt.subplot(1, 3, 3)
plt.imshow(multiplication_img, cmap='gray')
plt.title('A * B')
plt.axis('off')

plt.tight_layout()
plt.show()

# d. Create grayscale image with sinusoidal intensity variation


def create_sinusoidal_image(width, height):
x = np.linspace(0, 2*np.pi, width) # One complete cycle across width
y = np.linspace(0, 8*np.pi, height) # Multiple cycles across height

# Create a meshgrid for 2D coordinates


X, Y = np.meshgrid(x, y)

# Create sinusoidal pattern


Z = (np.sin(X) + 1) * 127.5 # Scale [-1, 1] to [0, 255]; intensity varies along the width only (Y is unused here)
return Image.fromarray(Z.astype(np.uint8))

# Create sinusoidal image


sinusoidal_img = create_sinusoidal_image(1024, 256)

plt.figure(figsize=(12, 4))
plt.imshow(sinusoidal_img, cmap='gray')
plt.title('Sinusoidal Intensity Image')
plt.axis('off')
plt.show()

# e. Create white image with black box at center


def create_image_with_box(width, height, box_size):
# Create white image
img = Image.new('L', (width, height), color=255)
draw = ImageDraw.Draw(img)

# Calculate box position to center it


x1 = (width - box_size) // 2
y1 = (height - box_size) // 2
x2 = x1 + box_size
y2 = y1 + box_size

# Draw black box
draw.rectangle([x1, y1, x2, y2], fill=0)

return img

# Create image with black box


box_img = create_image_with_box(256, 256, 58)

plt.figure(figsize=(6, 6))
plt.imshow(box_img, cmap='gray')
plt.title('White Image with Black Box (58x58) at Center')
plt.axis('off')
plt.show()

print("All operations completed successfully!")

All operations completed successfully!
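
For comparison, the same three operations could be done directly on the PIL images with ImageChops, which saturates at 0 and 255 automatically. This is only a sketch of an alternative, not part of the original lab (it assumes the PIL images A and B created above):

[ ]: from PIL import ImageChops

# Saturating arithmetic on the PIL images themselves (cross-check of the NumPy results above)
add_pil = ImageChops.add(A, B)       # clips at 255
sub_pil = ImageChops.subtract(A, B)  # clips at 0
mul_pil = ImageChops.multiply(A, B)  # scales by 1/255, like the normalized product above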

[ ]:

[1]: import numpy as np
import matplotlib.pyplot as plt
from PIL import Image

# Load grayscale image


image_path = r'C:\Users\dkg19\OneDrive\Desktop\DIP LAB\image_gray.png' # Replace with your actual image path

img = Image.open(image_path).convert('L') # Ensure grayscale


img_array = np.array(img)

# Display original image


plt.figure(figsize=(5, 5))
plt.imshow(img_array, cmap='gray')
plt.title('Original Grayscale Image')
plt.axis('off')
plt.show()

# a. Image negative
def image_negative(img_array):
# Calculate negative: s = L-1-r where L is max intensity (255 for 8-bit)
return 255 - img_array

negative = image_negative(img_array)
plt.figure(figsize=(5, 5))
plt.imshow(negative, cmap='gray')
plt.title('Image Negative')
plt.axis('off')
plt.show()

# b. Log transformation with different c values


def log_transform(img_array, c):
# Add small value to avoid log(0)
return c * np.log1p(img_array.astype(np.float64))

def inverse_log_transform(img_array, c):


# Inverse log: r = e^(s/c) - 1
return np.expm1(img_array.astype(np.float64) / c)

# Experiment with different c values


c_values = [1, 10, 20, 40]
plt.figure(figsize=(15, 10))

for i, c in enumerate(c_values):
# Apply log transform
log_img = log_transform(img_array, c)

# Normalize to [0, 255] for display

log_img = 255 * (log_img - np.min(log_img)) / (np.max(log_img) - np.min(log_img))
log_img = log_img.astype(np.uint8)

plt.subplot(2, len(c_values), i+1)


plt.imshow(log_img, cmap='gray')
plt.title(f'Log Transform (c={c})')
plt.axis('off')

# Apply inverse log transform to the transformed image


inverse_log_img = inverse_log_transform(log_img, c)

# Normalize to [0, 255]


inverse_log_img = 255 * (inverse_log_img - np.min(inverse_log_img)) / (np.max(inverse_log_img) - np.min(inverse_log_img))

inverse_log_img = inverse_log_img.astype(np.uint8)

plt.subplot(2, len(c_values), i+1+len(c_values))


plt.imshow(inverse_log_img, cmap='gray')
plt.title(f'Inverse Log Transform (c={c})')
plt.axis('off')

plt.tight_layout()
plt.show()

# c. Power law transformation (Gamma correction)


def power_law_transform(img_array, gamma):
# s = c * r^gamma, with c=1 for simplicity
# Normalize input to [0, 1] for power operation
normalized = img_array / 255.0
return np.power(normalized, gamma) * 255

# Experiment with different gamma values


gamma_values = [0.1, 0.5, 1.0, 2.0, 5.0]
plt.figure(figsize=(15, 5))

for i, gamma in enumerate(gamma_values):


power_img = power_law_transform(img_array, gamma)
power_img = power_img.astype(np.uint8)

plt.subplot(1, len(gamma_values), i+1)


plt.imshow(power_img, cmap='gray')
plt.title(f'Gamma = {gamma}')
plt.axis('off')

plt.tight_layout()
plt.show()

# d. Contrast stretching
def contrast_stretching(img_array, r1, s1, r2, s2):
# Create an empty array with the same shape as input
result = np.zeros_like(img_array, dtype=np.float64)

# Apply the piecewise linear transformation


# For pixels with values <= r1
mask1 = img_array <= r1
result[mask1] = (s1 / r1) * img_array[mask1]

# For pixels with values between r1 and r2


mask2 = (img_array > r1) & (img_array < r2)
result[mask2] = ((s2 - s1) / (r2 - r1)) * (img_array[mask2] - r1) + s1

# For pixels with values >= r2


mask3 = img_array >= r2
result[mask3] = ((255 - s2) / (255 - r2)) * (img_array[mask3] - r2) + s2

return np.clip(result, 0, 255).astype(np.uint8)

# Calculate parameters for contrast stretching


min_val = np.min(img_array)
max_val = np.max(img_array)
mean_val = np.mean(img_array)

# Different parameter sets for contrast stretching


param_sets = [
# Format: (r1, s1, r2, s2)
(min_val, 0, max_val, 255), # Full stretch
(min_val, 50, max_val, 200), # Reduced contrast
(mean_val-50, 0, mean_val+50, 255) # Enhance middle range
]

plt.figure(figsize=(15, 5))

for i, params in enumerate(param_sets):


r1, s1, r2, s2 = params
stretched_img = contrast_stretching(img_array, r1, s1, r2, s2)

plt.subplot(1, len(param_sets), i+1)


plt.imshow(stretched_img, cmap='gray')
plt.title(f'Contrast Stretch\nr1={r1:.1f}, s1={s1}\nr2={r2:.1f}, s2={s2}')
plt.axis('off')

plt.tight_layout()
plt.show()

# e. Gray level slicing
def gray_level_slicing(img_array, lower, upper, background=False):
result = np.copy(img_array).astype(np.float64)

# Create mask for pixels in the specified range


mask = (img_array >= lower) & (img_array <= upper)

if background:
# Highlight range, preserve the rest
result[mask] = 255
else:
# Highlight range, set the rest to 0
result[~mask] = 0
result[mask] = 255

return result.astype(np.uint8)

# Different slicing ranges


slicing_ranges = [
# Format: (lower, upper, preserve_background)
(100, 150, False), # Highlight narrow range, set rest to 0
(100, 150, True), # Highlight narrow range, preserve rest
(50, 200, False) # Highlight wide range, set rest to 0
]

plt.figure(figsize=(15, 5))

for i, params in enumerate(slicing_ranges):


lower, upper, preserve = params
sliced_img = gray_level_slicing(img_array, lower, upper, preserve)

plt.subplot(1, len(slicing_ranges), i+1)


plt.imshow(sliced_img, cmap='gray')
plt.title(f'Gray Level Slicing\n{lower}-{upper}, Preserve={preserve}')
plt.axis('off')

plt.tight_layout()
plt.show()

print("All intensity transformations completed!")

All intensity transformations completed!
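
For 8-bit images, the power-law transform can also be applied through a precomputed 256-entry lookup table, which avoids recomputing the power for every pixel. A minimal sketch (assuming the img_array loaded above; the gamma value here is arbitrary):

[ ]: # Hypothetical LUT-based gamma correction, equivalent to power_law_transform for uint8 input
gamma = 0.5
lut = (np.power(np.arange(256) / 255.0, gamma) * 255).astype(np.uint8)
gamma_lut_img = lut[img_array]  # fancy indexing applies the LUT per pixel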

[ ]:

[ ]: import numpy as np
import matplotlib.pyplot as plt
from PIL import Image

# Load grayscale image


image_path = r'C:\Users\dkg19\OneDrive\Desktop\DIP LAB\image_gray.png' # Replace with your actual image path

img = Image.open(image_path).convert('L') # Ensure grayscale


img_array = np.array(img)

# Display original image


plt.figure(figsize=(5, 5))
plt.imshow(img_array, cmap='gray')
plt.title('Original Grayscale Image')
plt.axis('off')
plt.show()

# Helper function to pad image for filtering


def pad_image(image, pad_size):
padded = np.pad(image, pad_size, mode='reflect')
return padded

# a. Averaging Filter
def averaging_filter(image, kernel_size):
# Create padded image
pad_size = kernel_size // 2
padded = pad_image(image, pad_size)

# Create output image


height, width = image.shape
output = np.zeros_like(image, dtype=np.float64)

# Apply filter
for i in range(height):
for j in range(width):
# Extract neighborhood
neighborhood = padded[i:i+kernel_size, j:j+kernel_size]
# Calculate average
output[i, j] = np.mean(neighborhood)

return output.astype(np.uint8)

# Test averaging filter with different kernel sizes


kernel_sizes = [3, 5, 7, 9]
plt.figure(figsize=(15, 10))

# Original image for comparison

plt.subplot(2, 3, 1)
plt.imshow(img_array, cmap='gray')
plt.title('Original Image')
plt.axis('off')

for i, size in enumerate(kernel_sizes):


filtered_img = averaging_filter(img_array, size)

plt.subplot(2, 3, i+2)
plt.imshow(filtered_img, cmap='gray')
plt.title(f'Averaging Filter {size}x{size}')
plt.axis('off')

plt.tight_layout()
plt.show()

# b. Weighted Averaging Filter


def weighted_averaging_filter(image, kernel_size):
# Create padded image
pad_size = kernel_size // 2
padded = pad_image(image, pad_size)

# Create output image


height, width = image.shape
output = np.zeros_like(image, dtype=np.float64)

# Create weighted kernel (Gaussian-like)


kernel = np.zeros((kernel_size, kernel_size))
center = kernel_size // 2

# Fill the kernel with distance-based weights


for i in range(kernel_size):
for j in range(kernel_size):
distance = np.sqrt((i - center)**2 + (j - center)**2)
kernel[i, j] = np.exp(-0.5 * (distance**2))

# Normalize kernel weights to sum to 1


kernel = kernel / np.sum(kernel)

# Apply filter
for i in range(height):
for j in range(width):
# Extract neighborhood
neighborhood = padded[i:i+kernel_size, j:j+kernel_size]
# Apply weighted average
output[i, j] = np.sum(neighborhood * kernel)

return output.astype(np.uint8)

# Test weighted averaging filter with different kernel sizes


plt.figure(figsize=(15, 10))

# Original image for comparison


plt.subplot(2, 3, 1)
plt.imshow(img_array, cmap='gray')
plt.title('Original Image')
plt.axis('off')

for i, size in enumerate(kernel_sizes):


filtered_img = weighted_averaging_filter(img_array, size)

plt.subplot(2, 3, i+2)
plt.imshow(filtered_img, cmap='gray')
plt.title(f'Weighted Averaging Filter {size}x{size}')
plt.axis('off')

plt.tight_layout()
plt.show()

# c. Median Filtering
def median_filter(image, kernel_size):
# Create padded image
pad_size = kernel_size // 2
padded = pad_image(image, pad_size)

# Create output image


height, width = image.shape
output = np.zeros_like(image)

# Apply filter
for i in range(height):
for j in range(width):
# Extract neighborhood
neighborhood = padded[i:i+kernel_size, j:j+kernel_size]
# Calculate median
output[i, j] = np.median(neighborhood)

return output.astype(np.uint8)

# Test median filter with different kernel sizes


plt.figure(figsize=(15, 10))

# Original image for comparison


plt.subplot(2, 3, 1)

plt.imshow(img_array, cmap='gray')
plt.title('Original Image')
plt.axis('off')

for i, size in enumerate(kernel_sizes):


filtered_img = median_filter(img_array, size)

plt.subplot(2, 3, i+2)
plt.imshow(filtered_img, cmap='gray')
plt.title(f'Median Filter {size}x{size}')
plt.axis('off')

plt.tight_layout()
plt.show()

# d. Max Filtering
def max_filter(image, kernel_size):
# Create padded image
pad_size = kernel_size // 2
padded = pad_image(image, pad_size)

# Create output image


height, width = image.shape
output = np.zeros_like(image)

# Apply filter
for i in range(height):
for j in range(width):
# Extract neighborhood
neighborhood = padded[i:i+kernel_size, j:j+kernel_size]
# Calculate maximum
output[i, j] = np.max(neighborhood)

return output.astype(np.uint8)

# e. Min Filtering
def min_filter(image, kernel_size):
# Create padded image
pad_size = kernel_size // 2
padded = pad_image(image, pad_size)

# Create output image


height, width = image.shape
output = np.zeros_like(image)

# Apply filter
for i in range(height):

for j in range(width):
# Extract neighborhood
neighborhood = padded[i:i+kernel_size, j:j+kernel_size]
# Calculate minimum
output[i, j] = np.min(neighborhood)

return output.astype(np.uint8)

# Test max and min filters


filter_size = 3 # Use 3x3 window for max and min filters
max_filtered = max_filter(img_array, filter_size)
min_filtered = min_filter(img_array, filter_size)

plt.figure(figsize=(15, 5))

plt.subplot(1, 3, 1)
plt.imshow(img_array, cmap='gray')
plt.title('Original Image')
plt.axis('off')

plt.subplot(1, 3, 2)
plt.imshow(max_filtered, cmap='gray')
plt.title(f'Max Filter {filter_size}x{filter_size}')
plt.axis('off')

plt.subplot(1, 3, 3)
plt.imshow(min_filtered, cmap='gray')
plt.title(f'Min Filter {filter_size}x{filter_size}')
plt.axis('off')

plt.tight_layout()
plt.show()

# Compare all filtering methods with a fixed kernel size


comparison_size = 5 # 5x5 window for comparison
avg_img = averaging_filter(img_array, comparison_size)
weighted_img = weighted_averaging_filter(img_array, comparison_size)
median_img = median_filter(img_array, comparison_size)
max_img = max_filter(img_array, comparison_size)
min_img = min_filter(img_array, comparison_size)

plt.figure(figsize=(15, 10))

plt.subplot(2, 3, 1)
plt.imshow(img_array, cmap='gray')
plt.title('Original Image')
plt.axis('off')

plt.subplot(2, 3, 2)
plt.imshow(avg_img, cmap='gray')
plt.title(f'Average Filter {comparison_size}x{comparison_size}')
plt.axis('off')

plt.subplot(2, 3, 3)
plt.imshow(weighted_img, cmap='gray')
plt.title(f'Weighted Average {comparison_size}x{comparison_size}')
plt.axis('off')

plt.subplot(2, 3, 4)
plt.imshow(median_img, cmap='gray')
plt.title(f'Median Filter {comparison_size}x{comparison_size}')
plt.axis('off')

plt.subplot(2, 3, 5)
plt.imshow(max_img, cmap='gray')
plt.title(f'Max Filter {comparison_size}x{comparison_size}')
plt.axis('off')

plt.subplot(2, 3, 6)
plt.imshow(min_img, cmap='gray')
plt.title(f'Min Filter {comparison_size}x{comparison_size}')
plt.axis('off')

plt.tight_layout()
plt.show()

print("Observations on filtering effects:")


print("1. Averaging Filter:")
print(" - Blur increases with window size")
print(" - Reduces noise but also reduces edge sharpness")
print(" - May create artifacts at strong edges")
print()
print("2. Weighted Averaging Filter:")
print(" - Provides smoother transitions than uniform averaging")
print(" - Better preserves edges while still reducing noise")
print(" - Blur is more natural-looking with less artifacts")
print()
print("3. Median Filter:")
print(" - Excellent at removing salt-and-pepper noise")
print(" - Preserves edges better than averaging")
print(" - Larger windows may remove fine details")
print()
print("4. Max Filter:")
print(" - Enhances bright regions")

print(" - Expands light objects (dilation-like effect)")
print(" - Useful for finding light objects on dark backgrounds")
print()
print("5. Min Filter:")
print(" - Enhances dark regions")
print(" - Expands dark objects (erosion-like effect)")
print(" - Useful for finding dark objects on light backgrounds")

Observations on filtering effects:
1. Averaging Filter:
- Blur increases with window size
- Reduces noise but also reduces edge sharpness
- May create artifacts at strong edges

2. Weighted Averaging Filter:


- Provides smoother transitions than uniform averaging
- Better preserves edges while still reducing noise
- Blur is more natural-looking with less artifacts

3. Median Filter:
- Excellent at removing salt-and-pepper noise
- Preserves edges better than averaging
- Larger windows may remove fine details

4. Max Filter:
- Enhances bright regions
- Expands light objects (dilation-like effect)
- Useful for finding light objects on dark backgrounds

5. Min Filter:
- Enhances dark regions
- Expands dark objects (erosion-like effect)
- Useful for finding dark objects on light backgrounds
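
The nested-loop filters above are easy to follow but slow on large images. A vectorized sketch of the averaging and median filters using NumPy's sliding_window_view is shown below (hypothetical helper, not part of the lab code; requires NumPy >= 1.20):

[ ]: def fast_average_and_median(image, kernel_size):
    # Vectorized equivalents of averaging_filter and median_filter above
    pad = kernel_size // 2
    padded = np.pad(image, pad, mode='reflect')
    # windows has shape (H, W, kernel_size, kernel_size)
    windows = np.lib.stride_tricks.sliding_window_view(padded, (kernel_size, kernel_size))
    avg = windows.mean(axis=(-2, -1)).astype(np.uint8)
    med = np.median(windows, axis=(-2, -1)).astype(np.uint8)
    return avg, med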

[ ]:

[2]: import numpy as np


import matplotlib.pyplot as plt
from PIL import Image

# Load grayscale image


image_path = r'C:\Users\dkg19\OneDrive\Desktop\DIP LAB\image_gray.png' # Replace with your actual image path

img = Image.open(image_path).convert('L') # Ensure grayscale


img_array = np.array(img)

# Function to add salt and pepper noise


def add_salt_pepper_noise(image, salt_prob=0.02, pepper_prob=0.02):
noisy_image = np.copy(image)
total_pixels = image.size

# Add salt noise (white pixels)


num_salt = int(total_pixels * salt_prob)
salt_coords = [np.random.randint(0, i - 1, num_salt) for i in image.shape]
noisy_image[salt_coords[0], salt_coords[1]] = 255

# Add pepper noise (black pixels)


num_pepper = int(total_pixels * pepper_prob)
pepper_coords = [np.random.randint(0, i - 1, num_pepper) for i in image.shape]

noisy_image[pepper_coords[0], pepper_coords[1]] = 0

return noisy_image

# Add salt and pepper noise to the image


noisy_img = add_salt_pepper_noise(img_array)

# Display original and noisy images


plt.figure(figsize=(10, 5))
plt.subplot(1, 2, 1)
plt.imshow(img_array, cmap='gray')
plt.title('Original Image')
plt.axis('off')

plt.subplot(1, 2, 2)
plt.imshow(noisy_img, cmap='gray')

plt.title('Image with Salt & Pepper Noise')
plt.axis('off')

plt.tight_layout()
plt.show()

# Helper function to pad image for filtering


def pad_image(image, pad_size):
padded = np.pad(image, pad_size, mode='reflect')
return padded

# a. Linear Smoothing (Simple Averaging)


def averaging_filter(image, kernel_size):
# Create padded image
pad_size = kernel_size // 2
padded = pad_image(image, pad_size)

# Create output image


height, width = image.shape
output = np.zeros_like(image, dtype=np.float64)

# Apply filter
for i in range(height):
for j in range(width):
# Extract neighborhood
neighborhood = padded[i:i+kernel_size, j:j+kernel_size]
# Calculate average
output[i, j] = np.mean(neighborhood)

return output.astype(np.uint8)

# b. Weighted Averaging Filter


def weighted_averaging_filter(image, kernel_size):
# Create padded image
pad_size = kernel_size // 2
padded = pad_image(image, pad_size)

# Create output image


height, width = image.shape
output = np.zeros_like(image, dtype=np.float64)

# Create weighted kernel (Gaussian-like)


kernel = np.zeros((kernel_size, kernel_size))
center = kernel_size // 2

# Fill the kernel with distance-based weights


for i in range(kernel_size):

for j in range(kernel_size):
distance = np.sqrt((i - center)**2 + (j - center)**2)
kernel[i, j] = np.exp(-0.5 * (distance**2))

# Normalize kernel weights to sum to 1


kernel = kernel / np.sum(kernel)

# Apply filter
for i in range(height):
for j in range(width):
# Extract neighborhood
neighborhood = padded[i:i+kernel_size, j:j+kernel_size]
# Apply weighted average
output[i, j] = np.sum(neighborhood * kernel)

return output.astype(np.uint8)

# c. Median Filtering
def median_filter(image, kernel_size):
# Create padded image
pad_size = kernel_size // 2
padded = pad_image(image, pad_size)

# Create output image


height, width = image.shape
output = np.zeros_like(image)

# Apply filter
for i in range(height):
for j in range(width):
# Extract neighborhood
neighborhood = padded[i:i+kernel_size, j:j+kernel_size]
# Calculate median
output[i, j] = np.median(neighborhood)

return output.astype(np.uint8)

# d. Max Filtering
def max_filter(image, kernel_size):
# Create padded image
pad_size = kernel_size // 2
padded = pad_image(image, pad_size)

# Create output image


height, width = image.shape
output = np.zeros_like(image)

# Apply filter
for i in range(height):
for j in range(width):
# Extract neighborhood
neighborhood = padded[i:i+kernel_size, j:j+kernel_size]
# Calculate maximum
output[i, j] = np.max(neighborhood)

return output.astype(np.uint8)

# e. Min Filtering
def min_filter(image, kernel_size):
# Create padded image
pad_size = kernel_size // 2
padded = pad_image(image, pad_size)

# Create output image


height, width = image.shape
output = np.zeros_like(image)

# Apply filter
for i in range(height):
for j in range(width):
# Extract neighborhood
neighborhood = padded[i:i+kernel_size, j:j+kernel_size]
# Calculate minimum
output[i, j] = np.min(neighborhood)

return output.astype(np.uint8)

# Apply all filters with different kernel sizes


kernel_sizes = [3, 5, 7]

# Create a dictionary to store results for each filter type and kernel size
results = {}

for size in kernel_sizes:


avg_img = averaging_filter(noisy_img, size)
weighted_img = weighted_averaging_filter(noisy_img, size)
median_img = median_filter(noisy_img, size)
max_img = max_filter(noisy_img, size)
min_img = min_filter(noisy_img, size)

results[f'avg_{size}'] = avg_img
results[f'weighted_{size}'] = weighted_img
results[f'median_{size}'] = median_img
results[f'max_{size}'] = max_img

results[f'min_{size}'] = min_img

# Function to calculate Peak Signal-to-Noise Ratio (PSNR)


def calculate_psnr(original, filtered):
mse = np.mean((original.astype(np.float64) - filtered.astype(np.float64)) ** 2)

if mse == 0:
return float('inf')
max_pixel = 255.0
psnr = 20 * np.log10(max_pixel / np.sqrt(mse))
return psnr

# Calculate PSNR for each filtered image


psnr_values = {}
for key, filtered_img in results.items():
psnr_values[key] = calculate_psnr(img_array, filtered_img)

# Display results for 3x3 kernel


plt.figure(figsize=(15, 10))

plt.subplot(2, 3, 1)
plt.imshow(img_array, cmap='gray')
plt.title('Original Image')
plt.axis('off')

plt.subplot(2, 3, 2)
plt.imshow(noisy_img, cmap='gray')
plt.title('Noisy Image')
plt.axis('off')

plt.subplot(2, 3, 3)
plt.imshow(results['avg_3'], cmap='gray')
plt.title(f'Average Filter 3x3\nPSNR: {psnr_values["avg_3"]:.2f} dB')
plt.axis('off')

plt.subplot(2, 3, 4)
plt.imshow(results['weighted_3'], cmap='gray')
plt.title(f'Weighted Average 3x3\nPSNR: {psnr_values["weighted_3"]:.2f} dB')
plt.axis('off')

plt.subplot(2, 3, 5)
plt.imshow(results['median_3'], cmap='gray')
plt.title(f'Median Filter 3x3\nPSNR: {psnr_values["median_3"]:.2f} dB')
plt.axis('off')

plt.tight_layout()
plt.show()

# Compare averaging vs median filtering with different kernel sizes
# Use a 2 x 4 grid: noisy image and the three average filters in the first row,
# the three median filters in the second row (keeps all subplot indices in range)
plt.figure(figsize=(15, 10))

# First row: Original and average filters


plt.subplot(2, 4, 1)
plt.imshow(noisy_img, cmap='gray')
plt.title('Noisy Image')
plt.axis('off')

for i, size in enumerate(kernel_sizes):

# Average filters in first row
plt.subplot(2, 4, i+2) # Positions 2, 3, 4
plt.imshow(results[f'avg_{size}'], cmap='gray')
plt.title(f'Average Filter {size}x{size}\nPSNR: {psnr_values[f"avg_{size}"]:.2f} dB')
plt.axis('off')

# Median filters in second row, aligned under the corresponding average filters
plt.subplot(2, 4, i+6) # Positions 6, 7, 8
plt.imshow(results[f'median_{size}'], cmap='gray')
plt.title(f'Median Filter {size}x{size}\nPSNR: {psnr_values[f"median_{size}"]:.2f} dB')
plt.axis('off')

plt.tight_layout()
plt.show()

# Compare all filter types with a fixed kernel size (5x5)


plt.figure(figsize=(15, 10))

plt.subplot(2, 3, 1)
plt.imshow(img_array, cmap='gray')
plt.title('Original Image')
plt.axis('off')

plt.subplot(2, 3, 2)
plt.imshow(noisy_img, cmap='gray')
plt.title('Noisy Image')
plt.axis('off')

plt.subplot(2, 3, 3)
plt.imshow(results['avg_5'], cmap='gray')
plt.title(f'Average Filter 5x5\nPSNR: {psnr_values["avg_5"]:.2f} dB')
plt.axis('off')

plt.subplot(2, 3, 4)
plt.imshow(results['weighted_5'], cmap='gray')
plt.title(f'Weighted Average 5x5\nPSNR: {psnr_values["weighted_5"]:.2f} dB')
plt.axis('off')

plt.subplot(2, 3, 5)
plt.imshow(results['median_5'], cmap='gray')
plt.title(f'Median Filter 5x5\nPSNR: {psnr_values["median_5"]:.2f} dB')
plt.axis('off')

plt.tight_layout()
plt.show()

# Compare Max and Min filters


plt.figure(figsize=(15, 5))

plt.subplot(1, 3, 1)
plt.imshow(noisy_img, cmap='gray')
plt.title('Noisy Image')
plt.axis('off')

plt.subplot(1, 3, 2)
plt.imshow(results['max_3'], cmap='gray')
plt.title(f'Max Filter 3x3\nPSNR: {psnr_values["max_3"]:.2f} dB')
plt.axis('off')

plt.subplot(1, 3, 3)
plt.imshow(results['min_3'], cmap='gray')
plt.title(f'Min Filter 3x3\nPSNR: {psnr_values["min_3"]:.2f} dB')
plt.axis('off')

plt.tight_layout()
plt.show()

# Print comparative analysis


print("Comparative Analysis of Filtering Techniques for Salt and Pepper Noise:")
print("===================================================================")
print("1. Linear Smoothing (Average Filtering):")
print(" - Moderately effective at removing salt and pepper noise")
print(" - Tends to blur edges and fine details")
print(" - Performance increases with kernel size but at cost of more␣
↪blurring")

print("\n2. Weighted Averaging Filter:")


print(" - Slightly better than uniform averaging at preserving edges")
print(" - Still ineffective at completely removing impulse noise")
print(" - Creates a more natural blur than uniform averaging")

print("\n3. Median Filter:")
print(" - Excellent at removing salt and pepper noise")
print(" - Preserves edges much better than averaging filters")
print(" - Typically achieves highest PSNR values for salt and pepper noise")
print(" - Larger window sizes may remove small details")

print("\n4. Max Filter:")


print(" - Good at removing 'pepper' noise (black pixels)")
print(" - Enlarges 'salt' noise (white pixels)")
print(" - Brightens the overall image")

print("\n5. Min Filter:")


print(" - Good at removing 'salt' noise (white pixels)")
print(" - Enlarges 'pepper' noise (black pixels)")
print(" - Darkens the overall image")

print("\nPSNR Values Summary:")


for key, value in sorted(psnr_values.items()):
print(f" - {key}: {value:.2f} dB")

Comparative Analysis of Filtering Techniques for Salt and Pepper Noise:
===================================================================
1. Linear Smoothing (Average Filtering):
- Moderately effective at removing salt and pepper noise
- Tends to blur edges and fine details
- Performance increases with kernel size but at cost of more blurring

2. Weighted Averaging Filter:
- Slightly better than uniform averaging at preserving edges
- Still ineffective at completely removing impulse noise
- Creates a more natural blur than uniform averaging

3. Median Filter:
- Excellent at removing salt and pepper noise
- Preserves edges much better than averaging filters
- Typically achieves highest PSNR values for salt and pepper noise
- Larger window sizes may remove small details

4. Max Filter:
- Good at removing 'pepper' noise (black pixels)
- Enlarges 'salt' noise (white pixels)
- Brightens the overall image

5. Min Filter:
- Good at removing 'salt' noise (white pixels)
- Enlarges 'pepper' noise (black pixels)
- Darkens the overall image

PSNR Values Summary:


- avg_3: 27.59 dB
- avg_5: 27.57 dB
- avg_7: 25.55 dB
- max_3: 14.71 dB
- max_5: 10.94 dB
- max_7: 9.05 dB
- median_3: 40.02 dB
- median_5: 33.06 dB
- median_7: 27.74 dB
- min_3: 11.40 dB
- min_5: 7.63 dB
- min_7: 5.66 dB
- weighted_3: 27.35 dB
- weighted_5: 28.28 dB
- weighted_7: 28.32 dB
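
As a sanity check on the hand-written filters, the same PSNR comparison could be repeated with SciPy's built-in neighbourhood filters. This is only a sketch (it assumes SciPy is installed, which later cells already rely on, and reuses calculate_psnr, img_array and noisy_img from this cell):

[ ]: from scipy import ndimage

def builtin_filter_psnr(original, noisy, size=3):
    # Cross-check of the manual filters against scipy.ndimage equivalents
    filtered = {
        'uniform': ndimage.uniform_filter(noisy, size=size),
        'median': ndimage.median_filter(noisy, size=size),
        'maximum': ndimage.maximum_filter(noisy, size=size),
        'minimum': ndimage.minimum_filter(noisy, size=size),
    }
    return {name: calculate_psnr(original, img) for name, img in filtered.items()}

print(builtin_filter_psnr(img_array, noisy_img, size=3))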

[ ]:

[1]: import numpy as np


import matplotlib.pyplot as plt
from scipy import ndimage
from PIL import Image

# Function to display multiple images side by side

def display_images(images, titles, figsize=(15, 10)):
n = len(images)
fig, axes = plt.subplots(1, n, figsize=figsize)
if n == 1:
axes = [axes]

for i, (image, title) in enumerate(zip(images, titles)):


axes[i].imshow(image, cmap='gray')
axes[i].set_title(title)
axes[i].axis('off')

plt.tight_layout()
plt.show()

# Load the image


image_path = r'C:\Users\dkg19\OneDrive\Desktop\DIP LAB\image_gray.png' # Replace with your actual image path

original_img = np.array(Image.open(image_path).convert('L'), dtype=float)

# Display the original image


plt.figure(figsize=(8, 8))
plt.imshow(original_img, cmap='gray')
plt.title('Original Grayscale Image')
plt.axis('off')
plt.show()

# a. Laplacian filter
def laplacian_filter(image):
# Define the Laplacian filter kernel
laplacian_kernel = np.array([
[0, 1, 0],
[1, -4, 1],
[0, 1, 0]
])

# Apply the filter


laplacian = ndimage.convolve(image, laplacian_kernel)

# Sharpen the image by subtracting the Laplacian


sharpened = image - laplacian

# Normalize to [0, 255]


sharpened = np.clip(sharpened, 0, 255)

return sharpened

# b. Filtering using composite mask (e.g., combined Laplacian and Sobel)

def composite_mask_filter(image):
# Define the composite sharpening mask (identity plus the 8-neighbour Laplacian)

composite_kernel = np.array([
[-1, -1, -1],
[-1, 9, -1],
[-1, -1, -1]
])

# Apply the filter


filtered = ndimage.convolve(image, composite_kernel)

# Normalize to [0, 255]


filtered = np.clip(filtered, 0, 255)

return filtered

# c. Unsharp masking
def unsharp_masking(image, sigma=1.0, amount=1.0):
# Create a blurred version of the image
blurred = ndimage.gaussian_filter(image, sigma=sigma)

# Calculate the mask


mask = image - blurred

# Add the mask to the original image


sharpened = image + amount * mask

# Normalize to [0, 255]


sharpened = np.clip(sharpened, 0, 255)

return sharpened

# d. High boost filtering


def high_boost_filtering(image, k=2.0):
# Define the Laplacian filter kernel
laplacian_kernel = np.array([
[0, 1, 0],
[1, -4, 1],
[0, 1, 0]
])

# Apply the filter


laplacian = ndimage.convolve(image, laplacian_kernel)

# High-boost filtering: original + k * (edge detail)
# Note: the Laplacian computed above is negative at edges, so we subtract
# k * laplacian to add the edge detail back to the image
high_boost = image - k * laplacian

# Normalize to [0, 255]


high_boost = np.clip(high_boost, 0, 255)

return high_boost

# e. Filtering using first order derivative operators


def sobel_filter(image):
# Define Sobel kernels
sobel_x = np.array([
[-1, 0, 1],
[-2, 0, 2],
[-1, 0, 1]
])

sobel_y = np.array([
[-1, -2, -1],
[0, 0, 0],
[1, 2, 1]
])

# Apply the filters


grad_x = ndimage.convolve(image, sobel_x)
grad_y = ndimage.convolve(image, sobel_y)

# Calculate gradient magnitude


gradient = np.sqrt(grad_x**2 + grad_y**2)

# Normalize to [0, 255]


gradient = np.clip(gradient, 0, 255)

# Create a sharpened image by adding the gradient to the original


sharpened = image + gradient
sharpened = np.clip(sharpened, 0, 255)

return gradient, sharpened

def prewitt_filter(image):
# Define Prewitt kernels
prewitt_x = np.array([
[-1, 0, 1],
[-1, 0, 1],
[-1, 0, 1]
])

prewitt_y = np.array([
[-1, -1, -1],
[0, 0, 0],
[1, 1, 1]
])

# Apply the filters


grad_x = ndimage.convolve(image, prewitt_x)
grad_y = ndimage.convolve(image, prewitt_y)

# Calculate gradient magnitude


gradient = np.sqrt(grad_x**2 + grad_y**2)

# Normalize to [0, 255]


gradient = np.clip(gradient, 0, 255)

# Create a sharpened image by adding the gradient to the original


sharpened = image + gradient
sharpened = np.clip(sharpened, 0, 255)

return gradient, sharpened

# Apply all filters


laplacian_filtered = laplacian_filter(original_img)
composite_filtered = composite_mask_filter(original_img)
unsharp_filtered = unsharp_masking(original_img, sigma=1.0, amount=1.5)
high_boost_filtered = high_boost_filtering(original_img, k=2.0)
sobel_edges, sobel_sharpened = sobel_filter(original_img)
prewitt_edges, prewitt_sharpened = prewitt_filter(original_img)

# Display the results in groups


# Filter results
display_images(
[original_img, laplacian_filtered, composite_filtered, unsharp_filtered],
['Original Image', 'Laplacian Filter', 'Composite Mask', 'Unsharp Masking']
)

# High-boost and edge detection results


display_images(
[high_boost_filtered, sobel_edges, sobel_sharpened, prewitt_edges, prewitt_sharpened],
['High Boost (k=2.0)', 'Sobel Edges', 'Sobel Sharpened', 'Prewitt Edges', 'Prewitt Sharpened']
)
# Compare all sharpening methods

display_images(
[original_img, laplacian_filtered, composite_filtered, unsharp_filtered, high_boost_filtered, sobel_sharpened, prewitt_sharpened],
['Original', 'Laplacian', 'Composite', 'Unsharp', 'High Boost', 'Sobel Sharpened', 'Prewitt Sharpened'],

figsize=(20, 10)
)
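
The high-boost filter above is built from the Laplacian; the same idea is often written in terms of unsharp masking, g = f + k * (f - blurred). A minimal sketch of that variant, reusing ndimage.gaussian_filter (a hypothetical helper, not part of the original lab):

[ ]: def high_boost_unsharp(image, sigma=1.0, k=2.0):
    # High-boost via unsharp masking: add k times the detail (original minus blur)
    blurred = ndimage.gaussian_filter(image, sigma=sigma)
    return np.clip(image + k * (image - blurred), 0, 255)

high_boost_unsharp_img = high_boost_unsharp(original_img, sigma=1.0, k=2.0)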

[ ]:

[3]: pip install scikit-image

Note: you may need to restart the kernel to use updated packages.

[notice] A new release of pip is available: 25.0.1 -> 25.1


[notice] To update, run: python.exe -m pip install --upgrade pip
Collecting scikit-image
Downloading scikit_image-0.25.2-cp313-cp313-win_amd64.whl.metadata (14 kB)
Requirement already satisfied: numpy>=1.24 in c:\users\dkg19\appdata\local\programs\python\python313\lib\site-packages (from scikit-image) (2.2.1)
Requirement already satisfied: scipy>=1.11.4 in c:\users\dkg19\appdata\local\programs\python\python313\lib\site-packages (from scikit-image) (1.15.1)
Requirement already satisfied: networkx>=3.0 in c:\users\dkg19\appdata\local\programs\python\python313\lib\site-packages (from scikit-image) (3.4.2)
Requirement already satisfied: pillow>=10.1 in c:\users\dkg19\appdata\local\programs\python\python313\lib\site-packages (from scikit-image) (11.1.0)
Collecting imageio!=2.35.0,>=2.33 (from scikit-image)
Downloading imageio-2.37.0-py3-none-any.whl.metadata (5.2 kB)
Collecting tifffile>=2022.8.12 (from scikit-image)
Downloading tifffile-2025.3.30-py3-none-any.whl.metadata (32 kB)
Requirement already satisfied: packaging>=21 in c:\users\dkg19\appdata\roaming\python\python313\site-packages (from scikit-image) (24.2)
Collecting lazy-loader>=0.4 (from scikit-image)
Downloading lazy_loader-0.4-py3-none-any.whl.metadata (7.6 kB)
Downloading scikit_image-0.25.2-cp313-cp313-win_amd64.whl (12.9 MB)
Downloading imageio-2.37.0-py3-none-any.whl (315 kB)
Downloading lazy_loader-0.4-py3-none-any.whl (12 kB)
Downloading tifffile-2025.3.30-py3-none-any.whl (226 kB)
Installing collected packages: tifffile, lazy-loader, imageio, scikit-image
Successfully installed imageio-2.37.0 lazy-loader-0.4 scikit-image-0.25.2 tifffile-2025.3.30

[1]: import numpy as np


import matplotlib.pyplot as plt
from PIL import Image
from skimage import color

def histogram_equalisation(input_image, no_of_bins):


"""
Enhance contrast of an image using histogram equalization.

Parameters:
- input_image: numpy array of the input image
- no_of_bins: number of bins to use for histogram calculation

Returns:
- enhanced_image: numpy array of the enhanced image with the same shape as input
"""
# Check if the image is grayscale or color
if len(input_image.shape) == 2:
# Grayscale image
return _equalize_grayscale(input_image, no_of_bins)
elif len(input_image.shape) == 3 and input_image.shape[2] == 3:
# Color image
return _equalize_color(input_image, no_of_bins)
else:
raise ValueError("Unsupported image format")

def _equalize_grayscale(image, no_of_bins):


"""Helper function to perform histogram equalization on grayscale images."""
# Calculate the histogram
hist, bin_edges = np.histogram(image.flatten(), bins=no_of_bins, range=(0, 256))

# Calculate the cumulative distribution function (CDF)


cdf = hist.cumsum()

# Normalize the CDF to the range [0, 255]


# Handle special case where cdf[-1] might be 0
if cdf[-1] == 0:
cdf_normalized = np.zeros_like(cdf)
else:
cdf_normalized = (cdf * 255) / cdf[-1]

# Use the normalized CDF as a lookup table for pixel values


bin_centers = (bin_edges[:-1] + bin_edges[1:]) / 2

# Create a mapping function (LUT)
mapping_function = np.interp(np.arange(256), bin_centers, cdf_normalized)

# Apply the mapping function to the image


result = mapping_function[image.astype(np.uint8)]

return result.astype(np.uint8)

def _equalize_color(image, no_of_bins):


"""Helper function to perform histogram equalization on color images."""
# Convert to HSV color space (work with V channel)
# Convert RGB to float in [0, 1] range
img_norm = image.astype(float) / 255.0

# Convert to HSV
hsv_img = color.rgb2hsv(img_norm)

# Apply histogram equalization to the V channel


v_channel = (hsv_img[:, :, 2] * 255).astype(np.uint8)
equalized_v = _equalize_grayscale(v_channel, no_of_bins)

# Update the V channel


hsv_img[:, :, 2] = equalized_v / 255.0

# Convert back to RGB


rgb_img = color.hsv2rgb(hsv_img)

# Convert back to [0, 255] range


return (rgb_img * 255).astype(np.uint8)

def plot_histogram(ax, image, is_grayscale=True):


"""Plot histogram of an image."""
if is_grayscale:
ax.hist(image.flatten(), bins=50, color='black', alpha=0.7)
else:
for i, color in enumerate(['red', 'green', 'blue']):
ax.hist(image[:,:,i].flatten(), bins=50, color=color, alpha=0.5)
ax.set_xlim(0, 255)

def process_image(image_path, bin_values, is_grayscale=True):


"""Process a single image with different bin counts."""
# Load the image
img = Image.open(image_path)

if is_grayscale:
if img.mode != 'L':

img = img.convert('L')
original = np.array(img)
else:
if img.mode != 'RGB':
img = img.convert('RGB')
original = np.array(img)

# Create results dictionary


results = {'original': original}

# Process with different bin counts


for bins in bin_values:
enhanced = histogram_equalisation(original, bins)
results[f'bins_{bins}'] = enhanced

return results

def analyze_image_with_bins(image_path, bin_values=[8, 32, 64, 128, 256], is_grayscale=True):

"""
Analyze a single image with different bin counts for histogram equalization.

Parameters:
- image_path: path to the input image
- bin_values: list of bin counts to test
- is_grayscale: whether to process as grayscale
"""
# Process the image
results = process_image(image_path, bin_values, is_grayscale)

# Create figure
fig, axes = plt.subplots(2, len(bin_values)+1, figsize=(3*(len(bin_values)+1), 6))

# Display original image


if is_grayscale:
axes[0, 0].imshow(results['original'], cmap='gray')
else:
axes[0, 0].imshow(results['original'])

axes[0, 0].set_title('Original Image')


axes[0, 0].axis('off')
plot_histogram(axes[1, 0], results['original'], is_grayscale=is_grayscale)
axes[1, 0].set_title('Original Histogram')

# Display enhanced images


for i, bins in enumerate(bin_values):

col = i + 1 # Skip column 0 (original image)
key = f'bins_{bins}'

if is_grayscale:
axes[0, col].imshow(results[key], cmap='gray')
else:
axes[0, col].imshow(results[key])

axes[0, col].set_title(f'Bins: {bins}')


axes[0, col].axis('off')
plot_histogram(axes[1, col], results[key], is_grayscale=is_grayscale)
axes[1, col].set_title(f'Histogram (Bins: {bins})')

plt.tight_layout()
image_type = "Grayscale" if is_grayscale else "Color"
plt.suptitle(f'{image_type} Image - Histogram Equalization with Different Bin Counts', fontsize=16, y=1.02)

plt.subplots_adjust(top=0.85)
plt.show()

return results

def visualize_cdf_transformation(image, no_of_bins=128):


"""Visualize how the CDF transforms pixel values."""
# If image is color, convert to grayscale for CDF visualization
if len(image.shape) == 3:
from skimage import color
img_norm = image.astype(float) / 255.0
hsv_img = color.rgb2hsv(img_norm)
image_for_hist = (hsv_img[:, :, 2] * 255).astype(np.uint8)
else:
image_for_hist = image

# Calculate histogram
hist, bin_edges = np.histogram(image_for_hist.flatten(), bins=no_of_bins, range=(0, 256))

# Calculate CDF
cdf = hist.cumsum()
if cdf[-1] > 0:
cdf_normalized = (cdf * 255) / cdf[-1]
else:
cdf_normalized = np.zeros_like(cdf)

# Bin centers for mapping


bin_centers = (bin_edges[:-1] + bin_edges[1:]) / 2

# Create mapping function
mapping_function = np.interp(np.arange(256), bin_centers, cdf_normalized)

# Plot
fig, axes = plt.subplots(1, 2, figsize=(12, 5))

# Plot histogram and CDF


ax1 = axes[0]
ax1.bar(bin_centers, hist, width=256/no_of_bins, alpha=0.7)
ax1.set_xlim(0, 255)
ax1.set_title(f'Histogram (Bins: {no_of_bins})')
ax1.set_xlabel('Pixel Value')
ax1.set_ylabel('Frequency')

ax2 = ax1.twinx()
ax2.plot(bin_centers, cdf / cdf.max() if cdf.max() > 0 else cdf, 'r-', linewidth=2)

ax2.set_ylabel('Cumulative Frequency', color='r')


ax2.tick_params(axis='y', labelcolor='r')

# Plot mapping function


axes[1].plot(np.arange(256), mapping_function, 'b-', linewidth=2)
axes[1].plot([0, 255], [0, 255], 'k--') # Identity line
axes[1].set_xlim(0, 255)
axes[1].set_ylim(0, 255)
axes[1].set_title('Transformation Function')
axes[1].set_xlabel('Input Pixel Value')
axes[1].set_ylabel('Output Pixel Value')
axes[1].grid(True)

plt.tight_layout()
plt.show()

return mapping_function

def compare_detailed_results(image, bin_values=[32, 128]):


"""Compare detailed results with selected bin counts."""
# Process image with selected bin counts
results = {}
for bins in bin_values:
results[bins] = histogram_equalisation(image, bins)

# Create figure
fig, axes = plt.subplots(1, len(bin_values) + 1, figsize=(5 * (len(bin_values) + 1), 5))

# Display original image

if len(image.shape) == 2:
axes[0].imshow(image, cmap='gray')
else:
axes[0].imshow(image)

axes[0].set_title('Original Image')
axes[0].axis('off')

# Display enhanced images


for i, bins in enumerate(bin_values):
if len(image.shape) == 2:
axes[i+1].imshow(results[bins], cmap='gray')
else:
axes[i+1].imshow(results[bins])

axes[i+1].set_title(f'Enhanced (Bins: {bins})')


axes[i+1].axis('off')

plt.tight_layout()
plt.show()

# Compare histograms
fig, axes = plt.subplots(1, len(bin_values) + 1, figsize=(5 * (len(bin_values) + 1), 4))

# Plot original histogram


is_grayscale = len(image.shape) == 2
plot_histogram(axes[0], image, is_grayscale=is_grayscale)
axes[0].set_title('Original Histogram')

# Plot enhanced histograms


for i, bins in enumerate(bin_values):
plot_histogram(axes[i+1], results[bins], is_grayscale=is_grayscale)
axes[i+1].set_title(f'Histogram (Bins: {bins})')

plt.tight_layout()
plt.show()

# Show transformation functions


for bins in bin_values:
print(f"\nTransformation function for {bins} bins:")
visualize_cdf_transformation(image, bins)

# Main execution
# Your specific image paths
image_path1 = r"C:\Users\dkg19\OneDrive\Desktop\DIP LAB\image_png.png"
image_path2 = r"C:\Users\dkg19\OneDrive\Desktop\DIP LAB\image_gray.png"

# Define bin values to test
bin_values = [8, 32, 64, 128, 256]

# Process the color image


print("Processing color image (image_png.png)...")
color_img = np.array(Image.open(image_path1))
color_results = analyze_image_with_bins(image_path1, bin_values, is_grayscale=False)

# Process the grayscale image


print("\nProcessing grayscale image (image_gray.png)...")
gray_img = np.array(Image.open(image_path2).convert('L'))
gray_results = analyze_image_with_bins(image_path2, bin_values, is_grayscale=True)

# Compare detailed results for selected bin counts


print("\nComparing detailed results for color image...")
compare_detailed_results(color_img, [32, 128])

print("\nComparing detailed results for grayscale image...")


compare_detailed_results(gray_img, [32, 128])

# Summary of findings
print("\nSummary of Findings:")
print("1. The effect of bin count on histogram equalization:")
print(" - Lower bin counts (8-32) tend to produce more dramatic contrast␣
↪changes")

print(" - Higher bin counts (128-256) create more subtle and natural-looking␣
↪enhancements")

print(" - The optimal bin count depends on the image's initial contrast␣
↪distribution")

print("\n2. Comparison between grayscale and color processing:")


print(" - Color images preserve hue while enhancing contrast in the value␣
↪channel")

print(" - Grayscale images show more pronounced enhancement effects")


print("\n3. The transformation functions reveal how pixel intensities are␣
↪redistributed")

print(" - Steeper sections of the transformation curve indicate more␣


↪aggressive contrast enhancement")

print(" - Flatter sections indicate compression of intensity ranges")

Processing color image (image_png.png)…

Processing grayscale image (image_gray.png)…

Comparing detailed results for color image…

Transformation function for 32 bins:

Transformation function for 128 bins:

Comparing detailed results for grayscale image…

Transformation function for 32 bins:

Transformation function for 128 bins:

Summary of Findings:
1. The effect of bin count on histogram equalization:
- Lower bin counts (8-32) tend to produce more dramatic contrast changes
- Higher bin counts (128-256) create more subtle and natural-looking
enhancements
- The optimal bin count depends on the image's initial contrast distribution

2. Comparison between grayscale and color processing:


- Color images preserve hue while enhancing contrast in the value channel
- Grayscale images show more pronounced enhancement effects

3. The transformation functions reveal how pixel intensities are redistributed


- Steeper sections of the transformation curve indicate more aggressive
contrast enhancement
- Flatter sections indicate compression of intensity ranges
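
A quick way to validate histogram_equalisation is to compare it against skimage's built-in equalize_hist on the same grayscale array. The snippet below is only a sketch (it assumes gray_img loaded above; results will differ slightly because of binning and interpolation choices):

[ ]: from skimage import exposure

# Cross-check the custom equalization against skimage's reference implementation
ref = (exposure.equalize_hist(gray_img, nbins=256) * 255).astype(np.uint8)
ours = histogram_equalisation(gray_img, 256)
print("Mean absolute difference vs skimage:", np.abs(ref.astype(float) - ours.astype(float)).mean())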

[ ]:

[ ]: import numpy as np
import matplotlib.pyplot as plt
from skimage import io, exposure

def Histogram_sp(input_Image, specified_Image, no_of_bins):


# Validate inputs
if input_Image.ndim > 2:

input_Image = np.mean(input_Image, axis=2).astype(np.uint8) # Convert to grayscale if needed

if specified_Image.ndim > 2:
specified_Image = np.mean(specified_Image, axis=2).astype(np.uint8) # Convert to grayscale if needed

# Calculate the histogram of the input image


hist_input, bin_edges_input = np.histogram(input_Image.flatten(), no_of_bins, range=(0, 256), density=True)

cdf_input = hist_input.cumsum()
cdf_input = 255 * cdf_input / cdf_input[-1] # Normalize to 0-255

# Calculate the histogram of the reference image


hist_ref, bin_edges_ref = np.histogram(specified_Image.flatten(),␣
↪no_of_bins, range=(0, 256), density=True)

cdf_ref = hist_ref.cumsum()
cdf_ref = 255 * cdf_ref / cdf_ref[-1] # Normalize to 0-255

# Create a lookup table for histogram matching


lookup_table = np.zeros(256, dtype=np.uint8)

# For each intensity level j in the input image, find the intensity level k␣
↪in the
# reference image such that cdf_input[j] � cdf_ref[k]
for i in range(256):
# Find bin containing intensity i
j = min(no_of_bins - 1, int(i / 256 * no_of_bins))

# Find the intensity in the reference image with the closest CDF value
diff = np.abs(cdf_input[j] - cdf_ref)
index = np.argmin(diff)
lookup_table[i] = int(index * 256 / no_of_bins)

# Apply the lookup table to the input image


matched_image = np.zeros_like(input_Image)
for i in range(256):
matched_image[input_Image == i] = lookup_table[i]

return matched_image

# Example usage with visualization
def demonstrate_histogram_specification():
    # For demonstration, create synthetic sample images

    # Create a low contrast image (A)
    x = np.linspace(0, 1, 100)
    y = np.linspace(0, 1, 100)
    X, Y = np.meshgrid(x, y)
    low_contrast = np.uint8((X + Y) * 50 + 50)  # Values between 50 and 150

    # Create a high contrast image (B)
    high_contrast = np.uint8(np.sin(X * 10) * 127 + 128)  # Full range of values

    # Apply histogram specification
    enhanced_image = Histogram_sp(low_contrast, high_contrast, no_of_bins=256)

    # Display the results
    fig, axes = plt.subplots(2, 3, figsize=(15, 10))

    # Display images
    axes[0, 0].imshow(low_contrast, cmap='gray')
    axes[0, 0].set_title('Low Contrast Image (A)')
    axes[0, 0].axis('off')

    axes[0, 1].imshow(high_contrast, cmap='gray')
    axes[0, 1].set_title('High Contrast Image (B)')
    axes[0, 1].axis('off')

    axes[0, 2].imshow(enhanced_image, cmap='gray')
    axes[0, 2].set_title('Enhanced Image')
    axes[0, 2].axis('off')

    # Display histograms
    axes[1, 0].hist(low_contrast.flatten(), bins=256, range=(0, 256), color='black')
    axes[1, 0].set_title('Histogram of A')
    axes[1, 0].set_xlim([0, 256])

    axes[1, 1].hist(high_contrast.flatten(), bins=256, range=(0, 256), color='black')
    axes[1, 1].set_title('Histogram of B')
    axes[1, 1].set_xlim([0, 256])

    axes[1, 2].hist(enhanced_image.flatten(), bins=256, range=(0, 256), color='black')
    axes[1, 2].set_title('Histogram of Enhanced Image')
    axes[1, 2].set_xlim([0, 256])

    plt.tight_layout()
    plt.show()

    return low_contrast, high_contrast, enhanced_image

if __name__ == "__main__":
    demonstrate_histogram_specification()
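
As a quick cross-check (a minimal sketch, assuming the cell above has been run in the same session), skimage's built-in exposure.match_histograms can be applied to the same synthetic image pair; its output histogram should be broadly similar to the one produced by Histogram_sp.

[ ]: # Sketch: compare Histogram_sp with skimage.exposure.match_histograms
import matplotlib.pyplot as plt
from skimage.exposure import match_histograms

low_contrast, high_contrast, enhanced_image = demonstrate_histogram_specification()
reference_result = match_histograms(low_contrast, high_contrast)

plt.figure(figsize=(10, 4))
plt.subplot(1, 2, 1)
plt.hist(enhanced_image.flatten(), bins=256, range=(0, 256), color='black')
plt.title('Histogram_sp output')
plt.subplot(1, 2, 2)
plt.hist(reference_result.flatten(), bins=256, range=(0, 256), color='black')
plt.title('match_histograms output')
plt.tight_layout()
plt.show()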

[ ]:

[2]: import numpy as np


import matplotlib.pyplot as plt
from matplotlib.gridspec import GridSpec
from skimage import io, color, img_as_float, img_as_ubyte
from scipy import fftpack

def apply_frequency_filter(image, filter_function):


"""
Apply a frequency domain filter to an image

Parameters:
-----------
image : 2D numpy array
Input grayscale image
filter_function : 2D numpy array
The filter to be applied in the frequency domain

Returns:
--------
filtered_image : 2D numpy array
The filtered image
"""
# Convert to float for better precision
if image.dtype != np.float64:
image = img_as_float(image)

# Apply FFT to the image


f_transform = fftpack.fft2(image)

# Shift the zero-frequency component to the center


f_transform_shifted = fftpack.fftshift(f_transform)

# Apply the filter


filtered_f_transform = f_transform_shifted * filter_function

# Inverse shift
filtered_f_transform_unshifted = fftpack.ifftshift(filtered_f_transform)

# Apply inverse FFT


filtered_image = np.real(fftpack.ifft2(filtered_f_transform_unshifted))

# Normalize the image to 0-1 range


    filtered_image = (filtered_image - filtered_image.min()) / (filtered_image.max() - filtered_image.min())

return filtered_image
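
# A minimal sanity check for apply_frequency_filter (a sketch; the helper name is assumed):
# with an all-pass filter of ones, the chain reduces to ifft2(fft2(image)), so the output
# should match the input up to the final min-max rescaling. Call it manually if desired.
def _check_allpass_identity(size=32, seed=0):
    rng = np.random.default_rng(seed)
    test_image = rng.random((size, size))
    result = apply_frequency_filter(test_image, np.ones((size, size)))
    # Undo the min-max rescaling of the input for a fair comparison
    expected = (test_image - test_image.min()) / (test_image.max() - test_image.min())
    return np.allclose(result, expected)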

def create_ideal_lowpass_filter(shape, cutoff_frequency):


"""
Create an ideal low-pass filter with the specified cutoff frequency

Parameters:
-----------
shape : tuple
The shape of the filter (should match the image shape)
cutoff_frequency : float
The cutoff frequency of the filter

Returns:
--------
h : 2D numpy array
The ideal low-pass filter
"""
rows, cols = shape

crow, ccol = rows // 2, cols // 2

# Create a grid of distances from the center


y, x = np.ogrid[-crow:rows-crow, -ccol:cols-ccol]
distances = np.sqrt(x*x + y*y)

# Create the filter mask (1 inside the circle, 0 outside)


mask = distances <= cutoff_frequency

# Convert to float
h = np.zeros((rows, cols))
h[mask] = 1.0

return h

def create_butterworth_lowpass_filter(shape, cutoff_frequency, order=2):


"""
    Create a Butterworth low-pass filter with the specified cutoff frequency and order

Parameters:
-----------
shape : tuple
The shape of the filter (should match the image shape)
cutoff_frequency : float
The cutoff frequency of the filter
order : int, optional
The order of the Butterworth filter (default is 2)

Returns:
--------
h : 2D numpy array
The Butterworth low-pass filter
"""
rows, cols = shape
crow, ccol = rows // 2, cols // 2

# Create a grid of distances from the center


y, x = np.ogrid[-crow:rows-crow, -ccol:cols-ccol]
distances = np.sqrt(x*x + y*y)

# Create the filter


h = 1.0 / (1.0 + (distances / cutoff_frequency) ** (2 * order))

return h

def create_gaussian_lowpass_filter(shape, cutoff_frequency):

"""
Create a Gaussian low-pass filter with the specified cutoff frequency

Parameters:
-----------
shape : tuple
The shape of the filter (should match the image shape)
cutoff_frequency : float
The cutoff frequency of the filter (standard deviation)

Returns:
--------
h : 2D numpy array
The Gaussian low-pass filter
"""
rows, cols = shape
crow, ccol = rows // 2, cols // 2

# Create a grid of distances from the center


y, x = np.ogrid[-crow:rows-crow, -ccol:cols-ccol]
distances_squared = x*x + y*y

# Create the filter


h = np.exp(-distances_squared / (2 * cutoff_frequency * cutoff_frequency))

return h
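
# A small comparison sketch (helper name assumed): evaluate each low-pass filter exactly at
# the cutoff radius. The Ideal filter is still 1 there (it drops to 0 just beyond), the
# Butterworth filter passes exactly 0.5, and the Gaussian filter passes exp(-0.5) ~= 0.607.
def _filter_values_at_cutoff(shape=(64, 64), cutoff=16):
    crow, ccol = shape[0] // 2, shape[1] // 2
    col = ccol + cutoff  # a point exactly `cutoff` pixels from the center
    return {
        'ideal': create_ideal_lowpass_filter(shape, cutoff)[crow, col],
        'butterworth': create_butterworth_lowpass_filter(shape, cutoff, order=2)[crow, col],
        'gaussian': create_gaussian_lowpass_filter(shape, cutoff)[crow, col],
    }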

def visualize_filter_3d(filter_function, title, figsize=(8, 6)):


"""
Create a 3D visualization of the filter
"""
from mpl_toolkits.mplot3d import Axes3D

rows, cols = filter_function.shape


x = np.linspace(-cols//2, cols//2, cols)
y = np.linspace(-rows//2, rows//2, rows)
X, Y = np.meshgrid(x, y)

fig = plt.figure(figsize=figsize)
ax = fig.add_subplot(111, projection='3d')
ax.plot_surface(X, Y, filter_function, cmap='viridis')
ax.set_title(title)
ax.set_xlabel('X')
ax.set_ylabel('Y')
ax.set_zlabel('Filter Value')

plt.tight_layout()

return fig

def compare_filter_types(image, cutoff_frequency=30):


"""
    Compare Ideal, Butterworth, and Gaussian low-pass filters with the same cutoff frequency

"""
# Create filters
ideal_filter = create_ideal_lowpass_filter(image.shape, cutoff_frequency)
    butterworth_filter = create_butterworth_lowpass_filter(image.shape, cutoff_frequency, order=2)
    gaussian_filter = create_gaussian_lowpass_filter(image.shape, cutoff_frequency)

# Apply filters
ideal_filtered = apply_frequency_filter(image, ideal_filter)
butterworth_filtered = apply_frequency_filter(image, butterworth_filter)
gaussian_filtered = apply_frequency_filter(image, gaussian_filter)

# Create figure
fig = plt.figure(figsize=(15, 12))
gs = GridSpec(3, 3, figure=fig)

# Original image
ax1 = fig.add_subplot(gs[0, 0])
ax1.imshow(image, cmap='gray')
ax1.set_title('Original Image')
ax1.axis('off')

# Filters
ax2 = fig.add_subplot(gs[0, 1])
ax2.imshow(ideal_filter, cmap='gray')
ax2.set_title(f'Ideal Filter (r={cutoff_frequency})')
ax2.axis('off')

ax3 = fig.add_subplot(gs[1, 1])


ax3.imshow(butterworth_filter, cmap='gray')
ax3.set_title(f'Butterworth Filter (r={cutoff_frequency}, n=2)')
ax3.axis('off')

ax4 = fig.add_subplot(gs[2, 1])


ax4.imshow(gaussian_filter, cmap='gray')
ax4.set_title(f'Gaussian Filter (r={cutoff_frequency})')
ax4.axis('off')

# Filtered images
ax5 = fig.add_subplot(gs[0, 2])

ax5.imshow(ideal_filtered, cmap='gray')
ax5.set_title('Ideal Filtered')
ax5.axis('off')

ax6 = fig.add_subplot(gs[1, 2])


ax6.imshow(butterworth_filtered, cmap='gray')
ax6.set_title('Butterworth Filtered')
ax6.axis('off')

ax7 = fig.add_subplot(gs[2, 2])


ax7.imshow(gaussian_filtered, cmap='gray')
ax7.set_title('Gaussian Filtered')
ax7.axis('off')

# Filter cross-sections
ax8 = fig.add_subplot(gs[1:, 0])
center_row = image.shape[0] // 2
ax8.plot(ideal_filter[center_row, :], label='Ideal')
ax8.plot(butterworth_filter[center_row, :], label='Butterworth')
ax8.plot(gaussian_filter[center_row, :], label='Gaussian')
ax8.set_title('Filter Cross-section')
ax8.set_xlabel('Spatial Frequency')
ax8.set_ylabel('Filter Response')
ax8.legend()
ax8.grid(True)

plt.tight_layout()
    plt.suptitle(f'Comparison of Low-Pass Filters (Cutoff = {cutoff_frequency})', fontsize=16)

plt.subplots_adjust(top=0.93)

return fig, {
'ideal': ideal_filtered,
'butterworth': butterworth_filtered,
'gaussian': gaussian_filtered
}

def demonstrate_ringing_effect(image):
    """
    Demonstrate the ringing effect of the Ideal low-pass filter
    """
    # Apply Ideal filters with different cutoff frequencies
    cutoffs = [10, 30, 60]
    filtered_images = []

    for cutoff in cutoffs:
        ideal_filter = create_ideal_lowpass_filter(image.shape, cutoff)
        filtered = apply_frequency_filter(image, ideal_filter)
        filtered_images.append(filtered)

    # Create a 2x4 grid: original + three filtered images, and their edge profiles
    fig, axes = plt.subplots(2, 4, figsize=(16, 8))

    # Original image
    axes[0, 0].imshow(image, cmap='gray')
    axes[0, 0].set_title('Original Image')
    axes[0, 0].axis('off')

    # Filtered images
    for i, (cutoff, filtered) in enumerate(zip(cutoffs, filtered_images)):
        axes[0, i+1].imshow(filtered, cmap='gray')
        axes[0, i+1].set_title(f'Ideal LPF (r={cutoff})')
        axes[0, i+1].axis('off')

    # Show edge profiles to highlight ringing
    # Select a row that crosses an edge
    row = image.shape[0] // 2
    axes[1, 0].plot(image[row, :], label='Original')
    axes[1, 0].set_title('Edge Profile (Original)')
    axes[1, 0].grid(True)

    for i, (cutoff, filtered) in enumerate(zip(cutoffs, filtered_images)):
        axes[1, i+1].plot(filtered[row, :], color='red')
        axes[1, i+1].set_title(f'Edge Profile (r={cutoff})')
        axes[1, i+1].grid(True)

    plt.tight_layout()
    plt.suptitle('Demonstration of Ringing Effect in Ideal Low-Pass Filter', fontsize=16)
    plt.subplots_adjust(top=0.93)

    return fig

def compare_cutoff_frequencies(image, filter_type='butterworth', cutoffs=[5, 15, 30, 90, 120], order=2):

"""
    Compare the effect of different cutoff frequencies for the specified filter type

Parameters:
-----------
image : 2D numpy array
Input grayscale image
filter_type : str
'butterworth' or 'gaussian'
cutoffs : list
List of cutoff frequencies to compare
order : int
Order for Butterworth filter (ignored for Gaussian)
"""
filtered_images = []
filters = []

for cutoff in cutoffs:


if filter_type == 'butterworth':
            filter_func = create_butterworth_lowpass_filter(image.shape, cutoff, order=order)

title = f'Butterworth (n={order})'


else: # gaussian
filter_func = create_gaussian_lowpass_filter(image.shape, cutoff)
title = 'Gaussian'

filtered = apply_frequency_filter(image, filter_func)


filtered_images.append(filtered)
filters.append(filter_func)

# Create figure
rows = 2
cols = len(cutoffs) + 1
fig = plt.figure(figsize=(3*cols, 3*rows))

# Original image
ax = plt.subplot(rows, cols, 1)
ax.imshow(image, cmap='gray')
ax.set_title('Original Image')
ax.axis('off')

# Filtered images
for i, (cutoff, filtered) in enumerate(zip(cutoffs, filtered_images)):
ax = plt.subplot(rows, cols, i+2)
ax.imshow(filtered, cmap='gray')
ax.set_title(f'{title} (r={cutoff})')
ax.axis('off')

# Filters

ax = plt.subplot(rows, cols, cols+1)
center_row = image.shape[0] // 2
for i, (cutoff, filter_func) in enumerate(zip(cutoffs, filters)):
ax.plot(filter_func[center_row, :], label=f'r={cutoff}')
ax.set_title('Filter Cross-sections')
ax.legend()
ax.grid(True)

# Filter visualizations
for i, (cutoff, filter_func) in enumerate(zip(cutoffs, filters)):
ax = plt.subplot(rows, cols, cols+i+2)
ax.imshow(filter_func, cmap='viridis')
ax.set_title(f'Filter (r={cutoff})')
ax.axis('off')

plt.tight_layout()
    plt.suptitle(f'Comparison of {title} Low-Pass Filter with Different Cutoff Frequencies', fontsize=16)

plt.subplots_adjust(top=0.93)

return fig

def visualize_spectrum(image, log_transform=True, title='Frequency Spectrum'):


"""
Visualize the frequency spectrum of an image
"""
f_transform = fftpack.fft2(image)
f_transform_shifted = fftpack.fftshift(f_transform)

magnitude_spectrum = np.abs(f_transform_shifted)

if log_transform:
# Use log transform to enhance visualization
magnitude_spectrum = np.log1p(magnitude_spectrum)

# Normalize for display


    magnitude_spectrum = (magnitude_spectrum - magnitude_spectrum.min()) / (magnitude_spectrum.max() - magnitude_spectrum.min())

fig, ax = plt.subplots(figsize=(6, 6))


im = ax.imshow(magnitude_spectrum, cmap='viridis')
ax.set_title(title)
ax.axis('off')
fig.colorbar(im, ax=ax)

return fig, magnitude_spectrum

def main():
# Load image - using the specific path you provided
try:
image_path = r'C:\Users\dkg19\OneDrive\Desktop\DIP LAB\image_gray.png'
image = io.imread(image_path, as_gray=True)
print(f"Successfully loaded image: {image_path}")
print(f"Image shape: {image.shape}")
except Exception as e:
print(f"Error loading image: {e}")
print("Creating a sample image instead.")
size = 512
x = np.linspace(-10, 10, size)
y = np.linspace(-10, 10, size)
X, Y = np.meshgrid(x, y)
image = np.zeros((size, size))

# Add some shapes to the image


image += 0.5 * (np.sin(0.2 * X) + np.sin(0.2 * Y))
image += 0.5 * np.exp(-((X/2)**2 + (Y/2)**2))

# Add a square
square_size = size // 4
center = size // 2
image[center-square_size:center+square_size, center-square_size:
↪center+square_size] += 0.5

# Add some noise


image += 0.1 * np.random.randn(size, size)

# Normalize to [0, 1]
image = (image - image.min()) / (image.max() - image.min())

    # Create output directory if it doesn't exist
    import os
    if not os.path.exists('output'):
        os.makedirs('output')
        print("Created output directory")

    # Visualize the frequency spectrum of the original image
    fig_spectrum, spectrum = visualize_spectrum(image, title="Original Image Frequency Spectrum")
    fig_spectrum.savefig('output/frequency_spectrum.png', dpi=300, bbox_inches='tight')

    # Part a: Compare Ideal, Butterworth, and Gaussian filters with the same cutoff
    cutoff = 30
    print(f"Comparing filter types with cutoff radius = {cutoff}")
    fig_a, filtered_images = compare_filter_types(image, cutoff_frequency=cutoff)
    fig_a.savefig('output/filter_comparison.png', dpi=300, bbox_inches='tight')

    # Part b: Demonstrate ringing effect
    print("Demonstrating ringing effect with Ideal low-pass filter")
    fig_b = demonstrate_ringing_effect(image)
    fig_b.savefig('output/ringing_effect.png', dpi=300, bbox_inches='tight')

    # Part c: Compare Butterworth filters with different cutoff frequencies
    print("Comparing Butterworth filters with different cutoff frequencies")
    fig_c = compare_cutoff_frequencies(image, filter_type='butterworth',
                                       cutoffs=[5, 15, 30, 90, 120], order=2)
    fig_c.savefig('output/butterworth_cutoffs.png', dpi=300, bbox_inches='tight')

    # Part d: Compare Gaussian filters with different cutoff frequencies
    print("Comparing Gaussian filters with different cutoff frequencies")
    fig_d = compare_cutoff_frequencies(image, filter_type='gaussian',
                                       cutoffs=[5, 15, 30, 90, 120])
    fig_d.savefig('output/gaussian_cutoffs.png', dpi=300, bbox_inches='tight')

    # Display 3D visualizations of filters
    print("Creating 3D filter visualizations")
    ideal_filter = create_ideal_lowpass_filter(image.shape, cutoff)
    butterworth_filter = create_butterworth_lowpass_filter(image.shape, cutoff, order=2)
    gaussian_filter = create_gaussian_lowpass_filter(image.shape, cutoff)

    fig_3d_ideal = visualize_filter_3d(ideal_filter, f'Ideal Low-Pass Filter (r={cutoff})')
    fig_3d_butterworth = visualize_filter_3d(butterworth_filter, f'Butterworth Low-Pass Filter (r={cutoff}, n=2)')
    fig_3d_gaussian = visualize_filter_3d(gaussian_filter, f'Gaussian Low-Pass Filter (r={cutoff})')

    fig_3d_ideal.savefig('output/ideal_filter_3d.png', dpi=300, bbox_inches='tight')
    fig_3d_butterworth.savefig('output/butterworth_filter_3d.png', dpi=300, bbox_inches='tight')
    fig_3d_gaussian.savefig('output/gaussian_filter_3d.png', dpi=300, bbox_inches='tight')

    print("All visualizations have been saved to the 'output' folder.")

    # Save filtered images separately (convert to 8-bit first; float images cannot be written as PNG)
    io.imsave('output/ideal_filtered.png', img_as_ubyte(filtered_images['ideal']))
    io.imsave('output/butterworth_filtered.png', img_as_ubyte(filtered_images['butterworth']))
    io.imsave('output/gaussian_filtered.png', img_as_ubyte(filtered_images['gaussian']))

    print("Filtered images have been saved.")

    # Show all figures
    plt.show()

if __name__ == '__main__':
    main()

Successfully loaded image: C:\Users\dkg19\OneDrive\Desktop\DIP LAB\image_gray.png
Image shape: (580, 451)
Comparing filter types with cutoff radius = 30
Demonstrating ringing effect with Ideal low-pass filter
Comparing Butterworth filters with different cutoff frequencies
Comparing Gaussian filters with different cutoff frequencies
Creating 3D filter visualizations
All visualizations have been saved to the 'output' folder.

---------------------------------------------------------------------------
KeyError                                  Traceback (most recent call last)
File c:\Users\dkg19\AppData\Local\Programs\Python\Python313\Lib\site-packages\PIL\PngImagePlugin.py:1363, in _save(im, fp, filename, chunk, save_all)
   1362 try:
-> 1363     rawmode, bit_depth, color_type = _OUTMODES[outmode]
   1364 except KeyError as e:

KeyError: 'F'

The above exception was the direct cause of the following exception:

OSError                                  Traceback (most recent call last)
Cell In[2], line 473
    470 plt.show()
    472 if __name__ == '__main__':
--> 473     main()

Cell In[2], line 463, in main()
    460 print("All visualizations have been saved to the 'output' folder.")
    462 # Save filtered images separately
--> 463 io.imsave('output/ideal_filtered.png', img_as_float(filtered_images['ideal']))
    464 io.imsave('output/butterworth_filtered.png', img_as_float(filtered_images['butterworth']))
    465 io.imsave('output/gaussian_filtered.png', img_as_float(filtered_images['gaussian']))

File c:\...\site-packages\skimage\_shared\util…py:328, in deprecate_parameter.__call__.<locals>.fixed_func(*args, **kwargs)
    326     kwargs[self.new_name] = deprecated_value
--> 328 return func(*args, **kwargs)

File c:\...\site-packages\skimage\io\_io.py:206, in imsave(fname, arr, plugin, check_contrast, **plugin_args)
    205 with _hide_plugin_deprecation_warnings():
--> 206     return call_plugin('imsave', fname, arr, plugin=plugin, **plugin_args)

File c:\...\site-packages\skimage\_shared\util…py:538, in deprecate_func.__call__.<locals>.wrapped(*args, **kwargs)
--> 538 return func(*args, **kwargs)

File c:\...\site-packages\skimage\io\manage_pl…py:254, in call_plugin(kind, *args, **kwargs)
--> 254 return func(*args, **kwargs)

File c:\...\site-packages\imageio\v3.py:139, in imwrite(uri, image, plugin, extension, format_hint, **kwargs)
--> 139 with imopen(uri, "w", legacy_mode=False, plugin=plugin, format_hint=format_hint, extension=extension) as img_file:
    147     encoded = img_file.write(image, **kwargs)

File c:\...\site-packages\imageio\core\v3_plug…py:367, in PluginV3.__exit__(self, type, value, traceback)
--> 367 self.close()

File c:\...\site-packages\imageio\plugins\pill…py:144, in PillowPlugin.close(self)
--> 144 self._flush_writer()

File c:\...\site-packages\imageio\plugins\pill…py:485, in PillowPlugin._flush_writer(self)
--> 485 primary_image.save(self._request.get_file(), **self.save_args)

File c:\...\site-packages\PIL\Image.py:2596, in Image.save(self, fp, format, **params)
-> 2596     save_handler(self, fp, filename)

File c:\...\site-packages\PIL\PngImagePlugin.py:1366, in _save(im, fp, filename, chunk, save_all)
   1365     msg = f"cannot write mode {mode} as PNG"
-> 1366     raise OSError(msg) from e

OSError: cannot write mode F as PNG

[ ]:

[1]: # -----------------------------------------
# Import necessary libraries
# -----------------------------------------
import numpy as np
import matplotlib.pyplot as plt
from PIL import Image
from scipy.fft import fft2, ifft2, fftshift, ifftshift

# -----------------------------------------
# Load the grayscale image
# -----------------------------------------

image_path = r'C:\Users\dkg19\OneDrive\Desktop\DIP LAB\image_gray.png'
img = Image.open(image_path).convert('L') # Ensure it's grayscale
img_array = np.array(img)

# Show original image


plt.figure(figsize=(6,6))
plt.imshow(img_array, cmap='gray')
plt.title('Original Image')
plt.axis('off')
plt.show()

# Get image dimensions


M, N = img_array.shape

# -----------------------------------------
# Define filter functions
# -----------------------------------------
def ideal_high_pass_filter(shape, cutoff):
P, Q = shape
u = np.arange(0, P)
v = np.arange(0, Q)
U, V = np.meshgrid(u, v, indexing='ij')
D = np.sqrt((U - P//2)**2 + (V - Q//2)**2)
H = np.zeros((P, Q))
H[D > cutoff] = 1
return H

def butterworth_high_pass_filter(shape, cutoff, order):


P, Q = shape
u = np.arange(0, P)
v = np.arange(0, Q)
U, V = np.meshgrid(u, v, indexing='ij')
D = np.sqrt((U - P//2)**2 + (V - Q//2)**2)
    H = 1 / (1 + (cutoff / (D + 1e-5))**(2*order))  # small constant to avoid division by zero

return H

def gaussian_high_pass_filter(shape, cutoff):


P, Q = shape
u = np.arange(0, P)
v = np.arange(0, Q)
U, V = np.meshgrid(u, v, indexing='ij')
D = np.sqrt((U - P//2)**2 + (V - Q//2)**2)
H = 1 - np.exp(-(D**2) / (2*(cutoff**2)))
return H
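
# A small check (sketch; helper name assumed): the Gaussian high-pass mask is exactly the
# complement of the corresponding Gaussian low-pass mask, i.e. H_hp(u, v) = 1 - H_lp(u, v).
def _check_gaussian_complement(shape=(64, 64), cutoff=16):
    H_hp = gaussian_high_pass_filter(shape, cutoff)
    P, Q = shape
    U, V = np.meshgrid(np.arange(P), np.arange(Q), indexing='ij')
    D = np.sqrt((U - P//2)**2 + (V - Q//2)**2)
    H_lp = np.exp(-(D**2) / (2 * (cutoff**2)))  # Gaussian low-pass with the same cutoff
    return np.allclose(H_hp, 1 - H_lp)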

# -----------------------------------------

# Define function to apply filter in frequency domain
# -----------------------------------------
def apply_filter(img_array, filter_mask):
F = fft2(img_array)
F_shifted = fftshift(F)
G = F_shifted * filter_mask
G_ishift = ifftshift(G)
img_filtered = np.abs(ifft2(G_ishift))
return img_filtered

# -----------------------------------------
# (a) Apply Ideal, Butterworth, Gaussian HPFs with same cutoff
# -----------------------------------------
cutoff = 30 # cutoff frequency

ideal_filter = ideal_high_pass_filter(img_array.shape, cutoff)


butterworth_filter = butterworth_high_pass_filter(img_array.shape, cutoff, order=2)

gaussian_filter = gaussian_high_pass_filter(img_array.shape, cutoff)

img_ideal = apply_filter(img_array, ideal_filter)


img_butterworth = apply_filter(img_array, butterworth_filter)
img_gaussian = apply_filter(img_array, gaussian_filter)

# Show results
plt.figure(figsize=(18,6))

plt.subplot(1,3,1)
plt.imshow(img_ideal, cmap='gray')
plt.title('Ideal High Pass Filter')
plt.axis('off')

plt.subplot(1,3,2)
plt.imshow(img_butterworth, cmap='gray')
plt.title('Butterworth High Pass Filter (n=2)')
plt.axis('off')

plt.subplot(1,3,3)
plt.imshow(img_gaussian, cmap='gray')
plt.title('Gaussian High Pass Filter')
plt.axis('off')

plt.show()

# -----------------------------------------
# (b) Ringing effect demonstration with Ideal HPF
# -----------------------------------------

plt.figure(figsize=(6,6))
plt.imshow(img_ideal, cmap='gray')
plt.title('Ringing Effect - Ideal High Pass Filter')
plt.axis('off')
plt.show()
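
# The ringing is easier to see on a 1-D profile than in the image alone (a sketch):
# plot one row of the original and of the Ideal-HPF result; the oscillations around
# edges in the filtered profile are the ringing caused by the sharp cutoff.
row = img_array.shape[0] // 2
plt.figure(figsize=(10, 4))
plt.plot(img_array[row, :], label='Original row profile')
plt.plot(img_ideal[row, :], label='Ideal HPF row profile')
plt.title('Row Profile Showing Ringing (Ideal HPF)')
plt.legend()
plt.grid(True)
plt.show()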

# -----------------------------------------
# (c) Compare Butterworth HPF for different cut-off frequencies
# -----------------------------------------
cutoffs = [5, 15, 30, 90, 120]

plt.figure(figsize=(20,10))

for i, cutoff in enumerate(cutoffs, 1):


    butterworth_filter = butterworth_high_pass_filter(img_array.shape, cutoff, order=2)

img_filtered = apply_filter(img_array, butterworth_filter)

plt.subplot(2, 3, i)
plt.imshow(img_filtered, cmap='gray')
plt.title(f'Butterworth HPF\nCutoff={cutoff}')
plt.axis('off')

plt.tight_layout()
plt.show()

# -----------------------------------------
# (d) Compare Gaussian HPF for different cut-off frequencies
# -----------------------------------------
plt.figure(figsize=(20,10))

for i, cutoff in enumerate(cutoffs, 1):


gaussian_filter = gaussian_high_pass_filter(img_array.shape, cutoff)
img_filtered = apply_filter(img_array, gaussian_filter)

plt.subplot(2, 3, i)
plt.imshow(img_filtered, cmap='gray')
plt.title(f'Gaussian HPF\nCutoff={cutoff}')
plt.axis('off')

plt.tight_layout()
plt.show()

[ ]:
