Arghyadip Sahu (Assignment - 7)

The document outlines a comprehensive implementation of various image processing techniques using Python, including filtering in the frequency domain (low-pass, high-pass, and band-pass), Walsh and Hadamard transforms, Discrete Cosine Transform (DCT), and Discrete Wavelet Transform (DWT). Each section includes code for loading images, applying the respective transformations, and visualizing the results. Additionally, it covers image compression techniques and hybrid transformations combining DCT and DWT.

Name: Arghyadip Sahu ID: 221003003001

# 1. Implement Filtering (high-pass, low-pass & band-pass) in the Frequency Domain

import numpy as np
import cv2
import matplotlib.pyplot as plt
import requests
from io import BytesIO

# Step 1: Load the image from a URL or upload from your local machine
def load_image_from_url(url):
    response = requests.get(url)
    image = np.asarray(bytearray(response.content), dtype="uint8")
    image = cv2.imdecode(image, cv2.IMREAD_GRAYSCALE)
    return image

# Use a URL to load the image (replace with any full image URL)
image_url = 'https://cdna.artstation.com/p/assets/images/images/008/043/118/large/constantine-marin-constantine-marin'
image = load_image_from_url(image_url)

# Alternatively, you can upload an image from your local machine
# Uncomment the following lines to enable file upload
'''
from google.colab import files
uploaded = files.upload()
for filename in uploaded.keys():
    image = cv2.imread(filename, cv2.IMREAD_GRAYSCALE)
'''

# Display the original image


plt.imshow(image, cmap='gray')
plt.title('Original Image')
plt.axis('off')
plt.show()

# Step A: FFT and frequency domain conversion


fft_image = np.fft.fft2(image)
fft_image_shifted = np.fft.fftshift(fft_image)
magnitude_spectrum = np.log(np.abs(fft_image_shifted) + 1)

plt.imshow(magnitude_spectrum, cmap='gray')
plt.title('Magnitude Spectrum')
plt.axis('off')
plt.show()
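
# Optional sanity check (a minimal sketch): inverting the shifted spectrum
# without any mask should give back the original image up to floating-point
# rounding, confirming the FFT/shift round trip before any filtering is applied.
roundtrip = np.fft.ifft2(np.fft.ifftshift(fft_image_shifted)).real
print('Max FFT round-trip error:', np.max(np.abs(roundtrip - image)))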

# Step B: Low-Pass Filter


def low_pass_filter(shape, cutoff):
    rows, cols = shape
    crow, ccol = rows // 2, cols // 2
    mask = np.zeros((rows, cols), np.uint8)
    y, x = np.ogrid[-crow:rows-crow, -ccol:cols-ccol]
    mask_area = x**2 + y**2 <= cutoff**2
    mask[mask_area] = 1
    return mask

# Create and apply low-pass filter


lp_filter = low_pass_filter(image.shape, 30)
filtered_lp = fft_image_shifted * lp_filter
image_lp = np.fft.ifft2(np.fft.ifftshift(filtered_lp)).real

# Display the low-pass filtered image


plt.imshow(image_lp, cmap='gray')
plt.title('Low-Pass Filtered Image')
plt.axis('off')
plt.show()

# Step C: High-Pass Filter


def high_pass_filter(shape, cutoff):
    rows, cols = shape
    crow, ccol = rows // 2, cols // 2
    mask = np.ones((rows, cols), np.uint8)
    y, x = np.ogrid[-crow:rows-crow, -ccol:cols-ccol]
    mask_area = x**2 + y**2 <= cutoff**2
    mask[mask_area] = 0
    return mask

# Create and apply high-pass filter


hp_filter = high_pass_filter(image.shape, 30)
filtered_hp = fft_image_shifted * hp_filter
image_hp = np.fft.ifft2(np.fft.ifftshift(filtered_hp)).real

# Display the high-pass filtered image


plt.imshow(image_hp, cmap='gray')
plt.title('High-Pass Filtered Image')
plt.axis('off')
plt.show()

# Step D: Band-Pass Filter


def band_pass_filter(shape, low_cutoff, high_cutoff):
    rows, cols = shape
    crow, ccol = rows // 2, cols // 2
    mask = np.zeros((rows, cols), np.uint8)
    y, x = np.ogrid[-crow:rows-crow, -ccol:cols-ccol]
    radius = np.sqrt(x**2 + y**2)
    mask[(radius >= low_cutoff) & (radius <= high_cutoff)] = 1
    return mask

# Create and apply band-pass filter


bp_filter = band_pass_filter(image.shape, 10, 50)
filtered_bp = fft_image_shifted * bp_filter
image_bp = np.fft.ifft2(np.fft.ifftshift(filtered_bp)).real

# Display the band-pass filtered image


plt.imshow(image_bp, cmap='gray')
plt.title('Band-Pass Filtered Image')
plt.axis('off')
plt.show()
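
# Optional side-by-side view (a minimal sketch): the original and the three
# filtered results in one figure for easier comparison.
results = [(image, 'Original'), (image_lp, 'Low-Pass'),
           (image_hp, 'High-Pass'), (image_bp, 'Band-Pass')]
plt.figure(figsize=(12, 10))
for i, (img, title) in enumerate(results, start=1):
    plt.subplot(2, 2, i)
    plt.imshow(img, cmap='gray')
    plt.title(title)
    plt.axis('off')
plt.tight_layout()
plt.show()
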
# 2. Walsh and Hadamard transform

import numpy as np
import cv2
import matplotlib.pyplot as plt
import requests
from io import BytesIO

# Function to load image from a URL


def load_image_from_url(url):
    response = requests.get(url)
    image = np.asarray(bytearray(response.content), dtype="uint8")
    image = cv2.imdecode(image, cv2.IMREAD_GRAYSCALE)
    return image

# Replace with your image URL


image_url = "https://www.shutterstock.com/image-photo/paved-road-passing-through-dense-260nw-2489277965.jpg"
image = load_image_from_url(image_url)

# Resize the image to 64x64 for simplicity


image = cv2.resize(image, (64, 64))

# Task 1: Walsh Transform


def walsh_transform(image):
    n = image.shape[0]
    walsh_matrix = np.zeros((n, n))
    # Entry (i, j) is (-1) raised to the bit count of i AND j (natural ordering)
    for i in range(n):
        for j in range(n):
            walsh_matrix[i, j] = (-1) ** (bin(i & j).count('1'))
    transformed = np.dot(walsh_matrix, np.dot(image, walsh_matrix))
    return transformed

# Apply Walsh Transform


walsh_result = walsh_transform(image)

# Visualize Walsh Spectrum


plt.imshow(walsh_result, cmap='gray')
plt.title('Walsh Spectrum')
plt.axis('off')
plt.show()

# Save Walsh output


cv2.imwrite('walsh_transform_output.jpg', np.clip(walsh_result, 0, 255).astype(np.uint8))
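
# Inverse Walsh transform (a minimal sketch): the matrix built above is symmetric
# and satisfies W @ W = n * I, so the image can be recovered as W @ X @ W / n**2.
def inverse_walsh_transform(transformed):
    n = transformed.shape[0]
    walsh_matrix = np.zeros((n, n))
    for i in range(n):
        for j in range(n):
            walsh_matrix[i, j] = (-1) ** (bin(i & j).count('1'))
    return np.dot(walsh_matrix, np.dot(transformed, walsh_matrix)) / (n * n)

recovered_image = inverse_walsh_transform(walsh_result)
print('Max Walsh reconstruction error:', np.max(np.abs(recovered_image - image)))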

# Task 2: Hadamard Transform


def hadamard_transform(image):
    n = image.shape[0]

    def hadamard_matrix(n):
        # Sylvester construction: H(2n) = [[H(n), H(n)], [H(n), -H(n)]]
        if n == 1:
            return np.array([[1]])
        else:
            H = hadamard_matrix(n // 2)
            return np.block([[H, H], [H, -H]])

    hadamard_matrix_n = hadamard_matrix(n)
    transformed = np.dot(hadamard_matrix_n, np.dot(image, hadamard_matrix_n))
    return transformed

# Apply Hadamard Transform


hadamard_result = hadamard_transform(image)

# Visualize Hadamard Spectrum


plt.imshow(hadamard_result, cmap='gray')
plt.title('Hadamard Spectrum')
plt.axis('off')
plt.show()
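
# Quick check (a small sketch): the bit-count construction used for the Walsh
# matrix above is exactly the natural-ordered (Sylvester) Hadamard matrix, so
# the two spectra should be numerically identical.
print('Walsh and Hadamard results match:', np.allclose(walsh_result, hadamard_result))
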
# 3. Discrete Cosine Transform (DCT)

import numpy as np
import cv2
import matplotlib.pyplot as plt
import requests
from io import BytesIO
from scipy.fftpack import dct, idct

# Function to load image from a URL


def load_image_from_url(url):
    response = requests.get(url)
    image = np.asarray(bytearray(response.content), dtype="uint8")
    image = cv2.imdecode(image, cv2.IMREAD_GRAYSCALE)
    return image

# Replace with your image URL


image_url = 'https://www.shutterstock.com/image-photo/paved-road-passing-through-dense-260nw-2489277965.jpg'
image = load_image_from_url(image_url)

# Resize the image for processing (optional)


image = cv2.resize(image, (256, 256))

# Step A: Apply 2D DCT


def dct_2d(image):
    return dct(dct(image.T, norm='ortho').T, norm='ortho')

# Apply DCT
dct_result = dct_2d(image)

# Visualize DCT coefficients


plt.figure(figsize=(8, 8))
plt.imshow(np.log(np.abs(dct_result) + 1), cmap='gray')
plt.title('DCT Coefficients')
plt.axis('off')
plt.show()

# Step B: Implement Image Compression


def compress_dct(image, compression_ratio):
    # Apply DCT
    dct_image = dct_2d(image)

    # Find the magnitude threshold that keeps the top `compression_ratio` percent
    flattened = np.abs(dct_image).flatten()
    threshold = np.percentile(flattened, 100 - compression_ratio)

    # Create a mask to keep only the largest coefficients
    mask = np.abs(dct_image) >= threshold
    compressed_dct = dct_image * mask

    return compressed_dct, mask

# Step C: Reconstruct Image


def idct_2d(dct_image):
    return idct(idct(dct_image.T, norm='ortho').T, norm='ortho')

# Compression ratios to test


compression_ratios = [10, 50] # Top 10% and top 50%

for ratio in compression_ratios:
    # Compress the image
    compressed_dct, mask = compress_dct(image, ratio)

    # Reconstruct the image
    reconstructed_image = idct_2d(compressed_dct)
    reconstructed_image = np.clip(reconstructed_image, 0, 255).astype(np.uint8)

    # Visualize the reconstructed image
    plt.figure(figsize=(8, 8))
    plt.imshow(reconstructed_image, cmap='gray')
    plt.title(f'Reconstructed Image (Top {ratio}%)')
    plt.axis('off')
    plt.show()
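
# Quality check (a minimal sketch): peak signal-to-noise ratio of each compressed
# reconstruction against the original, using PSNR = 10 * log10(255^2 / MSE).
def psnr(reference, test):
    mse = np.mean((reference.astype(np.float64) - test.astype(np.float64)) ** 2)
    return float('inf') if mse == 0 else 10 * np.log10(255.0 ** 2 / mse)

for ratio in compression_ratios:
    compressed_dct, _ = compress_dct(image, ratio)
    reconstructed = np.clip(idct_2d(compressed_dct), 0, 255).astype(np.uint8)
    print(f'Top {ratio}%: PSNR = {psnr(image, reconstructed):.2f} dB')
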
# 4. Discrete Wavelet Transform (DWT)

import numpy as np
import cv2
import matplotlib.pyplot as plt
import requests
import pywt
from io import BytesIO
from scipy.fftpack import dct, idct

# Function to load an image from a URL


def load_image_from_url(url):
    response = requests.get(url)
    image = np.asarray(bytearray(response.content), dtype="uint8")
    image = cv2.imdecode(image, cv2.IMREAD_GRAYSCALE)
    return image

# Replace with your image URL


image_url = 'https://img.freepik.com/premium-photo/mountain-lake-with-mountain-range-background_867975-82.jpg'
image = load_image_from_url(image_url)

# Resize the image for processing


image = cv2.resize(image, (256, 256))

# Task 1: Implement 2D DWT


def dwt_2d(image):
    coeffs = pywt.wavedec2(image, 'haar', level=2)  # 2 levels of DWT
    return coeffs

# Apply 2D DWT
coeffs = dwt_2d(image)

# Visualize Wavelet Decomposition


# coeffs[0] is the coarsest approximation; coeffs[1] holds the level-2 detail subbands
cA, (cH, cV, cD) = coeffs[0], coeffs[1]

plt.figure(figsize=(12, 8))
plt.subplot(2, 2, 1)
plt.imshow(cA, cmap='gray')
plt.title('Approximation Coefficients (cA)')
plt.axis('off')

plt.subplot(2, 2, 2)
plt.imshow(cH, cmap='gray')
plt.title('Horizontal Detail Coefficients (cH)')
plt.axis('off')

plt.subplot(2, 2, 3)
plt.imshow(cV, cmap='gray')
plt.title('Vertical Detail Coefficients (cV)')
plt.axis('off')

plt.subplot(2, 2, 4)
plt.imshow(cD, cmap='gray')
plt.title('Diagonal Detail Coefficients (cD)')
plt.axis('off')

plt.tight_layout()
plt.show()

# Save wavelet decomposition images (the coefficients are float, so scale them to 8-bit first)
def to_uint8(coeff):
    return cv2.normalize(coeff, None, 0, 255, cv2.NORM_MINMAX).astype(np.uint8)

cv2.imwrite('approximation_coefficients.jpg', to_uint8(cA))
cv2.imwrite('horizontal_detail_coefficients.jpg', to_uint8(cH))
cv2.imwrite('vertical_detail_coefficients.jpg', to_uint8(cV))
cv2.imwrite('diagonal_detail_coefficients.jpg', to_uint8(cD))

# Task 2: Reconstruct Image from DWT


def reconstruct_image(coeffs):
    return pywt.waverec2(coeffs, 'haar')

# Full reconstruction
reconstructed_image_full = reconstruct_image(coeffs)
reconstructed_image_full = np.clip(reconstructed_image_full, 0, 255).astype(np.uint8)

# Visualize reconstructed image


plt.figure(figsize=(8, 8))
plt.imshow(reconstructed_image_full, cmap='gray')
plt.title('Reconstructed Image (Full Coefficients)')
plt.axis('off')
plt.show()
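
# Sanity check (a minimal sketch): the Haar DWT followed by its inverse should
# reproduce the input, so the maximum error should be at most a small rounding
# error (the result above was clipped and cast to uint8).
max_error = np.max(np.abs(image.astype(np.float64) - reconstructed_image_full.astype(np.float64)))
print(f'Maximum absolute reconstruction error: {max_error:.4f}')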

# Experiment: Retaining Approximation Coefficients


def reconstruct_with_retained_approximation(coeffs, retain_size):
    cA, (cH, cV, cD) = coeffs[0], coeffs[1]
    cA_retained = np.zeros_like(cA)
    cA_retained[:retain_size, :retain_size] = cA[:retain_size, :retain_size]
    # Only the coarsest level is passed to waverec2, so the level-1 details are
    # dropped and the reconstruction comes back at half the original resolution
    return reconstruct_image((cA_retained, (np.zeros_like(cH), np.zeros_like(cV), np.zeros_like(cD))))

# Experiment with retaining the top-left 50% (per dimension) of the approximation
# coefficients; after two Haar levels cA is 64x64, so half of it is a 32x32 block
retained_image = reconstruct_with_retained_approximation(coeffs, coeffs[0].shape[0] // 2)

# Visualize retained approximation image


plt.figure(figsize=(8, 8))
plt.imshow(retained_image, cmap='gray')
plt.title('Reconstructed Image (Retaining 50% of Approximation Coefficients)')
plt.axis('off')
plt.show()

# Save retained approximation output


cv2.imwrite('retained_approximation_image.jpg', np.clip(retained_image, 0, 255).astype(np.uint8))
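
# Rough storage bookkeeping (a sketch): with two Haar levels on a 256x256 image,
# cA is 64x64, so keeping a 32x32 corner stores 1024 values against 65536 pixels.
retain_size = coeffs[0].shape[0] // 2
kept_coefficients = retain_size ** 2
total_pixels = image.shape[0] * image.shape[1]
print(f'Retained coefficients: {kept_coefficients} of {total_pixels} pixels '
      f'({100.0 * kept_coefficients / total_pixels:.2f}%)')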

# Optional: Hybrid Transform (DCT + DWT)


def hybrid_transform(image):
    # Apply DWT
    coeffs = dwt_2d(image)
    cA, (cH, cV, cD) = coeffs[0], coeffs[1]

    # Apply DCT to approximation coefficients
    cA_dct = dct(dct(cA.T, norm='ortho').T, norm='ortho')

    # Retain top 10% DCT coefficients
    flattened_dct = np.abs(cA_dct).flatten()
    threshold = np.percentile(flattened_dct, 90)
    mask = np.abs(cA_dct) >= threshold
    compressed_dct = cA_dct * mask

    # Reconstruct image
    cA_reconstructed = idct(idct(compressed_dct.T, norm='ortho').T, norm='ortho')
    hybrid_coeffs = (cA_reconstructed, (np.zeros_like(cH), np.zeros_like(cV), np.zeros_like(cD)))

    return reconstruct_image(hybrid_coeffs)

# Apply Hybrid Transform


hybrid_reconstructed_image = hybrid_transform(image)

# Visualize hybrid reconstructed image


plt.figure(figsize=(8, 8))
plt.imshow(hybrid_reconstructed_image, cmap='gray')
plt.title('Hybrid Reconstructed Image (DCT + DWT)')
plt.axis('off')
plt.show()
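
# Side-by-side look (a minimal sketch): the hybrid result comes back at half the
# original resolution because the finest-level detail subbands were dropped from
# the coefficient list, so upsample it (here with cv2.resize, bilinear, an added
# step) before comparing against the original.
hybrid_upsampled = cv2.resize(hybrid_reconstructed_image.astype(np.float32),
                              (image.shape[1], image.shape[0]),
                              interpolation=cv2.INTER_LINEAR)

plt.figure(figsize=(12, 6))
plt.subplot(1, 2, 1)
plt.imshow(image, cmap='gray')
plt.title('Original')
plt.axis('off')

plt.subplot(1, 2, 2)
plt.imshow(hybrid_upsampled, cmap='gray')
plt.title('Hybrid (DCT + DWT), Upsampled')
plt.axis('off')

plt.tight_layout()
plt.show()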