REF3 - Histograms
IMAGE HISTOGRAMS
GRAYSCALE HISTOGRAMS
def find_color_card(image):
    """Locate the color matching card in an image via its four ArUco markers.

    Parameters
    ----------
    image : numpy.ndarray
        BGR input image (as loaded by ``cv2.imread``).

    Returns
    -------
    numpy.ndarray or None
        A top-down, perspective-corrected view of the color card, or
        ``None`` when the four ArUco markers could not be found.
    """
    # load the ArUco dictionary, grab the ArUco parameters, and
    # detect the markers in the input image
    arucoDict = cv2.aruco.Dictionary_get(cv2.aruco.DICT_ARUCO_ORIGINAL)
    arucoParams = cv2.aruco.DetectorParameters_create()
    (corners, ids, rejected) = cv2.aruco.detectMarkers(image,
        arucoDict, parameters=arucoParams)
    # try to extract the coordinates of the color correction card
    try:
        # flatten the ArUco IDs list; when no markers were detected,
        # ids is None and this raises AttributeError
        ids = ids.flatten()
        # extract the top-left marker (ID 923)
        i = np.squeeze(np.where(ids == 923))
        topLeft = np.squeeze(corners[i])[0]
        # extract the top-right marker (ID 1001)
        i = np.squeeze(np.where(ids == 1001))
        topRight = np.squeeze(corners[i])[1]
        # extract the bottom-right marker (ID 241)
        i = np.squeeze(np.where(ids == 241))
        bottomRight = np.squeeze(corners[i])[2]
        # extract the bottom-left marker (ID 1007)
        i = np.squeeze(np.where(ids == 1007))
        bottomLeft = np.squeeze(corners[i])[3]
    # a missing/duplicate marker surfaces as AttributeError (ids is None)
    # or IndexError/TypeError (empty np.where result used as an index);
    # catch only those instead of a bare except that would also swallow
    # KeyboardInterrupt and genuine bugs
    except (AttributeError, IndexError, TypeError):
        return None
    # build our list of reference points and apply a perspective
    # transform to obtain a top-down, bird's-eye view of the color
    # matching card
    cardCoords = np.array([topLeft, topRight, bottomRight, bottomLeft])
    card = four_point_transform(image, cardCoords)
    # return the color matching card to the calling function
    return card
Left: reference image. Note the shade of teal placed in the center of the card. Right:
input image. The shade of teal is brighter than the reference image. Our goal is to apply
color matching/correction to resolve this discrepancy.
Left: Detecting the color matching card in the reference image. Middle: Extracting the
color card from the input image. Right: Output after applying color matching. Notice how
the shade of teal on the right more closely resembles the shade in the reference image.
6. DETECTING LOW CONTRAST IMAGES
# import the necessary packages
from skimage.exposure import is_low_contrast
from imutils.paths import list_images
import argparse
import imutils
import cv2
# build the command-line interface and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--input", required=True,
    help="path to input directory of images")
ap.add_argument("-t", "--thresh", type=float, default=0.35,
    help="threshold for low contrast")
args = vars(ap.parse_args())
# collect the input image paths in a deterministic (sorted) order;
# sorted() accepts the generator from list_images directly
imagePaths = sorted(list_images(args["input"]))
# loop over the image paths, flagging low-contrast frames and otherwise
# outlining the largest contour (assumed to be the color correction card)
for (i, imagePath) in enumerate(imagePaths):
    # load the input image from disk, resize it, and convert it to grayscale
    print("[INFO] processing image {}/{}".format(i + 1,
        len(imagePaths)))
    image = cv2.imread(imagePath)
    # cv2.imread returns None for unreadable/corrupt files; skip the
    # file instead of crashing inside imutils.resize
    if image is None:
        print("[WARN] could not read {}, skipping".format(imagePath))
        continue
    image = imutils.resize(image, width=450)
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # blur the image slightly and perform edge detection
    blurred = cv2.GaussianBlur(gray, (5, 5), 0)
    edged = cv2.Canny(blurred, 30, 150)
    # initialize the text and color to indicate that the input image
    # is *not* low contrast
    text = "Low contrast: No"
    color = (0, 255, 0)
    # check to see if the image is low contrast
    if is_low_contrast(gray, fraction_threshold=args["thresh"]):
        # update the text and color
        text = "Low contrast: Yes"
        color = (0, 0, 255)
    # otherwise, the image is *not* low contrast, so we can continue processing it
    else:
        # find contours in the edge map and find the largest one, which we'll
        # assume is the outline of our color correction card
        cnts = cv2.findContours(edged.copy(), cv2.RETR_EXTERNAL,
            cv2.CHAIN_APPROX_SIMPLE)
        cnts = imutils.grab_contours(cnts)
        # max() raises ValueError on an empty sequence -- only draw
        # when at least one contour was found
        if cnts:
            c = max(cnts, key=cv2.contourArea)
            # draw the largest contour on the image
            cv2.drawContours(image, [c], -1, (0, 255, 0), 2)
    # draw the text on the output image
    cv2.putText(image, text, (5, 25), cv2.FONT_HERSHEY_SIMPLEX, 0.8,
        color, 2)
    # show the output image and edge map
    cv2.imshow("Image", image)
    cv2.imshow("Edge", edged)
    cv2.waitKey(0)