0% found this document useful (0 votes)
9 views

Image Detection

The code samples show how to use Google Cloud Vision API to analyze images and extract information like dominant colors, detected text, and image properties. Various functions are defined to call different Vision API methods and output the results.

Uploaded by

bruno.lopezg
Copyright
© All Rights Reserved
Available Formats
Download as PDF, TXT or read online on Scribd
0% found this document useful (0 votes)
9 views

Image Detection

The code samples show how to use Google Cloud Vision API to analyze images and extract information like dominant colors, detected text, and image properties. Various functions are defined to call different Vision API methods and output the results.

Uploaded by

bruno.lopezg
Copyright
© All Rights Reserved
Available Formats
Download as PDF, TXT or read online on Scribd
You are on page 1/ 5

In [1]:

# Standard-library and Google Cloud Vision client imports.
import os
import io
from google.cloud import vision
from google.cloud import vision_v1

# Point the Google client libraries at the service-account key file.
# NOTE(review): hard-coded absolute path — only works on this machine.
os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = (
    "/Users/brunolopezgarcia/Documents/Image detection/imagedetection-1994-ac6b7eb93bf5.json"
)

In [2]:

# Dump the public API surface of both Vision client modules to compare
# what `vision` and `vision_v1` each expose.
print(dir(vision))
# Separator: four single-space lines between the two listings.
for _ in range(4):
    print(" ")
print(dir(vision_v1))

['AddProductToProductSetRequest', 'AnnotateFileRequest', 'AnnotateFileResponse', 'Annotat


eImageRequest', 'AnnotateImageResponse', 'AsyncAnnotateFileRequest', 'AsyncAnnotateFileRe
sponse', 'AsyncBatchAnnotateFilesRequest', 'AsyncBatchAnnotateFilesResponse', 'AsyncBatch
AnnotateImagesRequest', 'AsyncBatchAnnotateImagesResponse', 'BatchAnnotateFilesRequest',
'BatchAnnotateFilesResponse', 'BatchAnnotateImagesRequest', 'BatchAnnotateImagesResponse'
, 'BatchOperationMetadata', 'Block', 'BoundingPoly', 'ColorInfo', 'CreateProductRequest',
'CreateProductSetRequest', 'CreateReferenceImageRequest', 'CropHint', 'CropHintsAnnotatio
n', 'CropHintsParams', 'DeleteProductRequest', 'DeleteProductSetRequest', 'DeleteReferenc
eImageRequest', 'DominantColorsAnnotation', 'EntityAnnotation', 'FaceAnnotation', 'Featur
e', 'GcsDestination', 'GcsSource', 'GetProductRequest', 'GetProductSetRequest', 'GetRefer
enceImageRequest', 'Image', 'ImageAnnotationContext', 'ImageAnnotatorAsyncClient', 'Image
AnnotatorClient', 'ImageContext', 'ImageProperties', 'ImageSource', 'ImportProductSetsGcs
Source', 'ImportProductSetsInputConfig', 'ImportProductSetsRequest', 'ImportProductSetsRe
sponse', 'InputConfig', 'LatLongRect', 'Likelihood', 'ListProductSetsRequest', 'ListProdu
ctSetsResponse', 'ListProductsInProductSetRequest', 'ListProductsInProductSetResponse', '
ListProductsRequest', 'ListProductsResponse', 'ListReferenceImagesRequest', 'ListReferenc
eImagesResponse', 'LocalizedObjectAnnotation', 'LocationInfo', 'NormalizedVertex', 'Opera
tionMetadata', 'OutputConfig', 'Page', 'Paragraph', 'Position', 'Product', 'ProductSearch
AsyncClient', 'ProductSearchClient', 'ProductSearchParams', 'ProductSearchResults', 'Prod
uctSet', 'ProductSetPurgeConfig', 'Property', 'PurgeProductsRequest', 'ReferenceImage', '
RemoveProductFromProductSetRequest', 'SafeSearchAnnotation', 'Symbol', 'TextAnnotation',
'TextDetectionParams', 'UpdateProductRequest', 'UpdateProductSetRequest', 'Vertex', 'WebD
etection', 'WebDetectionParams', 'Word', '__all__', '__builtins__', '__cached__', '__doc_
_', '__file__', '__loader__', '__name__', '__package__', '__path__', '__spec__', '__versi
on__', 'gapic_version', 'package_version']

['AddProductToProductSetRequest', 'AnnotateFileRequest', 'AnnotateFileResponse', 'Annotat


eImageRequest', 'AnnotateImageResponse', 'AsyncAnnotateFileRequest', 'AsyncAnnotateFileRe
sponse', 'AsyncBatchAnnotateFilesRequest', 'AsyncBatchAnnotateFilesResponse', 'AsyncBatch
AnnotateImagesRequest', 'AsyncBatchAnnotateImagesResponse', 'BatchAnnotateFilesRequest',
'BatchAnnotateFilesResponse', 'BatchAnnotateImagesRequest', 'BatchAnnotateImagesResponse'
, 'BatchOperationMetadata', 'Block', 'BoundingPoly', 'ColorInfo', 'CreateProductRequest',
'CreateProductSetRequest', 'CreateReferenceImageRequest', 'CropHint', 'CropHintsAnnotatio
n', 'CropHintsParams', 'DeleteProductRequest', 'DeleteProductSetRequest', 'DeleteReferenc
eImageRequest', 'DominantColorsAnnotation', 'EntityAnnotation', 'FaceAnnotation', 'Featur
e', 'GcsDestination', 'GcsSource', 'GetProductRequest', 'GetProductSetRequest', 'GetRefer
enceImageRequest', 'IacImageAnnotatorClient', 'Image', 'ImageAnnotationContext', 'ImageAn
notatorAsyncClient', 'ImageAnnotatorClient', 'ImageContext', 'ImageProperties', 'ImageSou
rce', 'ImportProductSetsGcsSource', 'ImportProductSetsInputConfig', 'ImportProductSetsReq
uest', 'ImportProductSetsResponse', 'InputConfig', 'LatLongRect', 'Likelihood', 'ListProd
uctSetsRequest', 'ListProductSetsResponse', 'ListProductsInProductSetRequest', 'ListProdu
ctsInProductSetResponse', 'ListProductsRequest', 'ListProductsResponse', 'ListReferenceIm
agesRequest', 'ListReferenceImagesResponse', 'LocalizedObjectAnnotation', 'LocationInfo',
'NormalizedVertex', 'OperationMetadata', 'OutputConfig', 'Page', 'Paragraph', 'Position',
'Product', 'ProductSearchAsyncClient', 'ProductSearchClient', 'ProductSearchParams', 'Pro
ductSearchResults', 'ProductSet', 'ProductSetPurgeConfig', 'Property', 'PurgeProductsRequ
est', 'ReferenceImage', 'RemoveProductFromProductSetRequest', 'SafeSearchAnnotation', 'Sy
mbol', 'TextAnnotation', 'TextDetectionParams', 'UpdateProductRequest', 'UpdateProductSet
Request', 'Vertex', 'VisionHelpers', 'WebDetection', 'WebDetectionParams', 'Word', '__all
__', '__builtins__', '__cached__', '__doc__', '__file__', '__loader__', '__name__', '__pa
ckage__', '__path__', '__spec__', '__version__', 'add_single_feature_methods', 'gapic_ver
sion', 'package_version', 'services', 'types']

In [ ]:

In [51]:
# Create an annotation client and load the sample image as raw bytes.
client = vision.ImageAnnotatorClient()
path = '/Users/brunolopezgarcia/Documents/Image detection/Time_card.jpg'
with open(path, 'rb') as fh:  # io.open is an alias of the builtin open
    content = fh.read()
image = vision.Image(content=content)

In [42]:
# Request image-property analysis for the image loaded above; `props`
# (the dominant-colors annotation) is consumed by the next cell.
response = client.image_properties(image=image)
props = response.image_properties_annotation
print('Properties of the image:')

Properties of the image:

In [43]:
# Print each dominant color's pixel fraction and its RGB components.
for info in props.dominant_colors.colors:
    print('Fraction: {} '.format(info.pixel_fraction))
    print('\t r: {} '.format(info.color.red))
    print('\t g: {} '.format(info.color.green))
    print('\t b: {} '.format(info.color.blue))

Fraction: 0.820965588092804
r: 238.0
g: 238.0
b: 238.0
Fraction: 0.1390211582183838
r: 196.0
g: 196.0
b: 196.0
Fraction: 0.03723544999957085
r: 166.0
g: 166.0
b: 166.0
Fraction: 0.002116402145475149
r: 128.0
g: 128.0
b: 128.0
Fraction: 0.00046296295477077365
r: 94.0
g: 94.0
b: 94.0
Fraction: 0.00019841270113829523
r: 15.0
g: 15.0
b: 15.0

In [ ]:

In [52]:
def detect_text(image_path):
    """Run Vision OCR on the image at *image_path*.

    Parameters
    ----------
    image_path : str
        Filesystem path of the image file to analyze.

    Returns
    -------
    Sequence of EntityAnnotation messages; presumably the first entry is
    the full detected text and the rest are individual words (matches
    the usage in later cells — confirm against the Vision API docs).
    """
    client = vision_v1.ImageAnnotatorClient()

    # BUG FIX: the original opened the module-level global `path` here,
    # silently ignoring the `image_path` argument.
    with open(image_path, 'rb') as image_file:
        content = image_file.read()

    image = vision_v1.Image(content=content)
    response = client.text_detection(image=image)

    return response.text_annotations

In [71]:
# OCR the time-card image and keep the resulting annotations.
path = '/Users/brunolopezgarcia/Documents/Image detection/Time_card.jpg'
detected_texts = detect_text(path)

In [72]:
# Bare expression: the notebook displays the second annotation
# (a single detected word, per the output below this cell).
detected_texts[1]
Out[72]:
description: "Direforw"
bounding_poly {
vertices {
x: 105
y: 118
}
vertices {
x: 138
y: 118
}
vertices {
x: 138
y: 122
}
vertices {
x: 105
y: 122
}
}

In [ ]:

In [ ]:

In [ ]:

In [ ]:

In [ ]:

In [ ]:

In [ ]:

In [ ]:

In [ ]:
In [ ]:

In [ ]:

In [ ]:

In [ ]:

In [1]:
import io
import os
import matplotlib as mpl
import matplotlib.pyplot as plt

from google.cloud import vision_v1


from google.cloud.vision_v1 import types

In [2]:
# Set up the client
# Build the Vision client directly from the service-account key file
# (instead of relying on the GOOGLE_APPLICATION_CREDENTIALS env var).
key_path = ('/Users/brunolopezgarcia/Documents/Image detection/'
            'imagedetection-1994-ac6b7eb93bf5.json')
client = vision_v1.ImageAnnotatorClient.from_service_account_file(key_path)

In [3]:
# Load the image
# Read the image bytes and wrap them in a Vision Image message.
with io.open('/Users/brunolopezgarcia/Documents/Image detection/Time_card.png', 'rb') as image_file:
    content = image_file.read()

image = types.Image(content=content)

# Axis-aligned 100x100 square anchored at the origin.
# NOTE(review): `bounding_box` is never used in the cells that follow.
corners = [(0, 0), (100, 0), (100, 100), (0, 100)]
bounding_box = types.BoundingPoly(
    vertices=[types.Vertex(x=x, y=y) for x, y in corners]
)

In [4]:
# Call the API
# Run OCR on the loaded image; `texts` is consumed by the next cell.
# NOTE(review): texts[0] appears to be the full detected text block and
# texts[1:] individual words, given how the next cell skips index 0 —
# confirm against the Vision API documentation.
response = client.text_detection(image=image)
texts = response.text_annotations

In [5]:
# Collect (x, y, word) triples for the words in the header region of
# the document, using the word 'Surefox' as a horizontal anchor.
coor=[[],[],[]]
# Advance `counter` to the first annotation whose text is 'Surefox'.
# NOTE(review): raises IndexError if 'Surefox' is never detected.
counter=0
while texts[counter].description!='Surefox':
    counter=counter+1
# Skip texts[0] (the full-text block) and examine every word.
for n in range(1,len(texts)):
    # The four corners of the word's bounding quadrilateral.
    x0=texts[n].bounding_poly.vertices[0].x
    x1=texts[n].bounding_poly.vertices[1].x
    x2=texts[n].bounding_poly.vertices[2].x
    x3=texts[n].bounding_poly.vertices[3].x
    y0=texts[n].bounding_poly.vertices[0].y
    y1=texts[n].bounding_poly.vertices[1].y
    y2=texts[n].bounding_poly.vertices[2].y
    y3=texts[n].bounding_poly.vertices[3].y
    # Parameter along the vertex1->vertex3 diagonal where it crosses
    # the vertex0->vertex2 diagonal, so x1+s*(x3-x1), y1+s*(y3-y1)
    # appears to be the quad's center point — TODO confirm.
    # NOTE(review): divides by zero for degenerate (collinear) quads.
    s=((y0-y1)*(x2-x0)-(x0-x1)*(y2-y0))/((y3-y1)*(x2-x0)-(x3-x1)*(y2-y0))
    # Keep words whose center is left of the 'Surefox' anchor's first
    # vertex and above a horizontal cutoff one third up from the bottom
    # of the full-text bounding box (presumably the header band).
    if x1+(s*(x3-x1))<=texts[counter].bounding_poly.vertices[0].x and y1+(s*(y3-y1))<=texts[0].bounding_poly.vertices[3].y - (texts[0].bounding_poly.vertices[3].y - texts[0].bounding_poly.vertices[0].y)/3:
        coor[0].append(x1+(s*(x3-x1)))
        coor[1].append(y1+(s*(y3-y1)))
        coor[2].append(texts[n].description)

In [ ]:

In [281]:
# Render the collected words at their image positions.  y is negated
# because image coordinates grow downward while Matplotlib's y-axis
# grows upward.
plt.figure(figsize=(15, 6))
for i in range(len(coor[0])):
    # Invisible plot call: extends the axes limits to include the point
    # so the subsequent text label is not clipped.
    plt.plot(coor[0][i],-coor[1][i],visible=False)
    plt.text(coor[0][i],-coor[1][i],coor[2][i],fontsize=8.5,horizontalalignment='center',verticalalignment='center')
plt.show()

In [ ]:

In [ ]:

In [ ]:

In [ ]:

In [ ]:

You might also like