Image Detection
import os
import io
from google.cloud import vision
from google.cloud import vision_v1

# Point the Google client libraries at the service-account key file.
# The export had wrapped this path across two lines, breaking the string
# literal; it is rejoined here.
# NOTE(review): hard-coded absolute path — consider supplying it via the
# environment or a config file instead of baking it into the notebook.
os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = (
    "/Users/brunolopezgarcia/Documents/Image detection/"
    "imagedetection-1994-ac6b7eb93bf5.json"
)
In [2]:
# Dump the public attribute listings of both vision client modules,
# separated by four spacer lines for readability.
print(dir(vision))
for _ in range(4):
    print(" ")
print(dir(vision_v1))
In [ ]:
In [51]:
# Create an annotation client and load the target image from disk.
# The export had dropped the indentation of the `with` body; restored here.
client = vision.ImageAnnotatorClient()
path = '/Users/brunolopezgarcia/Documents/Image detection/Time_card.jpg'
with io.open(path, 'rb') as image_file:
    content = image_file.read()
image = vision.Image(content=content)
In [42]:
# Request image-property annotations (dominant colors etc.) for the image
# loaded above.
response = client.image_properties(image=image)
props = response.image_properties_annotation
print('Properties of the image:')
In [43]:
# Report each dominant color with its pixel fraction and RGB components.
# The export had dropped the loop-body indentation; restored here.
for color in props.dominant_colors.colors:
    print('Fraction: {} '.format(color.pixel_fraction))
    print('\t r: {} '.format(color.color.red))
    print('\t g: {} '.format(color.color.green))
    print('\t b: {} '.format(color.color.blue))
Fraction: 0.820965588092804
r: 238.0
g: 238.0
b: 238.0
Fraction: 0.1390211582183838
r: 196.0
g: 196.0
b: 196.0
Fraction: 0.03723544999957085
r: 166.0
g: 166.0
b: 166.0
Fraction: 0.002116402145475149
r: 128.0
g: 128.0
b: 128.0
Fraction: 0.00046296295477077365
r: 94.0
g: 94.0
b: 94.0
Fraction: 0.00019841270113829523
r: 15.0
g: 15.0
b: 15.0
In [ ]:
In [52]:
def detect_text(image_path):
    """Run Google Cloud Vision text detection on the image at *image_path*.

    Parameters
    ----------
    image_path : str
        Path to the image file to annotate.

    Returns
    -------
    Sequence of text annotations from the API response; element 0 is the
    full detected text, subsequent elements are individual words.
    """
    client = vision_v1.ImageAnnotatorClient()
    # BUG FIX: the original ignored ``image_path`` and silently reused the
    # module-level ``content`` bytes read in an earlier cell; read the
    # requested file instead.
    with io.open(image_path, 'rb') as image_file:
        content = image_file.read()
    image = vision_v1.Image(content=content)
    response = client.text_detection(image=image)
    texts = response.text_annotations
    return texts
In [71]:
# Run the text detector on the sample time-card image.
path = '/Users/brunolopezgarcia/Documents/Image detection/Time_card.jpg'
detected_texts = detect_text(path)
In [72]:
# Inspect the first individual word annotation (index 0 is the full text).
detected_texts[1]
Out[72]:
description: "Direforw"
bounding_poly {
vertices {
x: 105
y: 118
}
vertices {
x: 138
y: 118
}
vertices {
x: 138
y: 122
}
vertices {
x: 105
y: 122
}
}
In [ ]:
In [ ]:
In [ ]:
In [ ]:
In [ ]:
In [ ]:
In [ ]:
In [ ]:
In [ ]:
In [ ]:
In [ ]:
In [ ]:
In [ ]:
In [1]:
import io
import os
import matplotlib as mpl
import matplotlib.pyplot as plt
In [2]:
# Set up the client
# Build the client directly from the service-account key file.  The export
# had wrapped the path across two lines, breaking the string literal.
# NOTE(review): ``vision_v1`` is not imported in this session's import cell
# above — confirm it is imported before this cell runs.
client = vision_v1.ImageAnnotatorClient.from_service_account_file(
    '/Users/brunolopezgarcia/Documents/Image detection/'
    'imagedetection-1994-ac6b7eb93bf5.json'
)
In [3]:
# Load the image.  The export had fused the ``with`` header and its body
# onto one wrapped line; restored here.
with io.open('/Users/brunolopezgarcia/Documents/Image detection/Time_card.png',
             'rb') as image_file:
    content = image_file.read()
# BUG FIX: ``types`` was never imported in this session (NameError at
# runtime); build the request messages through ``vision_v1``, consistent
# with the rest of this notebook.
image = vision_v1.Image(content=content)

# Define a bounding box covering the top-left 100x100-pixel square.
# NOTE(review): this box is constructed but never passed to the
# ``text_detection`` call below — confirm whether it was meant to crop.
bounding_box = vision_v1.BoundingPoly(
    vertices=[
        vision_v1.Vertex(x=0, y=0),
        vision_v1.Vertex(x=100, y=0),
        vision_v1.Vertex(x=100, y=100),
        vision_v1.Vertex(x=0, y=100),
    ]
)
In [4]:
# Call the API
# Run OCR on the loaded image; ``texts[0]`` is the full detected text and
# the remaining elements are per-word annotations with bounding polygons.
response = client.text_detection(image=image)
texts = response.text_annotations
In [5]:
# Collect the center point and text of every detected word that lies to
# the left of the anchor word 'Surefox' and above the lower third of the
# full-text bounding box.  The export had wrapped the filter condition
# across three lines and dropped all indentation; reconstructed here.
coor = [[], [], []]  # [x centers, y centers, word descriptions]

# Find the index of the anchor word 'Surefox'.
# NOTE(review): raises IndexError if 'Surefox' is never detected — confirm
# the word is always present on the scanned form.
counter = 0
while texts[counter].description != 'Surefox':
    counter = counter + 1

for n in range(1, len(texts)):
    verts = texts[n].bounding_poly.vertices
    x0, x1, x2, x3 = verts[0].x, verts[1].x, verts[2].x, verts[3].x
    y0, y1, y2, y3 = verts[0].y, verts[1].y, verts[2].y, verts[3].y

    # Intersection parameter of the quadrilateral's diagonals, used to
    # locate the word's center point (cx, cy).
    s = (((y0 - y1) * (x2 - x0) - (x0 - x1) * (y2 - y0))
         / ((y3 - y1) * (x2 - x0) - (x3 - x1) * (y2 - y0)))
    cx = x1 + s * (x3 - x1)
    cy = y1 + s * (y3 - y1)

    # Page extent comes from the full-text annotation (texts[0]).
    page_top = texts[0].bounding_poly.vertices[0].y
    page_bottom = texts[0].bounding_poly.vertices[3].y

    # Keep words left of the anchor word and above the lower third.
    if (cx <= texts[counter].bounding_poly.vertices[0].x
            and cy <= page_bottom - (page_bottom - page_top) / 3):
        coor[0].append(cx)
        coor[1].append(cy)
        coor[2].append(texts[n].description)
In [ ]:
In [281]:
# Re-draw the kept words at their detected positions to reconstruct the
# document layout.  The export had wrapped the ``plt.text`` call across
# two lines; rejoined here.
plt.figure(figsize=(15, 6))
for i in range(len(coor[0])):
    # The invisible point keeps the axes auto-scaled to the text positions;
    # y is negated so the image's top-down coordinates plot upright.
    plt.plot(coor[0][i], -coor[1][i], visible=False)
    plt.text(coor[0][i], -coor[1][i], coor[2][i], fontsize=8.5,
             horizontalalignment='center', verticalalignment='center')
plt.show()
In [ ]:
In [ ]:
In [ ]:
In [ ]:
In [ ]: