
facedetection

June 5, 2024

1. Setup and Get Data


1.1 Install Dependencies and Setup

[1]: !pip install labelme tensorflow tensorflow-gpu opencv-python matplotlib albumentations

Collecting labelme
  Downloading labelme-5.4.1.tar.gz (1.4 MB)
  Installing build dependencies: finished with status 'done'
  Getting requirements to build wheel: finished with status 'done'
  Preparing metadata (pyproject.toml): finished with status 'done'
Collecting tensorflow
  Downloading tensorflow-2.16.1-cp311-cp311-win_amd64.whl.metadata (3.5 kB)
Collecting tensorflow-gpu
  Downloading tensorflow-gpu-2.12.0.tar.gz (2.6 kB)
  Installing build dependencies: finished with status 'done'
  Getting requirements to build wheel: finished with status 'error'
  error: subprocess-exited-with-error

  × Getting requirements to build wheel did not run successfully.
  │ exit code: 1
  ╰─> [58 lines of output]
      Traceback (most recent call last):
        ...
      setuptools.extern.packaging.requirements.InvalidRequirement: Expected end
      or semicolon (after name and no valid version specifier)
          python_version>"3.7"
                        ^
      [end of output]

  note: This error originates from a subprocess, and is likely not a problem
  with pip.
error: subprocess-exited-with-error

× Getting requirements to build wheel did not run successfully.
│ exit code: 1
╰─> See above for output.

note: This error originates from a subprocess, and is likely not a problem with
pip.
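The failure above comes from the `tensorflow-gpu` package: it was discontinued on PyPI in late 2022 because GPU support now ships inside `tensorflow` itself, and the remaining sdist deliberately fails to build. Dropping it from the command should install cleanly (suggested fix, not part of the original run):

[ ]: !pip install labelme tensorflow opencv-python matplotlib albumentations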

1.2 Collect Images Using OpenCV

[ ]: import os
import time
import uuid
import cv2

[ ]: IMAGES_PATH = os.path.join('data','images')
number_images = 30

[ ]: cap = cv2.VideoCapture(1)   # device index 1; use 0 if you only have one camera
     for imgnum in range(number_images):
         print('Collecting image {}'.format(imgnum))
         ret, frame = cap.read()
         imgname = os.path.join(IMAGES_PATH, f'{str(uuid.uuid1())}.jpg')
         cv2.imwrite(imgname, frame)
         cv2.imshow('frame', frame)
         time.sleep(0.5)

         if cv2.waitKey(1) & 0xFF == ord('q'):
             break
     cap.release()
     cv2.destroyAllWindows()

1.3 Annotate Images with LabelMe

[ ]: !labelme

2. Review Dataset and Build Image Loading Function


2.1 Import TF and Deps

[ ]: import tensorflow as tf
import json
import numpy as np
from matplotlib import pyplot as plt

2.2 Limit GPU Memory Growth

[ ]: # Avoid OOM errors by setting GPU memory consumption growth
     gpus = tf.config.experimental.list_physical_devices('GPU')
     for gpu in gpus:
         tf.config.experimental.set_memory_growth(gpu, True)

[ ]: tf.config.list_physical_devices('GPU')

2.3 Load Image into TF Data Pipeline

[ ]: images = tf.data.Dataset.list_files('data\\images\\*.jpg')

[ ]: images.as_numpy_iterator().next()

[ ]: def load_image(x):
         byte_img = tf.io.read_file(x)
         img = tf.io.decode_jpeg(byte_img)
         return img

[ ]: images = images.map(load_image)

[ ]: images.as_numpy_iterator().next()

[ ]: type(images)

2.4 View Raw Images with Matplotlib

[ ]: image_generator = images.batch(4).as_numpy_iterator()

[ ]: plot_images = image_generator.next()

[ ]: fig, ax = plt.subplots(ncols=4, figsize=(20,20))
     for idx, image in enumerate(plot_images):
         ax[idx].imshow(image)
     plt.show()

3. Partition Unaugmented Data


3.1 Manually Split Data into Train, Test, and Val
[ ]: 90*.7 # 63 to train

[ ]: 90*.15 # 14 and 13 to test and val
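The tutorial performs this split by hand in the file explorer. For reference, a scripted version might look like the cell below (hypothetical helper, not in the original notebook; it assumes the train/test/val image folders already exist under data):

[ ]: # Hypothetical split script -- assumes data/{train,test,val}/images exist
     import os, random
     files = os.listdir(os.path.join('data', 'images'))
     random.shuffle(files)
     splits = {'train': files[:63], 'test': files[63:77], 'val': files[77:]}
     for folder, names in splits.items():
         for name in names:
             os.replace(os.path.join('data', 'images', name),
                        os.path.join('data', folder, 'images', name))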

3.2 Move the Matching Labels

[ ]: for folder in ['train','test','val']:
         for file in os.listdir(os.path.join('data', folder, 'images')):
             filename = file.split('.')[0] + '.json'
             existing_filepath = os.path.join('data', 'labels', filename)
             if os.path.exists(existing_filepath):
                 new_filepath = os.path.join('data', folder, 'labels', filename)
                 os.replace(existing_filepath, new_filepath)

4. Apply Image Augmentation on Images and Labels using Albumentations

4.1 Setup Albumentations Transform Pipeline

[ ]: import albumentations as alb

[ ]: augmentor = alb.Compose([alb.RandomCrop(width=450, height=450),
                              alb.HorizontalFlip(p=0.5),
                              alb.RandomBrightnessContrast(p=0.2),
                              alb.RandomGamma(p=0.2),
                              alb.RGBShift(p=0.2),
                              alb.VerticalFlip(p=0.5)],
                             bbox_params=alb.BboxParams(format='albumentations',
                                                        label_fields=['class_labels']))

4.2 Load a Test Image and Annotation with OpenCV and JSON

[ ]: img = cv2.imread(os.path.join('data', 'train', 'images',
                                   'ffd85fc5-cc1a-11ec-bfb8-a0cec8d2d278.jpg'))

[ ]: with open(os.path.join('data', 'train', 'labels',
                            'ffd85fc5-cc1a-11ec-bfb8-a0cec8d2d278.json'), 'r') as f:
         label = json.load(f)

[ ]: label['shapes'][0]['points']

4.3 Extract Coordinates and Rescale to Match Image Resolution

[ ]: coords = [0,0,0,0]
coords[0] = label['shapes'][0]['points'][0][0]
coords[1] = label['shapes'][0]['points'][0][1]
coords[2] = label['shapes'][0]['points'][1][0]
coords[3] = label['shapes'][0]['points'][1][1]

[ ]: coords

[ ]: coords = list(np.divide(coords, [640,480,640,480]))

[ ]: coords
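Dividing by [640,480,640,480] assumes the webcam captures at 640x480. The result is the normalized [x_min, y_min, x_max, y_max] layout that Albumentations' 'albumentations' bbox format expects, with every value in [0, 1].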

4.4 Apply Augmentations and View Results

[ ]: augmented = augmentor(image=img, bboxes=[coords], class_labels=['face'])

[ ]: augmented['bboxes'][0][2:]

[ ]: augmented['bboxes']

[ ]: cv2.rectangle(augmented['image'],
                   tuple(np.multiply(augmented['bboxes'][0][:2], [450,450]).astype(int)),
                   tuple(np.multiply(augmented['bboxes'][0][2:], [450,450]).astype(int)),
                   (255,0,0), 2)

     plt.imshow(augmented['image'])

5. Build and Run Augmentation Pipeline

5.1 Run Augmentation Pipeline
[ ]: for partition in ['train','test','val']:
         for image in os.listdir(os.path.join('data', partition, 'images')):
             img = cv2.imread(os.path.join('data', partition, 'images', image))

             coords = [0,0,0.00001,0.00001]
             label_path = os.path.join('data', partition, 'labels',
                                       f'{image.split(".")[0]}.json')
             if os.path.exists(label_path):
                 with open(label_path, 'r') as f:
                     label = json.load(f)

                 coords[0] = label['shapes'][0]['points'][0][0]
                 coords[1] = label['shapes'][0]['points'][0][1]
                 coords[2] = label['shapes'][0]['points'][1][0]
                 coords[3] = label['shapes'][0]['points'][1][1]
                 coords = list(np.divide(coords, [640,480,640,480]))

             try:
                 # Generate 60 augmented samples per source image
                 for x in range(60):
                     augmented = augmentor(image=img, bboxes=[coords],
                                           class_labels=['face'])
                     cv2.imwrite(os.path.join('aug_data', partition, 'images',
                                              f'{image.split(".")[0]}.{x}.jpg'),
                                 augmented['image'])

                     annotation = {}
                     annotation['image'] = image

                     if os.path.exists(label_path):
                         if len(augmented['bboxes']) == 0:
                             annotation['bbox'] = [0,0,0,0]
                             annotation['class'] = 0
                         else:
                             annotation['bbox'] = augmented['bboxes'][0]
                             annotation['class'] = 1
                     else:
                         annotation['bbox'] = [0,0,0,0]
                         annotation['class'] = 0

                     with open(os.path.join('aug_data', partition, 'labels',
                                            f'{image.split(".")[0]}.{x}.json'), 'w') as f:
                         json.dump(annotation, f)

             except Exception as e:
                 print(e)

5.2 Load Augmented Images to Tensorflow Dataset

[ ]: train_images = tf.data.Dataset.list_files('aug_data\\train\\images\\*.jpg',
                                               shuffle=False)
     train_images = train_images.map(load_image)
     train_images = train_images.map(lambda x: tf.image.resize(x, (120,120)))
     train_images = train_images.map(lambda x: x/255)

[ ]: test_images = tf.data.Dataset.list_files('aug_data\\test\\images\\*.jpg',
                                              shuffle=False)
     test_images = test_images.map(load_image)
     test_images = test_images.map(lambda x: tf.image.resize(x, (120,120)))
     test_images = test_images.map(lambda x: x/255)

[ ]: val_images = tf.data.Dataset.list_files('aug_data\\val\\images\\*.jpg',
                                             shuffle=False)
     val_images = val_images.map(load_image)
     val_images = val_images.map(lambda x: tf.image.resize(x, (120,120)))
     val_images = val_images.map(lambda x: x/255)

[ ]: train_images.as_numpy_iterator().next()

6. Prepare Labels

6.1 Build Label Loading Function

[ ]: def load_labels(label_path):
         with open(label_path.numpy(), 'r', encoding="utf-8") as f:
             label = json.load(f)
         return [label['class']], label['bbox']
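Because this function uses plain Python file I/O and json.load, it cannot run inside a traced tf.data graph; the next cells therefore wrap it in tf.py_function and declare the output dtypes explicitly.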

6.2 Load Labels to Tensorflow Dataset


[ ]: train_labels = tf.data.Dataset.list_files('aug_data\\train\\labels\\*.json',
                                               shuffle=False)
     train_labels = train_labels.map(lambda x: tf.py_function(load_labels, [x],
                                                              [tf.uint8, tf.float16]))

[ ]: test_labels = tf.data.Dataset.list_files('aug_data\\test\\labels\\*.json',
                                              shuffle=False)
     test_labels = test_labels.map(lambda x: tf.py_function(load_labels, [x],
                                                            [tf.uint8, tf.float16]))

[ ]: val_labels = tf.data.Dataset.list_files('aug_data\\val\\labels\\*.json',
                                             shuffle=False)
     val_labels = val_labels.map(lambda x: tf.py_function(load_labels, [x],
                                                          [tf.uint8, tf.float16]))

[ ]: train_labels.as_numpy_iterator().next()

7. Combine Label and Image Samples

7.1 Check Partition Lengths

[ ]: len(train_images), len(train_labels), len(test_images), len(test_labels), len(val_images), len(val_labels)

7.2 Create Final Datasets (Images/Labels)

[ ]: train = tf.data.Dataset.zip((train_images, train_labels))
     train = train.shuffle(5000)
     train = train.batch(8)
     train = train.prefetch(4)

[ ]: test = tf.data.Dataset.zip((test_images, test_labels))
     test = test.shuffle(1300)
     test = test.batch(8)
     test = test.prefetch(4)

[ ]: val = tf.data.Dataset.zip((val_images, val_labels))
     val = val.shuffle(1000)
     val = val.batch(8)
     val = val.prefetch(4)

[ ]: train.as_numpy_iterator().next()[1]

7.3 View Images and Annotations

[ ]: data_samples = train.as_numpy_iterator()

[ ]: res = data_samples.next()

[ ]: fig, ax = plt.subplots(ncols=4, figsize=(20,20))
     for idx in range(4):
         sample_image = res[0][idx]
         sample_coords = res[1][1][idx]

         cv2.rectangle(sample_image,
                       tuple(np.multiply(sample_coords[:2], [120,120]).astype(int)),
                       tuple(np.multiply(sample_coords[2:], [120,120]).astype(int)),
                       (255,0,0), 2)

         ax[idx].imshow(sample_image)

8. Build Deep Learning Model using the Functional API

8.1 Import Layers and Base Network

[ ]: from tensorflow.keras.models import Model
     from tensorflow.keras.layers import Input, Conv2D, Dense, GlobalMaxPooling2D
     from tensorflow.keras.applications import VGG16

8.2 Download VGG16


[ ]: vgg = VGG16(include_top=False)

[ ]: vgg.summary()

8.3 Build Instance of Network


[ ]: def build_model():
         input_layer = Input(shape=(120,120,3))

         vgg = VGG16(include_top=False)(input_layer)

         # Classification model
         f1 = GlobalMaxPooling2D()(vgg)
         class1 = Dense(2048, activation='relu')(f1)
         class2 = Dense(1, activation='sigmoid')(class1)

         # Bounding box model
         f2 = GlobalMaxPooling2D()(vgg)
         regress1 = Dense(2048, activation='relu')(f2)
         regress2 = Dense(4, activation='sigmoid')(regress1)

         facetracker = Model(inputs=input_layer, outputs=[class2, regress2])
         return facetracker
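Both heads branch off the same VGG16 feature map: one Dense stack ends in a single sigmoid unit for face/no-face, the other in four sigmoid units so the predicted box coordinates stay in [0, 1], matching the normalized labels prepared earlier.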

8.4 Test out Neural Network
[ ]: facetracker = build_model()

[ ]: facetracker.summary()

[ ]: X, y = train.as_numpy_iterator().next()

[ ]: X.shape

[ ]: classes, coords = facetracker.predict(X)

[ ]: classes, coords

9. Define Losses and Optimizers

9.1 Define Optimizer and LR

[ ]: batches_per_epoch = len(train)
lr_decay = (1./0.75 -1)/batches_per_epoch

[ ]: opt = tf.keras.optimizers.Adam(learning_rate=0.0001, decay=lr_decay)
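Note that newer Keras releases (Keras 3 / TF 2.16, the version collected above) dropped the `decay` argument from `Adam`. A roughly equivalent replacement, offered here as an untested sketch, is an inverse-time schedule:

[ ]: # Sketch for newer Keras: same per-epoch decay as lr_decay above
     lr_schedule = tf.keras.optimizers.schedules.InverseTimeDecay(
         initial_learning_rate=0.0001,
         decay_steps=batches_per_epoch,
         decay_rate=1./0.75 - 1)
     opt = tf.keras.optimizers.Adam(learning_rate=lr_schedule)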

9.2 Create Localization Loss and Classification Loss


[ ]: def localization_loss(y_true, yhat):
         delta_coord = tf.reduce_sum(tf.square(y_true[:,:2] - yhat[:,:2]))

         h_true = y_true[:,3] - y_true[:,1]
         w_true = y_true[:,2] - y_true[:,0]

         h_pred = yhat[:,3] - yhat[:,1]
         w_pred = yhat[:,2] - yhat[:,0]

         delta_size = tf.reduce_sum(tf.square(w_true - w_pred) + tf.square(h_true - h_pred))

         return delta_coord + delta_size
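In words: the loss sums the squared error of the top-left corner (delta_coord) with the squared error of the derived box width and height (delta_size), over the batch. A perfect prediction therefore scores exactly zero.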

[ ]: classloss = tf.keras.losses.BinaryCrossentropy()
regressloss = localization_loss

9.3 Test out Loss Metrics


[ ]: localization_loss(y[1], coords)

[ ]: classloss(y[0], classes)

[ ]: regressloss(y[1], coords)

10. Train Neural Network

10.1 Create Custom Model Class
[ ]: class FaceTracker(Model):
         def __init__(self, facetracker, **kwargs):
             super().__init__(**kwargs)
             self.model = facetracker

         def compile(self, opt, classloss, localizationloss, **kwargs):
             super().compile(**kwargs)
             self.closs = classloss
             self.lloss = localizationloss
             self.opt = opt

         def train_step(self, batch, **kwargs):
             X, y = batch

             with tf.GradientTape() as tape:
                 classes, coords = self.model(X, training=True)

                 batch_classloss = self.closs(y[0], classes)
                 batch_localizationloss = self.lloss(tf.cast(y[1], tf.float32), coords)

                 # Classification is weighted at half the localization loss
                 total_loss = batch_localizationloss + 0.5*batch_classloss

                 grad = tape.gradient(total_loss, self.model.trainable_variables)

             self.opt.apply_gradients(zip(grad, self.model.trainable_variables))

             return {"total_loss": total_loss, "class_loss": batch_classloss,
                     "regress_loss": batch_localizationloss}

         def test_step(self, batch, **kwargs):
             X, y = batch

             classes, coords = self.model(X, training=False)

             batch_classloss = self.closs(y[0], classes)
             batch_localizationloss = self.lloss(tf.cast(y[1], tf.float32), coords)
             total_loss = batch_localizationloss + 0.5*batch_classloss

             return {"total_loss": total_loss, "class_loss": batch_classloss,
                     "regress_loss": batch_localizationloss}

         def call(self, X, **kwargs):
             return self.model(X, **kwargs)

[ ]: model = FaceTracker(facetracker)

[ ]: model.compile(opt, classloss, regressloss)

10.2 Train


[ ]: logdir='logs'

[ ]: tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=logdir)

[ ]: hist = model.fit(train, epochs=10, validation_data=val,
                      callbacks=[tensorboard_callback])

10.3 Plot Performance


[ ]: hist.history

[ ]: fig, ax = plt.subplots(ncols=3, figsize=(20,5))

     ax[0].plot(hist.history['total_loss'], color='teal', label='loss')
     ax[0].plot(hist.history['val_total_loss'], color='orange', label='val loss')
     ax[0].title.set_text('Loss')
     ax[0].legend()

     ax[1].plot(hist.history['class_loss'], color='teal', label='class loss')
     ax[1].plot(hist.history['val_class_loss'], color='orange', label='val class loss')
     ax[1].title.set_text('Classification Loss')
     ax[1].legend()

     ax[2].plot(hist.history['regress_loss'], color='teal', label='regress loss')
     ax[2].plot(hist.history['val_regress_loss'], color='orange', label='val regress loss')
     ax[2].title.set_text('Regression Loss')
     ax[2].legend()

     plt.show()

11. Make Predictions

11.1 Make Predictions on Test Set
[ ]: test_data = test.as_numpy_iterator()

[ ]: test_sample = test_data.next()

[ ]: yhat = facetracker.predict(test_sample[0])

[ ]: fig, ax = plt.subplots(ncols=4, figsize=(20,20))
     for idx in range(4):
         sample_image = test_sample[0][idx]
         sample_coords = yhat[1][idx]

         if yhat[0][idx] > 0.9:
             cv2.rectangle(sample_image,
                           tuple(np.multiply(sample_coords[:2], [120,120]).astype(int)),
                           tuple(np.multiply(sample_coords[2:], [120,120]).astype(int)),
                           (255,0,0), 2)

         ax[idx].imshow(sample_image)

11.2 Save the Model


[ ]: from tensorflow.keras.models import load_model

[ ]: facetracker.save('facetracker.h5')

[ ]: facetracker = load_model('facetracker.h5')
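With the Keras 3 / TF 2.16 stack installed above, saving to HDF5 (`.h5`) still works but is treated as a legacy format; the native format, e.g. facetracker.save('facetracker.keras'), may be the safer choice going forward.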

11.3 Real Time Detection


[ ]: cap = cv2.VideoCapture(1)   # device index 1; use 0 if you only have one camera
     while cap.isOpened():
         _, frame = cap.read()
         frame = frame[50:500, 50:500, :]   # crop to a 450x450 region

         rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
         resized = tf.image.resize(rgb, (120,120))

         yhat = facetracker.predict(np.expand_dims(resized/255, 0))
         sample_coords = yhat[1][0]

         if yhat[0] > 0.5:
             # Controls the main rectangle
             cv2.rectangle(frame,
                           tuple(np.multiply(sample_coords[:2], [450,450]).astype(int)),
                           tuple(np.multiply(sample_coords[2:], [450,450]).astype(int)),
                           (255,0,0), 2)
             # Controls the label rectangle
             cv2.rectangle(frame,
                           tuple(np.add(np.multiply(sample_coords[:2], [450,450]).astype(int),
                                        [0,-30])),
                           tuple(np.add(np.multiply(sample_coords[:2], [450,450]).astype(int),
                                        [80,0])),
                           (255,0,0), -1)
             # Controls the text rendered
             cv2.putText(frame, 'face',
                         tuple(np.add(np.multiply(sample_coords[:2], [450,450]).astype(int),
                                      [0,-5])),
                         cv2.FONT_HERSHEY_SIMPLEX, 1, (255,255,255), 2, cv2.LINE_AA)

         cv2.imshow('FaceTrack', frame)

         if cv2.waitKey(1) & 0xFF == ord('q'):
             break
     cap.release()
     cv2.destroyAllWindows()
