Facedetection
June 5, 2024
Collecting labelme
Downloading labelme-5.4.1.tar.gz (1.4 MB)
---------------------------------------- 1.4/1.4 MB 2.2 MB/s eta 0:00:00
Installing build dependencies: started
Installing build dependencies: finished with status 'done'
Getting requirements to build wheel: started
Getting requirements to build wheel: finished with status 'done'
Installing backend dependencies: started
Installing backend dependencies: finished with status 'done'
Preparing metadata (pyproject.toml): started
Preparing metadata (pyproject.toml): finished with status 'done'
Collecting tensorflow
Downloading tensorflow-2.16.1-cp311-cp311-win_amd64.whl.metadata (3.5 kB)
Collecting tensorflow-gpu
Downloading tensorflow-gpu-2.12.0.tar.gz (2.6 kB)
Installing build dependencies: started
Installing build dependencies: finished with status 'done'
Getting requirements to build wheel: started
Getting requirements to build wheel: finished with status 'error'
  error: subprocess-exited-with-error

      The above exception was the direct cause of the following exception:

      Traceback (most recent call last):
        File "C:\Program Files\WindowsApps\PythonSoftwareFoundation.Python.3.11_3.11.2544.0_x64__qbz5n2kfra8p0\Lib\site-packages\pip\_vendor\pyproject_hooks\_in_process\_in_process.py", line 353, in <module>
          main()
        File "C:\Program Files\WindowsApps\PythonSoftwareFoundation.Python.3.11_3.11.2544.0_x64__qbz5n2kfra8p0\Lib\site-packages\pip\_vendor\pyproject_hooks\_in_process\_in_process.py", line 335, in main
          json_out['return_val'] = hook(**hook_input['kwargs'])
        File "C:\Program Files\WindowsApps\PythonSoftwareFoundation.Python.3.11_3.11.2544.0_x64__qbz5n2kfra8p0\Lib\site-packages\pip\_vendor\pyproject_hooks\_in_process\_in_process.py", line 118, in get_requires_for_build_wheel
          return hook(config_settings)
        File "C:\Users\asus\AppData\Local\Temp\pip-build-env-lugze6ke\overlay\Lib\site-packages\setuptools\build_meta.py", line 325, in get_requires_for_build_wheel
          return self._get_build_requires(config_settings, requirements=['wheel'])
        File "C:\Users\asus\AppData\Local\Temp\pip-build-env-lugze6ke\overlay\Lib\site-packages\setuptools\build_meta.py", line 295, in _get_build_requires
          self.run_setup()
        File "C:\Users\asus\AppData\Local\Temp\pip-build-env-lugze6ke\overlay\Lib\site-packages\setuptools\build_meta.py", line 487, in run_setup
          super().run_setup(setup_script=setup_script)
        File "C:\Users\asus\AppData\Local\Temp\pip-build-env-lugze6ke\overlay\Lib\site-packages\setuptools\build_meta.py", line 311, in run_setup
          exec(code, locals())
        File "<string>", line 40, in <module>
        File "C:\Users\asus\AppData\Local\Temp\pip-build-env-lugze6ke\overlay\Lib\site-packages\setuptools\__init__.py", line 102, in setup
          _install_setup_requires(attrs)
        File "C:\Users\asus\AppData\Local\Temp\pip-build-env-lugze6ke\overlay\Lib\site-packages\setuptools\__init__.py", line 73, in _install_setup_requires
          dist.parse_config_files(ignore_option_errors=True)
        File "C:\Users\asus\AppData\Local\Temp\pip-build-env-lugze6ke\overlay\Lib\site-packages\setuptools\dist.py", line 634, in parse_config_files
          self._finalize_requires()
        File "C:\Users\asus\AppData\Local\Temp\pip-build-env-lugze6ke\overlay\Lib\site-packages\setuptools\dist.py", line 368, in _finalize_requires
          self._normalize_requires()
        File "C:\Users\asus\AppData\Local\Temp\pip-build-env-lugze6ke\overlay\Lib\site-packages\setuptools\dist.py", line 383, in _normalize_requires
          self.install_requires = list(map(str, _reqs.parse(install_requires)))
        File "C:\Users\asus\AppData\Local\Temp\pip-build-env-lugze6ke\overlay\Lib\site-packages\setuptools\_vendor\packaging\requirements.py", line 37, in __init__
          raise InvalidRequirement(str(e)) from e
      setuptools.extern.packaging.requirements.InvalidRequirement: Expected end or semicolon (after name and no valid version specifier)
          python_version>"3.7"
                        ^
      [end of output]

  note: This error originates from a subprocess, and is likely not a problem with pip.
error: subprocess-exited-with-error
note: This error originates from a subprocess, and is likely not a problem with pip.
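The build failure comes from tensorflow-gpu 2.12.0's packaging metadata (the invalid requirement python_version>"3.7" shown above). The separate tensorflow-gpu package is deprecated in favour of the plain tensorflow package, so one workaround is simply to drop it from the install, for example:

[ ]: !pip install labelme tensorflow opencv-python matplotlib albumentations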
[ ]: import os
import time
import uuid
import cv2
[ ]: IMAGES_PATH = os.path.join('data','images')
number_images = 30
[ ]: cap = cv2.VideoCapture(1)
     for imgnum in range(number_images):
         print('Collecting image {}'.format(imgnum))
         ret, frame = cap.read()
         imgname = os.path.join(IMAGES_PATH, f'{str(uuid.uuid1())}.jpg')
         cv2.imwrite(imgname, frame)
         cv2.imshow('frame', frame)
         time.sleep(0.5)

         # Press 'q' to stop collecting early
         if cv2.waitKey(1) & 0xFF == ord('q'):
             break
     cap.release()
     cv2.destroyAllWindows()
[ ]: !labelme
[ ]: import tensorflow as tf
import json
import numpy as np
from matplotlib import pyplot as plt
[ ]: tf.config.list_physical_devices('GPU')
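Not part of the surviving cells, but a common companion step: if a GPU is listed, enabling memory growth keeps TensorFlow from reserving all of the GPU memory up front during the training loop below.

[ ]: gpus = tf.config.list_physical_devices('GPU')
     for gpu in gpus:
         # Allocate GPU memory on demand instead of grabbing it all at start-up
         tf.config.experimental.set_memory_growth(gpu, True)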
[ ]: images = tf.data.Dataset.list_files('data\\images\\*.jpg')
[ ]: images.as_numpy_iterator().next()
[ ]: def load_image(x):
         byte_img = tf.io.read_file(x)
         img = tf.io.decode_jpeg(byte_img)
         return img
[ ]: images = images.map(load_image)
[ ]: images.as_numpy_iterator().next()
[ ]: type(images)
2.0.4 2.4 View Raw Images with Matplotlib
[ ]: image_generator = images.batch(4).as_numpy_iterator()
[ ]: plot_images = image_generator.next()
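The plotting cell for this section is not shown; a minimal sketch that displays the four images in plot_images:

[ ]: fig, ax = plt.subplots(ncols=4, figsize=(20, 20))
     for idx, image in enumerate(plot_images):
         ax[idx].imshow(image)
     plt.show()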
[ ]: for folder in ['train','test','val']:
         for file in os.listdir(os.path.join('data', folder, 'images')):
             filename = file.split('.')[0]+'.json'
             existing_filepath = os.path.join('data','labels', filename)
             if os.path.exists(existing_filepath):
                 new_filepath = os.path.join('data',folder,'labels',filename)
                 os.replace(existing_filepath, new_filepath)
4.0.2 4.2 Load a Test Image and Annotation with OpenCV and JSON
[ ]: img = cv2.imread(os.path.join('data','train','images','ffd85fc5-cc1a-11ec-bfb8-a0cec8d2d278.jpg'))

[ ]: # Load the matching labelme annotation (filename inferred from the image name)
     with open(os.path.join('data','train','labels','ffd85fc5-cc1a-11ec-bfb8-a0cec8d2d278.json'), 'r') as f:
         label = json.load(f)
[ ]: label['shapes'][0]['points']
[ ]: coords = [0,0,0,0]
coords[0] = label['shapes'][0]['points'][0][0]
coords[1] = label['shapes'][0]['points'][0][1]
coords[2] = label['shapes'][0]['points'][1][0]
coords[3] = label['shapes'][0]['points'][1][1]
[ ]: coords
[ ]: coords
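augmented in the next cells comes from an Albumentations pipeline whose defining cell is not shown. Below is a sketch of a plausible setup applied to the image and box loaded above; the alb alias, the particular transforms and the 450x450 crop size are assumptions (the crop size matches the [450,450] scaling used in the drawing code below).

[ ]: import albumentations as alb

     augmentor = alb.Compose([alb.RandomCrop(width=450, height=450),
                              alb.HorizontalFlip(p=0.5),
                              alb.RandomBrightnessContrast(p=0.2)],
                             bbox_params=alb.BboxParams(format='albumentations',
                                                        label_fields=['class_labels']))

     # The 'albumentations' bbox format expects corner coordinates scaled into [0, 1]
     coords = list(np.divide(coords, [640, 480, 640, 480]))
     augmented = augmentor(image=img, bboxes=[coords], class_labels=['face'])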
[ ]: augmented['bboxes'][0][2:]
[ ]: augmented['bboxes']
[ ]: cv2.rectangle(augmented['image'],
                   tuple(np.multiply(augmented['bboxes'][0][:2], [450,450]).astype(int)),
                   tuple(np.multiply(augmented['bboxes'][0][2:], [450,450]).astype(int)),
                   (255,0,0), 2)
     plt.imshow(augmented['image'])
5 5. Build and Run Augmentation Pipeline
5.0.1 5.1 Run Augmentation Pipeline
[ ]: for partition in ['train','test','val']:
         for image in os.listdir(os.path.join('data', partition, 'images')):
             img = cv2.imread(os.path.join('data', partition, 'images', image))

             coords = [0,0,0.00001,0.00001]
             label_path = os.path.join('data', partition, 'labels', f'{image.split(".")[0]}.json')
             if os.path.exists(label_path):
                 with open(label_path, 'r') as f:
                     label = json.load(f)

                 coords[0] = label['shapes'][0]['points'][0][0]
                 coords[1] = label['shapes'][0]['points'][0][1]
                 coords[2] = label['shapes'][0]['points'][1][0]
                 coords[3] = label['shapes'][0]['points'][1][1]
                 coords = list(np.divide(coords, [640,480,640,480]))

             try:
                 for x in range(60):
                     augmented = augmentor(image=img, bboxes=[coords], class_labels=['face'])
                     # Write the augmented image so it can be loaded from aug_data below
                     # (filename pattern chosen to keep image/label pairs aligned)
                     cv2.imwrite(os.path.join('aug_data', partition, 'images',
                                              f'{image.split(".")[0]}.{x}.jpg'),
                                 augmented['image'])

                     annotation = {}
                     annotation['image'] = image

                     if os.path.exists(label_path):
                         if len(augmented['bboxes']) == 0:
                             annotation['bbox'] = [0,0,0,0]
                             annotation['class'] = 0
                         else:
                             annotation['bbox'] = augmented['bboxes'][0]
                             annotation['class'] = 1
                     else:
                         annotation['bbox'] = [0,0,0,0]
                         annotation['class'] = 0

                     # Write the matching label JSON alongside the augmented image
                     with open(os.path.join('aug_data', partition, 'labels',
                                            f'{image.split(".")[0]}.{x}.json'), 'w') as f:
                         json.dump(annotation, f)

             except Exception as e:
                 print(e)
[ ]: train_images = tf.data.Dataset.list_files('aug_data\\train\\images\\*.jpg', shuffle=False)
     train_images = train_images.map(load_image)
     train_images = train_images.map(lambda x: tf.image.resize(x, (120,120)))
     train_images = train_images.map(lambda x: x/255)

[ ]: test_images = tf.data.Dataset.list_files('aug_data\\test\\images\\*.jpg', shuffle=False)
     test_images = test_images.map(load_image)
     test_images = test_images.map(lambda x: tf.image.resize(x, (120,120)))
     test_images = test_images.map(lambda x: x/255)

[ ]: val_images = tf.data.Dataset.list_files('aug_data\\val\\images\\*.jpg', shuffle=False)
     val_images = val_images.map(load_image)
     val_images = val_images.map(lambda x: tf.image.resize(x, (120,120)))
     val_images = val_images.map(lambda x: x/255)
[ ]: train_images.as_numpy_iterator().next()
6 6. Prepare Labels
6.0.1 6.1 Build Label Loading Function
[ ]: def load_labels(label_path):
         with open(label_path.numpy(), 'r', encoding = "utf-8") as f:
             label = json.load(f)
         return [label['class']], label['bbox']
[ ]: test_labels = tf.data.Dataset.list_files('aug_data\\test\\labels\\*.json', shuffle=False)
     test_labels = test_labels.map(lambda x: tf.py_function(load_labels, [x], [tf.uint8, tf.float16]))

[ ]: val_labels = tf.data.Dataset.list_files('aug_data\\val\\labels\\*.json', shuffle=False)
     val_labels = val_labels.map(lambda x: tf.py_function(load_labels, [x], [tf.uint8, tf.float16]))
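train_labels is consumed in the next cell but its defining cell is not shown; presumably it mirrors the test and validation pipelines:

[ ]: train_labels = tf.data.Dataset.list_files('aug_data\\train\\labels\\*.json', shuffle=False)
     train_labels = train_labels.map(lambda x: tf.py_function(load_labels, [x], [tf.uint8, tf.float16]))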
[ ]: train_labels.as_numpy_iterator().next()
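train, test and val are used from this point on, but the cell that builds them is not shown. A sketch that zips each image dataset with its labels, then shuffles, batches and prefetches (the buffer sizes and the batch size of 8 are assumptions; the plotting loops below expect at least four samples per batch):

[ ]: train = tf.data.Dataset.zip((train_images, train_labels))
     train = train.shuffle(5000)
     train = train.batch(8)
     train = train.prefetch(4)

     test = tf.data.Dataset.zip((test_images, test_labels))
     test = test.shuffle(1500)
     test = test.batch(8)
     test = test.prefetch(4)

     val = tf.data.Dataset.zip((val_images, val_labels))
     val = val.shuffle(1000)
     val = val.batch(8)
     val = val.prefetch(4)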
[ ]: train.as_numpy_iterator().next()[1]
[ ]: data_samples = train.as_numpy_iterator()
[ ]: res = data_samples.next()
[ ]: fig, ax = plt.subplots(ncols=4, figsize=(20,20))
     for idx in range(4):
         sample_image = res[0][idx].copy()
         sample_coords = res[1][1][idx]

         cv2.rectangle(sample_image,
                       tuple(np.multiply(sample_coords[:2], [120,120]).astype(int)),
                       tuple(np.multiply(sample_coords[2:], [120,120]).astype(int)),
                       (255,0,0), 2)

         ax[idx].imshow(sample_image)
[ ]: vgg.summary()

[ ]: vgg = VGG16(include_top=False)(input_layer)

     # Classification Model
     f1 = GlobalMaxPooling2D()(vgg)
     class1 = Dense(2048, activation='relu')(f1)
     class2 = Dense(1, activation='sigmoid')(class1)
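Only the classification head of the model-builder cell survives above, and the layers it uses are not imported anywhere in what remains. A self-contained sketch of a two-headed builder consistent with how facetracker, classes and coords are used below (the 4-unit sigmoid regression head and its width are assumptions):

[ ]: from tensorflow.keras.models import Model
     from tensorflow.keras.layers import Input, Dense, GlobalMaxPooling2D
     from tensorflow.keras.applications import VGG16

     def build_model():
         input_layer = Input(shape=(120,120,3))
         vgg = VGG16(include_top=False)(input_layer)

         # Classification head: face / no face
         f1 = GlobalMaxPooling2D()(vgg)
         class1 = Dense(2048, activation='relu')(f1)
         class2 = Dense(1, activation='sigmoid')(class1)

         # Bounding-box regression head (assumed to mirror the classification head)
         f2 = GlobalMaxPooling2D()(vgg)
         regress1 = Dense(2048, activation='relu')(f2)
         regress2 = Dense(4, activation='sigmoid')(regress1)

         return Model(inputs=input_layer, outputs=[class2, regress2])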
8.0.4 8.4 Test out Neural Network
[ ]: facetracker = build_model()
[ ]: facetracker.summary()
[ ]: X, y = train.as_numpy_iterator().next()
[ ]: X.shape
[ ]: classes, coords = facetracker.predict(X)

[ ]: classes, coords
[ ]: batches_per_epoch = len(train)
lr_decay = (1./0.75 -1)/batches_per_epoch
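Neither the optimizer nor localization_loss survives in the export; the sketches below are assumptions consistent with how they are used (lr_decay feeding an inverse-time learning-rate schedule for Adam, and a box loss combining corner error with width/height error).

[ ]: # Adam with inverse-time decay reproduces the effect of the old Keras `decay` argument
     # (the optimizer choice and the 1e-4 base learning rate are assumptions)
     lr_schedule = tf.keras.optimizers.schedules.InverseTimeDecay(
         initial_learning_rate=0.0001, decay_steps=1, decay_rate=lr_decay)
     opt = tf.keras.optimizers.Adam(learning_rate=lr_schedule)

[ ]: # One plausible localization loss: squared error on the top-left corner plus
     # squared error on the predicted box width and height
     def localization_loss(y_true, yhat):
         delta_coord = tf.reduce_sum(tf.square(y_true[:, :2] - yhat[:, :2]))

         h_true = y_true[:, 3] - y_true[:, 1]
         w_true = y_true[:, 2] - y_true[:, 0]
         h_pred = yhat[:, 3] - yhat[:, 1]
         w_pred = yhat[:, 2] - yhat[:, 0]

         delta_size = tf.reduce_sum(tf.square(w_true - w_pred) + tf.square(h_true - h_pred))
         return delta_coord + delta_size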
[ ]: classloss = tf.keras.losses.BinaryCrossentropy()
regressloss = localization_loss
[ ]: classloss(y[0], classes)
[ ]: regressloss(y[1], coords)
X, y = batch
total_loss = batch_localizationloss+0.5*batch_classloss
opt.apply_gradients(zip(grad, self.model.trainable_variables))
total_loss = batch_localizationloss+0.5*batch_classloss
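FaceTracker is instantiated in the next cell, but only a few lines of its definition survive above. A self-contained sketch of a custom Model subclass consistent with those fragments (class loss weighted by 0.5, gradients applied with the optimizer defined earlier):

[ ]: from tensorflow.keras.models import Model

     class FaceTracker(Model):
         def __init__(self, facetracker, **kwargs):
             super().__init__(**kwargs)
             self.model = facetracker

         def compile(self, opt, classloss, localizationloss, **kwargs):
             super().compile(**kwargs)
             self.closs = classloss
             self.lloss = localizationloss
             self.opt = opt

         def train_step(self, batch, **kwargs):
             X, y = batch
             with tf.GradientTape() as tape:
                 classes, coords = self.model(X, training=True)
                 batch_classloss = self.closs(y[0], classes)
                 batch_localizationloss = self.lloss(tf.cast(y[1], tf.float32), coords)
                 total_loss = batch_localizationloss + 0.5*batch_classloss
             grad = tape.gradient(total_loss, self.model.trainable_variables)
             self.opt.apply_gradients(zip(grad, self.model.trainable_variables))
             return {"total_loss": total_loss, "class_loss": batch_classloss,
                     "regress_loss": batch_localizationloss}

         def test_step(self, batch, **kwargs):
             X, y = batch
             classes, coords = self.model(X, training=False)
             batch_classloss = self.closs(y[0], classes)
             batch_localizationloss = self.lloss(tf.cast(y[1], tf.float32), coords)
             total_loss = batch_localizationloss + 0.5*batch_classloss
             return {"total_loss": total_loss, "class_loss": batch_classloss,
                     "regress_loss": batch_localizationloss}

         def call(self, X, **kwargs):
             # Delegate inference to the wrapped two-headed network
             return self.model(X, **kwargs)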
[ ]: model = FaceTracker(facetracker)

[ ]: logdir = 'logs'  # log directory name assumed; its defining cell is not shown
     tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=logdir)
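The compile-and-fit cell is not shown. A sketch that wires the optimizer, the two losses and the TensorBoard callback together (the epoch count of 10 is an assumption), producing a training history that the loss plots below presumably draw from:

[ ]: model.compile(opt, classloss, regressloss)

[ ]: hist = model.fit(train, epochs=10, validation_data=val,
                      callbacks=[tensorboard_callback])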
ax[1].title.set_text('Classification Loss')
ax[1].legend()
ax[2].title.set_text('Regression Loss')
ax[2].legend()
plt.show()
11 11. Make Predictions
11.0.1 11.1 Make Predictions on Test Set
[ ]: test_data = test.as_numpy_iterator()
[ ]: test_sample = test_data.next()
[ ]: yhat = facetracker.predict(test_sample[0])
                  tuple(np.multiply(sample_coords[2:], [120,120]).astype(int)),
                  (255,0,0), 2)
ax[idx].imshow(sample_image)
[ ]: facetracker.save('facetracker.h5')

[ ]: from tensorflow.keras.models import load_model
     facetracker = load_model('facetracker.h5')
[ ]: yhat = facetracker.predict(np.expand_dims(resized/255, 0))
     sample_coords = yhat[1][0]

     # Controls the main rectangle
     cv2.rectangle(frame,
                   tuple(np.multiply(sample_coords[:2], [450,450]).astype(int)),
                   tuple(np.multiply(sample_coords[2:], [450,450]).astype(int)),
                   (255,0,0), 2)
     # Controls the label rectangle
     cv2.rectangle(frame,
                   tuple(np.add(np.multiply(sample_coords[:2], [450,450]).astype(int), [0,-30])),
                   tuple(np.add(np.multiply(sample_coords[:2], [450,450]).astype(int), [80,0])),
                   (255,0,0), -1)
     # Controls the text rendered
     cv2.putText(frame, 'face',
                 tuple(np.add(np.multiply(sample_coords[:2], [450,450]).astype(int), [0,-5])),
                 cv2.FONT_HERSHEY_SIMPLEX, 1, (255,255,255), 2, cv2.LINE_AA)

     cv2.imshow('EyeTrack', frame)
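The cell above contains only the interior of the real-time detection loop; the capture, crop, resize and exit handling are missing. A self-contained sketch of the whole loop, in which the camera index, the crop region and the 0.5 confidence threshold are assumptions chosen to match the [450,450] scaling above:

[ ]: cap = cv2.VideoCapture(0)                      # camera index is an assumption
     while cap.isOpened():
         ret, frame = cap.read()
         if not ret:
             break
         frame = frame[50:500, 50:500, :]           # crop roughly matching the [450,450] scaling

         rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
         resized = tf.image.resize(rgb, (120,120))

         yhat = facetracker.predict(np.expand_dims(resized/255, 0))
         sample_coords = yhat[1][0]

         if yhat[0] > 0.5:
             # Main detection rectangle
             cv2.rectangle(frame,
                           tuple(np.multiply(sample_coords[:2], [450,450]).astype(int)),
                           tuple(np.multiply(sample_coords[2:], [450,450]).astype(int)),
                           (255,0,0), 2)
             # Label background and text
             cv2.rectangle(frame,
                           tuple(np.add(np.multiply(sample_coords[:2], [450,450]).astype(int), [0,-30])),
                           tuple(np.add(np.multiply(sample_coords[:2], [450,450]).astype(int), [80,0])),
                           (255,0,0), -1)
             cv2.putText(frame, 'face',
                         tuple(np.add(np.multiply(sample_coords[:2], [450,450]).astype(int), [0,-5])),
                         cv2.FONT_HERSHEY_SIMPLEX, 1, (255,255,255), 2, cv2.LINE_AA)

         cv2.imshow('EyeTrack', frame)
         if cv2.waitKey(1) & 0xFF == ord('q'):
             break

     cap.release()
     cv2.destroyAllWindows()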
[ ]: