import sys
sys.path.append('./resources/libraries')
import os
import tensorflow as tf
from tensorflow.keras.optimizers.legacy import Adam
from tensorflow.keras.applications import MobileNetV2
from tensorflow.keras.layers import BatchNormalization, Conv2D, Softmax, Reshape
from tensorflow.keras.models import Model
from ei_tensorflow.constrained_object_detection import models, dataset, metrics, util
from ei_tensorflow.velo import train_keras_model_with_velo
from ei_shared.pretrained_weights import get_or_download_pretrained_weights
import ei_tensorflow.training

WEIGHTS_PREFIX = os.environ.get('WEIGHTS_PREFIX', os.getcwd())
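
# build_model is called below but its definition did not survive in this
# excerpt; only the "Returns: Uncompiled keras model." line of its docstring
# remains. The following is a minimal sketch, assuming the usual FOMO-style
# construction (a MobileNetV2 trunk cut at 1/8 input resolution with a small
# 1x1-convolution head). The cut layer name, head width, and include_top
# setting are assumptions, not taken from the original.
def build_model(input_shape: tuple, weights: str, alpha: float,
                num_classes: int) -> tf.keras.Model:
    """Construct a constrained object detection model.

    Returns:
        Uncompiled keras model.
    """
    # MobileNetV2 trunk; `weights` may be None or a path to pretrained weights.
    mobile_net_v2 = MobileNetV2(input_shape=input_shape, weights=weights,
                                alpha=alpha, include_top=False)
    # Default batch norm momentum is tuned for huge networks; speed up
    # adaptation on this comparatively small one.
    for layer in mobile_net_v2.layers:
        if isinstance(layer, BatchNormalization):
            layer.momentum = 0.9
    # Cut the trunk where it reaches 1/8th of the input resolution
    # ('block_6_expand_relu' is the assumed cut point).
    cut_point = mobile_net_v2.get_layer('block_6_expand_relu')
    # Attach a small head that emits per-cell class logits.
    head = Conv2D(filters=32, kernel_size=1, strides=1,
                  activation='relu', name='head')(cut_point.output)
    logits = Conv2D(filters=num_classes, kernel_size=1, strides=1,
                    activation=None, name='logits')(head)
    return Model(inputs=mobile_net_v2.input, outputs=logits)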

def train(num_classes: int, learning_rate: float, num_epochs: int,
          alpha: float, object_weight: float,
          train_dataset: tf.data.Dataset,
          validation_dataset: tf.data.Dataset,
          best_model_path: str,
          input_shape: tuple,
          batch_size: int,
          use_velo: bool,
          ensure_determinism: bool) -> tf.keras.Model:
    """Construct and train a constrained object detection model.

    Args:
        num_classes: Number of classes in datasets. This does not include
            the implied background class introduced by segmentation map
            dataset conversion.
        learning_rate: Learning rate for Adam.
        num_epochs: Number of epochs passed to model.fit.
        alpha: Alpha used to construct MobileNet. Pretrained weights will be
            used if there is a matching set.
        object_weight: The weighting to give the object in the loss function,
            where background has an implied weight of 1.0.
        train_dataset: Training dataset of (x, (bbox, one_hot_y)).
        validation_dataset: Validation dataset of (x, (bbox, one_hot_y)).
        best_model_path: Location to save the best model. Note: weights
            will be restored from this path based on the best val_f1 score.
        input_shape: The shape of the model's input.
        batch_size: Training batch size.
        ensure_determinism: If true, functions that may be non-deterministic
            are disabled (e.g. autotuning prefetch). This should be true in
            test environments.

    Returns:
        Trained keras model.

    Constructs a new constrained object detection model with num_classes + 1
    outputs (denoting the classes, with an implied background class of 0).
    Both training and validation datasets are adapted from
    (x, (bbox, one_hot_y)) to (x, segmentation_map). The model is trained
    with a custom weighted cross entropy function.
    """
    # `callbacks` is defined in an enclosing scope (this function is evidently
    # nested inside the generated training script, or `nonlocal` would not be
    # valid here).
    nonlocal callbacks

    num_classes_with_background = num_classes + 1
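
    # `weights` is used below but never assigned in this excerpt. A plausible
    # reconstruction, assuming get_or_download_pretrained_weights (imported
    # above) takes the weights cache prefix, the channel count, alpha, and the
    # alpha/channel combinations that have pretrained weights available; the
    # exact signature and combination list are assumptions.
    input_num_channels = input_shape[2]
    allowed_combinations = [{'num_channels': 1, 'alpha': 0.1},
                            {'num_channels': 1, 'alpha': 0.35},
                            {'num_channels': 3, 'alpha': 0.1},
                            {'num_channels': 3, 'alpha': 0.35}]
    weights = get_or_download_pretrained_weights(
        WEIGHTS_PREFIX, input_num_channels, alpha, allowed_combinations)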
    model = build_model(
        input_shape=input_shape,
        weights=weights,
        alpha=alpha,
        num_classes=num_classes_with_background
    )
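
    # output_width_height, weighted_xent, prefetch_policy and the two
    # segmentation datasets are used below but not defined in this excerpt.
    # A sketch of the missing definitions, assuming the bbox-to-segmentation-map
    # conversion and the weighted cross entropy helper live in the imported
    # `dataset` and `models` modules (both helper names are assumptions):

    # FOMO output is square; derive its width/height from the model itself.
    output_width_height = model.output.shape[1]

    # Custom weighted cross entropy: object cells weighted by object_weight,
    # background by an implied 1.0 (see docstring).
    weighted_xent = models.construct_weighted_xent_fn(model.output.shape,
                                                      object_weight)

    # Autotuned prefetch is non-deterministic; pin it when determinism is required.
    prefetch_policy = 1 if ensure_determinism else tf.data.AUTOTUNE

    def as_segmentation_dataset(ds, shuffle):
        # Adapt (x, (bbox, one_hot_y)) to (x, segmentation_map).
        ds = ds.map(dataset.bbox_to_segmentation(output_width_height,
                                                 num_classes_with_background))
        if shuffle and not ensure_determinism:
            ds = ds.shuffle(buffer_size=batch_size * 4)
        return ds.batch(batch_size, drop_remainder=False).prefetch(prefetch_policy)

    train_segmentation_dataset = as_segmentation_dataset(train_dataset,
                                                         shuffle=True)
    validation_segmentation_dataset = as_segmentation_dataset(validation_dataset,
                                                              shuffle=False)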

    validation_dataset_for_callback = (validation_dataset
                                       .batch(batch_size, drop_remainder=False)
                                       .prefetch(prefetch_policy))
    if not use_velo:
        model.compile(loss=weighted_xent,
                      optimizer=Adam(learning_rate=learning_rate))

    callbacks.append(metrics.CentroidScoring(validation_dataset_for_callback,
                                             output_width_height,
                                             num_classes_with_background))
    callbacks.append(metrics.PrintPercentageTrained(num_epochs))
    # Save the best model, by val_f1, so its weights can be restored later.
    callbacks.append(
        tf.keras.callbacks.ModelCheckpoint(best_model_path,
                                           monitor='val_f1', save_best_only=True,
                                           mode='max', save_weights_only=True,
                                           verbose=0))
    if use_velo:
        from tensorflow.python.framework.errors_impl import ResourceExhaustedError
        try:
            train_keras_model_with_velo(
                model,
                train_segmentation_dataset,
                validation_segmentation_dataset,
                loss_fn=weighted_xent,
                num_epochs=num_epochs,
                callbacks=callbacks
            )
        except ResourceExhaustedError as e:
            print(str(e))
            raise Exception(
                "ResourceExhaustedError caught during train_keras_model_with_velo."
                " Though VeLO encourages a large batch size, the current"
                f" size of {batch_size} may be too large. Please try a lower"
                " value. For further assistance please contact support"
                " at https://forum.edgeimpulse.com/")
    else:
        model.fit(train_segmentation_dataset,
                  validation_data=validation_segmentation_dataset,
                  epochs=num_epochs, callbacks=callbacks, verbose=0)
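
    # The docstring promises that weights are restored from best_model_path
    # based on the best val_f1 score, but the restore step is missing from this
    # excerpt. A minimal sketch, assuming the ModelCheckpoint above has written
    # at least one set of weights:
    model.load_weights(best_model_path)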

    return model
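
# The hyperparameters below fall back to defaults when not set in the UI.
# `args`, `classes`, `train_dataset`, `validation_dataset`, BEST_MODEL_PATH,
# MODEL_INPUT_SHAPE and `ensure_determinism` are assumed to be provided by the
# surrounding generated training script.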
EPOCHS = args.epochs or 80
LEARNING_RATE = args.learning_rate or 0.0035
BATCH_SIZE = args.batch_size or 16
model = train(num_classes=classes,
              learning_rate=LEARNING_RATE,
              num_epochs=EPOCHS,
              alpha=0.35,
              object_weight=100,
              train_dataset=train_dataset,
              validation_dataset=validation_dataset,
              best_model_path=BEST_MODEL_PATH,
              input_shape=MODEL_INPUT_SHAPE,
              batch_size=BATCH_SIZE,
              use_velo=False,
              ensure_determinism=ensure_determinism)
disable_per_channel_quantization = False