Keras - Functional API
Source: https://keras.io/guides/functional_api/
import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
The Keras functional API is a way to create models that are more flexible than the tf.keras.Sequential API. The functional API can handle models with non-linear topology, shared layers, and even multiple inputs or outputs. The main idea is that a deep learning model is usually a directed acyclic graph (DAG) of layers, so the functional API is a way to build graphs of layers.

Consider a simple model: an input of 784-dimensional vectors, two Dense layers with 64 units and relu activation, and a final Dense layer producing 10 logits, one per class. To build this model with the functional API, start by creating an input node:
inputs = keras.Input(shape=(784,))
The shape of the data is set as a 784-dimensional vector. The batch size is always omitted; you only specify the shape of each sample. If, for example, you had an image input with a shape of (32, 32, 3), you would use keras.Input(shape=(32, 32, 3)) instead.
The inputs object that is returned carries information about the shape and dtype of the input data that you will feed to your model. Here's the shape:
inputs.shape
TensorShape([None, 784])
And here's the dtype:

inputs.dtype
tf.float32
You create a new node in the graph of layers by calling a layer on this inputs object:
dense = layers.Dense(64, activation="relu")
x = dense(inputs)

The "layer call" action is like drawing an arrow from inputs to the layer you created: you pass the inputs to the dense layer, and you get x back as the output. Let's add a few more layers to the graph of layers:
x = layers.Dense(64, activation="relu")(x)
outputs = layers.Dense(10)(x)
At this point, you can create a Model by specifying its inputs and outputs in the graph of layers:

model = keras.Model(inputs=inputs, outputs=outputs, name="mnist_model")

Let's check out what the model summary looks like:
model.summary()
Model: "mnist_model"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_1 (InputLayer) [(None, 784)] 0
=================================================================
Total params: 55,050
Trainable params: 55,050
Non-trainable params: 0
_________________________________________________________________
You can also plot the model as a graph:

keras.utils.plot_model(model, "my_first_model.png")
Training, evaluation, and inference work exactly the same way for models built with the functional API as for Sequential models. The Model class offers a built-in training loop (the fit() method) and a built-in evaluation loop (the evaluate() method). Here, load the MNIST image data, reshape it into vectors, fit the model while monitoring performance on a validation split, then evaluate the model on the test data:
model.compile(
loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True),
optimizer=keras.optimizers.RMSprop(),
metrics=["accuracy"],
)
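The page excerpt omits the data preparation and the training/evaluation calls that produced the log below. A sketch consistent with that log (750 steps per epoch implies 48,000 training samples at batch size 64 with a 20% validation split, and the evaluation runs over the 10,000 test images with verbose=2):

# Load MNIST, flatten the images into 784-dimensional vectors, scale to [0, 1]
(x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data()
x_train = x_train.reshape(60000, 784).astype("float32") / 255
x_test = x_test.reshape(10000, 784).astype("float32") / 255

history = model.fit(x_train, y_train, batch_size=64, epochs=2, validation_split=0.2)

test_scores = model.evaluate(x_test, y_test, verbose=2)
print("Test loss:", test_scores[0])
print("Test accuracy:", test_scores[1])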
Epoch 1/2
750/750 [==============================] - 2s 2ms/step - loss: 0.3435 - accuracy: 0.9026 -
val_loss: 0.1797 - val_accuracy: 0.9507
Epoch 2/2
750/750 [==============================] - 1s 2ms/step - loss: 0.1562 - accuracy: 0.9539 -
val_loss: 0.1307 - val_accuracy: 0.9603
313/313 - 0s - loss: 0.1305 - accuracy: 0.9609 - 248ms/epoch - 793us/step
Test loss: 0.1305118203163147
Test accuracy: 0.9609000086784363
Saving and serialization work the same way for models built with the functional API as they do for Sequential models. The standard way to save a functional model is to call model.save() to save the entire model as a single file. You can later recreate the same model from this file, even if the code that built the model is no longer available. This saved file includes the model architecture, the model weight values (learned during training), the model training config, if any (as passed to compile), and the optimizer and its state, if any (so you can restart training where you left off).
model.save("path_to_my_model")
del model
# Recreate the exact same model purely from the file:
model = keras.models.load_model("path_to_my_model")
In the functional API, models are created by specifying their inputs and outputs in a graph of layers. That means a single graph of layers can be used to generate multiple models. In the example below, the same stack of layers is used to instantiate two models: an encoder model that turns image inputs into 16-dimensional vectors, and an end-to-end autoencoder model for training.
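The encoder half of this example is missing from the excerpt. The stack below is consistent with the summaries shown further down (the input is named "img" and the encoder has 18,672 parameters in total):

encoder_input = keras.Input(shape=(28, 28, 1), name="img")
x = layers.Conv2D(16, 3, activation="relu")(encoder_input)
x = layers.Conv2D(32, 3, activation="relu")(x)
x = layers.MaxPooling2D(3)(x)
x = layers.Conv2D(32, 3, activation="relu")(x)
x = layers.Conv2D(16, 3, activation="relu")(x)
encoder_output = layers.GlobalMaxPooling2D()(x)

encoder = keras.Model(encoder_input, encoder_output, name="encoder")
encoder.summary()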
x = layers.Reshape((4, 4, 1))(encoder_output)
x = layers.Conv2DTranspose(16, 3, activation="relu")(x)
x = layers.Conv2DTranspose(32, 3, activation="relu")(x)
x = layers.UpSampling2D(3)(x)
x = layers.Conv2DTranspose(16, 3, activation="relu")(x)
decoder_output = layers.Conv2DTranspose(1, 3, activation="relu")(x)

autoencoder = keras.Model(encoder_input, decoder_output, name="autoencoder")
autoencoder.summary()
Model: "encoder"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
img (InputLayer) [(None, 28, 28, 1)] 0
=================================================================
Total params: 18,672
Trainable params: 18,672
Non-trainable params: 0
_________________________________________________________________
Model: "autoencoder"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
img (InputLayer) [(None, 28, 28, 1)] 0
=================================================================
Total params: 28,241
Trainable params: 28,241
Non-trainable params: 0
_________________________________________________________________
Here, the decoding architecture is strictly symmetrical to the encoding architecture, so the output shape is the same as the input shape (28, 28, 1). The reverse of a Conv2D layer is a Conv2DTranspose layer, and the reverse of a MaxPooling2D layer is an UpSampling2D layer.

All models are callable, just like layers. You can treat any model as if it were a layer by invoking it on an Input or on the output of another layer. By calling a model you aren't just reusing its architecture, you are also reusing its weights. To see this in action, here's a different take on the autoencoder example that creates a standalone encoder model and a standalone decoder model, then chains them in two calls to obtain the autoencoder model.
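The code for this second example is missing from the excerpt. The version below is consistent with the three summaries that follow (inputs named "original_img", "encoded_img", and "img", with totals of 18,672, 9,569, and 28,241 parameters):

encoder_input = keras.Input(shape=(28, 28, 1), name="original_img")
x = layers.Conv2D(16, 3, activation="relu")(encoder_input)
x = layers.Conv2D(32, 3, activation="relu")(x)
x = layers.MaxPooling2D(3)(x)
x = layers.Conv2D(32, 3, activation="relu")(x)
x = layers.Conv2D(16, 3, activation="relu")(x)
encoder_output = layers.GlobalMaxPooling2D()(x)

encoder = keras.Model(encoder_input, encoder_output, name="encoder")
encoder.summary()

decoder_input = keras.Input(shape=(16,), name="encoded_img")
x = layers.Reshape((4, 4, 1))(decoder_input)
x = layers.Conv2DTranspose(16, 3, activation="relu")(x)
x = layers.Conv2DTranspose(32, 3, activation="relu")(x)
x = layers.UpSampling2D(3)(x)
x = layers.Conv2DTranspose(16, 3, activation="relu")(x)
decoder_output = layers.Conv2DTranspose(1, 3, activation="relu")(x)

decoder = keras.Model(decoder_input, decoder_output, name="decoder")
decoder.summary()

autoencoder_input = keras.Input(shape=(28, 28, 1), name="img")
encoded_img = encoder(autoencoder_input)
decoded_img = decoder(encoded_img)
autoencoder = keras.Model(autoencoder_input, decoded_img, name="autoencoder")
autoencoder.summary()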
Model: "encoder"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
original_img (InputLayer) [(None, 28, 28, 1)] 0
=================================================================
Total params: 18,672
Trainable params: 18,672
Non-trainable params: 0
_________________________________________________________________
Model: "decoder"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
encoded_img (InputLayer) [(None, 16)] 0
=================================================================
Total params: 9,569
Trainable params: 9,569
Non-trainable params: 0
_________________________________________________________________
Model: "autoencoder"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
img (InputLayer) [(None, 28, 28, 1)] 0
=================================================================
Total params: 28,241
Trainable params: 28,241
Non-trainable params: 0
_________________________________________________________________
As this example shows, models can be nested: a model can contain sub-models, since a model behaves just like a layer. A common use case for model nesting is ensembling. For example, here's how to ensemble a set of models into a single model that averages their predictions:
def get_model():
inputs = keras.Input(shape=(128,))
outputs = layers.Dense(1)(inputs)
return keras.Model(inputs, outputs)
model1 = get_model()
model2 = get_model()
model3 = get_model()
inputs = keras.Input(shape=(128,))
y1 = model1(inputs)
y2 = model2(inputs)
y3 = model3(inputs)
outputs = layers.average([y1, y2, y3])
ensemble_model = keras.Model(inputs=inputs, outputs=outputs)
The functional API makes it easy to manipulate multiple inputs and outputs, which cannot be handled with the Sequential API. For example, suppose you're building a system for ranking customer issue tickets by priority and routing them to the correct department. The model will have three inputs: the title of the ticket (text input), the text body of the ticket (text input), and any tags added by the user (categorical input). It will have two outputs: the priority score between 0 and 1 (scalar sigmoid output) and the department that should handle the ticket (softmax output over the set of departments). You can build this model in a few lines with the functional API.
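The size constants used below are omitted in the excerpt; the values here are illustrative:

num_tags = 12  # Number of unique issue tags
num_words = 10000  # Size of vocabulary obtained when preprocessing the text data
num_departments = 4  # Number of departments for predictions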
title_input = keras.Input(
shape=(None,), name="title"
) # Variable-length sequence of ints
body_input = keras.Input(shape=(None,), name="body") # Variable-length sequence of ints
tags_input = keras.Input(
shape=(num_tags,), name="tags"
) # Binary vectors of size `num_tags`
# Embed each word in the title into a 64-dimensional vector
title_features = layers.Embedding(num_words, 64)(title_input)
# Embed each word in the text into a 64-dimensional vector
body_features = layers.Embedding(num_words, 64)(body_input)

# Reduce sequence of embedded words in the title into a single 128-dimensional vector
title_features = layers.LSTM(128)(title_features)
# Reduce sequence of embedded words in the body into a single 32-dimensional vector
body_features = layers.LSTM(32)(body_features)

# Merge all available features into a single large vector via concatenation
x = layers.concatenate([title_features, body_features, tags_input])
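The two output heads and the model construction are missing from the excerpt. The layer names below match the keys used in the compile() and fit() calls that follow:

# Stick a logistic regression for priority prediction on top of the features
priority_pred = layers.Dense(1, name="priority")(x)
# Stick a department classifier on top of the features
department_pred = layers.Dense(num_departments, name="department")(x)

# Instantiate an end-to-end model predicting both priority and department
model = keras.Model(
    inputs=[title_input, body_input, tags_input],
    outputs=[priority_pred, department_pred],
)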
When compiling this model, you can assign a different loss to each output, and even give each loss a different weight to modulate its contribution to the total training loss. When the losses are passed as a list, each entry is matched to an output by position:

model.compile(
optimizer=keras.optimizers.RMSprop(1e-3),
loss=[
keras.losses.BinaryCrossentropy(from_logits=True),
keras.losses.CategoricalCrossentropy(from_logits=True),
],
loss_weights=[1.0, 0.2],
)
Since the output layers have different names, you can also specify the losses and loss weights by layer name:

model.compile(
optimizer=keras.optimizers.RMSprop(1e-3),
loss={
"priority": keras.losses.BinaryCrossentropy(from_logits=True),
"department": keras.losses.CategoricalCrossentropy(from_logits=True),
},
loss_weights={"priority": 1.0, "department": 0.2},
)
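fit() below is called with dictionaries keyed by the input and output names. The excerpt omits the data itself; dummy arrays consistent with the 40-step epochs in the log (1,280 samples at batch size 32) look like this:

# Dummy input data
title_data = np.random.randint(num_words, size=(1280, 10))
body_data = np.random.randint(num_words, size=(1280, 100))
tags_data = np.random.randint(2, size=(1280, num_tags)).astype("float32")

# Dummy target data
priority_targets = np.random.random(size=(1280, 1))
dept_targets = np.random.randint(2, size=(1280, num_departments))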
model.fit(
{"title": title_data, "body": body_data, "tags": tags_data},
{"priority": priority_targets, "department": dept_targets},
epochs=2,
batch_size=32,
)
Epoch 1/2
40/40 [==============================] - 3s 23ms/step - loss: 1.3256 - priority_loss: 0.7024 -
department_loss: 3.1160
Epoch 2/2
40/40 [==============================] - 1s 25ms/step - loss: 1.2926 - priority_loss: 0.6976 -
department_loss: 2.9749
<keras.callbacks.History at 0x1300d6110>
In addition to models with multiple inputs and outputs, the functional API makes it easy to manipulate non-linear connectivity topologies, that is, models whose layers are not connected sequentially. This is something the Sequential API cannot handle. A common use case is residual connections. Let's build a toy ResNet model for CIFAR10 to demonstrate this.
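The first part of the model is missing from the excerpt. The layer stack below is consistent with the "toy_resnet" summary shown further down (input "img" of shape (32, 32, 3) and 223,242 parameters in total):

inputs = keras.Input(shape=(32, 32, 3), name="img")
x = layers.Conv2D(32, 3, activation="relu")(inputs)
x = layers.Conv2D(64, 3, activation="relu")(x)
block_1_output = layers.MaxPooling2D(3)(x)

x = layers.Conv2D(64, 3, activation="relu", padding="same")(block_1_output)
x = layers.Conv2D(64, 3, activation="relu", padding="same")(x)
block_2_output = layers.add([x, block_1_output])

x = layers.Conv2D(64, 3, activation="relu", padding="same")(block_2_output)
x = layers.Conv2D(64, 3, activation="relu", padding="same")(x)
block_3_output = layers.add([x, block_2_output])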
x = layers.Conv2D(64, 3, activation="relu")(block_3_output)
x = layers.GlobalAveragePooling2D()(x)
x = layers.Dense(256, activation="relu")(x)
x = layers.Dropout(0.5)(x)
outputs = layers.Dense(10)(x)

model = keras.Model(inputs, outputs, name="toy_resnet")
model.summary()
Model: "toy_resnet"
__________________________________________________________________________________________________
Layer (type) Output Shape Param # Connected to
==================================================================================================
img (InputLayer) [(None, 32, 32, 3)] 0 []
==================================================================================================
Total params: 223,242
Trainable params: 223,242
Non-trainable params: 0
__________________________________________________________________________________________________
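Now train the model. The excerpt omits the CIFAR10 data preparation; a sketch consistent with the compile settings below (pixel values scaled to [0, 1] and one-hot labels for CategoricalCrossentropy):

(x_train, y_train), (x_test, y_test) = keras.datasets.cifar10.load_data()

x_train = x_train.astype("float32") / 255.0
x_test = x_test.astype("float32") / 255.0
y_train = keras.utils.to_categorical(y_train, 10)
y_test = keras.utils.to_categorical(y_test, 10)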
model.compile(
optimizer=keras.optimizers.RMSprop(1e-3),
loss=keras.losses.CategoricalCrossentropy(from_logits=True),
metrics=["acc"],
)
# We restrict the data to the first 1000 samples so as to limit execution time
# on Colab. Try to train on the entire dataset until convergence!
model.fit(x_train[:1000], y_train[:1000], batch_size=64, epochs=1, validation_split=0.2)
<keras.callbacks.History at 0x1305fee10>
Another good use of the functional API is for models that use shared layers. Shared layers are layer instances that are reused several times in the same model: they learn features that correspond to multiple paths in the graph of layers. Shared layers are often used to encode inputs that come from similar spaces (say, two different pieces of text with similar vocabulary). They make it possible to share information across these different inputs, and to train such a model on less data. To share a layer in the functional API, call the same layer instance multiple times.
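For instance, an Embedding layer can be shared across two different text inputs; a minimal sketch (the vocabulary size and embedding dimension are illustrative):

# Embedding for 1000 unique words mapped to 128-dimensional vectors
shared_embedding = layers.Embedding(1000, 128)

# Two variable-length sequences of integers
text_input_a = keras.Input(shape=(None,), dtype="int32")
text_input_b = keras.Input(shape=(None,), dtype="int32")

# Reuse the same layer to encode both inputs
encoded_input_a = shared_embedding(text_input_a)
encoded_input_b = shared_embedding(text_input_b)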
Because the graph of layers you are manipulating is a static data structure, it can be accessed and inspected, which is also how you are able to plot functional models as images. This means you can access the activations of intermediate layers ("nodes" in the graph) and reuse them elsewhere, which is very useful for feature extraction. For example, here's a VGG19 model with weights pretrained on ImageNet:

vgg19 = tf.keras.applications.VGG19()
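The intermediate activations can then be obtained by querying the graph data structure and reused to build a feature-extraction model; a sketch:

# Intermediate activations of the model, obtained by querying the graph data structure
features_list = [layer.output for layer in vgg19.layers]

# A new feature-extraction model that returns the values of the intermediate activations
feat_extraction_model = keras.Model(inputs=vgg19.input, outputs=features_list)

img = np.random.random((1, 224, 224, 3)).astype("float32")
extracted_features = feat_extraction_model(img)

This comes in handy for tasks like neural style transfer, among other things.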
tf.keras includes a wide range of built-in layers, for example convolutional layers (Conv1D, Conv2D, Conv3D, Conv2DTranspose), pooling layers, RNN layers (GRU, LSTM, ConvLSTM2D), and layers such as BatchNormalization, Dropout, and Embedding. If you don't find what you need, it's easy to extend the API by creating your own layers. All layers subclass the Layer class and implement a call method, which specifies the computation done by the layer, and a build method, which creates the weights of the layer (this is just a style convention, since you can also create weights in __init__). The following is a basic implementation of tf.keras.layers.Dense:
class CustomDense(layers.Layer):
    def __init__(self, units=32):
        super(CustomDense, self).__init__()
        self.units = units
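    # build() and call() are missing from the page excerpt; a standard completion
    # that creates the weights and applies the affine transformation:
    def build(self, input_shape):
        self.w = self.add_weight(
            shape=(input_shape[-1], self.units),
            initializer="random_normal",
            trainable=True,
        )
        self.b = self.add_weight(
            shape=(self.units,), initializer="random_normal", trainable=True
        )

    def call(self, inputs):
        return tf.matmul(inputs, self.w) + self.b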
inputs = keras.Input((4,))
outputs = CustomDense(10)(inputs)

model = keras.Model(inputs, outputs)
For serialization support in your custom layer, define a get_config() method that returns the constructor arguments of the layer instance:
class CustomDense(layers.Layer):
    def __init__(self, units=32):
        super(CustomDense, self).__init__()
        self.units = units
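    # build() and call() are the same as in the previous CustomDense sketch:
    def build(self, input_shape):
        self.w = self.add_weight(
            shape=(input_shape[-1], self.units), initializer="random_normal", trainable=True
        )
        self.b = self.add_weight(
            shape=(self.units,), initializer="random_normal", trainable=True
        )

    def call(self, inputs):
        return tf.matmul(inputs, self.w) + self.b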
    def get_config(self):
        return {"units": self.units}
inputs = keras.Input((4,))
outputs = CustomDense(10)(inputs)
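The functional model built from this layer can then be serialized to a config and rebuilt, passing the custom class through custom_objects:

model = keras.Model(inputs, outputs)
config = model.get_config()

new_model = keras.Model.from_config(config, custom_objects={"CustomDense": CustomDense})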
Optionally, implement the class method from_config(cls, config), which is used when recreating a layer instance from its config dictionary. The default implementation of from_config is:

def from_config(cls, config):
    return cls(**config)
Should you use the functional API to create a new model, or subclass the Model class directly? In general, the functional API is higher-level, easier, and safer to use, and it has a number of features that subclassed models do not support. Model subclassing, however, gives you greater flexibility when building models that are not easily expressible as directed acyclic graphs of layers. One strength of the functional API is that it is less verbose: there is no super().__init__(...) and no def call(self, ...). Compare the functional version:
inputs = keras.Input(shape=(32,))
x = layers.Dense(64, activation='relu')(inputs)
outputs = layers.Dense(10)(x)
mlp = keras.Model(inputs, outputs)
With the subclassed version:

class MLP(keras.Model):
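    # The class body is missing from the excerpt; a standard equivalent of the
    # functional MLP defined above:
    def __init__(self, **kwargs):
        super(MLP, self).__init__(**kwargs)
        self.dense_1 = layers.Dense(64, activation='relu')
        self.dense_2 = layers.Dense(10)

    def call(self, inputs):
        x = self.dense_1(inputs)
        return self.dense_2(x)


# Instantiate the model. A subclassed model has no state until it has been
# called at least once, so call it on a dummy batch to create its weights.
mlp = MLP()
_ = mlp(tf.zeros((1, 32)))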
The functional API also validates the model while you define it: the input specification (shape and dtype) is created in advance with Input, and every time you call a layer it checks that the specification passed to it matches its assumptions, raising a helpful error message if not. A functional model is plottable and inspectable, and because it is a static data structure it can be serialized or cloned via get_config() and from_config(), even without access to the original code. The main weakness of the functional API is that it does not support dynamic architectures, such as recursive networks or Tree-RNNs, which are better handled by subclassing.

Choosing between the functional API and Model subclassing isn't a binary decision, though. All models in tf.keras can interact with each other, whether they're Sequential models, functional models, or subclassed models written from scratch. You can always use a functional model or a Sequential model as part of a subclassed model or layer, as in the following example:
units = 32
timesteps = 10
input_dim = 5
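# The excerpt omits the small Functional model that the subclassed layer below
# reuses as its classifier head; a minimal version consistent with that usage:
inputs = keras.Input((None, units))
x = layers.GlobalAveragePooling1D()(inputs)
outputs = layers.Dense(1)(x)
model = keras.Model(inputs, outputs)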
class CustomRNN(layers.Layer):
    def __init__(self):
        super(CustomRNN, self).__init__()
        self.units = units
        self.projection_1 = layers.Dense(units=units, activation="tanh")
        self.projection_2 = layers.Dense(units=units, activation="tanh")
        # Our previously-defined Functional model
        self.classifier = model
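    # call() is missing from the page excerpt; a simple recurrence that projects
    # each timestep, mixes it with the previous state, and classifies the
    # stacked features:
    def call(self, inputs):
        outputs = []
        state = tf.zeros(shape=(inputs.shape[0], self.units))
        for t in range(inputs.shape[1]):
            x = inputs[:, t, :]
            h = self.projection_1(x)
            y = h + self.projection_2(state)
            state = y
            outputs.append(y)
        features = tf.stack(outputs, axis=1)
        return self.classifier(features)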
rnn_model = CustomRNN()
_ = rnn_model(tf.zeros((1, timesteps, input_dim)))
You can use any subclassed layer or model in the functional API, as long as it implements a call method that follows one of these patterns: call(self, inputs, **kwargs), call(self, inputs, training=None, **kwargs) (where training indicates whether the layer should behave in training or inference mode), or call(self, inputs, mask=None, **kwargs) (where mask is a boolean mask tensor, useful for RNNs, for instance). Additionally, if you implement the get_config method on your custom layer or model, the functional models you create with it will still be serializable and cloneable. Here's a quick example of a custom RNN, written from scratch, being used in a functional model:
units = 32
timesteps = 10
input_dim = 5
batch_size = 16
class CustomRNN(layers.Layer):
    def __init__(self):
        super(CustomRNN, self).__init__()
        self.units = units
        self.projection_1 = layers.Dense(units=units, activation="tanh")
        self.projection_2 = layers.Dense(units=units, activation="tanh")
        self.classifier = layers.Dense(1)
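    # call() is omitted in the excerpt; the same simple recurrence as in the
    # previous CustomRNN sketch:
    def call(self, inputs):
        outputs = []
        state = tf.zeros(shape=(inputs.shape[0], self.units))
        for t in range(inputs.shape[1]):
            x = inputs[:, t, :]
            h = self.projection_1(x)
            y = h + self.projection_2(state)
            state = y
            outputs.append(y)
        features = tf.stack(outputs, axis=1)
        return self.classifier(features)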
# Note that you specify a static batch size for the inputs with the `batch_shape`
# arg, because the inner computation of `CustomRNN` requires a static batch size
# (when you create the `state` zeros tensor).
inputs = keras.Input(batch_shape=(batch_size, timesteps, input_dim))
x = layers.Conv1D(32, 3)(inputs)
outputs = CustomRNN()(x)

model = keras.Model(inputs, outputs)
rnn_model = CustomRNN()
_ = rnn_model(tf.zeros((1, 10, 5)))