
Module kerod.model.faster_rcnn


View Source
import tensorflow as tf

from kerod.core.standard_fields import BoxField, DatasetField

from kerod.model.backbone.fpn import FPN

from kerod.model.backbone.resnet import ResNet50, ResNet50PytorchStyle

from kerod.layers import FastRCNN, RegionProposalNetwork

from kerod.layers.post_processing import (post_process_fast_rcnn_boxes, post_process_rpn)

from kerod.utils.documentation import remove_unwanted_doc

from kerod.utils.training import apply_kernel_regularization

from tensorflow.python.keras.engine import data_adapter

__pdoc__ = {}

class FasterRcnnFPN(tf.keras.Model):

    """Build a FPN Resnet 50 Faster RCNN network ready to use for training.

    You can use it as follow:

    ```python

    model_faster_rcnn = FasterRcnnFPNResnet50(80)

    base_lr = 0.1

    optimizer = tf.keras.optimizers.SGD(learning_rate=base_lr)

    model_faster_rcnn.compile(optimizer=optimizer, loss=None)

    model_faster_rcnn.fit(ds_train, validation_data=ds_test, epochs=11,)

    ```

    Arguments:

        num_classes: The number of classes of your dataset

            (**do not include the background class** it is handle for you)

        backbone: A tensorflow Model.

    """

    def __init__(self, num_classes, backbone, **kwargs):

        super().__init__(**kwargs)

        self.num_classes = num_classes

        self.l2 = tf.keras.regularizers.l2(1e-4)

        self.backbone = backbone

        self.fpn = FPN(kernel_regularizer=self.l2)

        self.rpn = RegionProposalNetwork(kernel_regularizer=self.l2)

        self.fast_rcnn = FastRCNN(self.num_classes + 1, kernel_regularizer=self.l2)

        # See docstring self.export_for_serving for usage

        self._serving = False

    def call(self, inputs, training=None):

        """Perform an inference in training.

        Arguments:

            inputs: A dict with the following schema:

                `images`: A Tensor of shape [batch_size, height, width, 3]

                `image_informations`: A float32 Tensor of shape [batch_size, 2] where

                    the last dimension represents the original height and

                    width of the images (without the padding).

                `ground_truths`: A dict

                    - `BoxField.LABELS`: A 3-D tensor of shape [batch_size, num_gt, num_classes],

                    - `BoxField.BOXES`: A 3-D tensor of shape [batch_size, num_gt, (y1, x1, y2, x2)]

                    - `BoxField.LABELS`: A 3-D tensor of int32 and shape [batch_size, num_gt]

                    - `BoxField.WEIGHTS`: A 3-D tensor of float and shape [batch_size, num_gt]

                    - `BoxField.NUM_BOXES`: A 2-D tensor of int32 and shape [batch_size, 1]

                        which allows to remove the padding created by tf.Data.

                        Example: if batch_size=2 and this field equal tf.constant([[2], [1]], tf.int32)

                        then my second box has a padding of 1

            training: Is automatically set to `True` in train and test mode

                (normally test should be at false). Why? Through the call we the losses and the metrics

                of the rpn and fast_rcnn. They are automatically added with `add_loss` and `add_metrics`.

                In test we want to benefit from those and therefore we compute them. It is an inheritance

                from tensorflow 2.0 and 2.1 and I'll think to move them in a more traditional way inside the

                train_step and test_step. However for now this method benefit of the encapsulation of

                the `self.compiled_loss` method.

        Returns:

            Tuple:

                - `classification_pred`: A Tensor of shape [batch_size, num_boxes, num_classes]

                    representing the class probability.

                - `localization_pred`: A Tensor of shape [batch_size, num_boxes, 4 * (num_classes - 1)]

                - `anchors`: A Tensor of shape [batch_size, num_boxes, 4]

        """

        images = inputs[DatasetField.IMAGES]

        images_information = inputs[DatasetField.IMAGES_INFO]

        # The preprocessing dedicated to the backbone is done inside the model.

        x = self.backbone(images)

        pyramid = self.fpn(x)

        rpn_loc_pred_per_lvl, rpn_cls_pred_per_lvl, anchors_per_lvl = self.rpn(pyramid)

        if training and not self._serving:

            apply_kernel_regularization(self.l2, self.backbone)

            # add_loss stores the rpn losses computation in self.losses

            _ = self.rpn.compute_loss(rpn_loc_pred_per_lvl, rpn_cls_pred_per_lvl, anchors_per_lvl,

                                      inputs['ground_truths'])

        num_boxes = 2000 if training else 1000
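        # (Standard Faster R-CNN / FPN setting: 2000 proposals are kept during
        # training and 1000 at inference.)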

        rois = post_process_rpn(rpn_cls_pred_per_lvl,

                                rpn_loc_pred_per_lvl,

                                anchors_per_lvl,

                                images_information,

                                pre_nms_topk_per_lvl=num_boxes,

                                post_nms_topk=num_boxes)

        if training and not self._serving:

            ground_truths = inputs['ground_truths']

            # Include the ground_truths as RoIs for the training

            rois = tf.concat([tf.cast(rois, self._compute_dtype), ground_truths[BoxField.BOXES]],

                             axis=1)

            # Sample the boxes used to train the fast_rcnn head

            y_true, weights, rois = self.fast_rcnn.sample_boxes(rois, ground_truths)

        classification_pred, localization_pred = self.fast_rcnn([pyramid, rois])

        if training and not self._serving:

            # add_loss stores the fast_rcnn losses computation in self.losses

            _ = self.fast_rcnn.compute_loss(y_true, weights, classification_pred, localization_pred)

        classification_pred = tf.nn.softmax(classification_pred)

        return classification_pred, localization_pred, rois

    def train_step(self, data):

        # These are the only transformations `Model.fit` applies to user-input

        # data when a `tf.data.Dataset` is provided. These utilities will be exposed

        # publicly.

        data = data_adapter.expand_1d(data)

        x, y, _ = data_adapter.unpack_x_y_sample_weight(data)

        with tf.GradientTape() as tape:

            x['ground_truths'] = y

            y_pred = self(x, training=True)

            # All the losses are computed in the call. It can seem weird, but it does
            # the job in a clean way. They are automatically added to self.losses.

            loss = self.compiled_loss(None, y_pred, None, regularization_losses=self.losses)

        self.optimizer.minimize(loss, self.trainable_variables, tape=tape)

        return {m.name: m.result() for m in self.metrics}

    def test_step(self, data):

        data = data_adapter.expand_1d(data)

        x, y, _ = data_adapter.unpack_x_y_sample_weight(data)

        x['ground_truths'] = y

        # In our graph all the metrics are computed inside the call method

        # So we set training to True to benefit from those metrics

        # Of course there is no backpropagation at the test step

        y_pred = self(x, training=True)

        _ = self.compiled_loss(None, y_pred, None, regularization_losses=self.losses)

        return {m.name: m.result() for m in self.metrics}

    def predict_step(self, data):

        data = data_adapter.expand_1d(data)

        x, _, _ = data_adapter.unpack_x_y_sample_weight(data)

        classification_pred, localization_pred, rois = self(x, training=False)

        # Remove the background classes

        classification_pred = classification_pred[:, :, 1:]

        return post_process_fast_rcnn_boxes(classification_pred, localization_pred, rois,

                                            x[DatasetField.IMAGES_INFO], self.num_classes)

    @tf.function(input_signature=[

        tf.TensorSpec(shape=(None, None, None, 3), dtype=tf.float32, name=DatasetField.IMAGES),

        tf.TensorSpec(shape=(None, 2), dtype=tf.float32, name=DatasetField.IMAGES_INFO)

    ])

    def serving_step(self, images, images_info):

        """Allow to bypass the save_model behavior the graph in serving mode.

        Currently, the issue is that in training the ground_truths are passed to the call method but

        not in inference. For the serving only the `images` and `images_information` are defined.

        It means the inputs link to the ground_truths won't be defined in serving. However, tensorflow

        absolutely want it and will return an exception if the ground_truth isn't provided.

        """

        return self.predict_step({

            DatasetField.IMAGES: images,

            DatasetField.IMAGES_INFO: images_info

        })

    def save(self,

             filepath,

             overwrite=True,

             include_optimizer=True,

             save_format=None,

             signatures=None,

             options=None):

        try:

            super().save(filepath,

                         overwrite=overwrite,

                         include_optimizer=include_optimizer,

                         save_format=save_format,

                         signatures=signatures,

                         options=options)

        except Exception as e:
            raise Exception(
                'Saving does not work with dynamic inputs: the ground_truths are injected in the '
                'inputs. Please use the export_for_serving method instead to bypass this error.'
            ) from e

    def export_for_serving(self, filepath):

        """Allow to bypass the save_model behavior the graph in serving mode.

        Currently, the issue is that in training the ground_truths are passed to the call method but

        not in inference. For the serving only the `images` and `images_information` are defined.

        It means the inputs link to the ground_truths won't be defined in serving. However, in tensorflow

        when the `training` arguments is defined int the method `call`, `tf.save_model.save` method

        performs a check on the graph for training=False and training=True.

        However, we don't want this check to be perform because our ground_truths inputs aren't defined.

        """

        self._serving = True

        call_output = self.serving_step.get_concrete_function()

        tf.saved_model.save(self, filepath, signatures={'serving_default': call_output})

        self._serving = False

class FasterRcnnFPNResnet50Caffe(FasterRcnnFPN):

    def __init__(self, num_classes, **kwargs):

        resnet = ResNet50(input_shape=[None, None, 3], weights='imagenet')

        super().__init__(num_classes, resnet, **kwargs)

class FasterRcnnFPNResnet50Pytorch(FasterRcnnFPN):

    def __init__(self, num_classes, **kwargs):

        resnet = ResNet50PytorchStyle(input_shape=[None, None, 3], weights='imagenet')

        super().__init__(num_classes, resnet, **kwargs)

remove_unwanted_doc(FasterRcnnFPN, __pdoc__)

remove_unwanted_doc(FasterRcnnFPNResnet50Caffe, __pdoc__)

remove_unwanted_doc(FasterRcnnFPNResnet50Pytorch, __pdoc__)

Classes

FasterRcnnFPN

class FasterRcnnFPN(
    num_classes,
    backbone,
    **kwargs
)

Build a Faster RCNN FPN ResNet 50 network ready to use for training.

You can use it as follows:

model_faster_rcnn = FasterRcnnFPNResnet50Caffe(80)
base_lr = 0.1
optimizer = tf.keras.optimizers.SGD(learning_rate=base_lr)
model_faster_rcnn.compile(optimizer=optimizer, loss=None)
model_faster_rcnn.fit(ds_train, validation_data=ds_test, epochs=11)
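
Here ds_train and ds_test are assumed to be tf.data.Dataset objects yielding (inputs, ground_truths) pairs matching the schema documented in call below. A minimal sketch with random data (the image size, boxes, and labels are illustrative, not part of kerod):

import tensorflow as tf
from kerod.core.standard_fields import BoxField, DatasetField

def dummy_example():
    # One 640x640 image with two ground-truth boxes.
    inputs = {
        DatasetField.IMAGES: tf.random.uniform([640, 640, 3]),
        DatasetField.IMAGES_INFO: tf.constant([640., 640.]),
    }
    ground_truths = {
        BoxField.BOXES: tf.constant([[10., 10., 100., 100.], [50., 50., 200., 200.]]),
        BoxField.LABELS: tf.constant([1, 2], tf.int32),
        BoxField.WEIGHTS: tf.constant([1., 1.]),
        BoxField.NUM_BOXES: tf.constant([2], tf.int32),
    }
    return inputs, ground_truths

ds_train = tf.data.Dataset.from_tensors(dummy_example()).repeat(8).batch(2)
ds_test = ds_train.take(2)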

Arguments

Name Description
num_classes The number of classes of your dataset
(do not include the background class; it is handled for you)
backbone A tensorflow Model.

Ancestors (in MRO)

  • tensorflow.python.keras.engine.training.Model
  • tensorflow.python.keras.engine.base_layer.Layer
  • tensorflow.python.module.module.Module
  • tensorflow.python.training.tracking.tracking.AutoTrackable
  • tensorflow.python.training.tracking.base.Trackable
  • tensorflow.python.keras.utils.version_utils.LayerVersionSelector
  • tensorflow.python.keras.utils.version_utils.ModelVersionSelector

Descendants

  • kerod.model.faster_rcnn.FasterRcnnFPNResnet50Caffe
  • kerod.model.faster_rcnn.FasterRcnnFPNResnet50Pytorch

Methods

add_loss

def add_loss(
    self,
    losses,
    **kwargs
)

Add loss tensor(s), potentially dependent on layer inputs.

Some losses (for instance, activity regularization losses) may be dependent on the inputs passed when calling a layer. Hence, when reusing the same layer on different inputs a and b, some entries in layer.losses may be dependent on a and some on b. This method automatically keeps track of dependencies.

This method can be used inside a subclassed layer or model's call function, in which case losses should be a Tensor or list of Tensors.

Example:

class MyLayer(tf.keras.layers.Layer):
  def call(self, inputs):
    self.add_loss(tf.abs(tf.reduce_mean(inputs)))
    return inputs

This method can also be called directly on a Functional Model during construction. In this case, any loss Tensors passed to this Model must be symbolic and be able to be traced back to the model's Inputs. These losses become part of the model's topology and are tracked in get_config.

Example:

inputs = tf.keras.Input(shape=(10,))
x = tf.keras.layers.Dense(10)(inputs)
outputs = tf.keras.layers.Dense(1)(x)
model = tf.keras.Model(inputs, outputs)
# Activity regularization.
model.add_loss(tf.abs(tf.reduce_mean(x)))

If this is not the case for your loss (if, for example, your loss references a Variable of one of the model's layers), you can wrap your loss in a zero-argument lambda. These losses are not tracked as part of the model's topology since they can't be serialized.

Example:

inputs = tf.keras.Input(shape=(10,))
d = tf.keras.layers.Dense(10)
x = d(inputs)
outputs = tf.keras.layers.Dense(1)(x)
model = tf.keras.Model(inputs, outputs)
# Weight regularization.
model.add_loss(lambda: tf.reduce_mean(d.kernel))

Parameters:

Name Description
losses Loss tensor, or list/tuple of tensors. Rather than tensors, losses
may also be zero-argument callables which create a loss tensor.
**kwargs Additional keyword arguments for backward compatibility.
Accepted values: inputs - Deprecated, will be automatically inferred.

View Source
  def add_loss(self, losses, **kwargs):

    """Add loss tensor(s), potentially dependent on layer inputs.

    Some losses (for instance, activity regularization losses) may be dependent

    on the inputs passed when calling a layer. Hence, when reusing the same

    layer on different inputs `a` and `b`, some entries in `layer.losses` may

    be dependent on `a` and some on `b`. This method automatically keeps track

    of dependencies.

    This method can be used inside a subclassed layer or model's `call`

    function, in which case `losses` should be a Tensor or list of Tensors.

    Example:

    ```python

    class MyLayer(tf.keras.layers.Layer):

      def call(self, inputs):

        self.add_loss(tf.abs(tf.reduce_mean(inputs)))

        return inputs

    ```

    This method can also be called directly on a Functional Model during

    construction. In this case, any loss Tensors passed to this Model must

    be symbolic and be able to be traced back to the model's `Input`s. These

    losses become part of the model's topology and are tracked in `get_config`.

    Example:

    ```python

    inputs = tf.keras.Input(shape=(10,))

    x = tf.keras.layers.Dense(10)(inputs)

    outputs = tf.keras.layers.Dense(1)(x)

    model = tf.keras.Model(inputs, outputs)

    # Activity regularization.

    model.add_loss(tf.abs(tf.reduce_mean(x)))

    ```

    If this is not the case for your loss (if, for example, your loss references

    a `Variable` of one of the model's layers), you can wrap your loss in a

    zero-argument lambda. These losses are not tracked as part of the model's

    topology since they can't be serialized.

    Example:

    ```python

    inputs = tf.keras.Input(shape=(10,))

    d = tf.keras.layers.Dense(10)

    x = d(inputs)

    outputs = tf.keras.layers.Dense(1)(x)

    model = tf.keras.Model(inputs, outputs)

    # Weight regularization.

    model.add_loss(lambda: tf.reduce_mean(d.kernel))

    ```

    Arguments:

      losses: Loss tensor, or list/tuple of tensors. Rather than tensors, losses

        may also be zero-argument callables which create a loss tensor.

      **kwargs: Additional keyword arguments for backward compatibility.

        Accepted values:

          inputs - Deprecated, will be automatically inferred.

    """

    kwargs.pop('inputs', None)

    if kwargs:

      raise TypeError('Unknown keyword arguments: %s' % (kwargs.keys(),))

    def _tag_callable(loss):

      """Tags callable loss tensor as `_unconditional_loss`."""

      if callable(loss):

        # We run the loss without autocasting, as regularizers are often

        # numerically unstable in float16.

        with autocast_variable.enable_auto_cast_variables(None):

          loss = loss()

      if loss is None:

        return None  # Will be filtered out when computing the .losses property

      if not tensor_util.is_tensor(loss):

        loss = ops.convert_to_tensor_v2_with_dispatch(

            loss, dtype=backend.floatx())

      loss._unconditional_loss = True  # pylint: disable=protected-access

      return loss

    losses = nest.flatten(losses)

    callable_losses = []

    eager_losses = []

    symbolic_losses = []

    for loss in losses:

      if callable(loss):

        callable_losses.append(functools.partial(_tag_callable, loss))

        continue

      if loss is None:

        continue

      if not tensor_util.is_tensor(loss) and not isinstance(

          loss, keras_tensor.KerasTensor):

        loss = ops.convert_to_tensor_v2_with_dispatch(

            loss, dtype=backend.floatx())

      # TF Functions should take the eager path.

      if ((tf_utils.is_symbolic_tensor(loss) or

           isinstance(loss, keras_tensor.KerasTensor)) and

          not base_layer_utils.is_in_tf_function()):

        symbolic_losses.append(loss)

      elif tensor_util.is_tensor(loss):

        eager_losses.append(loss)

    self._callable_losses.extend(callable_losses)

    in_call_context = base_layer_utils.call_context().in_call

    if eager_losses and not in_call_context:

      raise ValueError(

          'Expected a symbolic Tensors or a callable for the loss value. '

          'Please wrap your loss computation in a zero argument `lambda`.')

    self._eager_losses.extend(eager_losses)

    if in_call_context and not keras_tensor.keras_tensors_enabled():

      for symbolic_loss in symbolic_losses:

        self._losses.append(symbolic_loss)

    else:

      for symbolic_loss in symbolic_losses:

        if getattr(self, '_is_graph_network', False):

          self._graph_network_add_loss(symbolic_loss)

        else:

          # Possible a loss was added in a Layer's `build`.

          self._losses.append(symbolic_loss)

add_metric

def add_metric(
    self,
    value,
    name=None,
    **kwargs
)

Adds metric tensor to the layer.

This method can be used inside the call() method of a subclassed layer or model.

class MyMetricLayer(tf.keras.layers.Layer):
  def __init__(self):
    super(MyMetricLayer, self).__init__(name='my_metric_layer')
    self.mean = tf.keras.metrics.Mean(name='metric_1')

  def call(self, inputs):
    self.add_metric(self.mean(x))
    self.add_metric(tf.reduce_sum(x), name='metric_2')
    return inputs

This method can also be called directly on a Functional Model during construction. In this case, any tensor passed to this Model must be symbolic and be able to be traced back to the model's Inputs. These metrics become part of the model's topology and are tracked when you save the model via save().

inputs = tf.keras.Input(shape=(10,))
x = tf.keras.layers.Dense(10)(inputs)
outputs = tf.keras.layers.Dense(1)(x)
model = tf.keras.Model(inputs, outputs)
model.add_metric(math_ops.reduce_sum(x), name='metric_1')

Note: Calling add_metric() with the result of a metric object on a Functional Model, as shown in the example below, is not supported. This is because we cannot trace the metric result tensor back to the model's inputs.

inputs = tf.keras.Input(shape=(10,))
x = tf.keras.layers.Dense(10)(inputs)
outputs = tf.keras.layers.Dense(1)(x)
model = tf.keras.Model(inputs, outputs)
model.add_metric(tf.keras.metrics.Mean()(x), name='metric_1')

Parameters:

Name Description
value Metric tensor.
name String metric name.
**kwargs Additional keyword arguments for backward compatibility.
Accepted values:
aggregation - When the value tensor provided is not the result of
calling a keras.Metric instance, it will be aggregated by default
using a keras.Metric.Mean.
View Source
  def add_metric(self, value, name=None, **kwargs):

    """Adds metric tensor to the layer.

    This method can be used inside the `call()` method of a subclassed layer

    or model.

    ```python

    class MyMetricLayer(tf.keras.layers.Layer):

      def __init__(self):

        super(MyMetricLayer, self).__init__(name='my_metric_layer')

        self.mean = tf.keras.metrics.Mean(name='metric_1')

      def call(self, inputs):

        self.add_metric(self.mean(x))

        self.add_metric(tf.reduce_sum(x), name='metric_2')

        return inputs

    ```

    This method can also be called directly on a Functional Model during

    construction. In this case, any tensor passed to this Model must

    be symbolic and be able to be traced back to the model's `Input`s. These

    metrics become part of the model's topology and are tracked when you

    save the model via `save()`.

    ```python

    inputs = tf.keras.Input(shape=(10,))

    x = tf.keras.layers.Dense(10)(inputs)

    outputs = tf.keras.layers.Dense(1)(x)

    model = tf.keras.Model(inputs, outputs)

    model.add_metric(math_ops.reduce_sum(x), name='metric_1')

    ```

    Note: Calling `add_metric()` with the result of a metric object on a

    Functional Model, as shown in the example below, is not supported. This is

    because we cannot trace the metric result tensor back to the model's inputs.

    ```python

    inputs = tf.keras.Input(shape=(10,))

    x = tf.keras.layers.Dense(10)(inputs)

    outputs = tf.keras.layers.Dense(1)(x)

    model = tf.keras.Model(inputs, outputs)

    model.add_metric(tf.keras.metrics.Mean()(x), name='metric_1')

    ```

    Args:

      value: Metric tensor.

      name: String metric name.

      **kwargs: Additional keyword arguments for backward compatibility.

        Accepted values:

        `aggregation` - When the `value` tensor provided is not the result of

        calling a `keras.Metric` instance, it will be aggregated by default

        using a `keras.Metric.Mean`.

    """

    kwargs_keys = list(kwargs.keys())

    if (len(kwargs_keys) > 1 or

        (len(kwargs_keys) == 1 and kwargs_keys[0] != 'aggregation')):

      raise TypeError('Unknown keyword arguments: ', str(kwargs.keys()))

    from_metric_obj = hasattr(value, '_metric_obj')

    if keras_tensor.keras_tensors_enabled():

      is_symbolic = isinstance(value, keras_tensor.KerasTensor)

    else:

      is_symbolic = tf_utils.is_symbolic_tensor(value)

    in_call_context = base_layer_utils.call_context().in_call

    if name is None and not from_metric_obj:

      # Eg. `self.add_metric(math_ops.reduce_sum(x))`

      # In eager mode, we use metric name to lookup a metric. Without a name,

      # a new Mean metric wrapper will be created on every model/layer call.

      # So, we raise an error when no name is provided.

      # We will do the same for symbolic mode for consistency although a name

      # will be generated if no name is provided.

      # We will not raise this error in the following use case for the sake of

      # consistency as name is provided in the metric constructor.

      # mean = metrics.Mean(name='my_metric')

      # model.add_metric(mean(outputs))

      raise ValueError('Please provide a name for your metric like '

                       '`self.add_metric(tf.reduce_sum(inputs), '

                       'name=\'mean_activation\')`')

    elif from_metric_obj:

      name = value._metric_obj.name

    if not in_call_context and not is_symbolic:

      raise ValueError('Expected a symbolic Tensor for the metric value, '

                       'received: ' + str(value))

    # If a metric was added in a Layer's `call` or `build`.

    if in_call_context or not getattr(self, '_is_graph_network', False):

      # TF Function path should take the eager path.

      # If the given metric is available in `metrics` list we just update state

      # on it, otherwise we create a new metric instance and

      # add it to the `metrics` list.

      metric_obj = getattr(value, '_metric_obj', None)

      # Tensors that come from a Metric object already updated the Metric state.

      should_update_state = not metric_obj

      name = metric_obj.name if metric_obj else name

      with self._metrics_lock:

        match = self._get_existing_metric(name)

        if match:

          metric_obj = match

        elif metric_obj:

          self._metrics.append(metric_obj)

        else:

          # Build the metric object with the value's dtype if it defines one

          metric_obj = metrics_mod.Mean(

              name=name, dtype=getattr(value, 'dtype', None))

          self._metrics.append(metric_obj)

      if should_update_state:

        metric_obj(value)

    else:

      if from_metric_obj:

        raise ValueError('Using the result of calling a `Metric` object '

                         'when calling `add_metric` on a Functional '

                         'Model is not supported. Please pass the '

                         'Tensor to monitor directly.')

      # Insert layers into the Keras Graph Network.

      aggregation = None if from_metric_obj else 'mean'

      self._graph_network_add_metric(value, aggregation, name)

add_update

def add_update(
    self,
    updates,
    inputs=None
)

Add update op(s), potentially dependent on layer inputs.

Weight updates (for instance, the updates of the moving mean and variance in a BatchNormalization layer) may be dependent on the inputs passed when calling a layer. Hence, when reusing the same layer on different inputs a and b, some entries in layer.updates may be dependent on a and some on b. This method automatically keeps track of dependencies.

This call is ignored when eager execution is enabled (in that case, variable updates are run on the fly and thus do not need to be tracked for later execution).

Parameters:

Name Description
updates Update op, or list/tuple of update ops, or zero-arg callable
that returns an update op. A zero-arg callable should be passed in
order to disable running the updates by setting trainable=False
on this Layer, when executing in Eager mode.
inputs Deprecated, will be automatically inferred.
View Source
  @doc_controls.do_not_doc_inheritable

  def add_update(self, updates, inputs=None):

    """Add update op(s), potentially dependent on layer inputs.

    Weight updates (for instance, the updates of the moving mean and variance

    in a BatchNormalization layer) may be dependent on the inputs passed

    when calling a layer. Hence, when reusing the same layer on

    different inputs `a` and `b`, some entries in `layer.updates` may be

    dependent on `a` and some on `b`. This method automatically keeps track

    of dependencies.

    This call is ignored when eager execution is enabled (in that case, variable

    updates are run on the fly and thus do not need to be tracked for later

    execution).

    Arguments:

      updates: Update op, or list/tuple of update ops, or zero-arg callable

        that returns an update op. A zero-arg callable should be passed in

        order to disable running the updates by setting `trainable=False`

        on this Layer, when executing in Eager mode.

      inputs: Deprecated, will be automatically inferred.

    """

    if inputs is not None:

      tf_logging.warning(

          '`add_update` `inputs` kwarg has been deprecated. You no longer need '

          'to pass a value to `inputs` as it is being automatically inferred.')

    call_context = base_layer_utils.call_context()

    # No need to run updates during Functional API construction.

    if call_context.in_keras_graph:

      return

    # Callable updates are disabled by setting `trainable=False`.

    if not call_context.frozen:

      for update in nest.flatten(updates):

        if callable(update):

          update()  # pylint: disable=not-callable

add_variable

def add_variable(
    self,
    *args,
    **kwargs
)

Deprecated, do NOT use! Alias for add_weight.

View Source
  @doc_controls.do_not_doc_inheritable

  def add_variable(self, *args, **kwargs):

    """Deprecated, do NOT use! Alias for `add_weight`."""

    warnings.warn('`layer.add_variable` is deprecated and '

                  'will be removed in a future version. '

                  'Please use `layer.add_weight` method instead.')

    return self.add_weight(*args, **kwargs)

add_weight

def add_weight(
    self,
    name=None,
    shape=None,
    dtype=None,
    initializer=None,
    regularizer=None,
    trainable=None,
    constraint=None,
    use_resource=None,
    synchronization=<VariableSynchronization.AUTO: 0>,
    aggregation=<VariableAggregation.NONE: 0>,
    **kwargs
)

Adds a new variable to the layer.

Parameters:

Name Description
name Variable name.
shape Variable shape. Defaults to scalar if unspecified.
dtype The type of the variable. Defaults to self.dtype.
initializer Initializer instance (callable).
regularizer Regularizer instance (callable).
trainable Boolean, whether the variable should be part of the layer's
"trainable_variables" (e.g. variables, biases)
or "non_trainable_variables" (e.g. BatchNorm mean and variance).
Note that trainable cannot be True if synchronization
is set to ON_READ.
constraint Constraint instance (callable).
use_resource Whether to use ResourceVariable.
synchronization Indicates when a distributed variable will be
aggregated. Accepted values are constants defined in the class
tf.VariableSynchronization. By default the synchronization is set to
AUTO and the current DistributionStrategy chooses
when to synchronize. If synchronization is set to ON_READ,
trainable must not be set to True.
aggregation Indicates how a distributed variable will be aggregated.
Accepted values are constants defined in the class
tf.VariableAggregation.
**kwargs Additional keyword arguments. Accepted values are getter,
collections, experimental_autocast and caching_device.

Returns:

Type Description
None The variable created.

Raises:

Type Description
ValueError When giving unsupported dtype and no initializer or when
trainable has been set to True with synchronization set as ON_READ.
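
A typical use is inside a custom layer's build(); a minimal sketch (the Linear layer and its names are illustrative):

class Linear(tf.keras.layers.Layer):
    def __init__(self, units):
        super().__init__()
        self.units = units

    def build(self, input_shape):
        # One trainable kernel created through add_weight.
        self.kernel = self.add_weight(
            name='kernel',
            shape=(input_shape[-1], self.units),
            initializer='glorot_uniform',
            trainable=True)

    def call(self, inputs):
        return tf.matmul(inputs, self.kernel)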
View Source
  @doc_controls.for_subclass_implementers

  def add_weight(self,

                 name=None,

                 shape=None,

                 dtype=None,

                 initializer=None,

                 regularizer=None,

                 trainable=None,

                 constraint=None,

                 use_resource=None,

                 synchronization=tf_variables.VariableSynchronization.AUTO,

                 aggregation=tf_variables.VariableAggregation.NONE,

                 **kwargs):

    """Adds a new variable to the layer.

    Arguments:

      name: Variable name.

      shape: Variable shape. Defaults to scalar if unspecified.

      dtype: The type of the variable. Defaults to `self.dtype`.

      initializer: Initializer instance (callable).

      regularizer: Regularizer instance (callable).

      trainable: Boolean, whether the variable should be part of the layer's

        "trainable_variables" (e.g. variables, biases)

        or "non_trainable_variables" (e.g. BatchNorm mean and variance).

        Note that `trainable` cannot be `True` if `synchronization`

        is set to `ON_READ`.

      constraint: Constraint instance (callable).

      use_resource: Whether to use `ResourceVariable`.

      synchronization: Indicates when a distributed variable will be

        aggregated. Accepted values are constants defined in the class

        `tf.VariableSynchronization`. By default the synchronization is set to

        `AUTO` and the current `DistributionStrategy` chooses

        when to synchronize. If `synchronization` is set to `ON_READ`,

        `trainable` must not be set to `True`.

      aggregation: Indicates how a distributed variable will be aggregated.

        Accepted values are constants defined in the class

        `tf.VariableAggregation`.

      **kwargs: Additional keyword arguments. Accepted values are `getter`,

        `collections`, `experimental_autocast` and `caching_device`.

    Returns:

      The variable created.

    Raises:

      ValueError: When giving unsupported dtype and no initializer or when

        trainable has been set to True with synchronization set as `ON_READ`.

    """

    if shape is None:

      shape = ()

    kwargs.pop('partitioner', None)  # Ignored.

    # Validate optional keyword arguments.

    for kwarg in kwargs:

      if kwarg not in ['collections', 'experimental_autocast',

                       'caching_device', 'getter']:

        raise TypeError('Unknown keyword argument:', kwarg)

    collections_arg = kwargs.pop('collections', None)

    # 'experimental_autocast' can be set to False by the caller to indicate an

    # AutoCastVariable should never be created.

    autocast = kwargs.pop('experimental_autocast', True)

    # See the docstring for tf.Variable about the details for caching_device.

    caching_device = kwargs.pop('caching_device', None)

    if dtype is None:

      dtype = self.dtype or backend.floatx()

    dtype = dtypes.as_dtype(dtype)

    if self._dtype_policy.variable_dtype is None:

      # The policy is "_infer", so we infer the policy from the variable dtype.

      self._set_dtype_policy(policy.Policy(dtype.base_dtype.name))

    initializer = initializers.get(initializer)

    regularizer = regularizers.get(regularizer)

    constraint = constraints.get(constraint)

    if synchronization == tf_variables.VariableSynchronization.ON_READ:

      if trainable:

        raise ValueError(

            'Synchronization value can be set to '

            'VariableSynchronization.ON_READ only for non-trainable variables. '

            'You have specified trainable=True and '

            'synchronization=VariableSynchronization.ON_READ.')

      else:

        # Set trainable to be false when variable is to be synced on read.

        trainable = False

    elif trainable is None:

      trainable = True

    # Initialize variable when no initializer provided

    if initializer is None:

      # If dtype is DT_FLOAT, provide a uniform unit scaling initializer

      if dtype.is_floating:

        initializer = initializers.get('glorot_uniform')

      # If dtype is DT_INT/DT_UINT, provide a default value `zero`

      # If dtype is DT_BOOL, provide a default value `FALSE`

      elif dtype.is_integer or dtype.is_unsigned or dtype.is_bool:

        initializer = initializers.get('zeros')

      # NOTES:Do we need to support for handling DT_STRING and DT_COMPLEX here?

      else:

        raise ValueError('An initializer for variable %s of type %s is required'

                         ' for layer %s' % (name, dtype.base_dtype, self.name))

    getter = kwargs.pop('getter', base_layer_utils.make_variable)

    if (autocast and

        self._dtype_policy.compute_dtype != self._dtype_policy.variable_dtype

        and dtype.is_floating):

      old_getter = getter

      # Wrap variable constructor to return an AutoCastVariable.

      def getter(*args, **kwargs):  # pylint: disable=function-redefined

        variable = old_getter(*args, **kwargs)

        return autocast_variable.create_autocast_variable(variable)

      # Also the caching_device does not work with the mixed precision API,

      # disable it if it is specified.

      # TODO(b/142020079): Reenable it once the bug is fixed.

      if caching_device is not None:

        tf_logging.warn('`caching_device` does not work with mixed precision '

                        'API. Ignoring user specified `caching_device`.')

        caching_device = None

    variable = self._add_variable_with_custom_getter(

        name=name,

        shape=shape,

        # TODO(allenl): a `make_variable` equivalent should be added as a

        # `Trackable` method.

        getter=getter,

        # Manage errors in Layer rather than Trackable.

        overwrite=True,

        initializer=initializer,

        dtype=dtype,

        constraint=constraint,

        trainable=trainable,

        use_resource=use_resource,

        collections=collections_arg,

        synchronization=synchronization,

        aggregation=aggregation,

        caching_device=caching_device)

    if regularizer is not None:

      # TODO(fchollet): in the future, this should be handled at the

      # level of variable creation, and weight regularization losses

      # should be variable attributes.

      name_in_scope = variable.name[:variable.name.find(':')]

      self._handle_weight_regularization(name_in_scope,

                                         variable,

                                         regularizer)

    if base_layer_utils.is_split_variable(variable):

      for v in variable:

        backend.track_variable(v)

        if trainable:

          self._trainable_weights.append(v)

        else:

          self._non_trainable_weights.append(v)

    else:

      backend.track_variable(variable)

      if trainable:

        self._trainable_weights.append(variable)

      else:

        self._non_trainable_weights.append(variable)

    return variable

apply

def apply(
    self,
    inputs,
    *args,
    **kwargs
)

Deprecated, do NOT use!

This is an alias of self.__call__.

Parameters:

Name Description
inputs Input tensor(s).
*args additional positional arguments to be passed to self.call.
**kwargs additional keyword arguments to be passed to self.call.

Returns:

Type Description
None Output tensor(s).
View Source
  @doc_controls.do_not_doc_inheritable

  def apply(self, inputs, *args, **kwargs):

    """Deprecated, do NOT use!

    This is an alias of `self.__call__`.

    Arguments:

      inputs: Input tensor(s).

      *args: additional positional arguments to be passed to `self.call`.

      **kwargs: additional keyword arguments to be passed to `self.call`.

    Returns:

      Output tensor(s).

    """

    warnings.warn('`layer.apply` is deprecated and '

                  'will be removed in a future version. '

                  'Please use `layer.__call__` method instead.')

    return self.__call__(inputs, *args, **kwargs)

call

def call(
    self,
    inputs,
    training=None
)

Perform a forward pass in training or inference mode.

Parameters:

Name Description
inputs A dict with the following schema:
images: A Tensor of shape [batch_size, height, width, 3]
image_informations: A float32 Tensor of shape [batch_size, 2] where
the last dimension represents the original height and
width of the images (without the padding).

ground_truths: A dict
- BoxField.BOXES: A 3-D tensor of shape [batch_size, num_gt, (y1, x1, y2, x2)]
- BoxField.LABELS: A 2-D tensor of int32 and shape [batch_size, num_gt]
- BoxField.WEIGHTS: A 2-D tensor of float and shape [batch_size, num_gt]
- BoxField.NUM_BOXES: A 2-D tensor of int32 and shape [batch_size, 1]
which allows removing the padding created by tf.data.
Example: if batch_size=2 and this field equals tf.constant([[2], [1]], tf.int32),
then the second example has a padding of 1.
training Is automatically set to True in both train and test mode
(normally it should be False in test mode). Why? The losses and the metrics of the
rpn and fast_rcnn are computed inside the call and automatically added with add_loss
and add_metric. In test mode we want to benefit from them as well, so we compute them
there too. This is a legacy of tensorflow 2.0 and 2.1, and they may eventually move
inside train_step and test_step. For now, however, this method benefits from the
encapsulation of the self.compiled_loss method.

Returns:

Type Description
Tuple - classification_pred: A Tensor of shape [batch_size, num_boxes, num_classes + 1]
representing the class probabilities (background included).
- localization_pred: A Tensor of shape [batch_size, num_boxes, 4 * num_classes]
- rois: A Tensor of shape [batch_size, num_boxes, 4]
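
A minimal inference sketch (the 640x640 random image is illustrative; any float input matching the schema above works):

import tensorflow as tf
from kerod.core.standard_fields import DatasetField
from kerod.model.faster_rcnn import FasterRcnnFPNResnet50Caffe

model = FasterRcnnFPNResnet50Caffe(80)
inputs = {
    DatasetField.IMAGES: tf.random.uniform([1, 640, 640, 3]),
    DatasetField.IMAGES_INFO: tf.constant([[640., 640.]]),
}
# training=False: no ground_truths are needed and 1000 proposals are kept.
classification_pred, localization_pred, rois = model(inputs, training=False)
# model.predict(...) would additionally strip the background class and
# post-process the boxes (see predict_step in the source above).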
View Source
    def call(self, inputs, training=None):

        """Perform an inference in training.

        Arguments:

            inputs: A dict with the following schema:

                `images`: A Tensor of shape [batch_size, height, width, 3]

                `image_informations`: A float32 Tensor of shape [batch_size, 2] where

                    the last dimension represents the original height and

                    width of the images (without the padding).

                `ground_truths`: A dict

                    - `BoxField.LABELS`: A 3-D tensor of shape [batch_size, num_gt, num_classes],

                    - `BoxField.BOXES`: A 3-D tensor of shape [batch_size, num_gt, (y1, x1, y2, x2)]

                    - `BoxField.LABELS`: A 3-D tensor of int32 and shape [batch_size, num_gt]

                    - `BoxField.WEIGHTS`: A 3-D tensor of float and shape [batch_size, num_gt]

                    - `BoxField.NUM_BOXES`: A 2-D tensor of int32 and shape [batch_size, 1]

                        which allows to remove the padding created by tf.Data.

                        Example: if batch_size=2 and this field equal tf.constant([[2], [1]], tf.int32)

                        then my second box has a padding of 1

            training: Is automatically set to `True` in train and test mode

                (normally test should be at false). Why? Through the call we the losses and the metrics

                of the rpn and fast_rcnn. They are automatically added with `add_loss` and `add_metrics`.

                In test we want to benefit from those and therefore we compute them. It is an inheritance

                from tensorflow 2.0 and 2.1 and I'll think to move them in a more traditional way inside the

                train_step and test_step. However for now this method benefit of the encapsulation of

                the `self.compiled_loss` method.

        Returns:

            Tuple:

                - `classification_pred`: A Tensor of shape [batch_size, num_boxes, num_classes]

                    representing the class probability.

                - `localization_pred`: A Tensor of shape [batch_size, num_boxes, 4 * (num_classes - 1)]

                - `anchors`: A Tensor of shape [batch_size, num_boxes, 4]

        """

        images = inputs[DatasetField.IMAGES]

        images_information = inputs[DatasetField.IMAGES_INFO]

        # The preprocessing dedicated to the backbone is done inside the model.

        x = self.backbone(images)

        pyramid = self.fpn(x)

        rpn_loc_pred_per_lvl, rpn_cls_pred_per_lvl, anchors_per_lvl = self.rpn(pyramid)

        if training and not self._serving:

            apply_kernel_regularization(self.l2, self.backbone)

            # add_loss stores the rpn losses computation in self.losses

            _ = self.rpn.compute_loss(rpn_loc_pred_per_lvl, rpn_cls_pred_per_lvl, anchors_per_lvl,

                                      inputs['ground_truths'])

        num_boxes = 2000 if training else 1000
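        # (Standard Faster R-CNN / FPN setting: 2000 proposals are kept during
        # training and 1000 at inference.)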

        rois = post_process_rpn(rpn_cls_pred_per_lvl,

                                rpn_loc_pred_per_lvl,

                                anchors_per_lvl,

                                images_information,

                                pre_nms_topk_per_lvl=num_boxes,

                                post_nms_topk=num_boxes)

        if training and not self._serving:

            ground_truths = inputs['ground_truths']

            # Include the ground_truths as RoIs for the training

            rois = tf.concat([tf.cast(rois, self._compute_dtype), ground_truths[BoxField.BOXES]],

                             axis=1)

            # Sample the boxes used to train the fast_rcnn head

            y_true, weights, rois = self.fast_rcnn.sample_boxes(rois, ground_truths)

        classification_pred, localization_pred = self.fast_rcnn([pyramid, rois])

        if training and not self._serving:

            # add_loss stores the fast_rcnn losses computation in self.losses

            _ = self.fast_rcnn.compute_loss(y_true, weights, classification_pred, localization_pred)

        classification_pred = tf.nn.softmax(classification_pred)

        return classification_pred, localization_pred, rois

compute_mask

def compute_mask(
    self,
    inputs,
    mask=None
)

Computes an output mask tensor.

Parameters:

Name Description
inputs Tensor or list of tensors.
mask Tensor or list of tensors.

Returns:

Type Description
None None or a tensor (or list of tensors,
one per output tensor of the layer).
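
By default a layer does not support masking; a layer that should simply carry the incoming mask through can opt in, as in this minimal sketch (the Passthrough layer is illustrative):

class Passthrough(tf.keras.layers.Layer):
    def __init__(self):
        super().__init__()
        # With supports_masking=True the default compute_mask
        # forwards the input mask unchanged.
        self.supports_masking = True

    def call(self, inputs):
        return inputs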
View Source
  @generic_utils.default

  def compute_mask(self, inputs, mask=None):  # pylint: disable=unused-argument

    """Computes an output mask tensor.

    Arguments:

        inputs: Tensor or list of tensors.

        mask: Tensor or list of tensors.

    Returns:

        None or a tensor (or list of tensors,

            one per output tensor of the layer).

    """

    if not self._supports_masking:

      if any(m is not None for m in nest.flatten(mask)):

        raise TypeError('Layer ' + self.name + ' does not support masking, '

                        'but was passed an input_mask: ' + str(mask))

      # masking not explicitly supported: return None as mask.

      return None

    # if masking is explicitly supported, by default

    # carry over the input mask

    return mask

compute_output_shape

def compute_output_shape(
    self,
    input_shape
)

Computes the output shape of the layer.

If the layer has not been built, this method will call build on the layer. This assumes that the layer will later be used with inputs that match the input shape provided here.

Parameters:

Name Description
input_shape Shape tuple (tuple of integers)
or list of shape tuples (one per output tensor of the layer).
Shape tuples can include None for free dimensions,
instead of an integer.

Returns:

Type Description
None An input shape tuple.
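
For example, a layer that pads the last axis could report its output shape as follows (a minimal sketch; the PadLast layer is illustrative):

class PadLast(tf.keras.layers.Layer):
    def call(self, inputs):
        return tf.pad(inputs, [[0, 0], [1, 1]])

    def compute_output_shape(self, input_shape):
        # Two elements are added on the last axis.
        return (input_shape[0], input_shape[1] + 2)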
View Source
  def compute_output_shape(self, input_shape):

    """Computes the output shape of the layer.

    If the layer has not been built, this method will call `build` on the

    layer. This assumes that the layer will later be used with inputs that

    match the input shape provided here.

    Arguments:

        input_shape: Shape tuple (tuple of integers)

            or list of shape tuples (one per output tensor of the layer).

            Shape tuples can include None for free dimensions,

            instead of an integer.

    Returns:

        An input shape tuple.

    """

    if context.executing_eagerly():

      # In this case we build the model first in order to do shape inference.

      # This is acceptable because the framework only calls

      # `compute_output_shape` on shape values that the layer would later be

      # built for. It would however cause issues in case a user attempts to

      # use `compute_output_shape` manually with shapes that are incompatible

      # with the shape the Layer will be called on (these users will have to

      # implement `compute_output_shape` themselves).

      self._maybe_build(input_shape)

      with func_graph.FuncGraph(str(self.name) + '_scratch_graph').as_default():

        input_shape = tf_utils.convert_shapes(input_shape, to_tuples=False)

        def _make_placeholder_like(shape):

          ph = backend.placeholder(shape=shape, dtype=self.dtype)

          ph._keras_mask = None

          return ph

        inputs = nest.map_structure(_make_placeholder_like, input_shape)

        try:

          outputs = self(inputs, training=False)

        except TypeError as e:

          six.raise_from(

              NotImplementedError(

                  'We could not automatically infer the static shape of the '

                  'layer\'s output. Please implement the '

                  '`compute_output_shape` method on your layer (%s).' %

                  self.__class__.__name__), e)

      return nest.map_structure(lambda t: t.shape, outputs)

    raise NotImplementedError(

        'Please run in eager mode or implement the `compute_output_shape` '

        'method on your layer (%s).' % self.__class__.__name__)

compute_output_signature

def compute_output_signature(
    self,
    input_signature
)

Compute the output tensor signature of the layer based on the inputs.

Unlike a TensorShape object, a TensorSpec object contains both shape and dtype information for a tensor. This method allows layers to provide output dtype information if it is different from the input dtype. For any layer that doesn't implement this function, the framework will fall back to use compute_output_shape, and will assume that the output dtype matches the input dtype.

Parameters:

Name Description
input_signature Single TensorSpec or nested structure of TensorSpec
objects, describing a candidate input for the layer.

Returns:

Type Description
None Single TensorSpec or nested structure of TensorSpec objects, describing
how the layer would transform the provided input.

Raises:

Type Description
TypeError If input_signature contains a non-TensorSpec object.
View Source
  @doc_controls.for_subclass_implementers

  def compute_output_signature(self, input_signature):

    """Compute the output tensor signature of the layer based on the inputs.

    Unlike a TensorShape object, a TensorSpec object contains both shape

    and dtype information for a tensor. This method allows layers to provide

    output dtype information if it is different from the input dtype.

    For any layer that doesn't implement this function,

    the framework will fall back to use `compute_output_shape`, and will

    assume that the output dtype matches the input dtype.

    Args:

      input_signature: Single TensorSpec or nested structure of TensorSpec

        objects, describing a candidate input for the layer.

    Returns:

      Single TensorSpec or nested structure of TensorSpec objects, describing

        how the layer would transform the provided input.

    Raises:

      TypeError: If input_signature contains a non-TensorSpec object.

    """

    def check_type_return_shape(s):

      if not isinstance(s, tensor_spec.TensorSpec):

        raise TypeError(

            'Only TensorSpec signature types are supported, '

            'but saw signature entry: {}.'.format(s))

      return s.shape

    input_shape = nest.map_structure(check_type_return_shape, input_signature)

    output_shape = self.compute_output_shape(input_shape)

    dtype = self._compute_dtype

    if dtype is None:

      input_dtypes = [s.dtype for s in nest.flatten(input_signature)]

      # Default behavior when self.dtype is None, is to use the first input's

      # dtype.

      dtype = input_dtypes[0]

    return nest.map_structure(

        lambda s: tensor_spec.TensorSpec(dtype=dtype, shape=s),

        output_shape)

count_params

def count_params(
    self
)

Count the total number of scalars composing the weights.

Returns:

Type Description
None An integer count.

Raises:

Type Description
ValueError if the layer isn't yet built
(in which case its weights aren't yet defined).
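
A quick sanity check (16 = 3*4 kernel weights + 4 biases):

model = tf.keras.Sequential([tf.keras.layers.Dense(4, input_shape=(3,))])
assert model.count_params() == 16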
View Source
  def count_params(self):

    """Count the total number of scalars composing the weights.

    Returns:

        An integer count.

    Raises:

        ValueError: if the layer isn't yet built

          (in which case its weights aren't yet defined).

    """

    if not self.built:

      if getattr(self, '_is_graph_network', False):

        with tf_utils.maybe_init_scope(self):

          self._maybe_build(self.inputs)

      else:

        raise ValueError('You tried to call `count_params` on ' + self.name +

                         ', but the layer isn\'t built. '

                         'You can build it manually via: `' + self.name +

                         '.build(batch_input_shape)`.')

    return layer_utils.count_params(self.weights)

export_for_serving

def export_for_serving(
    self,
    filepath
)

Allow bypassing the saved_model tracing of the graph in serving mode.

Currently, the issue is that in training the ground_truths are passed to the call method but not in inference. For serving, only the images and images_information are defined, which means the inputs linked to the ground_truths won't be defined in serving. However, in tensorflow, when the training argument is defined in the method call, tf.saved_model.save performs a check on the graph for both training=False and training=True. We don't want this check to be performed because our ground_truths inputs aren't defined.
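
A minimal usage sketch (the export path is illustrative; the saved signature is called back through the standard tf.saved_model API):

import tensorflow as tf
from kerod.core.standard_fields import DatasetField
from kerod.model.faster_rcnn import FasterRcnnFPNResnet50Caffe

model = FasterRcnnFPNResnet50Caffe(80)
model.export_for_serving('/tmp/faster_rcnn_serving')

loaded = tf.saved_model.load('/tmp/faster_rcnn_serving')
serving_fn = loaded.signatures['serving_default']
outputs = serving_fn(**{
    DatasetField.IMAGES: tf.random.uniform([1, 640, 640, 3]),
    DatasetField.IMAGES_INFO: tf.constant([[640., 640.]]),
})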

View Source
    def export_for_serving(self, filepath):

        """Allow to bypass the save_model behavior the graph in serving mode.

        Currently, the issue is that in training the ground_truths are passed to the call method but

        not in inference. For the serving only the `images` and `images_information` are defined.

        It means the inputs link to the ground_truths won't be defined in serving. However, in tensorflow

        when the `training` arguments is defined int the method `call`, `tf.save_model.save` method

        performs a check on the graph for training=False and training=True.

        However, we don't want this check to be perform because our ground_truths inputs aren't defined.

        """

        self._serving = True

        call_output = self.serving_step.get_concrete_function()

        tf.saved_model.save(self, filepath, signatures={'serving_default': call_output})

        self._serving = False

get_input_at

def get_input_at(
    self,
    node_index
)

Retrieves the input tensor(s) of a layer at a given node.

Parameters:

Name Description
node_index Integer, index of the node
from which to retrieve the attribute.
E.g. node_index=0 will correspond to the
first time the layer was called.

Returns:

Type Description
None A tensor (or list of tensors if the layer has multiple inputs).

Raises:

Type Description
RuntimeError If called in Eager mode.
View Source
  @doc_controls.do_not_doc_inheritable

  def get_input_at(self, node_index):

    """Retrieves the input tensor(s) of a layer at a given node.

    Arguments:

        node_index: Integer, index of the node

            from which to retrieve the attribute.

            E.g. `node_index=0` will correspond to the

            first time the layer was called.

    Returns:

        A tensor (or list of tensors if the layer has multiple inputs).

    Raises:

      RuntimeError: If called in Eager mode.

    """

    return self._get_node_attribute_at_index(node_index, 'input_tensors',

                                             'input')

get_input_mask_at

def get_input_mask_at(
    self,
    node_index
)

Retrieves the input mask tensor(s) of a layer at a given node.

Parameters:

Name Description
node_index Integer, index of the node
from which to retrieve the attribute.
E.g. node_index=0 will correspond to the
first time the layer was called.

Returns:

Type Description
None A mask tensor
(or list of tensors if the layer has multiple inputs).
View Source
  @doc_controls.do_not_doc_inheritable

  def get_input_mask_at(self, node_index):

    """Retrieves the input mask tensor(s) of a layer at a given node.

    Arguments:

        node_index: Integer, index of the node

            from which to retrieve the attribute.

            E.g. `node_index=0` will correspond to the

            first time the layer was called.

    Returns:

        A mask tensor

        (or list of tensors if the layer has multiple inputs).

    """

    inputs = self.get_input_at(node_index)

    if isinstance(inputs, list):

      return [getattr(x, '_keras_mask', None) for x in inputs]

    else:

      return getattr(inputs, '_keras_mask', None)

get_input_shape_at

def get_input_shape_at(
    self,
    node_index
)

Retrieves the input shape(s) of a layer at a given node.

Parameters:

Name Description
node_index Integer, index of the node
from which to retrieve the attribute.
E.g. node_index=0 will correspond to the
first time the layer was called.

Returns:

Type Description
None A shape tuple
(or list of shape tuples if the layer has multiple inputs).

Raises:

Type Description
RuntimeError If called in Eager mode.
View Source
  @doc_controls.do_not_doc_inheritable

  def get_input_shape_at(self, node_index):

    """Retrieves the input shape(s) of a layer at a given node.

    Arguments:

        node_index: Integer, index of the node

            from which to retrieve the attribute.

            E.g. `node_index=0` will correspond to the

            first time the layer was called.

    Returns:

        A shape tuple

        (or list of shape tuples if the layer has multiple inputs).

    Raises:

      RuntimeError: If called in Eager mode.

    """

    return self._get_node_attribute_at_index(node_index, 'input_shapes',

                                             'input shape')

get_losses_for

def get_losses_for(
    self,
    inputs
)

Deprecated, do NOT use!

Retrieves losses relevant to a specific set of inputs.

Parameters:

Name Description
inputs Input tensor or list/tuple of input tensors.

Returns:

Type Description
None List of loss tensors of the layer that depend on inputs.
View Source
  @doc_controls.do_not_generate_docs

  def get_losses_for(self, inputs):

    """Deprecated, do NOT use!

    Retrieves losses relevant to a specific set of inputs.

    Arguments:

      inputs: Input tensor or list/tuple of input tensors.

    Returns:

      List of loss tensors of the layer that depend on `inputs`.

    """

    warnings.warn('`layer.get_losses_for` is deprecated and '

                  'will be removed in a future version. '

                  'Please use `layer.losses` instead.')

    return self.losses

get_output_at

def get_output_at(
    self,
    node_index
)

Retrieves the output tensor(s) of a layer at a given node.

Parameters:

Name Description
node_index Integer, index of the node
from which to retrieve the attribute.
E.g. node_index=0 will correspond to the
first time the layer was called.

Returns:

Type Description
None A tensor (or list of tensors if the layer has multiple outputs).

Raises:

Type Description
RuntimeError If called in Eager mode.
View Source
  @doc_controls.do_not_doc_inheritable

  def get_output_at(self, node_index):

    """Retrieves the output tensor(s) of a layer at a given node.

    Arguments:

        node_index: Integer, index of the node

            from which to retrieve the attribute.

            E.g. `node_index=0` will correspond to the

            first time the layer was called.

    Returns:

        A tensor (or list of tensors if the layer has multiple outputs).

    Raises:

      RuntimeError: If called in Eager mode.

    """

    return self._get_node_attribute_at_index(node_index, 'output_tensors',

                                             'output')

get_output_mask_at

def get_output_mask_at(
    self,
    node_index
)

Retrieves the output mask tensor(s) of a layer at a given node.

Parameters:

Name Description
node_index Integer, index of the node
from which to retrieve the attribute.
E.g. node_index=0 will correspond to the
first time the layer was called.

Returns:

Type Description
None A mask tensor
(or list of tensors if the layer has multiple outputs).
View Source
  @doc_controls.do_not_doc_inheritable

  def get_output_mask_at(self, node_index):

    """Retrieves the output mask tensor(s) of a layer at a given node.

    Arguments:

        node_index: Integer, index of the node

            from which to retrieve the attribute.

            E.g. `node_index=0` will correspond to the

            first time the layer was called.

    Returns:

        A mask tensor

        (or list of tensors if the layer has multiple outputs).

    """

    output = self.get_output_at(node_index)

    if isinstance(output, list):

      return [getattr(x, '_keras_mask', None) for x in output]

    else:

      return getattr(output, '_keras_mask', None)

get_output_shape_at

def get_output_shape_at(
    self,
    node_index
)

Retrieves the output shape(s) of a layer at a given node.

Parameters:

Name Description
node_index Integer, index of the node
from which to retrieve the attribute.
E.g. node_index=0 will correspond to the
first time the layer was called.

Returns:

Type Description
None A shape tuple
(or list of shape tuples if the layer has multiple outputs).

Raises:

Type Description
RuntimeError If called in Eager mode.
View Source
  @doc_controls.do_not_doc_inheritable

  def get_output_shape_at(self, node_index):

    """Retrieves the output shape(s) of a layer at a given node.

    Arguments:

        node_index: Integer, index of the node

            from which to retrieve the attribute.

            E.g. `node_index=0` will correspond to the

            first time the layer was called.

    Returns:

        A shape tuple

        (or list of shape tuples if the layer has multiple outputs).

    Raises:

      RuntimeError: If called in Eager mode.

    """

    return self._get_node_attribute_at_index(node_index, 'output_shapes',

                                             'output shape')

get_updates_for

def get_updates_for(
    self,
    inputs
)

Deprecated, do NOT use!

Retrieves updates relevant to a specific set of inputs.

Parameters:

Name Description
inputs Input tensor or list/tuple of input tensors.

Returns:

Type Description
None List of update ops of the layer that depend on inputs.
View Source
  @doc_controls.do_not_generate_docs

  def get_updates_for(self, inputs):

    """Deprecated, do NOT use!

    Retrieves updates relevant to a specific set of inputs.

    Arguments:

      inputs: Input tensor or list/tuple of input tensors.

    Returns:

      List of update ops of the layer that depend on `inputs`.

    """

    warnings.warn('`layer.get_updates_for` is deprecated and '

                  'will be removed in a future version. '

                  'Please use `layer.updates` method instead.')

    return self.updates

serving_step

def serving_step(
    self,
    images,
    images_info
)

Allow to bypass the saved_model behavior of the graph in serving mode.

Currently, the issue is that the ground_truths are passed to the call method in training but not in inference. For serving, only the images and images_information are defined, which means the inputs linked to the ground_truths won't be defined in serving. However, tensorflow requires them and will raise an exception if the ground_truths aren't provided.
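
For illustration, a sketch of how the exported signature could then be consumed (the path and shapes are assumptions; the keyword names are the values of DatasetField.IMAGES and DatasetField.IMAGES_INFO):

import tensorflow as tf
from kerod.core.standard_fields import DatasetField

loaded = tf.saved_model.load('serving/faster_rcnn/1')
infer = loaded.signatures['serving_default']
outputs = infer(**{
    DatasetField.IMAGES: tf.zeros((1, 640, 640, 3), tf.float32),
    DatasetField.IMAGES_INFO: tf.constant([[640., 640.]], tf.float32),
})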

View Source
    @tf.function(input_signature=[

        tf.TensorSpec(shape=(None, None, None, 3), dtype=tf.float32, name=DatasetField.IMAGES),

        tf.TensorSpec(shape=(None, 2), dtype=tf.float32, name=DatasetField.IMAGES_INFO)

    ])

    def serving_step(self, images, images_info):

        """Allow to bypass the save_model behavior the graph in serving mode.

        Currently, the issue is that in training the ground_truths are passed to the call method but

        not in inference. For the serving only the `images` and `images_information` are defined.

        It means the inputs link to the ground_truths won't be defined in serving. However, tensorflow

        absolutely want it and will return an exception if the ground_truth isn't provided.

        """

        return self.predict_step({

            DatasetField.IMAGES: images,

            DatasetField.IMAGES_INFO: images_info

        })

set_weights

def set_weights(
    self,
    weights
)

Sets the weights of the layer, from Numpy arrays.

The weights of a layer represent the state of the layer. This function sets the weight values from numpy arrays. The weight values should be passed in the order they are created by the layer. Note that the layer's weights must be instantiated before calling this function, by calling the layer at least once.

For example, a Dense layer returns a list of two values: the per-output weights and the bias value. These can be used to set the weights of another Dense layer:

>>> a = tf.keras.layers.Dense(1,
...   kernel_initializer=tf.constant_initializer(1.))
>>> a_out = a(tf.convert_to_tensor([[1., 2., 3.]]))
>>> a.get_weights()
[array([[1.],
       [1.],
       [1.]], dtype=float32), array([0.], dtype=float32)]
>>> b = tf.keras.layers.Dense(1,
...   kernel_initializer=tf.constant_initializer(2.))
>>> b_out = b(tf.convert_to_tensor([[10., 20., 30.]]))
>>> b.get_weights()
[array([[2.],
       [2.],
       [2.]], dtype=float32), array([0.], dtype=float32)]
>>> b.set_weights(a.get_weights())
>>> b.get_weights()
[array([[1.],
       [1.],
       [1.]], dtype=float32), array([0.], dtype=float32)]

Parameters:

Name Description
weights a list of Numpy arrays. The number
of arrays and their shape must match
number of the dimensions of the weights
of the layer (i.e. it should match the
output of get_weights).

Raises:

Type Description
ValueError If the provided weights list does not match the
layer's specifications.
View Source
  def set_weights(self, weights):

    """Sets the weights of the layer, from Numpy arrays.

    The weights of a layer represent the state of the layer. This function

    sets the weight values from numpy arrays. The weight values should be

    passed in the order they are created by the layer. Note that the layer's

    weights must be instantiated before calling this function by calling

    the layer.

    For example, a Dense layer returns a list of two values-- per-output

    weights and the bias value. These can be used to set the weights of another

    Dense layer:

    >>> a = tf.keras.layers.Dense(1,

    ...   kernel_initializer=tf.constant_initializer(1.))

    >>> a_out = a(tf.convert_to_tensor([[1., 2., 3.]]))

    >>> a.get_weights()

    [array([[1.],

           [1.],

           [1.]], dtype=float32), array([0.], dtype=float32)]

    >>> b = tf.keras.layers.Dense(1,

    ...   kernel_initializer=tf.constant_initializer(2.))

    >>> b_out = b(tf.convert_to_tensor([[10., 20., 30.]]))

    >>> b.get_weights()

    [array([[2.],

           [2.],

           [2.]], dtype=float32), array([0.], dtype=float32)]

    >>> b.set_weights(a.get_weights())

    >>> b.get_weights()

    [array([[1.],

           [1.],

           [1.]], dtype=float32), array([0.], dtype=float32)]

    Arguments:

        weights: a list of Numpy arrays. The number

            of arrays and their shape must match

            number of the dimensions of the weights

            of the layer (i.e. it should match the

            output of `get_weights`).

    Raises:

        ValueError: If the provided weights list does not match the

            layer's specifications.

    """

    params = self.weights

    expected_num_weights = 0

    for param in params:

      if isinstance(param, base_layer_utils.TrackableWeightHandler):

        expected_num_weights += param.num_tensors

      else:

        expected_num_weights += 1

    if expected_num_weights != len(weights):

      raise ValueError(

          'You called `set_weights(weights)` on layer "%s" '

          'with a weight list of length %s, but the layer was '

          'expecting %s weights. Provided weights: %s...' %

          (self.name, len(weights), expected_num_weights, str(weights)[:50]))

    weight_index = 0

    weight_value_tuples = []

    for param in params:

      if isinstance(param, base_layer_utils.TrackableWeightHandler):

        num_tensors = param.num_tensors

        tensors = weights[weight_index:weight_index + num_tensors]

        param.set_weights(tensors)

        weight_index += num_tensors

      else:

        weight = weights[weight_index]

        ref_shape = param.shape

        if not ref_shape.is_compatible_with(weight.shape):

          raise ValueError(

              'Layer weight shape %s not compatible with provided weight '

              'shape %s' % (ref_shape, weight.shape))

        weight_value_tuples.append((param, weight))

        weight_index += 1

    backend.batch_set_value(weight_value_tuples)

FasterRcnnFPNResnet50Caffe

class FasterRcnnFPNResnet50Caffe(
    num_classes,
    **kwargs
)

You can use it as follows:

model_faster_rcnn = FasterRcnnFPNResnet50Caffe(80)
base_lr = 0.1
optimizer = tf.keras.optimizers.SGD(learning_rate=base_lr)
model_faster_rcnn.compile(optimizer=optimizer, loss=None)
model_faster_rcnn.fit(ds_train, validation_data=ds_test, epochs=11)

Arguments

Name Description
num_classes The number of classes of your dataset
(do not include the background class; it is handled for you)
backbone A tensorflow Model.

Ancestors (in MRO)

  • kerod.model.faster_rcnn.FasterRcnnFPN
  • tensorflow.python.keras.engine.training.Model
  • tensorflow.python.keras.engine.base_layer.Layer
  • tensorflow.python.module.module.Module
  • tensorflow.python.training.tracking.tracking.AutoTrackable
  • tensorflow.python.training.tracking.base.Trackable
  • tensorflow.python.keras.utils.version_utils.LayerVersionSelector
  • tensorflow.python.keras.utils.version_utils.ModelVersionSelector

Methods

add_loss

def add_loss(
    self,
    losses,
    **kwargs
)

Add loss tensor(s), potentially dependent on layer inputs.

Some losses (for instance, activity regularization losses) may be dependent on the inputs passed when calling a layer. Hence, when reusing the same layer on different inputs a and b, some entries in layer.losses may be dependent on a and some on b. This method automatically keeps track of dependencies.

This method can be used inside a subclassed layer or model's call function, in which case losses should be a Tensor or list of Tensors.

Example:

class MyLayer(tf.keras.layers.Layer):
  def call(self, inputs):
    self.add_loss(tf.abs(tf.reduce_mean(inputs)))
    return inputs

This method can also be called directly on a Functional Model during construction. In this case, any loss Tensors passed to this Model must be symbolic and be able to be traced back to the model's Inputs. These losses become part of the model's topology and are tracked in get_config.

Example:

inputs = tf.keras.Input(shape=(10,))
x = tf.keras.layers.Dense(10)(inputs)
outputs = tf.keras.layers.Dense(1)(x)
model = tf.keras.Model(inputs, outputs)
# Activity regularization.
model.add_loss(tf.abs(tf.reduce_mean(x)))

If this is not the case for your loss (if, for example, your loss references a Variable of one of the model's layers), you can wrap your loss in a zero-argument lambda. These losses are not tracked as part of the model's topology since they can't be serialized.

Example:

inputs = tf.keras.Input(shape=(10,))
d = tf.keras.layers.Dense(10)
x = d(inputs)
outputs = tf.keras.layers.Dense(1)(x)
model = tf.keras.Model(inputs, outputs)
# Weight regularization.
model.add_loss(lambda: tf.reduce_mean(d.kernel))

Parameters:

Name Description
losses Loss tensor, or list/tuple of tensors. Rather than tensors, losses
may also be zero-argument callables which create a loss tensor.
**kwargs Additional keyword arguments for backward compatibility.
Accepted values:
inputs - Deprecated, will be automatically inferred.

View Source
  def add_loss(self, losses, **kwargs):

    """Add loss tensor(s), potentially dependent on layer inputs.

    Some losses (for instance, activity regularization losses) may be dependent

    on the inputs passed when calling a layer. Hence, when reusing the same

    layer on different inputs `a` and `b`, some entries in `layer.losses` may

    be dependent on `a` and some on `b`. This method automatically keeps track

    of dependencies.

    This method can be used inside a subclassed layer or model's `call`

    function, in which case `losses` should be a Tensor or list of Tensors.

    Example:

    ```python

    class MyLayer(tf.keras.layers.Layer):

      def call(self, inputs):

        self.add_loss(tf.abs(tf.reduce_mean(inputs)))

        return inputs

    ```

    This method can also be called directly on a Functional Model during

    construction. In this case, any loss Tensors passed to this Model must

    be symbolic and be able to be traced back to the model's `Input`s. These

    losses become part of the model's topology and are tracked in `get_config`.

    Example:

    ```python

    inputs = tf.keras.Input(shape=(10,))

    x = tf.keras.layers.Dense(10)(inputs)

    outputs = tf.keras.layers.Dense(1)(x)

    model = tf.keras.Model(inputs, outputs)

    # Activity regularization.

    model.add_loss(tf.abs(tf.reduce_mean(x)))

    ```

    If this is not the case for your loss (if, for example, your loss references

    a `Variable` of one of the model's layers), you can wrap your loss in a

    zero-argument lambda. These losses are not tracked as part of the model's

    topology since they can't be serialized.

    Example:

    ```python

    inputs = tf.keras.Input(shape=(10,))

    d = tf.keras.layers.Dense(10)

    x = d(inputs)

    outputs = tf.keras.layers.Dense(1)(x)

    model = tf.keras.Model(inputs, outputs)

    # Weight regularization.

    model.add_loss(lambda: tf.reduce_mean(d.kernel))

    ```

    Arguments:

      losses: Loss tensor, or list/tuple of tensors. Rather than tensors, losses

        may also be zero-argument callables which create a loss tensor.

      **kwargs: Additional keyword arguments for backward compatibility.

        Accepted values:

          inputs - Deprecated, will be automatically inferred.

    """

    kwargs.pop('inputs', None)

    if kwargs:

      raise TypeError('Unknown keyword arguments: %s' % (kwargs.keys(),))

    def _tag_callable(loss):

      """Tags callable loss tensor as `_unconditional_loss`."""

      if callable(loss):

        # We run the loss without autocasting, as regularizers are often

        # numerically unstable in float16.

        with autocast_variable.enable_auto_cast_variables(None):

          loss = loss()

      if loss is None:

        return None  # Will be filtered out when computing the .losses property

      if not tensor_util.is_tensor(loss):

        loss = ops.convert_to_tensor_v2_with_dispatch(

            loss, dtype=backend.floatx())

      loss._unconditional_loss = True  # pylint: disable=protected-access

      return loss

    losses = nest.flatten(losses)

    callable_losses = []

    eager_losses = []

    symbolic_losses = []

    for loss in losses:

      if callable(loss):

        callable_losses.append(functools.partial(_tag_callable, loss))

        continue

      if loss is None:

        continue

      if not tensor_util.is_tensor(loss) and not isinstance(

          loss, keras_tensor.KerasTensor):

        loss = ops.convert_to_tensor_v2_with_dispatch(

            loss, dtype=backend.floatx())

      # TF Functions should take the eager path.

      if ((tf_utils.is_symbolic_tensor(loss) or

           isinstance(loss, keras_tensor.KerasTensor)) and

          not base_layer_utils.is_in_tf_function()):

        symbolic_losses.append(loss)

      elif tensor_util.is_tensor(loss):

        eager_losses.append(loss)

    self._callable_losses.extend(callable_losses)

    in_call_context = base_layer_utils.call_context().in_call

    if eager_losses and not in_call_context:

      raise ValueError(

          'Expected a symbolic Tensors or a callable for the loss value. '

          'Please wrap your loss computation in a zero argument `lambda`.')

    self._eager_losses.extend(eager_losses)

    if in_call_context and not keras_tensor.keras_tensors_enabled():

      for symbolic_loss in symbolic_losses:

        self._losses.append(symbolic_loss)

    else:

      for symbolic_loss in symbolic_losses:

        if getattr(self, '_is_graph_network', False):

          self._graph_network_add_loss(symbolic_loss)

        else:

          # Possible a loss was added in a Layer's `build`.

          self._losses.append(symbolic_loss)

add_metric

def add_metric(
    self,
    value,
    name=None,
    **kwargs
)

Adds metric tensor to the layer.

This method can be used inside the call() method of a subclassed layer or model.

class MyMetricLayer(tf.keras.layers.Layer):
  def __init__(self):
    super(MyMetricLayer, self).__init__(name='my_metric_layer')
    self.mean = tf.keras.metrics.Mean(name='metric_1')

  def call(self, inputs):
    self.add_metric(self.mean(inputs))
    self.add_metric(tf.reduce_sum(inputs), name='metric_2')
    return inputs

This method can also be called directly on a Functional Model during construction. In this case, any tensor passed to this Model must be symbolic and be able to be traced back to the model's Inputs. These metrics become part of the model's topology and are tracked when you save the model via save().

inputs = tf.keras.Input(shape=(10,))
x = tf.keras.layers.Dense(10)(inputs)
outputs = tf.keras.layers.Dense(1)(x)
model = tf.keras.Model(inputs, outputs)
model.add_metric(math_ops.reduce_sum(x), name='metric_1')

Note: Calling add_metric() with the result of a metric object on a Functional Model, as shown in the example below, is not supported. This is because we cannot trace the metric result tensor back to the model's inputs.

inputs = tf.keras.Input(shape=(10,))
x = tf.keras.layers.Dense(10)(inputs)
outputs = tf.keras.layers.Dense(1)(x)
model = tf.keras.Model(inputs, outputs)
model.add_metric(tf.keras.metrics.Mean()(x), name='metric_1')

Parameters:

Name Description
value Metric tensor.
name String metric name.
**kwargs Additional keyword arguments for backward compatibility.
Accepted values:
aggregation - When the value tensor provided is not the result of
calling a keras.Metric instance, it will be aggregated by default
using a keras.Metric.Mean.
View Source
  def add_metric(self, value, name=None, **kwargs):

    """Adds metric tensor to the layer.

    This method can be used inside the `call()` method of a subclassed layer

    or model.

    ```python

    class MyMetricLayer(tf.keras.layers.Layer):

      def __init__(self):

        super(MyMetricLayer, self).__init__(name='my_metric_layer')

        self.mean = tf.keras.metrics.Mean(name='metric_1')

      def call(self, inputs):

        self.add_metric(self.mean(inputs))

        self.add_metric(tf.reduce_sum(inputs), name='metric_2')

        return inputs

    ```

    This method can also be called directly on a Functional Model during

    construction. In this case, any tensor passed to this Model must

    be symbolic and be able to be traced back to the model's `Input`s. These

    metrics become part of the model's topology and are tracked when you

    save the model via `save()`.

    ```python

    inputs = tf.keras.Input(shape=(10,))

    x = tf.keras.layers.Dense(10)(inputs)

    outputs = tf.keras.layers.Dense(1)(x)

    model = tf.keras.Model(inputs, outputs)

    model.add_metric(math_ops.reduce_sum(x), name='metric_1')

    ```

    Note: Calling `add_metric()` with the result of a metric object on a

    Functional Model, as shown in the example below, is not supported. This is

    because we cannot trace the metric result tensor back to the model's inputs.

    ```python

    inputs = tf.keras.Input(shape=(10,))

    x = tf.keras.layers.Dense(10)(inputs)

    outputs = tf.keras.layers.Dense(1)(x)

    model = tf.keras.Model(inputs, outputs)

    model.add_metric(tf.keras.metrics.Mean()(x), name='metric_1')

    ```

    Args:

      value: Metric tensor.

      name: String metric name.

      **kwargs: Additional keyword arguments for backward compatibility.

        Accepted values:

        `aggregation` - When the `value` tensor provided is not the result of

        calling a `keras.Metric` instance, it will be aggregated by default

        using a `keras.Metric.Mean`.

    """

    kwargs_keys = list(kwargs.keys())

    if (len(kwargs_keys) > 1 or

        (len(kwargs_keys) == 1 and kwargs_keys[0] != 'aggregation')):

      raise TypeError('Unknown keyword arguments: ', str(kwargs.keys()))

    from_metric_obj = hasattr(value, '_metric_obj')

    if keras_tensor.keras_tensors_enabled():

      is_symbolic = isinstance(value, keras_tensor.KerasTensor)

    else:

      is_symbolic = tf_utils.is_symbolic_tensor(value)

    in_call_context = base_layer_utils.call_context().in_call

    if name is None and not from_metric_obj:

      # Eg. `self.add_metric(math_ops.reduce_sum(x))`

      # In eager mode, we use metric name to lookup a metric. Without a name,

      # a new Mean metric wrapper will be created on every model/layer call.

      # So, we raise an error when no name is provided.

      # We will do the same for symbolic mode for consistency although a name

      # will be generated if no name is provided.

      # We will not raise this error in the foll use case for the sake of

      # consistency as name in provided in the metric constructor.

      # mean = metrics.Mean(name='my_metric')

      # model.add_metric(mean(outputs))

      raise ValueError('Please provide a name for your metric like '

                       '`self.add_metric(tf.reduce_sum(inputs), '

                       'name=\'mean_activation\')`')

    elif from_metric_obj:

      name = value._metric_obj.name

    if not in_call_context and not is_symbolic:

      raise ValueError('Expected a symbolic Tensor for the metric value, '

                       'received: ' + str(value))

    # If a metric was added in a Layer's `call` or `build`.

    if in_call_context or not getattr(self, '_is_graph_network', False):

      # TF Function path should take the eager path.

      # If the given metric is available in `metrics` list we just update state

      # on it, otherwise we create a new metric instance and

      # add it to the `metrics` list.

      metric_obj = getattr(value, '_metric_obj', None)

      # Tensors that come from a Metric object already updated the Metric state.

      should_update_state = not metric_obj

      name = metric_obj.name if metric_obj else name

      with self._metrics_lock:

        match = self._get_existing_metric(name)

        if match:

          metric_obj = match

        elif metric_obj:

          self._metrics.append(metric_obj)

        else:

          # Build the metric object with the value's dtype if it defines one

          metric_obj = metrics_mod.Mean(

              name=name, dtype=getattr(value, 'dtype', None))

          self._metrics.append(metric_obj)

      if should_update_state:

        metric_obj(value)

    else:

      if from_metric_obj:

        raise ValueError('Using the result of calling a `Metric` object '

                         'when calling `add_metric` on a Functional '

                         'Model is not supported. Please pass the '

                         'Tensor to monitor directly.')

      # Insert layers into the Keras Graph Network.

      aggregation = None if from_metric_obj else 'mean'

      self._graph_network_add_metric(value, aggregation, name)

add_update

def add_update(
    self,
    updates,
    inputs=None
)

Add update op(s), potentially dependent on layer inputs.

Weight updates (for instance, the updates of the moving mean and variance in a BatchNormalization layer) may be dependent on the inputs passed when calling a layer. Hence, when reusing the same layer on different inputs a and b, some entries in layer.updates may be dependent on a and some on b. This method automatically keeps track of dependencies.

This call is ignored when eager execution is enabled (in that case, variable updates are run on the fly and thus do not need to be tracked for later execution).

Parameters:

Name Description
updates Update op, or list/tuple of update ops, or zero-arg callable
that returns an update op. A zero-arg callable should be passed in
order to disable running the updates by setting trainable=False
on this Layer, when executing in Eager mode.
inputs Deprecated, will be automatically inferred.
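
A toy sketch of the mechanism (CallCounter is a hypothetical layer, not part of this module):

import tensorflow as tf

class CallCounter(tf.keras.layers.Layer):
  def build(self, input_shape):
    self.calls = self.add_weight(name='calls', shape=(), dtype=tf.int64,
                                 trainable=False, initializer='zeros')

  def call(self, inputs):
    # In eager mode the assignment has already run on the fly and this
    # call is effectively ignored; in graph mode the update op is tracked.
    self.add_update(self.calls.assign_add(1))
    return inputs
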
View Source
  @doc_controls.do_not_doc_inheritable

  def add_update(self, updates, inputs=None):

    """Add update op(s), potentially dependent on layer inputs.

    Weight updates (for instance, the updates of the moving mean and variance

    in a BatchNormalization layer) may be dependent on the inputs passed

    when calling a layer. Hence, when reusing the same layer on

    different inputs `a` and `b`, some entries in `layer.updates` may be

    dependent on `a` and some on `b`. This method automatically keeps track

    of dependencies.

    This call is ignored when eager execution is enabled (in that case, variable

    updates are run on the fly and thus do not need to be tracked for later

    execution).

    Arguments:

      updates: Update op, or list/tuple of update ops, or zero-arg callable

        that returns an update op. A zero-arg callable should be passed in

        order to disable running the updates by setting `trainable=False`

        on this Layer, when executing in Eager mode.

      inputs: Deprecated, will be automatically inferred.

    """

    if inputs is not None:

      tf_logging.warning(

          '`add_update` `inputs` kwarg has been deprecated. You no longer need '

          'to pass a value to `inputs` as it is being automatically inferred.')

    call_context = base_layer_utils.call_context()

    # No need to run updates during Functional API construction.

    if call_context.in_keras_graph:

      return

    # Callable updates are disabled by setting `trainable=False`.

    if not call_context.frozen:

      for update in nest.flatten(updates):

        if callable(update):

          update()  # pylint: disable=not-callable

add_variable

def add_variable(
    self,
    *args,
    **kwargs
)

Deprecated, do NOT use! Alias for add_weight.

View Source
  @doc_controls.do_not_doc_inheritable

  def add_variable(self, *args, **kwargs):

    """Deprecated, do NOT use! Alias for `add_weight`."""

    warnings.warn('`layer.add_variable` is deprecated and '

                  'will be removed in a future version. '

                  'Please use `layer.add_weight` method instead.')

    return self.add_weight(*args, **kwargs)

add_weight

def add_weight(
    self,
    name=None,
    shape=None,
    dtype=None,
    initializer=None,
    regularizer=None,
    trainable=None,
    constraint=None,
    use_resource=None,
    synchronization=<VariableSynchronization.AUTO: 0>,
    aggregation=<VariableAggregation.NONE: 0>,
    **kwargs
)

Adds a new variable to the layer.

Parameters:

Name Description
name Variable name.
shape Variable shape. Defaults to scalar if unspecified.
dtype The type of the variable. Defaults to self.dtype.
initializer Initializer instance (callable).
regularizer Regularizer instance (callable).
trainable Boolean, whether the variable should be part of the layer's
"trainable_variables" (e.g. variables, biases)
or "non_trainable_variables" (e.g. BatchNorm mean and variance).
Note that trainable cannot be True if synchronization
is set to ON_READ.
constraint Constraint instance (callable).
use_resource Whether to use ResourceVariable.
synchronization Indicates when a distributed variable will be
aggregated. Accepted values are constants defined in the class
tf.VariableSynchronization. By default the synchronization is set to
AUTO and the current DistributionStrategy chooses
when to synchronize. If synchronization is set to ON_READ,
trainable must not be set to True.
aggregation Indicates how a distributed variable will be aggregated.
Accepted values are constants defined in the class
tf.VariableAggregation.
**kwargs Additional keyword arguments. Accepted values are getter,
collections, experimental_autocast and caching_device.

Returns:

Type Description
None The variable created.

Raises:

Type Description
ValueError When giving unsupported dtype and no initializer or when
trainable has been set to True with synchronization set as ON_READ.
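
A minimal sketch of a custom layer creating its state through add_weight (ScaleByFeature is a hypothetical layer, not part of this module):

import tensorflow as tf

class ScaleByFeature(tf.keras.layers.Layer):
  def build(self, input_shape):
    # One trainable scale per feature; the L2 penalty ends up in self.losses.
    self.scale = self.add_weight(name='scale',
                                 shape=(input_shape[-1],),
                                 initializer='ones',
                                 regularizer=tf.keras.regularizers.l2(1e-4),
                                 trainable=True)

  def call(self, inputs):
    return inputs * self.scale

Calling ScaleByFeature()(tf.ones((2, 3))) builds a scale of shape (3,) and registers the regularization loss.
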
View Source
  @doc_controls.for_subclass_implementers

  def add_weight(self,

                 name=None,

                 shape=None,

                 dtype=None,

                 initializer=None,

                 regularizer=None,

                 trainable=None,

                 constraint=None,

                 use_resource=None,

                 synchronization=tf_variables.VariableSynchronization.AUTO,

                 aggregation=tf_variables.VariableAggregation.NONE,

                 **kwargs):

    """Adds a new variable to the layer.

    Arguments:

      name: Variable name.

      shape: Variable shape. Defaults to scalar if unspecified.

      dtype: The type of the variable. Defaults to `self.dtype`.

      initializer: Initializer instance (callable).

      regularizer: Regularizer instance (callable).

      trainable: Boolean, whether the variable should be part of the layer's

        "trainable_variables" (e.g. variables, biases)

        or "non_trainable_variables" (e.g. BatchNorm mean and variance).

        Note that `trainable` cannot be `True` if `synchronization`

        is set to `ON_READ`.

      constraint: Constraint instance (callable).

      use_resource: Whether to use `ResourceVariable`.

      synchronization: Indicates when a distributed variable will be

        aggregated. Accepted values are constants defined in the class

        `tf.VariableSynchronization`. By default the synchronization is set to

        `AUTO` and the current `DistributionStrategy` chooses

        when to synchronize. If `synchronization` is set to `ON_READ`,

        `trainable` must not be set to `True`.

      aggregation: Indicates how a distributed variable will be aggregated.

        Accepted values are constants defined in the class

        `tf.VariableAggregation`.

      **kwargs: Additional keyword arguments. Accepted values are `getter`,

        `collections`, `experimental_autocast` and `caching_device`.

    Returns:

      The variable created.

    Raises:

      ValueError: When giving unsupported dtype and no initializer or when

        trainable has been set to True with synchronization set as `ON_READ`.

    """

    if shape is None:

      shape = ()

    kwargs.pop('partitioner', None)  # Ignored.

    # Validate optional keyword arguments.

    for kwarg in kwargs:

      if kwarg not in ['collections', 'experimental_autocast',

                       'caching_device', 'getter']:

        raise TypeError('Unknown keyword argument:', kwarg)

    collections_arg = kwargs.pop('collections', None)

    # 'experimental_autocast' can be set to False by the caller to indicate an

    # AutoCastVariable should never be created.

    autocast = kwargs.pop('experimental_autocast', True)

    # See the docstring for tf.Variable about the details for caching_device.

    caching_device = kwargs.pop('caching_device', None)

    if dtype is None:

      dtype = self.dtype or backend.floatx()

    dtype = dtypes.as_dtype(dtype)

    if self._dtype_policy.variable_dtype is None:

      # The policy is "_infer", so we infer the policy from the variable dtype.

      self._set_dtype_policy(policy.Policy(dtype.base_dtype.name))

    initializer = initializers.get(initializer)

    regularizer = regularizers.get(regularizer)

    constraint = constraints.get(constraint)

    if synchronization == tf_variables.VariableSynchronization.ON_READ:

      if trainable:

        raise ValueError(

            'Synchronization value can be set to '

            'VariableSynchronization.ON_READ only for non-trainable variables. '

            'You have specified trainable=True and '

            'synchronization=VariableSynchronization.ON_READ.')

      else:

        # Set trainable to be false when variable is to be synced on read.

        trainable = False

    elif trainable is None:

      trainable = True

    # Initialize variable when no initializer provided

    if initializer is None:

      # If dtype is DT_FLOAT, provide a uniform unit scaling initializer

      if dtype.is_floating:

        initializer = initializers.get('glorot_uniform')

      # If dtype is DT_INT/DT_UINT, provide a default value `zero`

      # If dtype is DT_BOOL, provide a default value `FALSE`

      elif dtype.is_integer or dtype.is_unsigned or dtype.is_bool:

        initializer = initializers.get('zeros')

      # NOTES:Do we need to support for handling DT_STRING and DT_COMPLEX here?

      else:

        raise ValueError('An initializer for variable %s of type %s is required'

                         ' for layer %s' % (name, dtype.base_dtype, self.name))

    getter = kwargs.pop('getter', base_layer_utils.make_variable)

    if (autocast and

        self._dtype_policy.compute_dtype != self._dtype_policy.variable_dtype

        and dtype.is_floating):

      old_getter = getter

      # Wrap variable constructor to return an AutoCastVariable.

      def getter(*args, **kwargs):  # pylint: disable=function-redefined

        variable = old_getter(*args, **kwargs)

        return autocast_variable.create_autocast_variable(variable)

      # Also the caching_device does not work with the mixed precision API,

      # disable it if it is specified.

      # TODO(b/142020079): Reenable it once the bug is fixed.

      if caching_device is not None:

        tf_logging.warn('`caching_device` does not work with mixed precision '

                        'API. Ignoring user specified `caching_device`.')

        caching_device = None

    variable = self._add_variable_with_custom_getter(

        name=name,

        shape=shape,

        # TODO(allenl): a `make_variable` equivalent should be added as a

        # `Trackable` method.

        getter=getter,

        # Manage errors in Layer rather than Trackable.

        overwrite=True,

        initializer=initializer,

        dtype=dtype,

        constraint=constraint,

        trainable=trainable,

        use_resource=use_resource,

        collections=collections_arg,

        synchronization=synchronization,

        aggregation=aggregation,

        caching_device=caching_device)

    if regularizer is not None:

      # TODO(fchollet): in the future, this should be handled at the

      # level of variable creation, and weight regularization losses

      # should be variable attributes.

      name_in_scope = variable.name[:variable.name.find(':')]

      self._handle_weight_regularization(name_in_scope,

                                         variable,

                                         regularizer)

    if base_layer_utils.is_split_variable(variable):

      for v in variable:

        backend.track_variable(v)

        if trainable:

          self._trainable_weights.append(v)

        else:

          self._non_trainable_weights.append(v)

    else:

      backend.track_variable(variable)

      if trainable:

        self._trainable_weights.append(variable)

      else:

        self._non_trainable_weights.append(variable)

    return variable

apply

def apply(
    self,
    inputs,
    *args,
    **kwargs
)

Deprecated, do NOT use!

This is an alias of self.__call__.

Parameters:

Name Description
inputs Input tensor(s).
*args additional positional arguments to be passed to self.call.
**kwargs additional keyword arguments to be passed to self.call.

Returns:

Type Description
None Output tensor(s).
View Source
  @doc_controls.do_not_doc_inheritable

  def apply(self, inputs, *args, **kwargs):

    """Deprecated, do NOT use!

    This is an alias of `self.__call__`.

    Arguments:

      inputs: Input tensor(s).

      *args: additional positional arguments to be passed to `self.call`.

      **kwargs: additional keyword arguments to be passed to `self.call`.

    Returns:

      Output tensor(s).

    """

    warnings.warn('`layer.apply` is deprecated and '

                  'will be removed in a future version. '

                  'Please use `layer.__call__` method instead.')

    return self.__call__(inputs, *args, **kwargs)

call

def call(
    self,
    inputs,
    training=None
)

Perform the forward pass, in both training and inference.

Parameters:

Name Description
inputs A dict with the following schema:
images: A Tensor of shape [batch_size, height, width, 3]
image_informations: A float32 Tensor of shape [batch_size, 2] where
the last dimension represents the original height and
width of the images (without the padding).

ground_truths: A dict
- BoxField.LABELS: A 3-D tensor of shape [batch_size, num_gt, num_classes],
- BoxField.BOXES: A 3-D tensor of shape [batch_size, num_gt, (y1, x1, y2, x2)]
- BoxField.LABELS: A 3-D tensor of int32 and shape [batch_size, num_gt]
- BoxField.WEIGHTS: A 3-D tensor of float and shape [batch_size, num_gt]
- BoxField.NUM_BOXES: A 2-D tensor of int32 and shape [batch_size, 1]
which allows removing the padding created by tf.data.
Example: if batch_size=2 and this field equals tf.constant([[2], [1]], tf.int32)
then the second element of the batch has a padding of 1
training Is automatically set to True in both train and test mode
(normally in test it should be False). Why? Through the call we compute the losses and the
metrics of the rpn and fast_rcnn. They are automatically added with add_loss and add_metrics.
In test we want to benefit from those, and therefore we compute them. This is an inheritance
from tensorflow 2.0 and 2.1, and I'll think about moving them in a more traditional way inside
train_step and test_step. However, for now this method benefits from the encapsulation of
the self.compiled_loss method.

Returns:

Type Description
Tuple - classification_pred: A Tensor of shape [batch_size, num_boxes, num_classes]
representing the class probability.
- localization_pred: A Tensor of shape [batch_size, num_boxes, 4 * (num_classes - 1)]
- anchors: A Tensor of shape [batch_size, num_boxes, 4]
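
A minimal inference sketch (FasterRcnnFPNResnet50 and the shapes are illustrative; in training, inputs would also carry the ground_truths dict described above):

import tensorflow as tf
from kerod.core.standard_fields import DatasetField
from kerod.model.faster_rcnn import FasterRcnnFPNResnet50

model = FasterRcnnFPNResnet50(80)
inputs = {
    DatasetField.IMAGES: tf.zeros((1, 640, 640, 3), tf.float32),
    DatasetField.IMAGES_INFO: tf.constant([[640., 640.]], tf.float32),
}
classification_pred, localization_pred, rois = model(inputs, training=False)
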
View Source
    def call(self, inputs, training=None):

        """Perform an inference in training.

        Arguments:

            inputs: A dict with the following schema:

                `images`: A Tensor of shape [batch_size, height, width, 3]

                `image_informations`: A float32 Tensor of shape [batch_size, 2] where

                    the last dimension represents the original height and

                    width of the images (without the padding).

                `ground_truths`: A dict

                    - `BoxField.LABELS`: A 3-D tensor of shape [batch_size, num_gt, num_classes],

                    - `BoxField.BOXES`: A 3-D tensor of shape [batch_size, num_gt, (y1, x1, y2, x2)]

                    - `BoxField.LABELS`: A 3-D tensor of int32 and shape [batch_size, num_gt]

                    - `BoxField.WEIGHTS`: A 3-D tensor of float and shape [batch_size, num_gt]

                    - `BoxField.NUM_BOXES`: A 2-D tensor of int32 and shape [batch_size, 1]

                        which allows to remove the padding created by tf.Data.

                        Example: if batch_size=2 and this field equal tf.constant([[2], [1]], tf.int32)

                        then my second box has a padding of 1

            training: Is automatically set to `True` in both train and test mode

                (normally in test it should be `False`). Why? Through the call we compute the losses

                and the metrics of the rpn and fast_rcnn. They are automatically added with `add_loss`

                and `add_metrics`. In test we want to benefit from those, and therefore we compute them.

                This is an inheritance from tensorflow 2.0 and 2.1, and I'll think about moving them in a

                more traditional way inside train_step and test_step. However, for now this method

                benefits from the encapsulation of the `self.compiled_loss` method.

        Returns:

            Tuple:

                - `classification_pred`: A Tensor of shape [batch_size, num_boxes, num_classes]

                    representing the class probability.

                - `localization_pred`: A Tensor of shape [batch_size, num_boxes, 4 * (num_classes - 1)]

                - `anchors`: A Tensor of shape [batch_size, num_boxes, 4]

        """

        images = inputs[DatasetField.IMAGES]

        images_information = inputs[DatasetField.IMAGES_INFO]

        # The preprocessing dedicated to the backbone is done inside the model.

        x = self.backbone(images)

        pyramid = self.fpn(x)

        rpn_loc_pred_per_lvl, rpn_cls_pred_per_lvl, anchors_per_lvl = self.rpn(pyramid)

        if training and not self._serving:

            apply_kernel_regularization(self.l2, self.backbone)

            # add_loss stores the rpn losses computation in self.losses

            _ = self.rpn.compute_loss(rpn_loc_pred_per_lvl, rpn_cls_pred_per_lvl, anchors_per_lvl,

                                      inputs['ground_truths'])

        num_boxes = 2000 if training else 1000

        rois = post_process_rpn(rpn_cls_pred_per_lvl,

                                rpn_loc_pred_per_lvl,

                                anchors_per_lvl,

                                images_information,

                                pre_nms_topk_per_lvl=num_boxes,

                                post_nms_topk=num_boxes)

        if training and not self._serving:

            ground_truths = inputs['ground_truths']

            # Include the ground_truths as RoIs for the training

            rois = tf.concat([tf.cast(rois, self._compute_dtype), ground_truths[BoxField.BOXES]],

                             axis=1)

            # Sample the boxes needed for inference

            y_true, weights, rois = self.fast_rcnn.sample_boxes(rois, ground_truths)

        classification_pred, localization_pred = self.fast_rcnn([pyramid, rois])

        if training and not self._serving:

            # add_loss stores the fast_rcnn losses computation in self.losses

            _ = self.fast_rcnn.compute_loss(y_true, weights, classification_pred, localization_pred)

        classification_pred = tf.nn.softmax(classification_pred)

        return classification_pred, localization_pred, rois

compute_mask

def compute_mask(
    self,
    inputs,
    mask=None
)

Computes an output mask tensor.

Parameters:

Name Description
inputs Tensor or list of tensors.
mask Tensor or list of tensors.

Returns:

Type Description
None None or a tensor (or list of tensors,
one per output tensor of the layer).
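
A short example with a mask-producing Keras layer:

import tensorflow as tf

emb = tf.keras.layers.Embedding(input_dim=100, output_dim=8, mask_zero=True)
emb.compute_mask(tf.constant([[3, 5, 0, 0]]))  # [[True, True, False, False]]
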
View Source
  @generic_utils.default

  def compute_mask(self, inputs, mask=None):  # pylint: disable=unused-argument

    """Computes an output mask tensor.

    Arguments:

        inputs: Tensor or list of tensors.

        mask: Tensor or list of tensors.

    Returns:

        None or a tensor (or list of tensors,

            one per output tensor of the layer).

    """

    if not self._supports_masking:

      if any(m is not None for m in nest.flatten(mask)):

        raise TypeError('Layer ' + self.name + ' does not support masking, '

                        'but was passed an input_mask: ' + str(mask))

      # masking not explicitly supported: return None as mask.

      return None

    # if masking is explicitly supported, by default

    # carry over the input mask

    return mask

compute_output_shape

def compute_output_shape(
    self,
    input_shape
)

Computes the output shape of the layer.

If the layer has not been built, this method will call build on the layer. This assumes that the layer will later be used with inputs that match the input shape provided here.

Parameters:

Name Description
input_shape Shape tuple (tuple of integers)
or list of shape tuples (one per output tensor of the layer).
Shape tuples can include None for free dimensions,
instead of an integer.

Returns:

Type Description
None An output shape tuple.
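
A short example with a standard Keras layer:

import tensorflow as tf

layer = tf.keras.layers.Dense(4)
layer.compute_output_shape((None, 8))  # TensorShape([None, 4])
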
View Source
  def compute_output_shape(self, input_shape):

    """Computes the output shape of the layer.

    If the layer has not been built, this method will call `build` on the

    layer. This assumes that the layer will later be used with inputs that

    match the input shape provided here.

    Arguments:

        input_shape: Shape tuple (tuple of integers)

            or list of shape tuples (one per output tensor of the layer).

            Shape tuples can include None for free dimensions,

            instead of an integer.

    Returns:

        An output shape tuple.

    """

    if context.executing_eagerly():

      # In this case we build the model first in order to do shape inference.

      # This is acceptable because the framework only calls

      # `compute_output_shape` on shape values that the layer would later be

      # built for. It would however cause issues in case a user attempts to

      # use `compute_output_shape` manually with shapes that are incompatible

      # with the shape the Layer will be called on (these users will have to

      # implement `compute_output_shape` themselves).

      self._maybe_build(input_shape)

      with func_graph.FuncGraph(str(self.name) + '_scratch_graph').as_default():

        input_shape = tf_utils.convert_shapes(input_shape, to_tuples=False)

        def _make_placeholder_like(shape):

          ph = backend.placeholder(shape=shape, dtype=self.dtype)

          ph._keras_mask = None

          return ph

        inputs = nest.map_structure(_make_placeholder_like, input_shape)

        try:

          outputs = self(inputs, training=False)

        except TypeError as e:

          six.raise_from(

              NotImplementedError(

                  'We could not automatically infer the static shape of the '

                  'layer\'s output. Please implement the '

                  '`compute_output_shape` method on your layer (%s).' %

                  self.__class__.__name__), e)

      return nest.map_structure(lambda t: t.shape, outputs)

    raise NotImplementedError(

        'Please run in eager mode or implement the `compute_output_shape` '

        'method on your layer (%s).' % self.__class__.__name__)

compute_output_signature

def compute_output_signature(
    self,
    input_signature
)

Compute the output tensor signature of the layer based on the inputs.

Unlike a TensorShape object, a TensorSpec object contains both shape and dtype information for a tensor. This method allows layers to provide output dtype information if it is different from the input dtype. For any layer that doesn't implement this function, the framework will fall back to use compute_output_shape, and will assume that the output dtype matches the input dtype.

Parameters:

Name Description
input_signature Single TensorSpec or nested structure of TensorSpec
objects, describing a candidate input for the layer.

Returns:

Type Description
None Single TensorSpec or nested structure of TensorSpec objects, describing
how the layer would transform the provided input.

Raises:

Type Description
TypeError If input_signature contains a non-TensorSpec object.
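
A short example with a standard Keras layer (the printed spec is abbreviated):

import tensorflow as tf

layer = tf.keras.layers.Dense(4)
layer.compute_output_signature(tf.TensorSpec(shape=(None, 8), dtype=tf.float32))
# TensorSpec(shape=(None, 4), dtype=tf.float32)
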
View Source
  @doc_controls.for_subclass_implementers

  def compute_output_signature(self, input_signature):

    """Compute the output tensor signature of the layer based on the inputs.

    Unlike a TensorShape object, a TensorSpec object contains both shape

    and dtype information for a tensor. This method allows layers to provide

    output dtype information if it is different from the input dtype.

    For any layer that doesn't implement this function,

    the framework will fall back to use `compute_output_shape`, and will

    assume that the output dtype matches the input dtype.

    Args:

      input_signature: Single TensorSpec or nested structure of TensorSpec

        objects, describing a candidate input for the layer.

    Returns:

      Single TensorSpec or nested structure of TensorSpec objects, describing

        how the layer would transform the provided input.

    Raises:

      TypeError: If input_signature contains a non-TensorSpec object.

    """

    def check_type_return_shape(s):

      if not isinstance(s, tensor_spec.TensorSpec):

        raise TypeError(

            'Only TensorSpec signature types are supported, '

            'but saw signature signature entry: {}.'.format(s))

      return s.shape

    input_shape = nest.map_structure(check_type_return_shape, input_signature)

    output_shape = self.compute_output_shape(input_shape)

    dtype = self._compute_dtype

    if dtype is None:

      input_dtypes = [s.dtype for s in nest.flatten(input_signature)]

      # Default behavior when self.dtype is None, is to use the first input's

      # dtype.

      dtype = input_dtypes[0]

    return nest.map_structure(

        lambda s: tensor_spec.TensorSpec(dtype=dtype, shape=s),

        output_shape)

count_params

def count_params(
    self
)

Count the total number of scalars composing the weights.

Returns:

Type Description
None An integer count.

Raises:

Type Description
ValueError if the layer isn't yet built
(in which case its weights aren't yet defined).
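
A short worked example with a standard Keras layer:

import tensorflow as tf

layer = tf.keras.layers.Dense(10)
layer.build((None, 5))   # weights must exist before counting
layer.count_params()     # 5 * 10 kernel weights + 10 biases = 60
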
View Source
  def count_params(self):

    """Count the total number of scalars composing the weights.

    Returns:

        An integer count.

    Raises:

        ValueError: if the layer isn't yet built

          (in which case its weights aren't yet defined).

    """

    if not self.built:

      if getattr(self, '_is_graph_network', False):

        with tf_utils.maybe_init_scope(self):

          self._maybe_build(self.inputs)

      else:

        raise ValueError('You tried to call `count_params` on ' + self.name +

                         ', but the layer isn\'t built. '

                         'You can build it manually via: `' + self.name +

                         '.build(batch_input_shape)`.')

    return layer_utils.count_params(self.weights)

export_for_serving

def export_for_serving(
    self,
    filepath
)

Allow to bypass the saved_model behavior of the graph in serving mode.

Currently, the issue is that the ground_truths are passed to the call method in training but not in inference. For serving, only the images and images_information are defined, which means the inputs linked to the ground_truths won't be defined in serving. However, in tensorflow, when the training argument is defined in the method call, the tf.saved_model.save method performs a check on the graph for both training=False and training=True. We don't want this check to be performed, because our ground_truths inputs aren't defined.

View Source
    def export_for_serving(self, filepath):

        """Allow to bypass the save_model behavior the graph in serving mode.

        Currently, the issue is that in training the ground_truths are passed to the call method but

        not in inference. For the serving only the `images` and `images_information` are defined.

        It means the inputs link to the ground_truths won't be defined in serving. However, in tensorflow

        when the `training` arguments is defined int the method `call`, `tf.save_model.save` method

        performs a check on the graph for training=False and training=True.

        However, we don't want this check to be perform because our ground_truths inputs aren't defined.

        """

        self._serving = True

        call_output = self.serving_step.get_concrete_function()

        tf.saved_model.save(self, filepath, signatures={'serving_default': call_output})

        self._serving = False

get_input_at

def get_input_at(
    self,
    node_index
)

Retrieves the input tensor(s) of a layer at a given node.

Parameters:

Name Description
node_index Integer, index of the node
from which to retrieve the attribute.
E.g. node_index=0 will correspond to the
first time the layer was called.

Returns:

Type Description
None A tensor (or list of tensors if the layer has multiple inputs).

Raises:

Type Description
RuntimeError If called in Eager mode.
View Source
  @doc_controls.do_not_doc_inheritable

  def get_input_at(self, node_index):

    """Retrieves the input tensor(s) of a layer at a given node.

    Arguments:

        node_index: Integer, index of the node

            from which to retrieve the attribute.

            E.g. `node_index=0` will correspond to the

            first time the layer was called.

    Returns:

        A tensor (or list of tensors if the layer has multiple inputs).

    Raises:

      RuntimeError: If called in Eager mode.

    """

    return self._get_node_attribute_at_index(node_index, 'input_tensors',

                                             'input')

get_input_mask_at

def get_input_mask_at(
    self,
    node_index
)

Retrieves the input mask tensor(s) of a layer at a given node.

Parameters:

Name Description
node_index Integer, index of the node
from which to retrieve the attribute.
E.g. node_index=0 will correspond to the
first time the layer was called.

Returns:

Type Description
None A mask tensor
(or list of tensors if the layer has multiple inputs).
View Source
  @doc_controls.do_not_doc_inheritable

  def get_input_mask_at(self, node_index):

    """Retrieves the input mask tensor(s) of a layer at a given node.

    Arguments:

        node_index: Integer, index of the node

            from which to retrieve the attribute.

            E.g. `node_index=0` will correspond to the

            first time the layer was called.

    Returns:

        A mask tensor

        (or list of tensors if the layer has multiple inputs).

    """

    inputs = self.get_input_at(node_index)

    if isinstance(inputs, list):

      return [getattr(x, '_keras_mask', None) for x in inputs]

    else:

      return getattr(inputs, '_keras_mask', None)

get_input_shape_at

def get_input_shape_at(
    self,
    node_index
)

Retrieves the input shape(s) of a layer at a given node.

Parameters:

Name Description
node_index Integer, index of the node
from which to retrieve the attribute.
E.g. node_index=0 will correspond to the
first time the layer was called.

Returns:

Type Description
None A shape tuple
(or list of shape tuples if the layer has multiple inputs).

Raises:

Type Description
RuntimeError If called in Eager mode.
View Source
  @doc_controls.do_not_doc_inheritable

  def get_input_shape_at(self, node_index):

    """Retrieves the input shape(s) of a layer at a given node.

    Arguments:

        node_index: Integer, index of the node

            from which to retrieve the attribute.

            E.g. `node_index=0` will correspond to the

            first time the layer was called.

    Returns:

        A shape tuple

        (or list of shape tuples if the layer has multiple inputs).

    Raises:

      RuntimeError: If called in Eager mode.

    """

    return self._get_node_attribute_at_index(node_index, 'input_shapes',

                                             'input shape')

get_losses_for

def get_losses_for(
    self,
    inputs
)

Deprecated, do NOT use!

Retrieves losses relevant to a specific set of inputs.

Parameters:

Name Description
inputs Input tensor or list/tuple of input tensors.

Returns:

Type Description
None List of loss tensors of the layer that depend on inputs.
View Source
  @doc_controls.do_not_generate_docs

  def get_losses_for(self, inputs):

    """Deprecated, do NOT use!

    Retrieves losses relevant to a specific set of inputs.

    Arguments:

      inputs: Input tensor or list/tuple of input tensors.

    Returns:

      List of loss tensors of the layer that depend on `inputs`.

    """

    warnings.warn('`layer.get_losses_for` is deprecated and '

                  'will be removed in a future version. '

                  'Please use `layer.losses` instead.')

    return self.losses

get_output_at

def get_output_at(
    self,
    node_index
)

Retrieves the output tensor(s) of a layer at a given node.

Parameters:

Name Description
node_index Integer, index of the node
from which to retrieve the attribute.
E.g. node_index=0 will correspond to the
first time the layer was called.

Returns:

Type Description
None A tensor (or list of tensors if the layer has multiple outputs).

Raises:

Type Description
RuntimeError If called in Eager mode.
View Source
  @doc_controls.do_not_doc_inheritable

  def get_output_at(self, node_index):

    """Retrieves the output tensor(s) of a layer at a given node.

    Arguments:

        node_index: Integer, index of the node

            from which to retrieve the attribute.

            E.g. `node_index=0` will correspond to the

            first time the layer was called.

    Returns:

        A tensor (or list of tensors if the layer has multiple outputs).

    Raises:

      RuntimeError: If called in Eager mode.

    """

    return self._get_node_attribute_at_index(node_index, 'output_tensors',

                                             'output')

get_output_mask_at

def get_output_mask_at(
    self,
    node_index
)

Retrieves the output mask tensor(s) of a layer at a given node.

Parameters:

Name Description
node_index Integer, index of the node
from which to retrieve the attribute.
E.g. node_index=0 will correspond to the
first time the layer was called.

Returns:

Type Description
None A mask tensor
(or list of tensors if the layer has multiple outputs).
View Source
  @doc_controls.do_not_doc_inheritable

  def get_output_mask_at(self, node_index):

    """Retrieves the output mask tensor(s) of a layer at a given node.

    Arguments:

        node_index: Integer, index of the node

            from which to retrieve the attribute.

            E.g. `node_index=0` will correspond to the

            first time the layer was called.

    Returns:

        A mask tensor

        (or list of tensors if the layer has multiple outputs).

    """

    output = self.get_output_at(node_index)

    if isinstance(output, list):

      return [getattr(x, '_keras_mask', None) for x in output]

    else:

      return getattr(output, '_keras_mask', None)

get_output_shape_at

def get_output_shape_at(
    self,
    node_index
)

Retrieves the output shape(s) of a layer at a given node.

Parameters:

Name Description
node_index Integer, index of the node
from which to retrieve the attribute.
E.g. node_index=0 will correspond to the
first time the layer was called.

Returns:

Type Description
None A shape tuple
(or list of shape tuples if the layer has multiple outputs).

Raises:

Type Description
RuntimeError If called in Eager mode.
View Source
  @doc_controls.do_not_doc_inheritable

  def get_output_shape_at(self, node_index):

    """Retrieves the output shape(s) of a layer at a given node.

    Arguments:

        node_index: Integer, index of the node

            from which to retrieve the attribute.

            E.g. `node_index=0` will correspond to the

            first time the layer was called.

    Returns:

        A shape tuple

        (or list of shape tuples if the layer has multiple outputs).

    Raises:

      RuntimeError: If called in Eager mode.

    """

    return self._get_node_attribute_at_index(node_index, 'output_shapes',

                                             'output shape')

get_updates_for

def get_updates_for(
    self,
    inputs
)

Deprecated, do NOT use!

Retrieves updates relevant to a specific set of inputs.

Parameters:

Name Description
inputs Input tensor or list/tuple of input tensors.

Returns:

Type Description
None List of update ops of the layer that depend on inputs.
View Source
  @doc_controls.do_not_generate_docs

  def get_updates_for(self, inputs):

    """Deprecated, do NOT use!

    Retrieves updates relevant to a specific set of inputs.

    Arguments:

      inputs: Input tensor or list/tuple of input tensors.

    Returns:

      List of update ops of the layer that depend on `inputs`.

    """

    warnings.warn('`layer.get_updates_for` is deprecated and '

                  'will be removed in a future version. '

                  'Please use `layer.updates` method instead.')

    return self.updates

serving_step

def serving_step(
    self,
    images,
    images_info
)

Allows bypassing the save_model tracing of the graph in serving mode.

Currently, the issue is that in training the ground_truths are passed to the call method but not in inference. For serving, only the images and images_information are defined, which means the inputs linked to the ground_truths won't be defined in serving. However, tensorflow absolutely wants them and will raise an exception if the ground_truths aren't provided.

View Source
    @tf.function(input_signature=[

        tf.TensorSpec(shape=(None, None, None, 3), dtype=tf.float32, name=DatasetField.IMAGES),

        tf.TensorSpec(shape=(None, 2), dtype=tf.float32, name=DatasetField.IMAGES_INFO)

    ])

    def serving_step(self, images, images_info):

        """Allow to bypass the save_model behavior the graph in serving mode.

        Currently, the issue is that in training the ground_truths are passed to the call method but

        not in inference. For the serving only the `images` and `images_information` are defined.

        It means the inputs link to the ground_truths won't be defined in serving. However, tensorflow

        absolutely want it and will return an exception if the ground_truth isn't provided.

        """

        return self.predict_step({

            DatasetField.IMAGES: images,

            DatasetField.IMAGES_INFO: images_info

        })
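A hedged usage sketch with dummy inputs (assuming `model` is a built kerod Faster RCNN instance; the 640x640 shape is an arbitrary assumption):

import tensorflow as tf

images = tf.zeros((1, 640, 640, 3), tf.float32)         # padded image batch
images_info = tf.constant([[640., 640.]], tf.float32)   # original height and width
predictions = model.serving_step(images, images_info)   # same outputs as predict_step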

set_weights

def set_weights(
    self,
    weights
)

Sets the weights of the layer, from Numpy arrays.

The weights of a layer represent the state of the layer. This function sets the weight values from numpy arrays. The weight values should be passed in the order they are created by the layer. Note that the layer's weights must be instantiated before calling this function by calling the layer.

For example, a Dense layer returns a list of two values-- per-output weights and the bias value. These can be used to set the weights of another Dense layer:

>>> a = tf.keras.layers.Dense(1,
...   kernel_initializer=tf.constant_initializer(1.))
>>> a_out = a(tf.convert_to_tensor([[1., 2., 3.]]))
>>> a.get_weights()
[array([[1.],
       [1.],
       [1.]], dtype=float32), array([0.], dtype=float32)]
>>> b = tf.keras.layers.Dense(1,
...   kernel_initializer=tf.constant_initializer(2.))
>>> b_out = b(tf.convert_to_tensor([[10., 20., 30.]]))
>>> b.get_weights()
[array([[2.],
       [2.],
       [2.]], dtype=float32), array([0.], dtype=float32)]
>>> b.set_weights(a.get_weights())
>>> b.get_weights()
[array([[1.],
       [1.],
       [1.]], dtype=float32), array([0.], dtype=float32)]

Parameters:

Name Description
weights a list of Numpy arrays. The number
of arrays and their shape must match
number of the dimensions of the weights
of the layer (i.e. it should match the
output of get_weights).

Raises:

Type Description
ValueError If the provided weights list does not match the
layer's specifications.
View Source
  def set_weights(self, weights):

    """Sets the weights of the layer, from Numpy arrays.

    The weights of a layer represent the state of the layer. This function

    sets the weight values from numpy arrays. The weight values should be

    passed in the order they are created by the layer. Note that the layer's

    weights must be instantiated before calling this function by calling

    the layer.

    For example, a Dense layer returns a list of two values-- per-output

    weights and the bias value. These can be used to set the weights of another

    Dense layer:

    >>> a = tf.keras.layers.Dense(1,

    ...   kernel_initializer=tf.constant_initializer(1.))

    >>> a_out = a(tf.convert_to_tensor([[1., 2., 3.]]))

    >>> a.get_weights()

    [array([[1.],

           [1.],

           [1.]], dtype=float32), array([0.], dtype=float32)]

    >>> b = tf.keras.layers.Dense(1,

    ...   kernel_initializer=tf.constant_initializer(2.))

    >>> b_out = b(tf.convert_to_tensor([[10., 20., 30.]]))

    >>> b.get_weights()

    [array([[2.],

           [2.],

           [2.]], dtype=float32), array([0.], dtype=float32)]

    >>> b.set_weights(a.get_weights())

    >>> b.get_weights()

    [array([[1.],

           [1.],

           [1.]], dtype=float32), array([0.], dtype=float32)]

    Arguments:

        weights: a list of Numpy arrays. The number

            of arrays and their shape must match

            number of the dimensions of the weights

            of the layer (i.e. it should match the

            output of `get_weights`).

    Raises:

        ValueError: If the provided weights list does not match the

            layer's specifications.

    """

    params = self.weights

    expected_num_weights = 0

    for param in params:

      if isinstance(param, base_layer_utils.TrackableWeightHandler):

        expected_num_weights += param.num_tensors

      else:

        expected_num_weights += 1

    if expected_num_weights != len(weights):

      raise ValueError(

          'You called `set_weights(weights)` on layer "%s" '

          'with a weight list of length %s, but the layer was '

          'expecting %s weights. Provided weights: %s...' %

          (self.name, len(weights), expected_num_weights, str(weights)[:50]))

    weight_index = 0

    weight_value_tuples = []

    for param in params:

      if isinstance(param, base_layer_utils.TrackableWeightHandler):

        num_tensors = param.num_tensors

        tensors = weights[weight_index:weight_index + num_tensors]

        param.set_weights(tensors)

        weight_index += num_tensors

      else:

        weight = weights[weight_index]

        ref_shape = param.shape

        if not ref_shape.is_compatible_with(weight.shape):

          raise ValueError(

              'Layer weight shape %s not compatible with provided weight '

              'shape %s' % (ref_shape, weight.shape))

        weight_value_tuples.append((param, weight))

        weight_index += 1

    backend.batch_set_value(weight_value_tuples)

FasterRcnnFPNResnet50Pytorch

class FasterRcnnFPNResnet50Pytorch(
    num_classes,
    **kwargs
)

You can use it as follows:

model_faster_rcnn = FasterRcnnFPNResnet50Pytorch(80)
base_lr = 0.1
optimizer = tf.keras.optimizers.SGD(learning_rate=base_lr)
model_faster_rcnn.compile(optimizer=optimizer, loss=None)
model_faster_rcnn.fit(ds_train, validation_data=ds_test, epochs=11)

Arguments

Name Description
num_classes The number of classes of your dataset
(do not include the background class, it is handled for you)

Ancestors (in MRO)

  • kerod.model.faster_rcnn.FasterRcnnFPN
  • tensorflow.python.keras.engine.training.Model
  • tensorflow.python.keras.engine.base_layer.Layer
  • tensorflow.python.module.module.Module
  • tensorflow.python.training.tracking.tracking.AutoTrackable
  • tensorflow.python.training.tracking.base.Trackable
  • tensorflow.python.keras.utils.version_utils.LayerVersionSelector
  • tensorflow.python.keras.utils.version_utils.ModelVersionSelector

Methods

add_loss

def add_loss(
    self,
    losses,
    **kwargs
)

Add loss tensor(s), potentially dependent on layer inputs.

Some losses (for instance, activity regularization losses) may be dependent on the inputs passed when calling a layer. Hence, when reusing the same layer on different inputs a and b, some entries in layer.losses may be dependent on a and some on b. This method automatically keeps track of dependencies.

This method can be used inside a subclassed layer or model's call function, in which case losses should be a Tensor or list of Tensors.

Example:

class MyLayer(tf.keras.layers.Layer):
  def call(self, inputs):
    self.add_loss(tf.abs(tf.reduce_mean(inputs)))
    return inputs

This method can also be called directly on a Functional Model during construction. In this case, any loss Tensors passed to this Model must be symbolic and be able to be traced back to the model's Inputs. These losses become part of the model's topology and are tracked in get_config.

Example:

inputs = tf.keras.Input(shape=(10,))
x = tf.keras.layers.Dense(10)(inputs)
outputs = tf.keras.layers.Dense(1)(x)
model = tf.keras.Model(inputs, outputs)
# Activity regularization.
model.add_loss(tf.abs(tf.reduce_mean(x)))

If this is not the case for your loss (if, for example, your loss references a Variable of one of the model's layers), you can wrap your loss in a zero-argument lambda. These losses are not tracked as part of the model's topology since they can't be serialized.

Example:

inputs = tf.keras.Input(shape=(10,))
d = tf.keras.layers.Dense(10)
x = d(inputs)
outputs = tf.keras.layers.Dense(1)(x)
model = tf.keras.Model(inputs, outputs)
# Weight regularization.
model.add_loss(lambda: tf.reduce_mean(d.kernel))

Parameters:

Name Description
losses Loss tensor, or list/tuple of tensors. Rather than tensors, losses
may also be zero-argument callables which create a loss tensor.
**kwargs Additional keyword arguments for backward compatibility.
Accepted values: inputs - Deprecated, will be automatically inferred.

View Source
  def add_loss(self, losses, **kwargs):

    """Add loss tensor(s), potentially dependent on layer inputs.

    Some losses (for instance, activity regularization losses) may be dependent

    on the inputs passed when calling a layer. Hence, when reusing the same

    layer on different inputs `a` and `b`, some entries in `layer.losses` may

    be dependent on `a` and some on `b`. This method automatically keeps track

    of dependencies.

    This method can be used inside a subclassed layer or model's `call`

    function, in which case `losses` should be a Tensor or list of Tensors.

    Example:

    ```python

    class MyLayer(tf.keras.layers.Layer):

      def call(self, inputs):

        self.add_loss(tf.abs(tf.reduce_mean(inputs)))

        return inputs

    ```

    This method can also be called directly on a Functional Model during

    construction. In this case, any loss Tensors passed to this Model must

    be symbolic and be able to be traced back to the model's `Input`s. These

    losses become part of the model's topology and are tracked in `get_config`.

    Example:

    ```python

    inputs = tf.keras.Input(shape=(10,))

    x = tf.keras.layers.Dense(10)(inputs)

    outputs = tf.keras.layers.Dense(1)(x)

    model = tf.keras.Model(inputs, outputs)

    # Activity regularization.

    model.add_loss(tf.abs(tf.reduce_mean(x)))

    ```

    If this is not the case for your loss (if, for example, your loss references

    a `Variable` of one of the model's layers), you can wrap your loss in a

    zero-argument lambda. These losses are not tracked as part of the model's

    topology since they can't be serialized.

    Example:

    ```python

    inputs = tf.keras.Input(shape=(10,))

    d = tf.keras.layers.Dense(10)

    x = d(inputs)

    outputs = tf.keras.layers.Dense(1)(x)

    model = tf.keras.Model(inputs, outputs)

    # Weight regularization.

    model.add_loss(lambda: tf.reduce_mean(d.kernel))

    ```

    Arguments:

      losses: Loss tensor, or list/tuple of tensors. Rather than tensors, losses

        may also be zero-argument callables which create a loss tensor.

      **kwargs: Additional keyword arguments for backward compatibility.

        Accepted values:

          inputs - Deprecated, will be automatically inferred.

    """

    kwargs.pop('inputs', None)

    if kwargs:

      raise TypeError('Unknown keyword arguments: %s' % (kwargs.keys(),))

    def _tag_callable(loss):

      """Tags callable loss tensor as `_unconditional_loss`."""

      if callable(loss):

        # We run the loss without autocasting, as regularizers are often

        # numerically unstable in float16.

        with autocast_variable.enable_auto_cast_variables(None):

          loss = loss()

      if loss is None:

        return None  # Will be filtered out when computing the .losses property

      if not tensor_util.is_tensor(loss):

        loss = ops.convert_to_tensor_v2_with_dispatch(

            loss, dtype=backend.floatx())

      loss._unconditional_loss = True  # pylint: disable=protected-access

      return loss

    losses = nest.flatten(losses)

    callable_losses = []

    eager_losses = []

    symbolic_losses = []

    for loss in losses:

      if callable(loss):

        callable_losses.append(functools.partial(_tag_callable, loss))

        continue

      if loss is None:

        continue

      if not tensor_util.is_tensor(loss) and not isinstance(

          loss, keras_tensor.KerasTensor):

        loss = ops.convert_to_tensor_v2_with_dispatch(

            loss, dtype=backend.floatx())

      # TF Functions should take the eager path.

      if ((tf_utils.is_symbolic_tensor(loss) or

           isinstance(loss, keras_tensor.KerasTensor)) and

          not base_layer_utils.is_in_tf_function()):

        symbolic_losses.append(loss)

      elif tensor_util.is_tensor(loss):

        eager_losses.append(loss)

    self._callable_losses.extend(callable_losses)

    in_call_context = base_layer_utils.call_context().in_call

    if eager_losses and not in_call_context:

      raise ValueError(

          'Expected a symbolic Tensors or a callable for the loss value. '

          'Please wrap your loss computation in a zero argument `lambda`.')

    self._eager_losses.extend(eager_losses)

    if in_call_context and not keras_tensor.keras_tensors_enabled():

      for symbolic_loss in symbolic_losses:

        self._losses.append(symbolic_loss)

    else:

      for symbolic_loss in symbolic_losses:

        if getattr(self, '_is_graph_network', False):

          self._graph_network_add_loss(symbolic_loss)

        else:

          # Possible a loss was added in a Layer's `build`.

          self._losses.append(symbolic_loss)

add_metric

def add_metric(
    self,
    value,
    name=None,
    **kwargs
)

Adds metric tensor to the layer.

This method can be used inside the call() method of a subclassed layer or model.

class MyMetricLayer(tf.keras.layers.Layer):
  def __init__(self):
    super(MyMetricLayer, self).__init__(name='my_metric_layer')
    self.mean = tf.keras.metrics.Mean(name='metric_1')

  def call(self, inputs):
    self.add_metric(self.mean(inputs))
    self.add_metric(tf.reduce_sum(inputs), name='metric_2')
    return inputs

This method can also be called directly on a Functional Model during construction. In this case, any tensor passed to this Model must be symbolic and be able to be traced back to the model's Inputs. These metrics become part of the model's topology and are tracked when you save the model via save().

inputs = tf.keras.Input(shape=(10,))
x = tf.keras.layers.Dense(10)(inputs)
outputs = tf.keras.layers.Dense(1)(x)
model = tf.keras.Model(inputs, outputs)
model.add_metric(math_ops.reduce_sum(x), name='metric_1')

Note: Calling add_metric() with the result of a metric object on a Functional Model, as shown in the example below, is not supported. This is because we cannot trace the metric result tensor back to the model's inputs.

inputs = tf.keras.Input(shape=(10,))
x = tf.keras.layers.Dense(10)(inputs)
outputs = tf.keras.layers.Dense(1)(x)
model = tf.keras.Model(inputs, outputs)
model.add_metric(tf.keras.metrics.Mean()(x), name='metric_1')

Parameters:

Name Description
value Metric tensor.
name String metric name.
**kwargs Additional keyword arguments for backward compatibility.
Accepted values:
aggregation - When the value tensor provided is not the result of
calling a keras.Metric instance, it will be aggregated by default
using a keras.Metric.Mean.
View Source
  def add_metric(self, value, name=None, **kwargs):

    """Adds metric tensor to the layer.

    This method can be used inside the `call()` method of a subclassed layer

    or model.

    ```python

    class MyMetricLayer(tf.keras.layers.Layer):

      def __init__(self):

        super(MyMetricLayer, self).__init__(name='my_metric_layer')

        self.mean = tf.keras.metrics.Mean(name='metric_1')

      def call(self, inputs):

        self.add_metric(self.mean(inputs))

        self.add_metric(tf.reduce_sum(inputs), name='metric_2')

        return inputs

    ```

    This method can also be called directly on a Functional Model during

    construction. In this case, any tensor passed to this Model must

    be symbolic and be able to be traced back to the model's `Input`s. These

    metrics become part of the model's topology and are tracked when you

    save the model via `save()`.

    ```python

    inputs = tf.keras.Input(shape=(10,))

    x = tf.keras.layers.Dense(10)(inputs)

    outputs = tf.keras.layers.Dense(1)(x)

    model = tf.keras.Model(inputs, outputs)

    model.add_metric(math_ops.reduce_sum(x), name='metric_1')

    ```

    Note: Calling `add_metric()` with the result of a metric object on a

    Functional Model, as shown in the example below, is not supported. This is

    because we cannot trace the metric result tensor back to the model's inputs.

    ```python

    inputs = tf.keras.Input(shape=(10,))

    x = tf.keras.layers.Dense(10)(inputs)

    outputs = tf.keras.layers.Dense(1)(x)

    model = tf.keras.Model(inputs, outputs)

    model.add_metric(tf.keras.metrics.Mean()(x), name='metric_1')

    ```

    Args:

      value: Metric tensor.

      name: String metric name.

      **kwargs: Additional keyword arguments for backward compatibility.

        Accepted values:

        `aggregation` - When the `value` tensor provided is not the result of

        calling a `keras.Metric` instance, it will be aggregated by default

        using a `keras.Metric.Mean`.

    """

    kwargs_keys = list(kwargs.keys())

    if (len(kwargs_keys) > 1 or

        (len(kwargs_keys) == 1 and kwargs_keys[0] != 'aggregation')):

      raise TypeError('Unknown keyword arguments: ', str(kwargs.keys()))

    from_metric_obj = hasattr(value, '_metric_obj')

    if keras_tensor.keras_tensors_enabled():

      is_symbolic = isinstance(value, keras_tensor.KerasTensor)

    else:

      is_symbolic = tf_utils.is_symbolic_tensor(value)

    in_call_context = base_layer_utils.call_context().in_call

    if name is None and not from_metric_obj:

      # Eg. `self.add_metric(math_ops.reduce_sum(x))`

      # In eager mode, we use metric name to lookup a metric. Without a name,

      # a new Mean metric wrapper will be created on every model/layer call.

      # So, we raise an error when no name is provided.

      # We will do the same for symbolic mode for consistency although a name

      # will be generated if no name is provided.

      # We will not raise this error in the following use case for the sake of

      # consistency, as the name is provided in the metric constructor.

      # mean = metrics.Mean(name='my_metric')

      # model.add_metric(mean(outputs))

      raise ValueError('Please provide a name for your metric like '

                       '`self.add_metric(tf.reduce_sum(inputs), '

                       'name=\'mean_activation\')`')

    elif from_metric_obj:

      name = value._metric_obj.name

    if not in_call_context and not is_symbolic:

      raise ValueError('Expected a symbolic Tensor for the metric value, '

                       'received: ' + str(value))

    # If a metric was added in a Layer's `call` or `build`.

    if in_call_context or not getattr(self, '_is_graph_network', False):

      # TF Function path should take the eager path.

      # If the given metric is available in `metrics` list we just update state

      # on it, otherwise we create a new metric instance and

      # add it to the `metrics` list.

      metric_obj = getattr(value, '_metric_obj', None)

      # Tensors that come from a Metric object already updated the Metric state.

      should_update_state = not metric_obj

      name = metric_obj.name if metric_obj else name

      with self._metrics_lock:

        match = self._get_existing_metric(name)

        if match:

          metric_obj = match

        elif metric_obj:

          self._metrics.append(metric_obj)

        else:

          # Build the metric object with the value's dtype if it defines one

          metric_obj = metrics_mod.Mean(

              name=name, dtype=getattr(value, 'dtype', None))

          self._metrics.append(metric_obj)

      if should_update_state:

        metric_obj(value)

    else:

      if from_metric_obj:

        raise ValueError('Using the result of calling a `Metric` object '

                         'when calling `add_metric` on a Functional '

                         'Model is not supported. Please pass the '

                         'Tensor to monitor directly.')

      # Insert layers into the Keras Graph Network.

      aggregation = None if from_metric_obj else 'mean'

      self._graph_network_add_metric(value, aggregation, name)

add_update

def add_update(
    self,
    updates,
    inputs=None
)

Add update op(s), potentially dependent on layer inputs.

Weight updates (for instance, the updates of the moving mean and variance in a BatchNormalization layer) may be dependent on the inputs passed when calling a layer. Hence, when reusing the same layer on different inputs a and b, some entries in layer.updates may be dependent on a and some on b. This method automatically keeps track of dependencies.

This call is ignored when eager execution is enabled (in that case, variable updates are run on the fly and thus do not need to be tracked for later execution).

Parameters:

Name Description
updates Update op, or list/tuple of update ops, or zero-arg callable
that returns an update op. A zero-arg callable should be passed in
order to disable running the updates by setting trainable=False
on this Layer, when executing in Eager mode.
inputs Deprecated, will be automatically inferred.
View Source
  @doc_controls.do_not_doc_inheritable

  def add_update(self, updates, inputs=None):

    """Add update op(s), potentially dependent on layer inputs.

    Weight updates (for instance, the updates of the moving mean and variance

    in a BatchNormalization layer) may be dependent on the inputs passed

    when calling a layer. Hence, when reusing the same layer on

    different inputs `a` and `b`, some entries in `layer.updates` may be

    dependent on `a` and some on `b`. This method automatically keeps track

    of dependencies.

    This call is ignored when eager execution is enabled (in that case, variable

    updates are run on the fly and thus do not need to be tracked for later

    execution).

    Arguments:

      updates: Update op, or list/tuple of update ops, or zero-arg callable

        that returns an update op. A zero-arg callable should be passed in

        order to disable running the updates by setting `trainable=False`

        on this Layer, when executing in Eager mode.

      inputs: Deprecated, will be automatically inferred.

    """

    if inputs is not None:

      tf_logging.warning(

          '`add_update` `inputs` kwarg has been deprecated. You no longer need '

          'to pass a value to `inputs` as it is being automatically inferred.')

    call_context = base_layer_utils.call_context()

    # No need to run updates during Functional API construction.

    if call_context.in_keras_graph:

      return

    # Callable updates are disabled by setting `trainable=False`.

    if not call_context.frozen:

      for update in nest.flatten(updates):

        if callable(update):

          update()  # pylint: disable=not-callable

add_variable

def add_variable(
    self,
    *args,
    **kwargs
)

Deprecated, do NOT use! Alias for add_weight.

View Source
  @doc_controls.do_not_doc_inheritable

  def add_variable(self, *args, **kwargs):

    """Deprecated, do NOT use! Alias for `add_weight`."""

    warnings.warn('`layer.add_variable` is deprecated and '

                  'will be removed in a future version. '

                  'Please use `layer.add_weight` method instead.')

    return self.add_weight(*args, **kwargs)

add_weight

def add_weight(
    self,
    name=None,
    shape=None,
    dtype=None,
    initializer=None,
    regularizer=None,
    trainable=None,
    constraint=None,
    use_resource=None,
    synchronization=<VariableSynchronization.AUTO: 0>,
    aggregation=<VariableAggregation.NONE: 0>,
    **kwargs
)

Adds a new variable to the layer.

Parameters:

Name Description
name Variable name.
shape Variable shape. Defaults to scalar if unspecified.
dtype The type of the variable. Defaults to self.dtype.
initializer Initializer instance (callable).
regularizer Regularizer instance (callable).
trainable Boolean, whether the variable should be part of the layer's
"trainable_variables" (e.g. variables, biases)
or "non_trainable_variables" (e.g. BatchNorm mean and variance).
Note that trainable cannot be True if synchronization
is set to ON_READ.
constraint Constraint instance (callable).
use_resource Whether to use ResourceVariable.
synchronization Indicates when a distributed variable will be
aggregated. Accepted values are constants defined in the class
tf.VariableSynchronization. By default the synchronization is set to
AUTO and the current DistributionStrategy chooses
when to synchronize. If synchronization is set to ON_READ,
trainable must not be set to True.
aggregation Indicates how a distributed variable will be aggregated.
Accepted values are constants defined in the class
tf.VariableAggregation.
**kwargs Additional keyword arguments. Accepted values are getter,
collections, experimental_autocast and caching_device.

Returns:

Type Description
None The variable created.

Raises:

Type Description
ValueError When giving unsupported dtype and no initializer or when
trainable has been set to True with synchronization set as ON_READ.
View Source
  @doc_controls.for_subclass_implementers

  def add_weight(self,

                 name=None,

                 shape=None,

                 dtype=None,

                 initializer=None,

                 regularizer=None,

                 trainable=None,

                 constraint=None,

                 use_resource=None,

                 synchronization=tf_variables.VariableSynchronization.AUTO,

                 aggregation=tf_variables.VariableAggregation.NONE,

                 **kwargs):

    """Adds a new variable to the layer.

    Arguments:

      name: Variable name.

      shape: Variable shape. Defaults to scalar if unspecified.

      dtype: The type of the variable. Defaults to `self.dtype`.

      initializer: Initializer instance (callable).

      regularizer: Regularizer instance (callable).

      trainable: Boolean, whether the variable should be part of the layer's

        "trainable_variables" (e.g. variables, biases)

        or "non_trainable_variables" (e.g. BatchNorm mean and variance).

        Note that `trainable` cannot be `True` if `synchronization`

        is set to `ON_READ`.

      constraint: Constraint instance (callable).

      use_resource: Whether to use `ResourceVariable`.

      synchronization: Indicates when a distributed variable will be

        aggregated. Accepted values are constants defined in the class

        `tf.VariableSynchronization`. By default the synchronization is set to

        `AUTO` and the current `DistributionStrategy` chooses

        when to synchronize. If `synchronization` is set to `ON_READ`,

        `trainable` must not be set to `True`.

      aggregation: Indicates how a distributed variable will be aggregated.

        Accepted values are constants defined in the class

        `tf.VariableAggregation`.

      **kwargs: Additional keyword arguments. Accepted values are `getter`,

        `collections`, `experimental_autocast` and `caching_device`.

    Returns:

      The variable created.

    Raises:

      ValueError: When giving unsupported dtype and no initializer or when

        trainable has been set to True with synchronization set as `ON_READ`.

    """

    if shape is None:

      shape = ()

    kwargs.pop('partitioner', None)  # Ignored.

    # Validate optional keyword arguments.

    for kwarg in kwargs:

      if kwarg not in ['collections', 'experimental_autocast',

                       'caching_device', 'getter']:

        raise TypeError('Unknown keyword argument:', kwarg)

    collections_arg = kwargs.pop('collections', None)

    # 'experimental_autocast' can be set to False by the caller to indicate an

    # AutoCastVariable should never be created.

    autocast = kwargs.pop('experimental_autocast', True)

    # See the docstring for tf.Variable about the details for caching_device.

    caching_device = kwargs.pop('caching_device', None)

    if dtype is None:

      dtype = self.dtype or backend.floatx()

    dtype = dtypes.as_dtype(dtype)

    if self._dtype_policy.variable_dtype is None:

      # The policy is "_infer", so we infer the policy from the variable dtype.

      self._set_dtype_policy(policy.Policy(dtype.base_dtype.name))

    initializer = initializers.get(initializer)

    regularizer = regularizers.get(regularizer)

    constraint = constraints.get(constraint)

    if synchronization == tf_variables.VariableSynchronization.ON_READ:

      if trainable:

        raise ValueError(

            'Synchronization value can be set to '

            'VariableSynchronization.ON_READ only for non-trainable variables. '

            'You have specified trainable=True and '

            'synchronization=VariableSynchronization.ON_READ.')

      else:

        # Set trainable to be false when variable is to be synced on read.

        trainable = False

    elif trainable is None:

      trainable = True

    # Initialize variable when no initializer provided

    if initializer is None:

      # If dtype is DT_FLOAT, provide a uniform unit scaling initializer

      if dtype.is_floating:

        initializer = initializers.get('glorot_uniform')

      # If dtype is DT_INT/DT_UINT, provide a default value `zero`

      # If dtype is DT_BOOL, provide a default value `FALSE`

      elif dtype.is_integer or dtype.is_unsigned or dtype.is_bool:

        initializer = initializers.get('zeros')

      # NOTES:Do we need to support for handling DT_STRING and DT_COMPLEX here?

      else:

        raise ValueError('An initializer for variable %s of type %s is required'

                         ' for layer %s' % (name, dtype.base_dtype, self.name))

    getter = kwargs.pop('getter', base_layer_utils.make_variable)

    if (autocast and

        self._dtype_policy.compute_dtype != self._dtype_policy.variable_dtype

        and dtype.is_floating):

      old_getter = getter

      # Wrap variable constructor to return an AutoCastVariable.

      def getter(*args, **kwargs):  # pylint: disable=function-redefined

        variable = old_getter(*args, **kwargs)

        return autocast_variable.create_autocast_variable(variable)

      # Also the caching_device does not work with the mixed precision API,

      # disable it if it is specified.

      # TODO(b/142020079): Reenable it once the bug is fixed.

      if caching_device is not None:

        tf_logging.warn('`caching_device` does not work with mixed precision '

                        'API. Ignoring user specified `caching_device`.')

        caching_device = None

    variable = self._add_variable_with_custom_getter(

        name=name,

        shape=shape,

        # TODO(allenl): a `make_variable` equivalent should be added as a

        # `Trackable` method.

        getter=getter,

        # Manage errors in Layer rather than Trackable.

        overwrite=True,

        initializer=initializer,

        dtype=dtype,

        constraint=constraint,

        trainable=trainable,

        use_resource=use_resource,

        collections=collections_arg,

        synchronization=synchronization,

        aggregation=aggregation,

        caching_device=caching_device)

    if regularizer is not None:

      # TODO(fchollet): in the future, this should be handled at the

      # level of variable creation, and weight regularization losses

      # should be variable attributes.

      name_in_scope = variable.name[:variable.name.find(':')]

      self._handle_weight_regularization(name_in_scope,

                                         variable,

                                         regularizer)

    if base_layer_utils.is_split_variable(variable):

      for v in variable:

        backend.track_variable(v)

        if trainable:

          self._trainable_weights.append(v)

        else:

          self._non_trainable_weights.append(v)

    else:

      backend.track_variable(variable)

      if trainable:

        self._trainable_weights.append(variable)

      else:

        self._non_trainable_weights.append(variable)

    return variable
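As a hedged illustration, a minimal subclassed layer creating its kernel with add_weight (the Linear name and shapes are illustrative, not part of kerod):

import tensorflow as tf

class Linear(tf.keras.layers.Layer):
    def __init__(self, units):
        super().__init__()
        self.units = units

    def build(self, input_shape):
        # One trainable variable, created lazily once the input shape is known.
        self.kernel = self.add_weight(name='kernel',
                                      shape=(input_shape[-1], self.units),
                                      initializer='glorot_uniform',
                                      regularizer=tf.keras.regularizers.l2(1e-4),
                                      trainable=True)

    def call(self, inputs):
        return tf.matmul(inputs, self.kernel)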

apply

def apply(
    self,
    inputs,
    *args,
    **kwargs
)

Deprecated, do NOT use!

This is an alias of self.__call__.

Parameters:

Name Description
inputs Input tensor(s).
*args additional positional arguments to be passed to self.call.
**kwargs additional keyword arguments to be passed to self.call.

Returns:

Type Description
None Output tensor(s).
View Source
  @doc_controls.do_not_doc_inheritable

  def apply(self, inputs, *args, **kwargs):

    """Deprecated, do NOT use!

    This is an alias of `self.__call__`.

    Arguments:

      inputs: Input tensor(s).

      *args: additional positional arguments to be passed to `self.call`.

      **kwargs: additional keyword arguments to be passed to `self.call`.

    Returns:

      Output tensor(s).

    """

    warnings.warn('`layer.apply` is deprecated and '

                  'will be removed in a future version. '

                  'Please use `layer.__call__` method instead.')

    return self.__call__(inputs, *args, **kwargs)

call

def call(
    self,
    inputs,
    training=None
)

Perform an inference in training or test mode.

Parameters:

Name Description
inputs A dict with the following schema:
images: A Tensor of shape [batch_size, height, width, 3]
image_informations: A float32 Tensor of shape [batch_size, 2] where
the last dimension represents the original height and
width of the images (without the padding).

ground_truths: A dict
- BoxField.BOXES: A 3-D tensor of shape [batch_size, num_gt, (y1, x1, y2, x2)]
- BoxField.LABELS: A 2-D tensor of int32 and shape [batch_size, num_gt]
- BoxField.WEIGHTS: A 2-D tensor of float and shape [batch_size, num_gt]
- BoxField.NUM_BOXES: A 2-D tensor of int32 and shape [batch_size, 1]
which allows removing the padding created by tf.data.
Example: if batch_size=2 and this field equals tf.constant([[2], [1]], tf.int32)
then the second example has a padding of 1
training Is automatically set to True in both train and test mode
(normally test would be False). Why? Through the call, the losses and the metrics
of the rpn and fast_rcnn are computed. They are automatically added with add_loss and add_metric.
In test we also want to benefit from them, so they are computed there as well. This behavior is
inherited from tensorflow 2.0 and 2.1; it may later move inside train_step and test_step, but for
now this method benefits from the encapsulation of the self.compiled_loss method.

Returns:

Type Description
Tuple - classification_pred: A Tensor of shape [batch_size, num_boxes, num_classes]
representing the class probability.
- localization_pred: A Tensor of shape [batch_size, num_boxes, 4 * (num_classes - 1)]
- anchors: A Tensor of shape [batch_size, num_boxes, 4]
View Source
    def call(self, inputs, training=None):

        """Perform an inference in training.

        Arguments:

            inputs: A dict with the following schema:

                `images`: A Tensor of shape [batch_size, height, width, 3]

                `image_informations`: A float32 Tensor of shape [batch_size, 2] where

                    the last dimension represents the original height and

                    width of the images (without the padding).

                `ground_truths`: A dict

                    - `BoxField.BOXES`: A 3-D tensor of shape [batch_size, num_gt, (y1, x1, y2, x2)]

                    - `BoxField.LABELS`: A 2-D tensor of int32 and shape [batch_size, num_gt]

                    - `BoxField.WEIGHTS`: A 2-D tensor of float and shape [batch_size, num_gt]

                    - `BoxField.NUM_BOXES`: A 2-D tensor of int32 and shape [batch_size, 1]

                        which allows removing the padding created by tf.data.

                        Example: if batch_size=2 and this field equals tf.constant([[2], [1]], tf.int32)

                        then the second example has a padding of 1

            training: Is automatically set to `True` in both train and test mode

                (normally test would be False). Why? Through the call, the losses and the metrics

                of the rpn and fast_rcnn are computed. They are automatically added with `add_loss`

                and `add_metric`. In test we also want to benefit from them, so they are computed

                there as well. This behavior is inherited from tensorflow 2.0 and 2.1; it may later

                move inside train_step and test_step, but for now this method benefits from the

                encapsulation of the `self.compiled_loss` method.

        Returns:

            Tuple:

                - `classification_pred`: A Tensor of shape [batch_size, num_boxes, num_classes]

                    representing the class probability.

                - `localization_pred`: A Tensor of shape [batch_size, num_boxes, 4 * (num_classes - 1)]

                - `anchors`: A Tensor of shape [batch_size, num_boxes, 4]

        """

        images = inputs[DatasetField.IMAGES]

        images_information = inputs[DatasetField.IMAGES_INFO]

        # The preprocessing dedicated to the backbone is done inside the model.

        x = self.backbone(images)

        pyramid = self.fpn(x)

        rpn_loc_pred_per_lvl, rpn_cls_pred_per_lvl, anchors_per_lvl = self.rpn(pyramid)

        if training and not self._serving:

            apply_kernel_regularization(self.l2, self.backbone)

            # add_loss stores the rpn losses computation in self.losses

            _ = self.rpn.compute_loss(rpn_loc_pred_per_lvl, rpn_cls_pred_per_lvl, anchors_per_lvl,

                                      inputs['ground_truths'])

        num_boxes = 2000 if training else 1000

        rois = post_process_rpn(rpn_cls_pred_per_lvl,

                                rpn_loc_pred_per_lvl,

                                anchors_per_lvl,

                                images_information,

                                pre_nms_topk_per_lvl=num_boxes,

                                post_nms_topk=num_boxes)

        if training and not self._serving:

            ground_truths = inputs['ground_truths']

            # Include the ground_truths as RoIs for the training

            rois = tf.concat([tf.cast(rois, self._compute_dtype), ground_truths[BoxField.BOXES]],

                             axis=1)

            # Sample the boxes needed for inference

            y_true, weights, rois = self.fast_rcnn.sample_boxes(rois, ground_truths)

        classification_pred, localization_pred = self.fast_rcnn([pyramid, rois])

        if training and not self._serving:

            # add_loss stores the fast_rcnn losses computation in self.losses

            _ = self.fast_rcnn.compute_loss(y_true, weights, classification_pred, localization_pred)

        classification_pred = tf.nn.softmax(classification_pred)

        return classification_pred, localization_pred, rois
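A hedged sketch of the training-mode input schema (dummy tensors with arbitrary shapes; `model` is assumed to be a built kerod Faster RCNN instance):

import tensorflow as tf
from kerod.core.standard_fields import BoxField, DatasetField

inputs = {
    DatasetField.IMAGES: tf.zeros((2, 640, 640, 3), tf.float32),
    DatasetField.IMAGES_INFO: tf.constant([[640., 640.], [512., 512.]], tf.float32),
    'ground_truths': {
        BoxField.BOXES: tf.constant([[[10., 10., 100., 100.]], [[20., 20., 80., 80.]]], tf.float32),
        BoxField.LABELS: tf.constant([[1], [2]], tf.int32),
        BoxField.WEIGHTS: tf.constant([[1.], [1.]], tf.float32),
        BoxField.NUM_BOXES: tf.constant([[1], [1]], tf.int32),
    },
}
classification_pred, localization_pred, rois = model(inputs, training=True)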

compute_mask

def compute_mask(
    self,
    inputs,
    mask=None
)

Computes an output mask tensor.

Parameters:

Name Description
inputs Tensor or list of tensors.
mask Tensor or list of tensors.

Returns:

Type Description
None None or a tensor (or list of tensors,
one per output tensor of the layer).
View Source
  @generic_utils.default

  def compute_mask(self, inputs, mask=None):  # pylint: disable=unused-argument

    """Computes an output mask tensor.

    Arguments:

        inputs: Tensor or list of tensors.

        mask: Tensor or list of tensors.

    Returns:

        None or a tensor (or list of tensors,

            one per output tensor of the layer).

    """

    if not self._supports_masking:

      if any(m is not None for m in nest.flatten(mask)):

        raise TypeError('Layer ' + self.name + ' does not support masking, '

                        'but was passed an input_mask: ' + str(mask))

      # masking not explicitly supported: return None as mask.

      return None

    # if masking is explicitly supported, by default

    # carry over the input mask

    return mask
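For instance, a mask-producing layer (a generic Keras sketch, not kerod-specific):

import tensorflow as tf

emb = tf.keras.layers.Embedding(input_dim=10, output_dim=4, mask_zero=True)
tokens = tf.constant([[1, 2, 0]])    # 0 is the padding token
print(emb.compute_mask(tokens))      # [[ True  True False]]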

compute_output_shape

def compute_output_shape(
    self,
    input_shape
)

Computes the output shape of the layer.

If the layer has not been built, this method will call build on the layer. This assumes that the layer will later be used with inputs that match the input shape provided here.

Parameters:

Name Description
input_shape Shape tuple (tuple of integers)
or list of shape tuples (one per output tensor of the layer).
Shape tuples can include None for free dimensions,
instead of an integer.

Returns:

Type Description
None An input shape tuple.
View Source
  def compute_output_shape(self, input_shape):

    """Computes the output shape of the layer.

    If the layer has not been built, this method will call `build` on the

    layer. This assumes that the layer will later be used with inputs that

    match the input shape provided here.

    Arguments:

        input_shape: Shape tuple (tuple of integers)

            or list of shape tuples (one per output tensor of the layer).

            Shape tuples can include None for free dimensions,

            instead of an integer.

    Returns:

        An input shape tuple.

    """

    if context.executing_eagerly():

      # In this case we build the model first in order to do shape inference.

      # This is acceptable because the framework only calls

      # `compute_output_shape` on shape values that the layer would later be

      # built for. It would however cause issues in case a user attempts to

      # use `compute_output_shape` manually with shapes that are incompatible

      # with the shape the Layer will be called on (these users will have to

      # implement `compute_output_shape` themselves).

      self._maybe_build(input_shape)

      with func_graph.FuncGraph(str(self.name) + '_scratch_graph').as_default():

        input_shape = tf_utils.convert_shapes(input_shape, to_tuples=False)

        def _make_placeholder_like(shape):

          ph = backend.placeholder(shape=shape, dtype=self.dtype)

          ph._keras_mask = None

          return ph

        inputs = nest.map_structure(_make_placeholder_like, input_shape)

        try:

          outputs = self(inputs, training=False)

        except TypeError as e:

          six.raise_from(

              NotImplementedError(

                  'We could not automatically infer the static shape of the '

                  'layer\'s output. Please implement the '

                  '`compute_output_shape` method on your layer (%s).' %

                  self.__class__.__name__), e)

      return nest.map_structure(lambda t: t.shape, outputs)

    raise NotImplementedError(

        'Please run in eager mode or implement the `compute_output_shape` '

        'method on your layer (%s).' % self.__class__.__name__)
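A quick hedged example on a stock layer:

import tensorflow as tf

dense = tf.keras.layers.Dense(8)
print(dense.compute_output_shape((None, 4)))  # (None, 8)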

compute_output_signature

def compute_output_signature(
    self,
    input_signature
)

Compute the output tensor signature of the layer based on the inputs.

Unlike a TensorShape object, a TensorSpec object contains both shape and dtype information for a tensor. This method allows layers to provide output dtype information if it is different from the input dtype. For any layer that doesn't implement this function, the framework will fall back to use compute_output_shape, and will assume that the output dtype matches the input dtype.

Parameters:

Name Description
input_signature Single TensorSpec or nested structure of TensorSpec
objects, describing a candidate input for the layer.

Returns:

Type Description
None Single TensorSpec or nested structure of TensorSpec objects, describing
how the layer would transform the provided input.

Raises:

Type Description
TypeError If input_signature contains a non-TensorSpec object.
View Source
  @doc_controls.for_subclass_implementers

  def compute_output_signature(self, input_signature):

    """Compute the output tensor signature of the layer based on the inputs.

    Unlike a TensorShape object, a TensorSpec object contains both shape

    and dtype information for a tensor. This method allows layers to provide

    output dtype information if it is different from the input dtype.

    For any layer that doesn't implement this function,

    the framework will fall back to use `compute_output_shape`, and will

    assume that the output dtype matches the input dtype.

    Args:

      input_signature: Single TensorSpec or nested structure of TensorSpec

        objects, describing a candidate input for the layer.

    Returns:

      Single TensorSpec or nested structure of TensorSpec objects, describing

        how the layer would transform the provided input.

    Raises:

      TypeError: If input_signature contains a non-TensorSpec object.

    """

    def check_type_return_shape(s):

      if not isinstance(s, tensor_spec.TensorSpec):

        raise TypeError(

            'Only TensorSpec signature types are supported, '

            'but saw signature entry: {}.'.format(s))

      return s.shape

    input_shape = nest.map_structure(check_type_return_shape, input_signature)

    output_shape = self.compute_output_shape(input_shape)

    dtype = self._compute_dtype

    if dtype is None:

      input_dtypes = [s.dtype for s in nest.flatten(input_signature)]

      # Default behavior when self.dtype is None, is to use the first input's

      # dtype.

      dtype = input_dtypes[0]

    return nest.map_structure(

        lambda s: tensor_spec.TensorSpec(dtype=dtype, shape=s),

        output_shape)
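A hedged sketch on a stock layer; the dtype falls back to the layer's compute dtype:

import tensorflow as tf

spec = tf.TensorSpec(shape=(None, 4), dtype=tf.float32)
print(tf.keras.layers.Dense(8).compute_output_signature(spec))  # TensorSpec((None, 8), tf.float32)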

count_params

def count_params(
    self
)

Count the total number of scalars composing the weights.

Returns:

Type Description
None An integer count.

Raises:

Type Description
ValueError if the layer isn't yet built
(in which case its weights aren't yet defined).
View Source
  def count_params(self):

    """Count the total number of scalars composing the weights.

    Returns:

        An integer count.

    Raises:

        ValueError: if the layer isn't yet built

          (in which case its weights aren't yet defined).

    """

    if not self.built:

      if getattr(self, '_is_graph_network', False):

        with tf_utils.maybe_init_scope(self):

          self._maybe_build(self.inputs)

      else:

        raise ValueError('You tried to call `count_params` on ' + self.name +

                         ', but the layer isn\'t built. '

                         'You can build it manually via: `' + self.name +

                         '.build(batch_input_shape)`.')

    return layer_utils.count_params(self.weights)
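For example (a minimal sketch on a built toy model):

import tensorflow as tf

toy = tf.keras.Sequential([tf.keras.layers.Dense(2, input_shape=(4,))])
print(toy.count_params())  # 10 scalars: a 4x2 kernel plus 2 biases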

export_for_serving

def export_for_serving(
    self,
    filepath
)

Allows bypassing the save_model tracing of the graph in serving mode.

Currently, the issue is that in training the ground_truths are passed to the call method but not in inference. For serving, only the images and images_information are defined, which means the inputs linked to the ground_truths won't be defined in serving. However, in tensorflow, when the training argument is defined in the method call, the tf.saved_model.save method performs a check on the graph for both training=False and training=True. We don't want this check to be performed because our ground_truths inputs aren't defined.

View Source
    def export_for_serving(self, filepath):

        """Allow to bypass the save_model behavior the graph in serving mode.

        Currently, the issue is that in training the ground_truths are passed to the call method but

        not in inference. For the serving only the `images` and `images_information` are defined.

        It means the inputs link to the ground_truths won't be defined in serving. However, in tensorflow

        when the `training` arguments is defined int the method `call`, `tf.save_model.save` method

        performs a check on the graph for training=False and training=True.

        However, we don't want this check to be perform because our ground_truths inputs aren't defined.

        """

        self._serving = True

        call_output = self.serving_step.get_concrete_function()

        tf.saved_model.save(self, filepath, signatures={'serving_default': call_output})

        self._serving = False

get_input_at

def get_input_at(
    self,
    node_index
)

Retrieves the input tensor(s) of a layer at a given node.

Parameters:

Name Description
node_index Integer, index of the node
from which to retrieve the attribute.
E.g. node_index=0 will correspond to the
first time the layer was called.

Returns:

Type Description
None A tensor (or list of tensors if the layer has multiple inputs).

Raises:

Type Description
RuntimeError If called in Eager mode.
View Source
  @doc_controls.do_not_doc_inheritable
  def get_input_at(self, node_index):
    """Retrieves the input tensor(s) of a layer at a given node.

    Arguments:
        node_index: Integer, index of the node
            from which to retrieve the attribute.
            E.g. `node_index=0` will correspond to the
            first time the layer was called.

    Returns:
        A tensor (or list of tensors if the layer has multiple inputs).

    Raises:
      RuntimeError: If called in Eager mode.
    """
    return self._get_node_attribute_at_index(node_index, 'input_tensors',
                                             'input')
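A minimal sketch of the node-index concept, using a shared Dense layer inside a functional model (the same pattern applies to the other `get_*_at` getters below). These getters rely on the graph nodes created by functional-API calls; a layer called directly on eager tensors has no nodes, hence the RuntimeError note:

```python
import tensorflow as tf

shared = tf.keras.layers.Dense(2)
x1 = tf.keras.Input(shape=(3,), name='a')
x2 = tf.keras.Input(shape=(3,), name='b')

y1 = shared(x1)  # first call creates node 0 on `shared`
y2 = shared(x2)  # second call creates node 1

print(shared.get_input_at(0))  # the tensor coming from Input 'a'
print(shared.get_input_at(1))  # the tensor coming from Input 'b'
```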

get_input_mask_at

def get_input_mask_at(
    self,
    node_index
)

Retrieves the input mask tensor(s) of a layer at a given node.

Parameters:

node_index: Integer, index of the node from which to retrieve the attribute. E.g. node_index=0 will correspond to the first time the layer was called.

Returns:

A mask tensor (or list of tensors if the layer has multiple inputs).
View Source
  @doc_controls.do_not_doc_inheritable
  def get_input_mask_at(self, node_index):
    """Retrieves the input mask tensor(s) of a layer at a given node.

    Arguments:
        node_index: Integer, index of the node
            from which to retrieve the attribute.
            E.g. `node_index=0` will correspond to the
            first time the layer was called.

    Returns:
        A mask tensor
        (or list of tensors if the layer has multiple inputs).
    """
    inputs = self.get_input_at(node_index)
    if isinstance(inputs, list):
      return [getattr(x, '_keras_mask', None) for x in inputs]
    else:
      return getattr(inputs, '_keras_mask', None)

get_input_shape_at

def get_input_shape_at(
    self,
    node_index
)

Retrieves the input shape(s) of a layer at a given node.

Parameters:

node_index: Integer, index of the node from which to retrieve the attribute. E.g. node_index=0 will correspond to the first time the layer was called.

Returns:

A shape tuple (or list of shape tuples if the layer has multiple inputs).

Raises:

RuntimeError: If called in Eager mode.
View Source
  @doc_controls.do_not_doc_inheritable
  def get_input_shape_at(self, node_index):
    """Retrieves the input shape(s) of a layer at a given node.

    Arguments:
        node_index: Integer, index of the node
            from which to retrieve the attribute.
            E.g. `node_index=0` will correspond to the
            first time the layer was called.

    Returns:
        A shape tuple
        (or list of shape tuples if the layer has multiple inputs).

    Raises:
      RuntimeError: If called in Eager mode.
    """
    return self._get_node_attribute_at_index(node_index, 'input_shapes',
                                             'input shape')

get_losses_for

def get_losses_for(
    self,
    inputs
)

Deprecated, do NOT use!

Retrieves losses relevant to a specific set of inputs.

Parameters:

inputs: Input tensor or list/tuple of input tensors.

Returns:

List of loss tensors of the layer that depend on inputs.
View Source
  @doc_controls.do_not_generate_docs
  def get_losses_for(self, inputs):
    """Deprecated, do NOT use!

    Retrieves losses relevant to a specific set of inputs.

    Arguments:
      inputs: Input tensor or list/tuple of input tensors.

    Returns:
      List of loss tensors of the layer that depend on `inputs`.
    """
    warnings.warn('`layer.get_losses_for` is deprecated and '
                  'will be removed in a future version. '
                  'Please use `layer.losses` instead.')
    return self.losses
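Since this method is deprecated, here is a short sketch of the recommended replacement, the `layer.losses` property, shown on a layer with an activity regularizer:

```python
import tensorflow as tf

layer = tf.keras.layers.Dense(
    2, activity_regularizer=tf.keras.regularizers.l2(1e-4))
_ = layer(tf.ones((1, 3)))  # calling the layer records its regularization loss

# Preferred over the deprecated get_losses_for(inputs):
print(layer.losses)  # list with one scalar regularization loss tensor
```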

get_output_at

def get_output_at(
    self,
    node_index
)

Retrieves the output tensor(s) of a layer at a given node.

Parameters:

node_index: Integer, index of the node from which to retrieve the attribute. E.g. node_index=0 will correspond to the first time the layer was called.

Returns:

A tensor (or list of tensors if the layer has multiple outputs).

Raises:

RuntimeError: If called in Eager mode.
View Source
  @doc_controls.do_not_doc_inheritable
  def get_output_at(self, node_index):
    """Retrieves the output tensor(s) of a layer at a given node.

    Arguments:
        node_index: Integer, index of the node
            from which to retrieve the attribute.
            E.g. `node_index=0` will correspond to the
            first time the layer was called.

    Returns:
        A tensor (or list of tensors if the layer has multiple outputs).

    Raises:
      RuntimeError: If called in Eager mode.
    """
    return self._get_node_attribute_at_index(node_index, 'output_tensors',
                                             'output')

get_output_mask_at

def get_output_mask_at(
    self,
    node_index
)

Retrieves the output mask tensor(s) of a layer at a given node.

Parameters:

node_index: Integer, index of the node from which to retrieve the attribute. E.g. node_index=0 will correspond to the first time the layer was called.

Returns:

A mask tensor (or list of tensors if the layer has multiple outputs).
View Source
  @doc_controls.do_not_doc_inheritable
  def get_output_mask_at(self, node_index):
    """Retrieves the output mask tensor(s) of a layer at a given node.

    Arguments:
        node_index: Integer, index of the node
            from which to retrieve the attribute.
            E.g. `node_index=0` will correspond to the
            first time the layer was called.

    Returns:
        A mask tensor
        (or list of tensors if the layer has multiple outputs).
    """
    output = self.get_output_at(node_index)
    if isinstance(output, list):
      return [getattr(x, '_keras_mask', None) for x in output]
    else:
      return getattr(output, '_keras_mask', None)

get_output_shape_at

def get_output_shape_at(
    self,
    node_index
)

Retrieves the output shape(s) of a layer at a given node.

Parameters:

node_index: Integer, index of the node from which to retrieve the attribute. E.g. node_index=0 will correspond to the first time the layer was called.

Returns:

A shape tuple (or list of shape tuples if the layer has multiple outputs).

Raises:

RuntimeError: If called in Eager mode.
View Source
  @doc_controls.do_not_doc_inheritable
  def get_output_shape_at(self, node_index):
    """Retrieves the output shape(s) of a layer at a given node.

    Arguments:
        node_index: Integer, index of the node
            from which to retrieve the attribute.
            E.g. `node_index=0` will correspond to the
            first time the layer was called.

    Returns:
        A shape tuple
        (or list of shape tuples if the layer has multiple outputs).

    Raises:
      RuntimeError: If called in Eager mode.
    """
    return self._get_node_attribute_at_index(node_index, 'output_shapes',
                                             'output shape')

get_updates_for

def get_updates_for(
    self,
    inputs
)

Deprecated, do NOT use!

Retrieves updates relevant to a specific set of inputs.

Parameters:

inputs: Input tensor or list/tuple of input tensors.

Returns:

List of update ops of the layer that depend on inputs.
View Source
  @doc_controls.do_not_generate_docs
  def get_updates_for(self, inputs):
    """Deprecated, do NOT use!

    Retrieves updates relevant to a specific set of inputs.

    Arguments:
      inputs: Input tensor or list/tuple of input tensors.

    Returns:
      List of update ops of the layer that depend on `inputs`.
    """
    warnings.warn('`layer.get_updates_for` is deprecated and '
                  'will be removed in a future version. '
                  'Please use `layer.updates` method instead.')
    return self.updates

serving_step

def serving_step(
    self,
    images,
    images_info
)

Allows bypassing the `saved_model` tracing of the graph in serving mode.

Currently, the issue is that the ground_truths are passed to the call method during training but not at inference. For serving, only the `images` and `images_information` inputs are defined, which means the inputs linked to the ground_truths won't be defined in serving. However, TensorFlow requires them and will raise an exception if the ground_truths aren't provided.

View Source
    @tf.function(input_signature=[
        tf.TensorSpec(shape=(None, None, None, 3), dtype=tf.float32, name=DatasetField.IMAGES),
        tf.TensorSpec(shape=(None, 2), dtype=tf.float32, name=DatasetField.IMAGES_INFO)
    ])
    def serving_step(self, images, images_info):
        """Allow to bypass the saved_model tracing of the graph in serving mode.

        Currently, the issue is that in training the ground_truths are passed to the call method but
        not in inference. For the serving only the `images` and `images_information` are defined.
        It means the inputs linked to the ground_truths won't be defined in serving. However, tensorflow
        requires them and will raise an exception if the ground_truths aren't provided.
        """
        return self.predict_step({
            DatasetField.IMAGES: images,
            DatasetField.IMAGES_INFO: images_info
        })
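A sketch of how the exported signature could be invoked after `export_for_serving`. The export path and the 640x640 input size are illustrative assumptions, and the keyword names follow the TensorSpec names (DatasetField.IMAGES / DatasetField.IMAGES_INFO); 'images' and 'images_info' are assumed here:

```python
import tensorflow as tf

# Hypothetical: reload the SavedModel written by export_for_serving.
loaded = tf.saved_model.load('serving/faster_rcnn/1')
serving_fn = loaded.signatures['serving_default']

images = tf.zeros((1, 640, 640, 3), tf.float32)        # batch of padded images
images_info = tf.constant([[640., 640.]], tf.float32)  # original height, width
outputs = serving_fn(images=images, images_info=images_info)
```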

set_weights

def set_weights(
    self,
    weights
)

Sets the weights of the layer, from Numpy arrays.

The weights of a layer represent the state of the layer. This function sets the weight values from numpy arrays. The weight values should be passed in the order they are created by the layer. Note that the layer's weights must be instantiated before calling this function; calling the layer once on an input will create them.

For example, a Dense layer returns a list of two values-- per-output weights and the bias value. These can be used to set the weights of another Dense layer:

>>> a = tf.keras.layers.Dense(1,
...   kernel_initializer=tf.constant_initializer(1.))
>>> a_out = a(tf.convert_to_tensor([[1., 2., 3.]]))
>>> a.get_weights()
[array([[1.],
       [1.],
       [1.]], dtype=float32), array([0.], dtype=float32)]
>>> b = tf.keras.layers.Dense(1,
...   kernel_initializer=tf.constant_initializer(2.))
>>> b_out = b(tf.convert_to_tensor([[10., 20., 30.]]))
>>> b.get_weights()
[array([[2.],
       [2.],
       [2.]], dtype=float32), array([0.], dtype=float32)]
>>> b.set_weights(a.get_weights())
>>> b.get_weights()
[array([[1.],
       [1.],
       [1.]], dtype=float32), array([0.], dtype=float32)]

Parameters:

weights: a list of Numpy arrays. The number of arrays and their shape must match the number of the dimensions of the weights of the layer (i.e. it should match the output of get_weights).

Raises:

ValueError: If the provided weights list does not match the layer's specifications.
View Source
  def set_weights(self, weights):
    """Sets the weights of the layer, from Numpy arrays.

    The weights of a layer represent the state of the layer. This function
    sets the weight values from numpy arrays. The weight values should be
    passed in the order they are created by the layer. Note that the layer's
    weights must be instantiated before calling this function by calling
    the layer.

    For example, a Dense layer returns a list of two values-- per-output
    weights and the bias value. These can be used to set the weights of another
    Dense layer:

    >>> a = tf.keras.layers.Dense(1,
    ...   kernel_initializer=tf.constant_initializer(1.))
    >>> a_out = a(tf.convert_to_tensor([[1., 2., 3.]]))
    >>> a.get_weights()
    [array([[1.],
           [1.],
           [1.]], dtype=float32), array([0.], dtype=float32)]
    >>> b = tf.keras.layers.Dense(1,
    ...   kernel_initializer=tf.constant_initializer(2.))
    >>> b_out = b(tf.convert_to_tensor([[10., 20., 30.]]))
    >>> b.get_weights()
    [array([[2.],
           [2.],
           [2.]], dtype=float32), array([0.], dtype=float32)]
    >>> b.set_weights(a.get_weights())
    >>> b.get_weights()
    [array([[1.],
           [1.],
           [1.]], dtype=float32), array([0.], dtype=float32)]

    Arguments:
        weights: a list of Numpy arrays. The number
            of arrays and their shape must match
            number of the dimensions of the weights
            of the layer (i.e. it should match the
            output of `get_weights`).

    Raises:
        ValueError: If the provided weights list does not match the
            layer's specifications.
    """
    params = self.weights
    expected_num_weights = 0
    for param in params:
      if isinstance(param, base_layer_utils.TrackableWeightHandler):
        expected_num_weights += param.num_tensors
      else:
        expected_num_weights += 1

    if expected_num_weights != len(weights):
      raise ValueError(
          'You called `set_weights(weights)` on layer "%s" '
          'with a weight list of length %s, but the layer was '
          'expecting %s weights. Provided weights: %s...' %
          (self.name, len(weights), expected_num_weights, str(weights)[:50]))

    weight_index = 0
    weight_value_tuples = []
    for param in params:
      if isinstance(param, base_layer_utils.TrackableWeightHandler):
        num_tensors = param.num_tensors
        tensors = weights[weight_index:weight_index + num_tensors]
        param.set_weights(tensors)
        weight_index += num_tensors
      else:
        weight = weights[weight_index]
        ref_shape = param.shape
        if not ref_shape.is_compatible_with(weight.shape):
          raise ValueError(
              'Layer weight shape %s not compatible with provided weight '
              'shape %s' % (ref_shape, weight.shape))
        weight_value_tuples.append((param, weight))
        weight_index += 1

    backend.batch_set_value(weight_value_tuples)
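A brief model-level usage sketch: since `set_weights` matches arrays by creation order, two identically built models can exchange their full state (the small Sequential models are illustrative stand-ins):

```python
import tensorflow as tf

def make_model():
    return tf.keras.Sequential([
        tf.keras.layers.Dense(4, input_shape=(3,)),
        tf.keras.layers.Dense(1),
    ])

source, target = make_model(), make_model()
# Copy every variable from `source` into `target`, in creation order.
target.set_weights(source.get_weights())
```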