Module degann.networks.losses

Expand source code
from abc import ABC
from typing import Callable

import tensorflow as tf
from tensorflow import keras


def sign(x):
    """Elementwise sign of ``x``; zero maps to +1 (unlike ``tf.sign``, which maps 0 to 0)."""
    is_negative = x < 0.0
    return tf.where(is_negative, -1.0, 1.0)


class RelativeAbsoluteError(tf.keras.losses.Loss, ABC):
    r"""
    Relative Absolute Error (RAE) loss:

    $$ RAE = \frac{\sum_{i=1}^{n} |y_i - \hat{y}_i|}{\sum_{i=1}^{n} |y_i - \bar{y}|} $$

    where \(\bar{y}\) is the mean of the true values. A zero denominator
    (all targets equal to their mean) is replaced by 1 to avoid division
    by zero.

    The docstring is raw on purpose: a plain string would turn ``\f`` in
    ``\frac`` into a formfeed character and garble generated docs.
    """

    def __init__(self, reduction=tf.keras.losses.Reduction.NONE, name="rae", **kwargs):
        super(RelativeAbsoluteError, self).__init__(
            reduction=reduction, name=name, **kwargs
        )

    def __call__(self, y_true, y_pred, sample_weight=None):
        # NOTE(review): sample_weight is accepted but ignored — confirm intended.
        true_mean = tf.reduce_mean(y_true)
        abs_error_num = tf.reduce_sum(tf.abs(y_true - y_pred))
        abs_error_den = tf.reduce_sum(tf.abs(y_true - true_mean))

        # Guard against a zero denominator. Constants take the tensor's own
        # dtype so the tf.cond branches agree for float64 as well as float32
        # inputs (bare tf.constant(1.0) is always float32).
        abs_error_den = tf.cond(
            pred=tf.equal(abs_error_den, tf.constant(0.0, dtype=abs_error_den.dtype)),
            true_fn=lambda: tf.constant(1.0, dtype=abs_error_den.dtype),
            false_fn=lambda: abs_error_den,
        )

        loss = abs_error_num / abs_error_den
        return loss


class MaxAbsoluteDeviation(tf.keras.losses.Loss, ABC):
    r"""
    Max Absolute Deviation loss:

    $$ MAD = \max |y - \hat{y}| $$
    """

    def __init__(
        self, reduction=tf.keras.losses.Reduction.NONE, name="my_mae", **kwargs
    ):
        super(MaxAbsoluteDeviation, self).__init__(
            reduction=reduction, name=name, **kwargs
        )

    def __call__(self, y_true, y_pred, sample_weight=None):
        # Largest elementwise absolute deviation between targets and predictions.
        deviations = tf.math.abs(y_true - y_pred)
        return tf.math.reduce_max(deviations)


class MaxAbsolutePercentageError(tf.keras.losses.Loss, ABC):
    r"""
    Max Absolute Percentage Error loss (in percent):

    $$ MaxAPE = 100 \cdot \max \left| \frac{y - \hat{y}}{y} \right| $$

    NOTE(review): a zero in ``y_true`` divides by zero here — confirm
    callers guarantee non-zero targets.
    """

    def __init__(
        self, reduction=tf.keras.losses.Reduction.NONE, name="maxAPE", **kwargs
    ):
        super(MaxAbsolutePercentageError, self).__init__(
            reduction=reduction, name=name, **kwargs
        )

    def __call__(self, y_true, y_pred, sample_weight=None):
        relative_error = (y_true - y_pred) / y_true
        return tf.math.reduce_max(tf.math.abs(relative_error)) * 100.0


class RMSE(tf.keras.losses.Loss, ABC):
    r"""
    Root Mean Squared Error loss:

    $$ RMSE = \sqrt{MSE} $$
    """

    def __init__(
        self, reduction=tf.keras.losses.Reduction.NONE, name="RMSE", **kwargs
    ):
        super(RMSE, self).__init__(reduction=reduction, name=name, **kwargs)

    def __call__(self, y_true, y_pred, sample_weight=None):
        mse = tf.math.reduce_mean(tf.math.square(y_pred - y_true))
        return tf.math.sqrt(mse)


# Registry of loss functions available by name via get_loss().
# NOTE(review): the keras built-ins below are constructed with their default
# reduction, while the custom losses in this module default to Reduction.NONE —
# confirm whether the built-ins should also use reduction=None ("Reduction
# should be set to None?" per the original author).
_losses: dict = {
    "Huber": keras.losses.Huber(),
    "LogCosh": keras.losses.LogCosh(),
    "MeanAbsoluteError": keras.losses.MeanAbsoluteError(),
    "MeanAbsolutePercentageError": keras.losses.MeanAbsolutePercentageError(),
    "MaxAbsolutePercentageError": MaxAbsolutePercentageError(),
    "MeanSquaredError": keras.losses.MeanSquaredError(),
    "RootMeanSquaredError": RMSE(),
    "MeanSquaredLogarithmicError": keras.losses.MeanSquaredLogarithmicError(),
    "RelativeAbsoluteError": RelativeAbsoluteError(),
    "MaxAbsoluteDeviation": MaxAbsoluteDeviation(),
}


def get_loss(name: str):
    """
    Look up a loss function in the module registry by name.

    Parameters
    ----------
    name: str
        Name of loss function

    Returns
    -------
    loss_class: tf.keras.losses.Loss
        The registered loss instance, or ``None`` if *name* is unknown
    """
    try:
        return _losses[name]
    except KeyError:
        return None


def get_all_loss_functions() -> dict[str, tf.keras.losses.Loss]:
    """
    Return the full loss-function registry.

    Returns
    -------
    loss_class: dict[str, tf.keras.losses.Loss]
        The live registry mapping names to loss instances (not a copy —
        mutations are visible to subsequent lookups)
    """
    registry = _losses
    return registry

Functions

def get_all_loss_functions() ‑> dict[str, keras.src.losses.loss.Loss]

Get all loss functions Parameters


Returns

loss_class : dict[str, tf.keras.losses.Loss]
All loss functions
Expand source code
def get_all_loss_functions() -> dict[str, tf.keras.losses.Loss]:
    """
    Get all loss functions
    Parameters
    ----------

    Returns
    -------
    loss_class: dict[str, tf.keras.losses.Loss]
        All loss functions
    """
    return _losses
def get_loss(name: str)

Get loss function by name Parameters


name : str
Name of loss function

Returns

loss_class : tf.keras.losses.Loss
Result loss function
Expand source code
def get_loss(name: str):
    """
    Get loss function by name
    Parameters
    ----------
    name: str
        Name of loss function

    Returns
    -------
    loss_class: tf.keras.losses.Loss
        Result loss function
    """
    return _losses.get(name)
def sign(x)
Expand source code
def sign(x):
    return tf.where(x < 0.0, -1.0, 1.0)

Classes

class MaxAbsoluteDeviation (reduction='none', name='my_mae', **kwargs)

This class provides Max Absolute Deviation loss function: $$ MAD = \max |y - \hat{y}| $$

Expand source code
class MaxAbsoluteDeviation(tf.keras.losses.Loss, ABC):
    """
    This class provides Max Absolute Deviation loss function:
    $$ MAD = \max |y - \hat(y)|
    """
    def __init__(
        self, reduction=tf.keras.losses.Reduction.NONE, name="my_mae", **kwargs
    ):
        super(MaxAbsoluteDeviation, self).__init__(
            reduction=reduction, name=name, **kwargs
        )

    def __call__(self, y_true, y_pred, sample_weight=None):
        loss = tf.math.reduce_max(tf.math.abs(y_true - y_pred))
        return loss

Ancestors

  • keras.src.losses.loss.Loss
  • keras.src.saving.keras_saveable.KerasSaveable
  • abc.ABC
class MaxAbsolutePercentageError (reduction='none', name='maxAPE', **kwargs)

This class provides Max Absolute Percentage Error loss function: $$ MaxAPE = \max \left| \frac{y - \hat{y}}{y} \right| $$

Expand source code
class MaxAbsolutePercentageError(tf.keras.losses.Loss, ABC):
    """
    This class provides Max Absolute Percentage Error loss function:
    $$ MAD = \max |\frac{y - \hat(y)}{y}|
    """
    def __init__(
        self, reduction=tf.keras.losses.Reduction.NONE, name="maxAPE", **kwargs
    ):
        super(MaxAbsolutePercentageError, self).__init__(
            reduction=reduction, name=name, **kwargs
        )

    def __call__(self, y_true, y_pred, sample_weight=None):
        loss = tf.math.reduce_max(tf.math.abs((y_true - y_pred) / y_true)) * 100.0
        return loss

Ancestors

  • keras.src.losses.loss.Loss
  • keras.src.saving.keras_saveable.KerasSaveable
  • abc.ABC
class RMSE (reduction='none', name='RMSE', **kwargs)

This class provides Root Mean Squared Error loss function: $$ RMSE = \sqrt{MSE} $$

Expand source code
class RMSE(tf.keras.losses.Loss, ABC):
    """
    This class provides Root Mean squared Error loss function:
    $$ MAD = \sqrt{MSE}
    """
    def __init__(
        self, reduction=tf.keras.losses.Reduction.NONE, name="RMSE", **kwargs
    ):
        super(RMSE, self).__init__(reduction=reduction, name=name, **kwargs)

    def __call__(self, y_true, y_pred, sample_weight=None):
        loss = tf.math.sqrt(tf.math.reduce_mean((y_pred - y_true) ** 2))
        return loss

Ancestors

  • keras.src.losses.loss.Loss
  • keras.src.saving.keras_saveable.KerasSaveable
  • abc.ABC
class RelativeAbsoluteError (reduction='none', name='rae', **kwargs)

This class provides RAE loss function: $$ RAE = \frac{\sum_{i=1}^{n} |y_i - \hat{y}_i|}{\sum_{i=1}^{n} |y_i - \bar{y}|} $$

Expand source code
class RelativeAbsoluteError(tf.keras.losses.Loss, ABC):
    """
    This class provides RAE loss function:
    $$ RAE = \frac{\Sum^n_{i=1} |y_i - \hat(y)_i|}{\Sum^n_{i=1} |y_i - \bar(y)|}
    """
    def __init__(self, reduction=tf.keras.losses.Reduction.NONE, name="rae", **kwargs):
        super(RelativeAbsoluteError, self).__init__(
            reduction=reduction, name=name, **kwargs
        )

    def __call__(self, y_true, y_pred, sample_weight=None):
        true_mean = tf.reduce_mean(y_true)
        squared_error_num = tf.reduce_sum(tf.abs(y_true - y_pred))
        squared_error_den = tf.reduce_sum(tf.abs(y_true - true_mean))

        squared_error_den = tf.cond(
            pred=tf.equal(squared_error_den, tf.constant(0.0)),
            true_fn=lambda: tf.constant(1.0),
            false_fn=lambda: squared_error_den,
        )

        loss = squared_error_num / squared_error_den
        return loss

Ancestors

  • keras.src.losses.loss.Loss
  • keras.src.saving.keras_saveable.KerasSaveable
  • abc.ABC