Module degann.networks.callbacks
Expand source code
import gc
import time
import keras.backend as k
from keras.callbacks import Callback
from keras.callbacks import History
class MemoryCleaner(Callback):
    """Keras callback that frees memory at the end of every training epoch.

    Runs Python's garbage collector and then clears the Keras backend
    session state.
    """

    def on_epoch_end(self, epoch, logs=None):
        # Collect unreachable Python objects first, then reset backend state.
        # NOTE(review): calling clear_session() in the middle of training can
        # discard backend graph/optimizer state on some Keras/TF versions —
        # confirm this is intended rather than an end-of-training cleanup.
        gc.collect()
        k.clear_session()
class MeasureTrainTime(Callback):
    """Callback that measures wall-clock time of model runs.

    Records, on ``self.model.trained_time``:
      * ``"train_time"``   — total duration of ``fit`` (seconds),
      * ``"epoch_time"``   — per-epoch durations during ``fit`` (list),
      * ``"predict_time"`` — duration of the last ``evaluate`` or ``predict``
        call (both phases write the same key).

    All timestamps come from ``time.perf_counter()``.
    """

    def __init__(self):
        super().__init__()
        # One start/end timestamp pair per measured phase, all starting at 0.
        for phase in ("train", "evaluate", "predict", "epoch"):
            setattr(self, f"start_{phase}_time", 0)
            setattr(self, f"end_{phase}_time", 0)

    def on_test_begin(self, logs=None):
        # Evaluation reuses the "predict_time" slot; reset it before timing.
        self.model.trained_time["predict_time"] = 0
        self.start_evaluate_time = time.perf_counter()

    def on_test_end(self, logs=None):
        now = time.perf_counter()
        self.end_evaluate_time = now
        self.model.trained_time["predict_time"] = now - self.start_evaluate_time

    def on_predict_begin(self, logs=None):
        self.model.trained_time["predict_time"] = 0
        self.start_predict_time = time.perf_counter()

    def on_predict_end(self, logs=None):
        now = time.perf_counter()
        self.end_predict_time = now
        self.model.trained_time["predict_time"] = now - self.start_predict_time

    def on_train_begin(self, logs=None):
        # Fresh accumulators for a new fit() run.
        self.model.trained_time["train_time"] = 0.0
        self.model.trained_time["epoch_time"] = []
        self.start_train_time = time.perf_counter()

    def on_epoch_begin(self, epoch, logs=None):
        self.start_epoch_time = time.perf_counter()

    def on_epoch_end(self, epoch, logs=None):
        now = time.perf_counter()
        self.end_epoch_time = now
        self.model.trained_time["epoch_time"].append(now - self.start_epoch_time)

    def on_train_end(self, logs=None):
        now = time.perf_counter()
        self.end_train_time = now
        self.model.trained_time["train_time"] = now - self.start_train_time
class LightHistory(History):
    """History variant that keeps only the latest epoch's metrics.

    Unlike ``keras.callbacks.History``, which accumulates every epoch's
    logs, this stores just the most recent epoch in ``self.history``.
    """

    def __init__(self):
        # Deliberately bypass History.__init__ and initialise via Callback;
        # the metrics dict is created here instead.
        super(History, self).__init__()
        self.history = {}

    def on_train_begin(self, logs=None):
        # Single integer, not a list: only the last epoch index is tracked.
        self.epoch = 0

    def on_epoch_end(self, epoch, logs=None):
        self.epoch = epoch
        # Overwrite existing keys so only the latest epoch's values survive.
        self.history.update(logs or {})
        # Point the model at this history object so the state it sees after
        # the epoch ends is always the most recent one.
        self.model.history = self
Classes
class LightHistory-
Class based on Keras.History, but which only stores information about the last training epoch, not the entire process
Expand source code
class LightHistory(History): """ Class based on Keras.History, but which only stores information about the last training epoch, not the entire process """ def __init__(self): super(History, self).__init__() self.history = {} def on_train_begin(self, logs=None): self.epoch = 0 def on_epoch_end(self, epoch, logs=None): logs = logs or {} self.epoch = epoch for k, v in logs.items(): self.history[k] = v # Set the history attribute on the model after the epoch ends. This will # make sure that the state which is set is the latest one. self.model.history = selfAncestors
- keras.src.callbacks.history.History
- keras.src.callbacks.callback.Callback
Methods
def on_epoch_end(self, epoch, logs=None)-
Called at the end of an epoch.
Subclasses should override for any actions to run. This function should only be called during TRAIN mode.
Args
epoch- Integer, index of epoch.
logs- Dict, metric results for this training epoch, and for the
validation epoch if validation is performed. Validation result
keys are prefixed with
val_. For training epoch, the values of the Model's metrics are returned. Example: {'loss': 0.2, 'accuracy': 0.7}.
Expand source code
def on_epoch_end(self, epoch, logs=None): logs = logs or {} self.epoch = epoch for k, v in logs.items(): self.history[k] = v # Set the history attribute on the model after the epoch ends. This will # make sure that the state which is set is the latest one. self.model.history = self def on_train_begin(self, logs=None)-
Called at the beginning of training.
Subclasses should override for any actions to run.
Args
logs- Dict. Currently no data is passed to this argument for this method but that may change in the future.
Expand source code
def on_train_begin(self, logs=None): self.epoch = 0
class MeasureTrainTime-
Callback for measuring time. Supports measuring training time, measuring the time of each epoch during training, and measuring the running time of the predict method
Expand source code
class MeasureTrainTime(Callback): """ Callback for measuring time. Supports measuring training time, measuring the time of each epoch during training, and measuring the running time of the predict method """ def __init__(self): super(MeasureTrainTime, self).__init__() self.start_train_time = 0 self.end_train_time = 0 self.start_evaluate_time = 0 self.end_evaluate_time = 0 self.start_predict_time = 0 self.end_predict_time = 0 self.start_epoch_time = 0 self.end_epoch_time = 0 def on_test_begin(self, logs=None): self.model.trained_time["predict_time"] = 0 self.start_evaluate_time = time.perf_counter() def on_test_end(self, logs=None): self.end_evaluate_time = time.perf_counter() self.model.trained_time["predict_time"] = ( self.end_evaluate_time - self.start_evaluate_time ) def on_predict_begin(self, logs=None): self.model.trained_time["predict_time"] = 0 self.start_predict_time = time.perf_counter() def on_predict_end(self, logs=None): self.end_predict_time = time.perf_counter() self.model.trained_time["predict_time"] = ( self.end_predict_time - self.start_predict_time ) def on_train_begin(self, logs=None): self.model.trained_time["train_time"] = 0.0 self.model.trained_time["epoch_time"] = [] self.start_train_time = time.perf_counter() def on_epoch_begin(self, epoch, logs=None): self.start_epoch_time = time.perf_counter() def on_epoch_end(self, epoch, logs=None): self.end_epoch_time = time.perf_counter() self.model.trained_time["epoch_time"].append( self.end_epoch_time - self.start_epoch_time ) def on_train_end(self, logs=None): self.end_train_time = time.perf_counter() self.model.trained_time["train_time"] = ( self.end_train_time - self.start_train_time )Ancestors
- keras.src.callbacks.callback.Callback
Methods
def on_epoch_begin(self, epoch, logs=None)-
Called at the start of an epoch.
Subclasses should override for any actions to run. This function should only be called during TRAIN mode.
Args
epoch- Integer, index of epoch.
logs- Dict. Currently no data is passed to this argument for this method but that may change in the future.
Expand source code
def on_epoch_begin(self, epoch, logs=None): self.start_epoch_time = time.perf_counter() def on_epoch_end(self, epoch, logs=None)-
Called at the end of an epoch.
Subclasses should override for any actions to run. This function should only be called during TRAIN mode.
Args
epoch- Integer, index of epoch.
logs- Dict, metric results for this training epoch, and for the
validation epoch if validation is performed. Validation result
keys are prefixed with
val_. For training epoch, the values of the Model's metrics are returned. Example: {'loss': 0.2, 'accuracy': 0.7}.
Expand source code
def on_epoch_end(self, epoch, logs=None): self.end_epoch_time = time.perf_counter() self.model.trained_time["epoch_time"].append( self.end_epoch_time - self.start_epoch_time ) def on_predict_begin(self, logs=None)-
Called at the beginning of prediction.
Subclasses should override for any actions to run.
Args
logs- Dict. Currently no data is passed to this argument for this method but that may change in the future.
Expand source code
def on_predict_begin(self, logs=None): self.model.trained_time["predict_time"] = 0 self.start_predict_time = time.perf_counter() def on_predict_end(self, logs=None)-
Called at the end of prediction.
Subclasses should override for any actions to run.
Args
logs- Dict. Currently no data is passed to this argument for this method but that may change in the future.
Expand source code
def on_predict_end(self, logs=None): self.end_predict_time = time.perf_counter() self.model.trained_time["predict_time"] = ( self.end_predict_time - self.start_predict_time ) def on_test_begin(self, logs=None)-
Called at the beginning of evaluation or validation.
Subclasses should override for any actions to run.
Args
logs- Dict. Currently no data is passed to this argument for this method but that may change in the future.
Expand source code
def on_test_begin(self, logs=None): self.model.trained_time["predict_time"] = 0 self.start_evaluate_time = time.perf_counter() def on_test_end(self, logs=None)-
Called at the end of evaluation or validation.
Subclasses should override for any actions to run.
Args
logs- Dict. Currently the output of the last call to
on_test_batch_end() is passed to this argument for this method but that may change in the future.
Expand source code
def on_test_end(self, logs=None): self.end_evaluate_time = time.perf_counter() self.model.trained_time["predict_time"] = ( self.end_evaluate_time - self.start_evaluate_time ) def on_train_begin(self, logs=None)-
Called at the beginning of training.
Subclasses should override for any actions to run.
Args
logs- Dict. Currently no data is passed to this argument for this method but that may change in the future.
Expand source code
def on_train_begin(self, logs=None): self.model.trained_time["train_time"] = 0.0 self.model.trained_time["epoch_time"] = [] self.start_train_time = time.perf_counter() def on_train_end(self, logs=None)-
Called at the end of training.
Subclasses should override for any actions to run.
Args
logs- Dict. Currently the output of the last call to
on_epoch_end() is passed to this argument for this method but that may change in the future.
Expand source code
def on_train_end(self, logs=None): self.end_train_time = time.perf_counter() self.model.trained_time["train_time"] = ( self.end_train_time - self.start_train_time )
class MemoryCleaner-
Base class used to build new callbacks.
Callbacks can be passed to keras methods such as
fit(), evaluate(), and predict() in order to hook into the various stages of the model training, evaluation, and inference lifecycle. To create a custom callback, subclass
keras.callbacks.Callback and override the method associated with the stage of interest. Example:
>>> training_finished = False >>> class MyCallback(Callback): ... def on_train_end(self, logs=None): ... global training_finished ... training_finished = True >>> model = Sequential([ ... layers.Dense(1, input_shape=(1,))]) >>> model.compile(loss='mean_squared_error') >>> model.fit(np.array([[1.0]]), np.array([[1.0]]), ... callbacks=[MyCallback()]) >>> assert training_finished == TrueIf you want to use
Callbackobjects in a custom training loop:- You should pack all your callbacks into a single
callbacks.CallbackListso they can all be called together. - You will need to manually call all the
on_*methods at the appropriate locations in your loop. Like this:
Example:
callbacks = keras.callbacks.CallbackList([...]) callbacks.append(...) callbacks.on_train_begin(...) for epoch in range(EPOCHS): callbacks.on_epoch_begin(epoch) for i, data in dataset.enumerate(): callbacks.on_train_batch_begin(i) batch_logs = model.train_step(data) callbacks.on_train_batch_end(i, batch_logs) epoch_logs = ... callbacks.on_epoch_end(epoch, epoch_logs) final_logs=... callbacks.on_train_end(final_logs)Attributes
params- Dict. Training parameters (eg. verbosity, batch size, number of epochs…).
model- Instance of
Model. Reference of the model being trained.
The
logsdictionary that callback methods take as argument will contain keys for quantities relevant to the current batch or epoch (see method-specific docstrings).Expand source code
class MemoryCleaner(Callback): def on_epoch_end(self, epoch, logs=None): gc.collect() k.clear_session()Ancestors
- keras.src.callbacks.callback.Callback
Methods
def on_epoch_end(self, epoch, logs=None)-
Called at the end of an epoch.
Subclasses should override for any actions to run. This function should only be called during TRAIN mode.
Args
epoch- Integer, index of epoch.
logs- Dict, metric results for this training epoch, and for the
validation epoch if validation is performed. Validation result
keys are prefixed with
val_. For training epoch, the values of the Model's metrics are returned. Example: {'loss': 0.2, 'accuracy': 0.7}.
Expand source code
def on_epoch_end(self, epoch, logs=None): gc.collect() k.clear_session()
- You should pack all your callbacks into a single