From e4e3c0e113471b886ebf1e6d7920377821461815 Mon Sep 17 00:00:00 2001 From: jakubczakon Date: Sun, 2 Feb 2020 14:16:39 +0100 Subject: [PATCH 01/12] initial neptune logging added --- requirements-dev.txt | 1 + skorch/callbacks/__init__.py | 2 +- skorch/callbacks/logging.py | 134 ++++++++++++- skorch/tests/callbacks/test_logging.py | 253 ++++++++++++++++++++++++- 4 files changed, 380 insertions(+), 10 deletions(-) diff --git a/requirements-dev.txt b/requirements-dev.txt index c6db1f74c..bc43d760b 100644 --- a/requirements-dev.txt +++ b/requirements-dev.txt @@ -3,6 +3,7 @@ flaky future>=0.17.1 jupyter matplotlib>=2.0.2 +neptune-client>=0.4.103 numpydoc openpyxl pandas diff --git a/skorch/callbacks/__init__.py b/skorch/callbacks/__init__.py index f0f4f8edc..c5830b2a2 100644 --- a/skorch/callbacks/__init__.py +++ b/skorch/callbacks/__init__.py @@ -13,7 +13,7 @@ from .training import * from .lr_scheduler import * -__all__ = ['Callback', 'EpochTimer', 'PrintLog', 'ProgressBar', +__all__ = ['Callback', 'EpochTimer', 'NeptuneLogger', 'PrintLog', 'ProgressBar', 'LRScheduler', 'WarmRestartLR', 'GradientNormClipping', 'BatchScoring', 'EpochScoring', 'Checkpoint', 'EarlyStopping', 'Freezer', 'Unfreezer', 'Initializer', 'ParamMapper', diff --git a/skorch/callbacks/logging.py b/skorch/callbacks/logging.py index 4ba67e9ef..c16f7157c 100644 --- a/skorch/callbacks/logging.py +++ b/skorch/callbacks/logging.py @@ -14,8 +14,8 @@ from skorch.dataset import get_len from skorch.callbacks import Callback - -__all__ = ['EpochTimer', 'PrintLog', 'ProgressBar', 'TensorBoard'] +__all__ = ['EpochTimer', 'NeptuneLogger', 'PrintLog', 'ProgressBar', + 'TensorBoard'] def filter_log_keys(keys, keys_ignored=None): @@ -50,6 +50,7 @@ class EpochTimer(Callback): history with the name ``dur``. """ + def __init__(self, **kwargs): super(EpochTimer, self).__init__(**kwargs) @@ -62,6 +63,127 @@ def on_epoch_end(self, net, **kwargs): net.history.record('dur', time.time() - self.epoch_start_time_) +class NeptuneLogger(Callback): + """Logs results from history to Neptune + + "Neptune is a lightweight experiment tracking tool" (Neptune_) + + Use this callback to automatically log all interesting values from + your net's history to Neptune. + + The best way to log additional information is to log directly to the + experiment object or subclass the `on_*`` methods. + + Examples + -------- + >>> # Create a neptune experiment object + >>> neptune.init('neptune-ai/skorch-integration') + ... experiment = neptune.create_experiment( + ... name='skorch-basic-example', + ... params={'max_epochs': 20, + ... 'lr': 0.1}, + ... upload_source_files=['skorch_example.py']) + + >>> # Create a neptune_logger callback + >>> neptune_logger = NeptuneLogger(experiment, close_after_train=False) + + >>> # Pass a logger to net callbacks argument + >>> net = NeuralNetClassifier( + ... ClassifierModule, + ... max_epochs=20, + ... lr=0.01, + ... callbacks=[neptune_logger]) + + >>> # Log additional metrics after training has finished + >>> from sklearn.metrics import roc_auc_score + ... y_pred = net.predict_proba(X) + ... auc = roc_auc_score(y, y_pred[:, 1]) + ... + ... neptune_logger.experiment.log_metric('roc_auc_score', auc) + + >>> # log charts like ROC curve + ... from scikitplot.metrics import plot_roc + ... import matplotlib.pyplot as plt + ... + ... fig, ax = plt.subplots(figsize=(16, 12)) + ... plot_roc(y, y_pred, ax=ax) + ... neptune_logger.experiment.log_image('roc_curve', fig) + + >>> # log net object after training + ... 
net.save_params(f_params='basic_model.pkl') + ... neptune_logger.experiment.log_artifact('basic_model.pkl') + + >>> # close experiment + ... neptune_logger.experiment.stop() + + Parameters + ---------- + experiment : neptune.experiments.Experiment + Instantiated ``Experiment`` class. + + close_after_train : bool (default=True) + Whether to close the ``Experiment`` object once training + finishes. Set this parameter to False if you want to continue + logging to the same Experiment or if you use it as a context + manager. + + keys_ignored : str or list of str (default=None) + Key or list of keys that should not be logged to + Neptune. Note that in addition to the keys provided by the + user. + + Note + ---- + + Install psutil to monitor resource consumption + + > pip install psutil + + .. _Neptune: https://www.neptune.ai + + """ + + def __init__( + self, + experiment, + close_after_train=True, + keys_ignored=None, + ): + self.experiment = experiment + self.close_after_train = close_after_train + self.keys_ignored = keys_ignored + + def initialize(self): + self.first_batch_ = True + + keys_ignored = self.keys_ignored + if isinstance(keys_ignored, str): + keys_ignored = [keys_ignored] + self.keys_ignored_ = set(keys_ignored or []) + self.keys_ignored_.add('batches') + self.keys_ignored_.add('epoch') + return self + + def on_batch_end(self, net, **kwargs): + batch_logs = net.history[-1]['batches'][-1] + + for key in filter_log_keys(batch_logs.keys(), self.keys_ignored_): + self.experiment.log_metric(key, batch_logs[key]) + + def on_epoch_end(self, net, **kwargs): + """Automatically log values from the last history step.""" + history = net.history + epoch_logs = history[-1] + epoch = epoch_logs['epoch'] + + for key in filter_log_keys(epoch_logs.keys(), self.keys_ignored_): + self.experiment.log_metric(key, x=epoch, y=epoch_logs[key]) + + def on_train_end(self, net, **kwargs): + if self.close_after_train: + self.experiment.stop() + + class PrintLog(Callback): """Print useful information from the model's history as a table. @@ -114,6 +236,7 @@ class PrintLog(Callback): be consistent with numerical columns). """ + def __init__( self, keys_ignored=None, @@ -178,7 +301,8 @@ def _sorted_keys(self, keys): sorted_keys.append('epoch') # ignore keys like *_best or event_* - for key in filter_log_keys(sorted(keys), keys_ignored=self.keys_ignored_): + for key in filter_log_keys(sorted(keys), + keys_ignored=self.keys_ignored_): if key != 'dur': sorted_keys.append(key) @@ -332,7 +456,8 @@ def on_batch_end(self, net, **kwargs): self.pbar.update() # pylint: disable=attribute-defined-outside-init, arguments-differ - def on_epoch_begin(self, net, dataset_train=None, dataset_valid=None, **kwargs): + def on_epoch_begin(self, net, dataset_train=None, dataset_valid=None, + **kwargs): # Assume it is a number until proven otherwise. batches_per_epoch = self.batches_per_epoch @@ -421,6 +546,7 @@ class TensorBoard(Callback): .. 
_tensorboard: https://www.tensorflow.org/tensorboard/ """ + def __init__( self, writer, diff --git a/skorch/tests/callbacks/test_logging.py b/skorch/tests/callbacks/test_logging.py index 94dc32e23..b4bf642ca 100644 --- a/skorch/tests/callbacks/test_logging.py +++ b/skorch/tests/callbacks/test_logging.py @@ -13,6 +13,245 @@ from skorch.tests.conftest import tensorboard_installed +@pytest.mark.skipif( + not neptune_installed, reason='neptune is not installed') +class TestNeptune: + @pytest.fixture + def net_cls(self): + from skorch import NeuralNetClassifier + return NeuralNetClassifier + + @pytest.fixture + def data(self, classifier_data): + X, y = classifier_data + # accelerate training since we don't care for the loss + X, y = X[:40], y[:40] + return X, y + + @pytest.fixture + def tensorboard_cls(self): + from skorch.callbacks import TensorBoard + return TensorBoard + + @pytest.fixture + def summary_writer_cls(self): + from torch.utils.tensorboard import SummaryWriter + return SummaryWriter + + @pytest.fixture + def mock_writer(self, summary_writer_cls): + mock = Mock(spec=summary_writer_cls) + return mock + + @pytest.fixture + def net_fitted( + self, + net_cls, + classifier_module, + data, + tensorboard_cls, + mock_writer, + ): + return net_cls( + classifier_module, + callbacks=[tensorboard_cls(mock_writer)], + max_epochs=3, + ).fit(*data) + + @pytest.mark.skipif( + True, reason="Waiting for proper implementation of graph tracing") + def test_graph_added_once(self, net_fitted, mock_writer): + # graph should just be added once + assert mock_writer.add_graph.call_count == 1 + + @pytest.mark.skipif( + True, reason="Waiting for proper implementation of graph tracing") + def test_include_graph_false( + self, + net_cls, + classifier_module, + data, + tensorboard_cls, + mock_writer, + ): + net_cls( + classifier_module, + callbacks=[tensorboard_cls(mock_writer, include_graph=False)], + max_epochs=2, + ).fit(*data) + assert mock_writer.add_graph.call_count == 0 + + def test_writer_closed_automatically(self, net_fitted, mock_writer): + assert mock_writer.close.call_count == 1 + + def test_writer_not_closed( + self, + net_cls, + classifier_module, + data, + tensorboard_cls, + mock_writer, + ): + net_cls( + classifier_module, + callbacks=[tensorboard_cls(mock_writer, close_after_train=False)], + max_epochs=2, + ).fit(*data) + assert mock_writer.close.call_count == 0 + + def test_keys_from_history_logged(self, net_fitted, mock_writer): + add_scalar = mock_writer.add_scalar + + # 3 epochs with 4 keys + assert add_scalar.call_count == 3 * 4 + keys = {call_args[1]['tag'] for call_args in add_scalar.call_args_list} + expected = {'dur', 'Loss/train_loss', 'Loss/valid_loss', + 'Loss/valid_acc'} + assert keys == expected + + def test_ignore_keys( + self, + net_cls, + classifier_module, + data, + tensorboard_cls, + mock_writer, + ): + # ignore 'dur' and 'valid_loss', 'unknown' doesn't exist but + # this should not cause a problem + tb = tensorboard_cls( + mock_writer, keys_ignored=['dur', 'valid_loss', 'unknown']) + net_cls( + classifier_module, + callbacks=[tb], + max_epochs=3, + ).fit(*data) + add_scalar = mock_writer.add_scalar + + keys = {call_args[1]['tag'] for call_args in add_scalar.call_args_list} + expected = {'Loss/train_loss', 'Loss/valid_acc'} + assert keys == expected + + def test_keys_ignored_is_string(self, tensorboard_cls, mock_writer): + tb = tensorboard_cls(mock_writer, keys_ignored='a-key').initialize() + expected = {'a-key', 'batches'} + assert tb.keys_ignored_ == expected + + def 
test_other_key_mapper( + self, + net_cls, + classifier_module, + data, + tensorboard_cls, + mock_writer, + ): + # just map all keys to uppercase + tb = tensorboard_cls(mock_writer, key_mapper=lambda s: s.upper()) + net_cls( + classifier_module, + callbacks=[tb], + max_epochs=3, + ).fit(*data) + add_scalar = mock_writer.add_scalar + + keys = {call_args[1]['tag'] for call_args in add_scalar.call_args_list} + expected = {'DUR', 'TRAIN_LOSS', 'VALID_LOSS', 'VALID_ACC'} + assert keys == expected + + @pytest.fixture + def add_scalar_maybe(self, tensorboard_cls, mock_writer): + tb = tensorboard_cls(mock_writer) + return tb.add_scalar_maybe + + @pytest.fixture + def history(self): + return [ + {'loss': 0.1, 'epoch': 1, 'foo': ['invalid', 'type']}, + {'loss': 0.2, 'epoch': 2, 'foo': ['invalid', 'type']}, + ] + + def test_add_scalar_maybe_uses_last_epoch_values( + self, add_scalar_maybe, mock_writer, history): + add_scalar_maybe(history, key='loss', tag='myloss', global_step=2) + call_kwargs = mock_writer.add_scalar.call_args_list[0][1] + assert call_kwargs['tag'] == 'myloss' + assert call_kwargs['scalar_value'] == 0.2 + assert call_kwargs['global_step'] == 2 + + def test_add_scalar_maybe_infers_epoch( + self, add_scalar_maybe, mock_writer, history): + # don't indicate 'global_step' value + add_scalar_maybe(history, key='loss', tag='myloss') + call_kwargs = mock_writer.add_scalar.call_args_list[0][1] + assert call_kwargs['global_step'] == 2 + + def test_add_scalar_maybe_unknown_key_does_not_raise( + self, tensorboard_cls, summary_writer_cls, history): + tb = tensorboard_cls(summary_writer_cls()) + # does not raise: + tb.add_scalar_maybe(history, key='unknown', tag='bar') + + def test_add_scalar_maybe_wrong_type_does_not_raise( + self, tensorboard_cls, summary_writer_cls, history): + tb = tensorboard_cls(summary_writer_cls()) + # value of 'foo' is a list but that does not raise: + tb.add_scalar_maybe(history, key='foo', tag='bar') + + def test_fit_with_real_summary_writer( + self, + net_cls, + classifier_module, + data, + tensorboard_cls, + summary_writer_cls, + tmp_path, + ): + path = str(tmp_path) + + net = net_cls( + classifier_module, + callbacks=[tensorboard_cls(summary_writer_cls(path))], + max_epochs=5, + ) + net.fit(*data) + + # is not empty + assert os.listdir(path) + + def test_fit_with_dict_input( + self, + net_cls, + classifier_module, + data, + tensorboard_cls, + summary_writer_cls, + tmp_path, + ): + from skorch.toy import MLPModule + path = str(tmp_path) + X, y = data + + # create a dictionary with unordered keys + X_dict = {k: X[:, i:i + 4] for k, i in + zip('cebad', range(0, X.shape[1], 4))} + + class MyModule(MLPModule): + # use different order for args here + def forward(self, b, e, c, d, a, **kwargs): + X = torch.cat((b, e, c, d, a), 1) + return super().forward(X, **kwargs) + + net = net_cls( + MyModule(output_nonlin=nn.Softmax(dim=-1)), + callbacks=[tensorboard_cls(summary_writer_cls(path))], + max_epochs=5, + ) + net.fit(X_dict, y) + + # is not empty + assert os.listdir(path) + + class TestPrintLog: @pytest.fixture def print_log_cls(self): @@ -42,6 +281,7 @@ def odd_epoch_callback(self): class OddEpochCallback(Callback): def on_epoch_end(self, net, **kwargs): net.history[-1]['event_odd'] = bool(len(net.history) % 2) + return OddEpochCallback().initialize() @pytest.fixture @@ -174,8 +414,8 @@ def test_with_event_key(self, history, print_log_cls): odd_row = print_log.sink.call_args_list[2][0][0].split() even_row = print_log.sink.call_args_list[3][0][0].split() - assert 
len(odd_row) == 6 # odd row has entries in every column - assert odd_row[4] == '+' # including '+' sign for the 'event_odd' + assert len(odd_row) == 6 # odd row has entries in every column + assert odd_row[4] == '+' # including '+' sign for the 'event_odd' assert len(even_row) == 5 # even row does not have 'event_odd' entry def test_witout_valid_data( @@ -273,7 +513,8 @@ def test_invalid_postfix(self, postfix, net_cls, progressbar_cls, data): (1, [1, 1]), # offset by -1, should still work ]) def test_different_count_schemes( - self, tqdm_mock, scheme, expected_total, net_cls, progressbar_cls, data): + self, tqdm_mock, scheme, expected_total, net_cls, progressbar_cls, + data): net = net_cls(callbacks=[ progressbar_cls(batches_per_epoch=scheme), ]) @@ -375,7 +616,8 @@ def test_keys_from_history_logged(self, net_fitted, mock_writer): # 3 epochs with 4 keys assert add_scalar.call_count == 3 * 4 keys = {call_args[1]['tag'] for call_args in add_scalar.call_args_list} - expected = {'dur', 'Loss/train_loss', 'Loss/valid_loss', 'Loss/valid_acc'} + expected = {'dur', 'Loss/train_loss', 'Loss/valid_loss', + 'Loss/valid_acc'} assert keys == expected def test_ignore_keys( @@ -501,7 +743,8 @@ def test_fit_with_dict_input( X, y = data # create a dictionary with unordered keys - X_dict = {k: X[:, i:i+4] for k, i in zip('cebad', range(0, X.shape[1], 4))} + X_dict = {k: X[:, i:i + 4] for k, i in + zip('cebad', range(0, X.shape[1], 4))} class MyModule(MLPModule): # use different order for args here From 7387aed6498969c50e4046995f81ff9a68712509 Mon Sep 17 00:00:00 2001 From: Jakub Czakon Date: Mon, 3 Feb 2020 13:35:11 +0100 Subject: [PATCH 02/12] added neptune logging tests --- skorch/callbacks/logging.py | 1 - skorch/tests/callbacks/test_logging.py | 183 ++++++------------------- skorch/tests/conftest.py | 7 + 3 files changed, 47 insertions(+), 144 deletions(-) diff --git a/skorch/callbacks/logging.py b/skorch/callbacks/logging.py index c16f7157c..966506c57 100644 --- a/skorch/callbacks/logging.py +++ b/skorch/callbacks/logging.py @@ -161,7 +161,6 @@ def initialize(self): keys_ignored = [keys_ignored] self.keys_ignored_ = set(keys_ignored or []) self.keys_ignored_.add('batches') - self.keys_ignored_.add('epoch') return self def on_batch_end(self, net, **kwargs): diff --git a/skorch/tests/callbacks/test_logging.py b/skorch/tests/callbacks/test_logging.py index b4bf642ca..b87d8641e 100644 --- a/skorch/tests/callbacks/test_logging.py +++ b/skorch/tests/callbacks/test_logging.py @@ -10,7 +10,7 @@ import torch from torch import nn -from skorch.tests.conftest import tensorboard_installed +from skorch.tests.conftest import neptune_installed, tensorboard_installed @pytest.mark.skipif( @@ -29,18 +29,22 @@ def data(self, classifier_data): return X, y @pytest.fixture - def tensorboard_cls(self): - from skorch.callbacks import TensorBoard - return TensorBoard + def neptune_logger_cls(self): + from skorch.callbacks import NeptuneLogger + return NeptuneLogger @pytest.fixture - def summary_writer_cls(self): - from torch.utils.tensorboard import SummaryWriter - return SummaryWriter + def neptune_experiment_cls(self): + import neptune + neptune.init(project_qualified_name="tests/dry-run", + backend=neptune.OfflineBackend()) + return neptune.create_experiment @pytest.fixture - def mock_writer(self, summary_writer_cls): - mock = Mock(spec=summary_writer_cls) + def mock_experiment(self, neptune_experiment_cls): + mock = Mock(spec=neptune_experiment_cls) + mock.log_metric = Mock() + mock.stop = Mock() return mock @pytest.fixture 
@@ -49,186 +53,82 @@ def net_fitted( net_cls, classifier_module, data, - tensorboard_cls, - mock_writer, + neptune_logger_cls, + mock_experiment, ): return net_cls( classifier_module, - callbacks=[tensorboard_cls(mock_writer)], + callbacks=[neptune_logger_cls(mock_experiment)], max_epochs=3, ).fit(*data) - @pytest.mark.skipif( - True, reason="Waiting for proper implementation of graph tracing") - def test_graph_added_once(self, net_fitted, mock_writer): - # graph should just be added once - assert mock_writer.add_graph.call_count == 1 + def test_experiment_closed_automatically(self, net_fitted, mock_experiment): + assert mock_experiment.stop.call_count == 1 - @pytest.mark.skipif( - True, reason="Waiting for proper implementation of graph tracing") - def test_include_graph_false( + def test_experiment_not_closed( self, net_cls, classifier_module, data, - tensorboard_cls, - mock_writer, + neptune_logger_cls, + mock_experiment, ): net_cls( classifier_module, - callbacks=[tensorboard_cls(mock_writer, include_graph=False)], + callbacks=[ + neptune_logger_cls(mock_experiment, close_after_train=False)], max_epochs=2, ).fit(*data) - assert mock_writer.add_graph.call_count == 0 - - def test_writer_closed_automatically(self, net_fitted, mock_writer): - assert mock_writer.close.call_count == 1 - - def test_writer_not_closed( - self, - net_cls, - classifier_module, - data, - tensorboard_cls, - mock_writer, - ): - net_cls( - classifier_module, - callbacks=[tensorboard_cls(mock_writer, close_after_train=False)], - max_epochs=2, - ).fit(*data) - assert mock_writer.close.call_count == 0 - - def test_keys_from_history_logged(self, net_fitted, mock_writer): - add_scalar = mock_writer.add_scalar - - # 3 epochs with 4 keys - assert add_scalar.call_count == 3 * 4 - keys = {call_args[1]['tag'] for call_args in add_scalar.call_args_list} - expected = {'dur', 'Loss/train_loss', 'Loss/valid_loss', - 'Loss/valid_acc'} - assert keys == expected + assert mock_experiment.stop.call_count == 0 def test_ignore_keys( self, net_cls, classifier_module, data, - tensorboard_cls, - mock_writer, + neptune_logger_cls, + mock_experiment, ): # ignore 'dur' and 'valid_loss', 'unknown' doesn't exist but # this should not cause a problem - tb = tensorboard_cls( - mock_writer, keys_ignored=['dur', 'valid_loss', 'unknown']) + npt = neptune_logger_cls( + mock_experiment, keys_ignored=['dur', 'valid_loss', 'unknown']) net_cls( classifier_module, - callbacks=[tb], + callbacks=[npt], max_epochs=3, ).fit(*data) - add_scalar = mock_writer.add_scalar - keys = {call_args[1]['tag'] for call_args in add_scalar.call_args_list} - expected = {'Loss/train_loss', 'Loss/valid_acc'} - assert keys == expected - - def test_keys_ignored_is_string(self, tensorboard_cls, mock_writer): - tb = tensorboard_cls(mock_writer, keys_ignored='a-key').initialize() + def test_keys_ignored_is_string(self, neptune_logger_cls, mock_experiment): + npt = neptune_logger_cls(mock_experiment, + keys_ignored='a-key').initialize() expected = {'a-key', 'batches'} - assert tb.keys_ignored_ == expected - - def test_other_key_mapper( - self, - net_cls, - classifier_module, - data, - tensorboard_cls, - mock_writer, - ): - # just map all keys to uppercase - tb = tensorboard_cls(mock_writer, key_mapper=lambda s: s.upper()) - net_cls( - classifier_module, - callbacks=[tb], - max_epochs=3, - ).fit(*data) - add_scalar = mock_writer.add_scalar - - keys = {call_args[1]['tag'] for call_args in add_scalar.call_args_list} - expected = {'DUR', 'TRAIN_LOSS', 'VALID_LOSS', 'VALID_ACC'} - assert 
keys == expected - - @pytest.fixture - def add_scalar_maybe(self, tensorboard_cls, mock_writer): - tb = tensorboard_cls(mock_writer) - return tb.add_scalar_maybe - - @pytest.fixture - def history(self): - return [ - {'loss': 0.1, 'epoch': 1, 'foo': ['invalid', 'type']}, - {'loss': 0.2, 'epoch': 2, 'foo': ['invalid', 'type']}, - ] - - def test_add_scalar_maybe_uses_last_epoch_values( - self, add_scalar_maybe, mock_writer, history): - add_scalar_maybe(history, key='loss', tag='myloss', global_step=2) - call_kwargs = mock_writer.add_scalar.call_args_list[0][1] - assert call_kwargs['tag'] == 'myloss' - assert call_kwargs['scalar_value'] == 0.2 - assert call_kwargs['global_step'] == 2 + assert npt.keys_ignored_ == expected - def test_add_scalar_maybe_infers_epoch( - self, add_scalar_maybe, mock_writer, history): - # don't indicate 'global_step' value - add_scalar_maybe(history, key='loss', tag='myloss') - call_kwargs = mock_writer.add_scalar.call_args_list[0][1] - assert call_kwargs['global_step'] == 2 - - def test_add_scalar_maybe_unknown_key_does_not_raise( - self, tensorboard_cls, summary_writer_cls, history): - tb = tensorboard_cls(summary_writer_cls()) - # does not raise: - tb.add_scalar_maybe(history, key='unknown', tag='bar') - - def test_add_scalar_maybe_wrong_type_does_not_raise( - self, tensorboard_cls, summary_writer_cls, history): - tb = tensorboard_cls(summary_writer_cls()) - # value of 'foo' is a list but that does not raise: - tb.add_scalar_maybe(history, key='foo', tag='bar') - - def test_fit_with_real_summary_writer( + def test_fit_with_real_experiment( self, net_cls, classifier_module, data, - tensorboard_cls, - summary_writer_cls, - tmp_path, + neptune_logger_cls, + neptune_experiment_cls, ): - path = str(tmp_path) - net = net_cls( classifier_module, - callbacks=[tensorboard_cls(summary_writer_cls(path))], + callbacks=[neptune_logger_cls(neptune_experiment_cls())], max_epochs=5, ) net.fit(*data) - # is not empty - assert os.listdir(path) - def test_fit_with_dict_input( self, net_cls, classifier_module, data, - tensorboard_cls, - summary_writer_cls, - tmp_path, + neptune_logger_cls, + neptune_experiment_cls, ): from skorch.toy import MLPModule - path = str(tmp_path) X, y = data # create a dictionary with unordered keys @@ -243,14 +143,11 @@ def forward(self, b, e, c, d, a, **kwargs): net = net_cls( MyModule(output_nonlin=nn.Softmax(dim=-1)), - callbacks=[tensorboard_cls(summary_writer_cls(path))], + callbacks=[neptune_logger_cls(neptune_experiment_cls())], max_epochs=5, ) net.fit(X_dict, y) - # is not empty - assert os.listdir(path) - class TestPrintLog: @pytest.fixture diff --git a/skorch/tests/conftest.py b/skorch/tests/conftest.py index 875b126fb..46eb7f40f 100644 --- a/skorch/tests/conftest.py +++ b/skorch/tests/conftest.py @@ -125,6 +125,13 @@ def data(): y = np.array([-1, 0, 5, 4]).astype(np.float32).reshape(-1, 1) return X, y +neptune_installed = False +try: + # pylint: disable=unused-import + import neptune + neptune_installed = True +except ImportError: + pass pandas_installed = False try: From 114284e9d0a3a912c2ebbd7c1ba0c258fc50376c Mon Sep 17 00:00:00 2001 From: Jakub Czakon Date: Mon, 3 Feb 2020 13:54:40 +0100 Subject: [PATCH 03/12] updated link --- skorch/callbacks/logging.py | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/skorch/callbacks/logging.py b/skorch/callbacks/logging.py index 966506c57..c185e770e 100644 --- a/skorch/callbacks/logging.py +++ b/skorch/callbacks/logging.py @@ -66,7 +66,8 @@ def on_epoch_end(self, net, **kwargs): 
class NeptuneLogger(Callback): """Logs results from history to Neptune - "Neptune is a lightweight experiment tracking tool" (Neptune_) + Neptune is a lightweight experiment tracking tool. + You can read more about it here: https://neptune.ai Use this callback to automatically log all interesting values from your net's history to Neptune. @@ -74,6 +75,10 @@ class NeptuneLogger(Callback): The best way to log additional information is to log directly to the experiment object or subclass the `on_*`` methods. + To monitor resource consumption install psutil + + >>> pip install psutil + Examples -------- >>> # Create a neptune experiment object @@ -139,8 +144,6 @@ class NeptuneLogger(Callback): > pip install psutil - .. _Neptune: https://www.neptune.ai - """ def __init__( From 91602af1bf2d3e23469a32b028551dc10db32684 Mon Sep 17 00:00:00 2001 From: Jakub Czakon Date: Mon, 3 Feb 2020 14:00:52 +0100 Subject: [PATCH 04/12] added link to an example experiment ru --- skorch/callbacks/logging.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/skorch/callbacks/logging.py b/skorch/callbacks/logging.py index c185e770e..80963e707 100644 --- a/skorch/callbacks/logging.py +++ b/skorch/callbacks/logging.py @@ -79,6 +79,9 @@ class NeptuneLogger(Callback): >>> pip install psutil + You can view example experiment logs here: + https://ui.neptune.ai/o/neptune-ai/org/skorch-integration/e/SKOR-24/charts + Examples -------- >>> # Create a neptune experiment object From af9d1658e9324350861db621995195ff8546f3c7 Mon Sep 17 00:00:00 2001 From: Jakub Czakon Date: Mon, 3 Feb 2020 14:48:49 +0100 Subject: [PATCH 05/12] fixed formatting --- skorch/callbacks/logging.py | 14 +++----------- skorch/tests/callbacks/test_logging.py | 9 +++------ 2 files changed, 6 insertions(+), 17 deletions(-) diff --git a/skorch/callbacks/logging.py b/skorch/callbacks/logging.py index 80963e707..3714c6a0b 100644 --- a/skorch/callbacks/logging.py +++ b/skorch/callbacks/logging.py @@ -14,8 +14,7 @@ from skorch.dataset import get_len from skorch.callbacks import Callback -__all__ = ['EpochTimer', 'NeptuneLogger', 'PrintLog', 'ProgressBar', - 'TensorBoard'] +__all__ = ['EpochTimer', 'NeptuneLogger', 'PrintLog', 'ProgressBar', 'TensorBoard'] def filter_log_keys(keys, keys_ignored=None): @@ -50,7 +49,6 @@ class EpochTimer(Callback): history with the name ``dur``. """ - def __init__(self, **kwargs): super(EpochTimer, self).__init__(**kwargs) @@ -148,7 +146,6 @@ class NeptuneLogger(Callback): > pip install psutil """ - def __init__( self, experiment, @@ -241,7 +238,6 @@ class PrintLog(Callback): be consistent with numerical columns). """ - def __init__( self, keys_ignored=None, @@ -306,8 +302,7 @@ def _sorted_keys(self, keys): sorted_keys.append('epoch') # ignore keys like *_best or event_* - for key in filter_log_keys(sorted(keys), - keys_ignored=self.keys_ignored_): + for key in filter_log_keys(sorted(keys), keys_ignored=self.keys_ignored_): if key != 'dur': sorted_keys.append(key) @@ -411,7 +406,6 @@ class ProgressBar(Callback): >>> net.history[-1, 'batches', -1, key] """ - def __init__( self, batches_per_epoch='auto', @@ -461,8 +455,7 @@ def on_batch_end(self, net, **kwargs): self.pbar.update() # pylint: disable=attribute-defined-outside-init, arguments-differ - def on_epoch_begin(self, net, dataset_train=None, dataset_valid=None, - **kwargs): + def on_epoch_begin(self, net, dataset_train=None, dataset_valid=None, **kwargs): # Assume it is a number until proven otherwise. 
batches_per_epoch = self.batches_per_epoch @@ -551,7 +544,6 @@ class TensorBoard(Callback): .. _tensorboard: https://www.tensorflow.org/tensorboard/ """ - def __init__( self, writer, diff --git a/skorch/tests/callbacks/test_logging.py b/skorch/tests/callbacks/test_logging.py index b87d8641e..575477679 100644 --- a/skorch/tests/callbacks/test_logging.py +++ b/skorch/tests/callbacks/test_logging.py @@ -410,8 +410,7 @@ def test_invalid_postfix(self, postfix, net_cls, progressbar_cls, data): (1, [1, 1]), # offset by -1, should still work ]) def test_different_count_schemes( - self, tqdm_mock, scheme, expected_total, net_cls, progressbar_cls, - data): + self, tqdm_mock, scheme, expected_total, net_cls, progressbar_cls, data): net = net_cls(callbacks=[ progressbar_cls(batches_per_epoch=scheme), ]) @@ -513,8 +512,7 @@ def test_keys_from_history_logged(self, net_fitted, mock_writer): # 3 epochs with 4 keys assert add_scalar.call_count == 3 * 4 keys = {call_args[1]['tag'] for call_args in add_scalar.call_args_list} - expected = {'dur', 'Loss/train_loss', 'Loss/valid_loss', - 'Loss/valid_acc'} + expected = {'dur', 'Loss/train_loss', 'Loss/valid_loss', 'Loss/valid_acc'} assert keys == expected def test_ignore_keys( @@ -640,8 +638,7 @@ def test_fit_with_dict_input( X, y = data # create a dictionary with unordered keys - X_dict = {k: X[:, i:i + 4] for k, i in - zip('cebad', range(0, X.shape[1], 4))} + X_dict = {k: X[:, i:i + 4] for k, i in zip('cebad', range(0, X.shape[1], 4))} class MyModule(MLPModule): # use different order for args here From 58df27540de41499bcdfd5fbe3fdf0e36c842aa4 Mon Sep 17 00:00:00 2001 From: Jakub Czakon Date: Tue, 4 Feb 2020 11:30:22 +0100 Subject: [PATCH 06/12] changed api token to the one for anonymous user --- skorch/callbacks/logging.py | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/skorch/callbacks/logging.py b/skorch/callbacks/logging.py index 3714c6a0b..c9550c1fc 100644 --- a/skorch/callbacks/logging.py +++ b/skorch/callbacks/logging.py @@ -78,16 +78,20 @@ class NeptuneLogger(Callback): >>> pip install psutil You can view example experiment logs here: - https://ui.neptune.ai/o/neptune-ai/org/skorch-integration/e/SKOR-24/charts + https://ui.neptune.ai/o/shared/org/skorch-integration/e/SKOR-4/logs Examples -------- >>> # Create a neptune experiment object - >>> neptune.init('neptune-ai/skorch-integration') + ... # We are using api token for an anonymous user. + ... # For your projects use the token associated with your neptune.ai account + >>> neptune.init(api_token='eyJhcGlfYWRkcmVzcyI6Imh0dHBzOi8vdWkubmVwdHVuZS5tbCIsImFwaV9rZXkiOiJiNzA2YmM4Zi03NmY5LTRjMmUtOTM5ZC00YmEwMzZmOTMyZTQifQ==', + ... project_qualified_name='shared/skorch-integration') + ... ... experiment = neptune.create_experiment( ... name='skorch-basic-example', ... params={'max_epochs': 20, - ... 'lr': 0.1}, + ... 'lr': 0.01}, ... 
upload_source_files=['skorch_example.py']) >>> # Create a neptune_logger callback From efc2af0b7f568a3b2d721837d4e71cb469183791 Mon Sep 17 00:00:00 2001 From: jakubczakon Date: Fri, 7 Feb 2020 09:41:15 +0100 Subject: [PATCH 07/12] local pre-merge --- CHANGES.md | 2 ++ skorch/callbacks/logging.py | 30 +++++++++------- skorch/tests/callbacks/test_logging.py | 50 ++++++++------------------ skorch/tests/conftest.py | 21 +++++++++-- 4 files changed, 54 insertions(+), 49 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index 033e453b5..3c70c8585 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -9,6 +9,8 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ### Added +- Added `NeptuneLogger` callback for logging experiment metadata to neptune.ai + ### Changed - When using caching in scoring callbacks, no longer uselessly iterate over the data; this can save time if iteration is slow (#552, #557) diff --git a/skorch/callbacks/logging.py b/skorch/callbacks/logging.py index c16f7157c..1c342b6d6 100644 --- a/skorch/callbacks/logging.py +++ b/skorch/callbacks/logging.py @@ -72,11 +72,17 @@ class NeptuneLogger(Callback): your net's history to Neptune. The best way to log additional information is to log directly to the - experiment object or subclass the `on_*`` methods. + experiment object or subclass the ``on_*`` methods. Examples -------- + >>> # Install neptune + >>> pip install neptune-client + >>> # To monitor resource consumption install psutil (optional) + >>> pip install psutil >>> # Create a neptune experiment object + >>> import neptune + ... >>> neptune.init('neptune-ai/skorch-integration') ... experiment = neptune.create_experiment( ... name='skorch-basic-example', @@ -121,6 +127,9 @@ class NeptuneLogger(Callback): experiment : neptune.experiments.Experiment Instantiated ``Experiment`` class. + log_on_batch_end : bool (default=False) + Whether to log loss and other metrics on batch level. + close_after_train : bool (default=True) Whether to close the ``Experiment`` object once training finishes. Set this parameter to False if you want to continue @@ -130,14 +139,8 @@ class NeptuneLogger(Callback): keys_ignored : str or list of str (default=None) Key or list of keys that should not be logged to Neptune. Note that in addition to the keys provided by the - user. - - Note - ---- - - Install psutil to monitor resource consumption - - > pip install psutil + user, keys such as those starting with 'event_' or ending on + '_best' are ignored by default. .. 
_Neptune: https://www.neptune.ai @@ -146,10 +149,12 @@ class NeptuneLogger(Callback): def __init__( self, experiment, + log_on_batch_end=False, close_after_train=True, keys_ignored=None, ): self.experiment = experiment + self.log_on_batch_end = log_on_batch_end self.close_after_train = close_after_train self.keys_ignored = keys_ignored @@ -165,10 +170,11 @@ def initialize(self): return self def on_batch_end(self, net, **kwargs): - batch_logs = net.history[-1]['batches'][-1] + if self.log_on_batch_end: + batch_logs = net.history[-1]['batches'][-1] - for key in filter_log_keys(batch_logs.keys(), self.keys_ignored_): - self.experiment.log_metric(key, batch_logs[key]) + for key in filter_log_keys(batch_logs.keys(), self.keys_ignored_): + self.experiment.log_metric(key, batch_logs[key]) def on_epoch_end(self, net, **kwargs): """Automatically log values from the last history step.""" diff --git a/skorch/tests/callbacks/test_logging.py b/skorch/tests/callbacks/test_logging.py index b4bf642ca..b7824d1c3 100644 --- a/skorch/tests/callbacks/test_logging.py +++ b/skorch/tests/callbacks/test_logging.py @@ -10,7 +10,7 @@ import torch from torch import nn -from skorch.tests.conftest import tensorboard_installed +from skorch.tests.conftest import neptune_installed, tensorboard_installed @pytest.mark.skipif( @@ -29,18 +29,21 @@ def data(self, classifier_data): return X, y @pytest.fixture - def tensorboard_cls(self): - from skorch.callbacks import TensorBoard - return TensorBoard + def neptune_logger_cls(self): + from skorch.callbacks import NeptuneLogger + return NeptuneLogger @pytest.fixture - def summary_writer_cls(self): - from torch.utils.tensorboard import SummaryWriter - return SummaryWriter + def neptune_experiment_cls(self): + import neptune + neptune.init(project_qualified_name="dry-run/project", + backend=neptune.OfflineBackend()) + experiment = neptune.create_experiment() + return experiment @pytest.fixture - def mock_writer(self, summary_writer_cls): - mock = Mock(spec=summary_writer_cls) + def mock_experiment(self, neptune_experiment_cls): + mock = Mock(spec=neptune_experiment_cls) return mock @pytest.fixture @@ -49,38 +52,15 @@ def net_fitted( net_cls, classifier_module, data, - tensorboard_cls, - mock_writer, + neptune_logger_cls, + mock_experiment, ): return net_cls( classifier_module, - callbacks=[tensorboard_cls(mock_writer)], + callbacks=[neptune_logger_cls(mock_experiment)], max_epochs=3, ).fit(*data) - @pytest.mark.skipif( - True, reason="Waiting for proper implementation of graph tracing") - def test_graph_added_once(self, net_fitted, mock_writer): - # graph should just be added once - assert mock_writer.add_graph.call_count == 1 - - @pytest.mark.skipif( - True, reason="Waiting for proper implementation of graph tracing") - def test_include_graph_false( - self, - net_cls, - classifier_module, - data, - tensorboard_cls, - mock_writer, - ): - net_cls( - classifier_module, - callbacks=[tensorboard_cls(mock_writer, include_graph=False)], - max_epochs=2, - ).fit(*data) - assert mock_writer.add_graph.call_count == 0 - def test_writer_closed_automatically(self, net_fitted, mock_writer): assert mock_writer.close.call_count == 1 diff --git a/skorch/tests/conftest.py b/skorch/tests/conftest.py index 875b126fb..e1ce233a3 100644 --- a/skorch/tests/conftest.py +++ b/skorch/tests/conftest.py @@ -8,8 +8,8 @@ from sklearn.datasets import make_regression from sklearn.preprocessing import StandardScaler from torch import nn -F = nn.functional +F = nn.functional INFERENCE_METHODS = ['predict', 
'predict_proba', 'forward', 'forward_iter'] @@ -25,6 +25,7 @@ def module_cls(): This module returns the input without modifying it. """ + class MyModule(nn.Module): def __init__(self): super(MyModule, self).__init__() @@ -34,6 +35,7 @@ def __init__(self): def forward(self, X): X = X + 0.0 * self.dense(X) return X + return MyModule @@ -52,8 +54,10 @@ def classifier_module(): @pytest.fixture(scope='module') def multiouput_module(): """Return a simple classifier module class.""" + class MultiOutput(nn.Module): """Simple classification module.""" + def __init__(self, input_units=20): super(MultiOutput, self).__init__() self.output = nn.Linear(input_units, 2) @@ -95,9 +99,11 @@ def multioutput_regression_data(): @pytest.fixture def score55(): """Simple scoring function.""" + # pylint: disable=unused-argument def func(est, X, y, foo=123): return 55 + func.__name__ = 'score55' return func @@ -109,6 +115,7 @@ def func(dataset, y): ds_train = type(dataset)(dataset.X[:2], dataset.y[:2]) ds_valid = type(dataset)(dataset.X[2:], dataset.y[2:]) return ds_train, ds_valid + return func @@ -126,19 +133,29 @@ def data(): return X, y +neptune_installed = False +try: + # pylint: disable=unused-import + import neptune + + neptune_installed = True +except ImportError: + pass + pandas_installed = False try: # pylint: disable=unused-import import pandas + pandas_installed = True except ImportError: pass - tensorboard_installed = False try: # pylint: disable=unused-import import tensorboard + tensorboard_installed = True except ImportError: pass From dc2e8e6b8a3376b079fe2a3664372cda0dd7e857 Mon Sep 17 00:00:00 2001 From: Jakub Czakon Date: Mon, 10 Feb 2020 12:58:16 +0100 Subject: [PATCH 08/12] added batch-level test --- skorch/tests/callbacks/test_logging.py | 20 +++++++++++++++++++- 1 file changed, 19 insertions(+), 1 deletion(-) diff --git a/skorch/tests/callbacks/test_logging.py b/skorch/tests/callbacks/test_logging.py index 2f24fa584..fe3beabc5 100644 --- a/skorch/tests/callbacks/test_logging.py +++ b/skorch/tests/callbacks/test_logging.py @@ -3,7 +3,7 @@ from functools import partial import os from unittest.mock import Mock -from unittest.mock import patch +from unittest.mock import call, patch import numpy as np import pytest @@ -120,6 +120,24 @@ def test_fit_with_real_experiment( ) net.fit(*data) + def test_log_on_batch_level( + self, + net_cls, + classifier_module, + data, + neptune_logger_cls, + mock_experiment, + ): + net = net_cls( + classifier_module, + callbacks=[neptune_logger_cls(mock_experiment, log_on_batch_end=True)], + max_epochs=5, + batch_size=4, + ) + net.fit(*data) + assert mock_experiment.log_metric.call_count == 130 + mock_experiment.log_metric.assert_any_call('valid_batch_size', 1) + class TestPrintLog: @pytest.fixture From a971f37c4f5668c372234cfdf6ff7d37fc598a3c Mon Sep 17 00:00:00 2001 From: jakubczakon Date: Tue, 11 Feb 2020 09:32:14 +0100 Subject: [PATCH 09/12] switched to train_split=False for simplicity, added notes on call counts, added test for when batch log is off --- skorch/tests/callbacks/test_logging.py | 31 +++++++++++++++++++++++--- 1 file changed, 28 insertions(+), 3 deletions(-) diff --git a/skorch/tests/callbacks/test_logging.py b/skorch/tests/callbacks/test_logging.py index fe3beabc5..e5da78d3f 100644 --- a/skorch/tests/callbacks/test_logging.py +++ b/skorch/tests/callbacks/test_logging.py @@ -120,7 +120,7 @@ def test_fit_with_real_experiment( ) net.fit(*data) - def test_log_on_batch_level( + def test_log_on_batch_level_on( self, net_cls, classifier_module, @@ -133,10 
+133,35 @@ def test_log_on_batch_level( callbacks=[neptune_logger_cls(mock_experiment, log_on_batch_end=True)], max_epochs=5, batch_size=4, + train_split=False ) net.fit(*data) - assert mock_experiment.log_metric.call_count == 130 - mock_experiment.log_metric.assert_any_call('valid_batch_size', 1) + + # 5 epochs x (40/4 batches x 2 batch metrics + 2 epoch metrics) = 110 calls + assert mock_experiment.log_metric.call_count == 110 + mock_experiment.log_metric.assert_any_call('train_batch_size', 4) + + def test_log_on_batch_level_off( + self, + net_cls, + classifier_module, + data, + neptune_logger_cls, + mock_experiment, + ): + net = net_cls( + classifier_module, + callbacks=[neptune_logger_cls(mock_experiment, log_on_batch_end=False)], + max_epochs=5, + batch_size=4, + train_split=False + ) + net.fit(*data) + + # 5 epochs x 2 epoch metrics = 10 calls + assert mock_experiment.log_metric.call_count == 10 + assert call('train_batch_size', 4) \ + not in mock_experiment.log_metric.call_args_list class TestPrintLog: From b30cd3ad0ffb0c1f7f45c4c3299af539f507e7e1 Mon Sep 17 00:00:00 2001 From: jakubczakon Date: Fri, 14 Feb 2020 14:52:11 +0100 Subject: [PATCH 10/12] fixed first_batch_ attr, added assert on log_metric call count when keys are ignored --- skorch/callbacks/logging.py | 2 ++ skorch/tests/callbacks/test_logging.py | 5 +++++ 2 files changed, 7 insertions(+) diff --git a/skorch/callbacks/logging.py b/skorch/callbacks/logging.py index f18ed2d16..bca10c68b 100644 --- a/skorch/callbacks/logging.py +++ b/skorch/callbacks/logging.py @@ -183,6 +183,8 @@ def on_batch_end(self, net, **kwargs): for key in filter_log_keys(batch_logs.keys(), self.keys_ignored_): self.experiment.log_metric(key, batch_logs[key]) + self.first_batch_ = False + def on_epoch_end(self, net, **kwargs): """Automatically log values from the last history step.""" history = net.history diff --git a/skorch/tests/callbacks/test_logging.py b/skorch/tests/callbacks/test_logging.py index e5da78d3f..55990b16f 100644 --- a/skorch/tests/callbacks/test_logging.py +++ b/skorch/tests/callbacks/test_logging.py @@ -99,6 +99,11 @@ def test_ignore_keys( max_epochs=3, ).fit(*data) + # 3 epochs x 2 epoch metrics = 6 calls + assert mock_experiment.log_metric.call_count == 6 + assert 'valid_loss' not in [call_args[0][0] + for call_args in mock_experiment.log_metric.call_args_list] + def test_keys_ignored_is_string(self, neptune_logger_cls, mock_experiment): npt = neptune_logger_cls(mock_experiment, keys_ignored='a-key').initialize() From bc763d88532219e2108abd030cc6989f929eb729 Mon Sep 17 00:00:00 2001 From: jakubczakon Date: Sat, 15 Feb 2020 23:09:47 +0100 Subject: [PATCH 11/12] added docstring and test for first_batch_ attribute --- skorch/callbacks/logging.py | 7 +++++++ skorch/tests/callbacks/test_logging.py | 20 ++++++++++++++++++++ 2 files changed, 27 insertions(+) diff --git a/skorch/callbacks/logging.py b/skorch/callbacks/logging.py index bca10c68b..5b5ea071e 100644 --- a/skorch/callbacks/logging.py +++ b/skorch/callbacks/logging.py @@ -150,6 +150,13 @@ class NeptuneLogger(Callback): user, keys such as those starting with 'event_' or ending on '_best' are ignored by default. + Attributes + ---------- + first_batch_ : bool + Helper attribute that is set to True at initialization and changes + to False on first batch end. Can be used when we want to log things + exactly once. + .. 
_Neptune: https://www.neptune.ai """ diff --git a/skorch/tests/callbacks/test_logging.py b/skorch/tests/callbacks/test_logging.py index 55990b16f..e5d5d1458 100644 --- a/skorch/tests/callbacks/test_logging.py +++ b/skorch/tests/callbacks/test_logging.py @@ -168,6 +168,26 @@ def test_log_on_batch_level_off( assert call('train_batch_size', 4) \ not in mock_experiment.log_metric.call_args_list + def test_first_batch_flag( + self, + net_cls, + classifier_module, + data, + neptune_logger_cls, + neptune_experiment_cls, + ): + npt = neptune_logger_cls(neptune_experiment_cls()) + npt.initialize() + assert npt.first_batch_ is True + + net = net_cls( + classifier_module, + callbacks=[npt], + max_epochs=1, + ) + + npt.on_batch_end(net) + assert npt.first_batch_ is False class TestPrintLog: @pytest.fixture From 779fb86fa93f5feabf00cd07086e31cda0bf6d1a Mon Sep 17 00:00:00 2001 From: jakubczakon Date: Sun, 16 Feb 2020 13:29:18 +0100 Subject: [PATCH 12/12] formatting changes --- skorch/tests/callbacks/test_logging.py | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) diff --git a/skorch/tests/callbacks/test_logging.py b/skorch/tests/callbacks/test_logging.py index e5d5d1458..eb86750ce 100644 --- a/skorch/tests/callbacks/test_logging.py +++ b/skorch/tests/callbacks/test_logging.py @@ -10,7 +10,8 @@ import torch from torch import nn -from skorch.tests.conftest import neptune_installed, tensorboard_installed +from skorch.tests.conftest import neptune_installed +from skorch.tests.conftest import tensorboard_installed @pytest.mark.skipif( @@ -101,12 +102,12 @@ def test_ignore_keys( # 3 epochs x 2 epoch metrics = 6 calls assert mock_experiment.log_metric.call_count == 6 - assert 'valid_loss' not in [call_args[0][0] - for call_args in mock_experiment.log_metric.call_args_list] + call_args = [args[0][0] for args in mock_experiment.log_metric.call_args_list] + assert 'valid_loss' not in call_args def test_keys_ignored_is_string(self, neptune_logger_cls, mock_experiment): - npt = neptune_logger_cls(mock_experiment, - keys_ignored='a-key').initialize() + npt = neptune_logger_cls( + mock_experiment, keys_ignored='a-key').initialize() expected = {'a-key', 'batches'} assert npt.keys_ignored_ == expected @@ -165,8 +166,8 @@ def test_log_on_batch_level_off( # 5 epochs x 2 epoch metrics = 10 calls assert mock_experiment.log_metric.call_count == 10 - assert call('train_batch_size', 4) \ - not in mock_experiment.log_metric.call_args_list + call_args_list = mock_experiment.log_metric.call_args_list + assert call('train_batch_size', 4) not in call_args_list def test_first_batch_flag( self, @@ -694,4 +695,4 @@ def forward(self, b, e, c, d, a, **kwargs): net.fit(X_dict, y) # is not empty - assert os.listdir(path) + assert os.listdir(path) \ No newline at end of file
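
A minimal end-to-end sketch of the NeptuneLogger API as it stands at the end of this series, reusing the anonymous token and shared project from the docstring above; ClassifierModule and the training data X, y are placeholders, and the experiment name and hyperparameters are illustrative only:

import neptune
from skorch import NeuralNetClassifier
from skorch.callbacks import NeptuneLogger

# Anonymous token and shared project from the docstring; use your own for real runs.
neptune.init(
    api_token='eyJhcGlfYWRkcmVzcyI6Imh0dHBzOi8vdWkubmVwdHVuZS5tbCIsImFwaV9rZXkiOiJiNzA2YmM4Zi03NmY5LTRjMmUtOTM5ZC00YmEwMzZmOTMyZTQifQ==',
    project_qualified_name='shared/skorch-integration',
)
experiment = neptune.create_experiment(
    name='skorch-neptune-sketch',
    params={'max_epochs': 5, 'lr': 0.01},
)

# Log batch-level metrics as well, skip the epoch duration key, and keep the
# experiment open so more values can be logged after training.
neptune_logger = NeptuneLogger(
    experiment,
    log_on_batch_end=True,
    keys_ignored=['dur'],
    close_after_train=False,
)

net = NeuralNetClassifier(
    ClassifierModule,            # placeholder module
    max_epochs=5,
    lr=0.01,
    callbacks=[neptune_logger],
)
net.fit(X, y)                    # placeholder training data

# Log an extra value directly on the experiment, then close it.
neptune_logger.experiment.log_metric(
    'final_train_loss', net.history[-1, 'train_loss'])
neptune_logger.experiment.stop()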