From 87cdc7df1b8b6bfd4435bf8d94d82ab366aaede3 Mon Sep 17 00:00:00 2001
From: Dirk Groeneveld
Date: Tue, 17 Aug 2021 13:50:06 -0700
Subject: [PATCH 1/6] Lists should be lists

---
 allennlp/training/callbacks/log_writer.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/allennlp/training/callbacks/log_writer.py b/allennlp/training/callbacks/log_writer.py
index 8b3183f28c3..f30d3236ed6 100644
--- a/allennlp/training/callbacks/log_writer.py
+++ b/allennlp/training/callbacks/log_writer.py
@@ -227,7 +227,7 @@ def log_batch(
 
         # Now collect per-batch metrics to log.
         metrics_to_log: Dict[str, float] = {}
-        for key in ("batch_loss", "batch_reg_loss"):
+        for key in ["batch_loss", "batch_reg_loss"]:
             if key not in metrics:
                 continue
             value = metrics[key]

From 7e52017b040b7097798e9855a11e906da4f64ddb Mon Sep 17 00:00:00 2001
From: Dirk Groeneveld
Date: Tue, 17 Aug 2021 13:50:19 -0700
Subject: [PATCH 2/6] Formatting

---
 allennlp/training/callbacks/log_writer.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/allennlp/training/callbacks/log_writer.py b/allennlp/training/callbacks/log_writer.py
index f30d3236ed6..71ce3e6db87 100644
--- a/allennlp/training/callbacks/log_writer.py
+++ b/allennlp/training/callbacks/log_writer.py
@@ -253,7 +253,7 @@ def log_batch(
 
         if self._batch_size_interval:
             # We're assuming here that `log_batch` will get called every batch, and only every
-            # batch.  This is true with our current usage of this code (version 1.0); if that
+            # batch. This is true with our current usage of this code (version 1.0); if that
             # assumption becomes wrong, this code will break.
             batch_group_size = sum(get_batch_size(batch) for batch in batch_group)  # type: ignore
             self._cumulative_batch_group_size += batch_group_size

From 6b005f95e4170c5c45ede38caa103a94aacb6d03 Mon Sep 17 00:00:00 2001
From: Dirk Groeneveld
Date: Tue, 17 Aug 2021 13:50:30 -0700
Subject: [PATCH 3/6] By default, don't log parameter stats

---
 allennlp/training/callbacks/tensorboard.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/allennlp/training/callbacks/tensorboard.py b/allennlp/training/callbacks/tensorboard.py
index 73bc04a686a..630c0b70996 100644
--- a/allennlp/training/callbacks/tensorboard.py
+++ b/allennlp/training/callbacks/tensorboard.py
@@ -21,7 +21,7 @@ def __init__(
         summary_interval: int = 100,
         distribution_interval: Optional[int] = None,
         batch_size_interval: Optional[int] = None,
-        should_log_parameter_statistics: bool = True,
+        should_log_parameter_statistics: bool = False,
         should_log_learning_rate: bool = False,
     ) -> None:
         super().__init__(

From 8addcd08bb91ac81f03034dff3f38891f4ddaf20 Mon Sep 17 00:00:00 2001
From: Dirk Groeneveld
Date: Tue, 17 Aug 2021 13:50:46 -0700
Subject: [PATCH 4/6] Log batch metrics

---
 allennlp/training/callbacks/log_writer.py | 7 +++++++
 1 file changed, 7 insertions(+)

diff --git a/allennlp/training/callbacks/log_writer.py b/allennlp/training/callbacks/log_writer.py
index 71ce3e6db87..3150459b468 100644
--- a/allennlp/training/callbacks/log_writer.py
+++ b/allennlp/training/callbacks/log_writer.py
@@ -241,6 +241,13 @@ def log_batch(
                 self._batch_loss_moving_items[key]
             )
 
+        for key, value in metrics.items():
+            if key.startswith("batch_"):
+                continue
+            key = "batch_" + key
+            if key not in metrics_to_log:
+                metrics_to_log[key] = value
+
         self.log_scalars(
             metrics_to_log,
             log_prefix="train",

From db08e329f2723e506a4b955e2806f7d146f013ad Mon Sep 17 00:00:00 2001
From: Dirk Groeneveld
Date: Tue, 17 Aug 2021 13:52:28 -0700
Subject: [PATCH 5/6] Changelog
---
 CHANGELOG.md | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 231f9660288..6e2b5316b23 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -25,6 +25,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
   model's `__init__()` method by wrapping them with `self.ddp_accelerator.wrap_module()`. See the
   `allennlp.modules.transformer.t5` for an example.
 - Added Tango components, to be explored in detail in a later post.
+- We now log batch metrics to tensorboard and wandb.
 
 ### Fixed
 
@@ -34,7 +35,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
 - `TransformerTextField` can now take tensors of shape `(1, n)` like the tensors produced from a HuggingFace tokenizer.
 - `tqdm` lock is now set inside `MultiProcessDataLoading` when new workers are spawned to avoid contention when writing output.
 - `ConfigurationError` is now pickleable.
-- Multitask models now support `TextFieldTensor` in heads, not just in the backbone
+- Multitask models now support `TextFieldTensor` in heads, not just in the backbone.
 
 ### Changed
 
@@ -42,7 +43,8 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
   with a default value of `False`. `False` means gradients are not rescaled and the gradient
   norm is never even calculated. `True` means the gradients are still not rescaled but the
   gradient norm is calculated and passed on to callbacks. A `float` value means gradients are rescaled.
-- `TensorCache` now supports more concurrent readers and writers.
+- `TensorCache` now supports more concurrent readers and writers.
+- We no longer log parameter statistics to tensorboard or wandb by default.
 
 ## [v2.6.0](https://github.com/allenai/allennlp/releases/tag/v2.6.0) - 2021-07-19

From 8a5e369e3e60afc1e48f1f0895ab6633f73979a4 Mon Sep 17 00:00:00 2001
From: Dirk Groeneveld
Date: Thu, 19 Aug 2021 11:06:17 -0700
Subject: [PATCH 6/6] Don't try to be more general than Patton

---
 allennlp/training/callbacks/log_writer.py | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/allennlp/training/callbacks/log_writer.py b/allennlp/training/callbacks/log_writer.py
index 3150459b468..244656c060c 100644
--- a/allennlp/training/callbacks/log_writer.py
+++ b/allennlp/training/callbacks/log_writer.py
@@ -227,7 +227,8 @@ def log_batch(
 
         # Now collect per-batch metrics to log.
         metrics_to_log: Dict[str, float] = {}
-        for key in ["batch_loss", "batch_reg_loss"]:
+        batch_loss_metrics = {"batch_loss", "batch_reg_loss"}
+        for key in batch_loss_metrics:
             if key not in metrics:
                 continue
             value = metrics[key]
@@ -242,7 +243,7 @@ def log_batch(
         )
 
         for key, value in metrics.items():
-            if key.startswith("batch_"):
+            if key in batch_loss_metrics:
                 continue
             key = "batch_" + key
             if key not in metrics_to_log:
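
For readers who want the net effect of patches 1, 4, and 6 without replaying the
diffs: the sketch below restates just the per-batch metric collection from
`log_batch` as a standalone function. It is a simplified sketch, not the actual
`LogWriterCallback` method -- the moving-average bookkeeping the real loss loop
also performs is omitted, and the function name `collect_batch_metrics` plus the
sample values are invented for illustration. Only the `metrics` input, the
`metrics_to_log` output, and the `batch_loss_metrics` set come from the patches.

    from typing import Dict

    # The two loss keys that get special treatment (patch 6 names this set
    # `batch_loss_metrics` inside `log_batch`).
    BATCH_LOSS_METRICS = {"batch_loss", "batch_reg_loss"}


    def collect_batch_metrics(metrics: Dict[str, float]) -> Dict[str, float]:
        """Hypothetical standalone version of the per-batch metric collection."""
        metrics_to_log: Dict[str, float] = {}

        # Loss metrics are logged under their existing names. (The real code
        # also maintains a moving average for each; omitted here.)
        for key in BATCH_LOSS_METRICS:
            if key in metrics:
                metrics_to_log[key] = metrics[key]

        # Every other metric is logged with a "batch_" prefix. Patch 4 added
        # this loop; patch 6 narrowed its filter to membership in the loss set
        # instead of a startswith("batch_") check, so a user metric whose name
        # happens to begin with "batch_" is now prefixed and logged rather
        # than silently skipped.
        for key, value in metrics.items():
            if key in BATCH_LOSS_METRICS:
                continue
            prefixed = "batch_" + key
            if prefixed not in metrics_to_log:
                metrics_to_log[prefixed] = value

        return metrics_to_log


    # {"batch_loss": 0.7, "accuracy": 0.91} -> {"batch_loss": 0.7, "batch_accuracy": 0.91}
    print(collect_batch_metrics({"batch_loss": 0.7, "accuracy": 0.91}))

Combined with patch 3 flipping `should_log_parameter_statistics` to `False`,
this matches the two changelog entries: batch metrics are now logged to
tensorboard and wandb, and parameter statistics no longer are by default.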