This repository was archived by the owner on Dec 16, 2022. It is now read-only.

Bump black from 21.12b0 to 22.1.0 #5554

Merged · 8 commits · Feb 10, 2022
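Nearly every hunk below is the same one-character change: under the 2022 stable style, black 22.1.0 (the first non-beta release) stops padding the `**` power operator when both operands are simple (a name, a number, or an attribute access, optionally with a unary minus), while keeping the spaces when either operand is anything more complex, such as a call or a parenthesized expression. A minimal sketch of the new formatting, using illustrative variable names that are not taken from this repository:

```python
import math

hidden_size = 512
values = [1.0, 2.0, 3.0]

# Both operands are simple, so black 22.1.0 hugs the operator;
# 21.12b0 formatted these as `hidden_size ** -0.5` and `2 ** 8`.
scale = hidden_size**-0.5
buckets = 2**8

# A call is not a simple operand, so the spaces around ** are kept.
rms = math.fsum(values) ** 0.5
```

This is why, for example, `(self.hidden_size * self.attention_head_size) ** -0.5` in `attention_module.py` is left untouched below, while the neighboring `self.hidden_size ** -0.5` lines are reformatted.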
4 changes: 4 additions & 0 deletions CHANGELOG.md
@@ -7,6 +7,10 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0

## Unreleased

+### Fixed
+
+- Updated dependencies, especially around doc creation.
+
## [v2.9.0](https://github.com/allenai/allennlp/releases/tag/v2.9.0) - 2022-01-27

### Added
2 changes: 1 addition & 1 deletion allennlp/commands/find_learning_rate.py
@@ -281,7 +281,7 @@ def search_learning_rate(
if linear_steps:
current_lr = start_lr + (lr_update_factor * i)
else:
-current_lr = start_lr * (lr_update_factor ** i)
+current_lr = start_lr * (lr_update_factor**i)

for param_group in trainer.optimizer.param_groups:
param_group["lr"] = current_lr
2 changes: 1 addition & 1 deletion allennlp/fairness/bias_metrics.py
@@ -240,7 +240,7 @@ def spearman_correlation(self, x: torch.Tensor, y: torch.Tensor):

n = x.size(0)
upper = 6 * torch.sum((x_rank - y_rank).pow(2))
-down = n * (n ** 2 - 1.0)
+down = n * (n**2 - 1.0)
return 1.0 - (upper / down)


6 changes: 3 additions & 3 deletions allennlp/modules/transformer/attention_module.py
@@ -134,16 +134,16 @@ def _normalize(self) -> None:
self.query.weight.data.normal_(
mean=0.0, std=(self.hidden_size * self.attention_head_size) ** -0.5
)
-self.key.weight.data.normal_(mean=0.0, std=self.hidden_size ** -0.5)
-self.value.weight.data.normal_(mean=0.0, std=self.hidden_size ** -0.5)
+self.key.weight.data.normal_(mean=0.0, std=self.hidden_size**-0.5)
+self.value.weight.data.normal_(mean=0.0, std=self.hidden_size**-0.5)

if hasattr(self, "output"):
self.output.weight.data.normal_(
mean=0.0, std=(self.num_attention_heads * self.attention_head_size) ** -0.5
)

if hasattr(self, "relative_attention_bias"):
-self.relative_attention_bias.weight.data.normal_(mean=0.0, std=self.hidden_size ** -0.5)
+self.relative_attention_bias.weight.data.normal_(mean=0.0, std=self.hidden_size**-0.5)

def _transpose_for_scores(self, x: torch.Tensor) -> torch.Tensor:
new_x_shape = x.size()[:-1] + (
12 changes: 6 additions & 6 deletions allennlp/modules/transformer/t5.py
@@ -63,9 +63,9 @@ class T5DenseReluDense(TransformerModule, FromParams):
def __init__(self, hidden_size: int = 512, ff_size: int = 2048, dropout: float = 0.1):
super().__init__()
self.wi = nn.Linear(hidden_size, ff_size, bias=False)
-self.wi.weight.data.normal_(mean=0.0, std=hidden_size ** -0.5)
+self.wi.weight.data.normal_(mean=0.0, std=hidden_size**-0.5)
self.wo = nn.Linear(ff_size, hidden_size, bias=False)
-self.wo.weight.data.normal_(mean=0.0, std=ff_size ** -0.5)
+self.wo.weight.data.normal_(mean=0.0, std=ff_size**-0.5)
self.dropout = nn.Dropout(dropout)

def forward(self, hidden_states) -> FloatT:
@@ -81,11 +81,11 @@ class T5DenseGatedGeluDense(TransformerModule, FromParams):
def __init__(self, hidden_size: int = 512, ff_size: int = 2048, dropout: float = 0.1):
super().__init__()
self.wi_0 = nn.Linear(hidden_size, ff_size, bias=False)
-self.wi_0.weight.data.normal_(mean=0.0, std=hidden_size ** -0.5)
+self.wi_0.weight.data.normal_(mean=0.0, std=hidden_size**-0.5)
self.wi_1 = nn.Linear(hidden_size, ff_size, bias=False)
-self.wi_1.weight.data.normal_(mean=0.0, std=hidden_size ** -0.5)
+self.wi_1.weight.data.normal_(mean=0.0, std=hidden_size**-0.5)
self.wo = nn.Linear(ff_size, hidden_size, bias=False)
-self.wo.weight.data.normal_(mean=0.0, std=ff_size ** -0.5)
+self.wo.weight.data.normal_(mean=0.0, std=ff_size**-0.5)
self.dropout = nn.Dropout(dropout)
from allennlp.nn import Activation

@@ -964,7 +964,7 @@ def _get_lm_logits(self, decoder_last_hidden_state: FloatT) -> FloatT:
# TODO: HF only does this when embeddings are tied.
# Currently tied embeddings is the only option we have, but if we make
# that configurable then we should put this in an 'if' block.
-sequence_output = sequence_output * (self.model_dim ** -0.5)
+sequence_output = sequence_output * (self.model_dim**-0.5)
# Shape: (batch_size, target_length, vocab_size)
logits = self.lm_head(sequence_output)
return logits
2 changes: 1 addition & 1 deletion allennlp/nn/beam_search.py
@@ -515,7 +515,7 @@ def score(
lengths += is_end_token.long()

# shape: (batch_size, beam_size)
-average_log_probs = log_probabilities / (lengths ** self.length_penalty)
+average_log_probs = log_probabilities / (lengths**self.length_penalty)
return average_log_probs


4 changes: 2 additions & 2 deletions allennlp/training/learning_rate_schedulers/cosine.py
@@ -109,8 +109,8 @@ def get_values(self):
self._cycle_counter = 0
self._last_restart = step

-base_lrs = [lr * self.eta_mul ** self._n_restarts for lr in self.base_values]
-self._cycle_len = int(self.t_initial * self.t_mul ** self._n_restarts)
+base_lrs = [lr * self.eta_mul**self._n_restarts for lr in self.base_values]
+self._cycle_len = int(self.t_initial * self.t_mul**self._n_restarts)

lrs = [
self.eta_min
@@ -86,7 +86,7 @@ def __init__(
for i in range(len(self.base_values) - 1, -1, -1):
param_group = optimizer.param_groups[i]
if param_group["params"]:
param_group["lr"] = self.base_values[i] * decay_factor ** exponent
param_group["lr"] = self.base_values[i] * decay_factor**exponent
self.base_values[i] = param_group["lr"]
exponent += 1
# set up for the first batch
2 changes: 1 addition & 1 deletion allennlp/training/metrics/fbeta_measure.py
@@ -196,7 +196,7 @@ def get_metric(self, reset: bool = False):
pred_sum = pred_sum.sum() # type: ignore
true_sum = true_sum.sum() # type: ignore

-beta2 = self._beta ** 2
+beta2 = self._beta**2
# Finally, we have all our sufficient statistics.
precision = nan_safe_tensor_divide(tp_sum, pred_sum)
recall = nan_safe_tensor_divide(tp_sum, true_sum)
9 changes: 4 additions & 5 deletions dev-requirements.txt
@@ -7,7 +7,7 @@ flake8
mypy==0.931

# Automatic code formatting
-black==21.12b0
+black==22.1.0

# Allows generation of coverage reports with pytest.
pytest-cov
@@ -34,11 +34,10 @@ pytest-benchmark
ruamel.yaml

# Generating markdown files from Python modules.
-git+https://github.com/NiklasRosenstein/pydoc-markdown.git@f0bf8af1db4f11581c19d206d4ed1ab34b4854c1
-nr.databind.core<0.0.17
-nr.interface<0.0.6
+pydoc-markdown>=4.0.0,<5.0.0
+databind.core

-mkdocs==1.1.2
+mkdocs==1.2.3
mkdocs-material>=5.5.0,<8.2.0
markdown-include==0.6.0

2 changes: 1 addition & 1 deletion scripts/ai2_internal/run_with_beaker.py
@@ -12,7 +12,7 @@

# This has to happen before we import spacy (even indirectly), because for some crazy reason spacy
# thought it was a good idea to set the random seed on import...
-random_int = random.randint(0, 2 ** 32)
+random_int = random.randint(0, 2**32)

sys.path.insert(
0, os.path.dirname(os.path.abspath(os.path.join(os.path.join(__file__, os.pardir), os.pardir)))