Change BartLearnedPositionalEmbedding's forward method signature to support Opacus training #18486
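Opacus computes per-sample gradients by attaching forward hooks that record each trainable module's tensor inputs. `BartLearnedPositionalEmbedding.forward` previously received only a `torch.Size`, so those hooks had no activation tensor to capture for the positional embedding; this PR passes the `input_ids` tensor through instead. Below is a minimal plain-PyTorch sketch of that hook mechanism (it does not use Opacus itself, and the module and sizes are illustrative only), showing what a per-sample-gradient engine expects to see:

```python
import torch
import torch.nn as nn

captured = {}

def capture_input(module, inputs, output):
    # An Opacus-style hook stores the module's input activations; it needs a
    # tensor here so per-sample gradients can be computed during backward.
    captured["activations"] = inputs[0]

emb = nn.Embedding(1026, 16)          # stand-in for the learned positional embedding
emb.register_forward_hook(capture_input)

input_ids = torch.randint(0, 1024, (4, 10))
_ = emb(input_ids)                    # with the new signature, the hook sees a [4, 10] tensor
print(type(captured["activations"]), captured["activations"].shape)
# <class 'torch.Tensor'> torch.Size([4, 10])
```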
Changes from 6 commits: 6a025fd · 6eab493 · fcf601d · 8eabee9 · 7863520 · fe3ef03 · 4a31279 · c6a74d4
@@ -128,12 +128,14 @@ def __init__(self, num_embeddings: int, embedding_dim: int):
         self.offset = 2
         super().__init__(num_embeddings + self.offset, embedding_dim)
 
-    def forward(self, input_ids_shape: torch.Size, past_key_values_length: int = 0):
-        """`input_ids_shape` is expected to be [bsz x seqlen]."""
-        bsz, seq_len = input_ids_shape[:2]
+    def forward(self, input_ids: torch.Tensor, past_key_values_length: int = 0):
+        """`input_ids' shape is expected to be [bsz x seqlen]."""
+
+        bsz, seq_len = input_ids.shape[:2]
         positions = torch.arange(
             past_key_values_length, past_key_values_length + seq_len, dtype=torch.long, device=self.weight.device
-        )
+        ).repeat(bsz, 1)
         return super().forward(positions + self.offset)
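For reference, here is a standalone sketch that mirrors the patched module above (written outside the library, so the class name and sizes are illustrative). It shows that `forward` now takes a tensor rather than a `torch.Size`, and that `.repeat(bsz, 1)` produces one row of position ids per sample:

```python
import torch
import torch.nn as nn

class LearnedPositionalEmbedding(nn.Embedding):
    def __init__(self, num_embeddings: int, embedding_dim: int):
        # BART reserves 2 extra position slots, hence the offset.
        self.offset = 2
        super().__init__(num_embeddings + self.offset, embedding_dim)

    def forward(self, input_ids: torch.Tensor, past_key_values_length: int = 0):
        bsz, seq_len = input_ids.shape[:2]
        positions = torch.arange(
            past_key_values_length, past_key_values_length + seq_len,
            dtype=torch.long, device=self.weight.device,
        ).repeat(bsz, 1)                      # [bsz, seq_len]: one row of positions per sample
        return super().forward(positions + self.offset)

emb = LearnedPositionalEmbedding(1024, 16)
out = emb(torch.zeros(4, 10, dtype=torch.long))   # a tensor input, not a torch.Size
print(out.shape)                                   # torch.Size([4, 10, 16])
```

Only the shape of `input_ids` is read, so the rows of the output are identical across samples; the point of the change is that the module now receives a real tensor that hook-based tooling can observe.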
@@ -788,17 +790,17 @@ def forward(
         if input_ids is not None and inputs_embeds is not None:
             raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
         elif input_ids is not None:
-            input_shape = input_ids.size()
-            input_ids = input_ids.view(-1, input_shape[-1])
+            input = input_ids
+            input_ids = input_ids.view(-1, input_ids.shape[-1])
         elif inputs_embeds is not None:
-            input_shape = inputs_embeds.size()[:-1]
+            input = inputs_embeds[:, :, -1]
         else:
             raise ValueError("You have to specify either input_ids or inputs_embeds")
 
         if inputs_embeds is None:
             inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale
 
-        embed_pos = self.embed_positions(input_shape)
+        embed_pos = self.embed_positions(input)
 
         hidden_states = inputs_embeds + embed_pos
         hidden_states = self.layernorm_embedding(hidden_states)
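In the encoder, `input` keeps a [bsz, seq_len] tensor for `embed_positions` while `input_ids` is still reshaped for the token embedding. When only `inputs_embeds` is supplied, slicing the last feature channel (`inputs_embeds[:, :, -1]`) yields a [bsz, seq_len] tensor whose values are never read; `embed_positions` only uses its shape. A small illustration (arbitrary sizes, `d_model` = 64):

```python
import torch

bsz, seq_len, d_model = 4, 16, 64
inputs_embeds = torch.randn(bsz, seq_len, d_model)

proxy = inputs_embeds[:, :, -1]   # [bsz, seq_len] "shape carrier" passed to embed_positions
print(proxy.shape)                # torch.Size([4, 16])
```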
@@ -1013,18 +1015,20 @@ def forward(
         if input_ids is not None and inputs_embeds is not None:
             raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time")
         elif input_ids is not None:
-            input_shape = input_ids.size()
+            input = input_ids
+            input_shape = input.shape
             input_ids = input_ids.view(-1, input_shape[-1])
         elif inputs_embeds is not None:
             input_shape = inputs_embeds.size()[:-1]
+            input = inputs_embeds[:, :, -1]
         else:
             raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds")
 
         # past_key_values_length
         past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0
 
         if inputs_embeds is None:
-            inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale
+            inputs_embeds = self.embed_tokens(input) * self.embed_scale
 
         attention_mask = self._prepare_decoder_attention_mask(
             attention_mask, input_shape, inputs_embeds, past_key_values_length
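In the decoder branch that receives token ids, `input` is the same 2-D [bsz, seq_len] tensor as `input_ids`, and `input_ids.view(-1, input_shape[-1])` is a no-op reshape for a 2-D input, so embedding `input` is equivalent to the previous `embed_tokens(input_ids)`. A quick check of that equivalence (illustrative sizes, a plain `nn.Embedding` standing in for `embed_tokens`):

```python
import torch
import torch.nn as nn

embed_tokens = nn.Embedding(1000, 32)
input_ids = torch.randint(0, 1000, (4, 16))               # [bsz, seq_len]

a = embed_tokens(input_ids)                                # embed_tokens(input)
b = embed_tokens(input_ids.view(-1, input_ids.shape[-1]))  # old formulation
print(torch.equal(a, b))                                   # True
```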
@@ -1036,7 +1040,7 @@ def forward(
             encoder_attention_mask = _expand_mask(encoder_attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1])
 
         # embed positions
-        positions = self.embed_positions(input_shape, past_key_values_length)
+        positions = self.embed_positions(input, past_key_values_length)
 
         hidden_states = inputs_embeds + positions
         hidden_states = self.layernorm_embedding(hidden_states)
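During cached (incremental) decoding, `past_key_values_length` comes from the cached key shape (`past_key_values[0][0].shape[2]`), and the positional embedding offsets its `arange` by it, so only the positions of the newly decoded tokens are produced, one row per sample. A sketch with illustrative numbers:

```python
import torch

bsz, past_len, new_tokens = 2, 5, 1   # 5 tokens already in the cache, decoding 1 new token
positions = torch.arange(past_len, past_len + new_tokens).repeat(bsz, 1)
print(positions)                       # tensor([[5], [5]]): position id of the new token, per sample
```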