diff --git a/src/transformers/generation_utils.py b/src/transformers/generation_utils.py
index 8f6dc6a383a7..637b723c88de 100644
--- a/src/transformers/generation_utils.py
+++ b/src/transformers/generation_utils.py
@@ -1200,7 +1200,7 @@ def generate(
         input_ids_seq_length = input_ids.shape[-1]
         if max_length is None and max_new_tokens is None:
             warnings.warn(
-                "Neither `max_length` nor `max_new_tokens` have been set, `max_length` will default to "
+                "Neither `max_length` nor `max_new_tokens` has been set, `max_length` will default to "
                 f"{self.config.max_length} (`self.config.max_length`). Controlling `max_length` via the config is "
                 "deprecated and `max_length` will be removed from the config in v5 of Transformers -- we recommend "
                 "using `max_new_tokens` to control the maximum length of the generation.",
diff --git a/src/transformers/models/fsmt/modeling_fsmt.py b/src/transformers/models/fsmt/modeling_fsmt.py
index d44bc80363d0..f469266d7454 100644
--- a/src/transformers/models/fsmt/modeling_fsmt.py
+++ b/src/transformers/models/fsmt/modeling_fsmt.py
@@ -220,7 +220,7 @@
         input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
             Indices of input sequence tokens in the vocabulary.
 
-            IIndices can be obtained using [`FSTMTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+            Indices can be obtained using [`FSTMTokenizer`]. See [`PreTrainedTokenizer.encode`] and
             [`PreTrainedTokenizer.__call__`] for details.
 
             [What are input IDs?](../glossary#input-ids)
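
The warning touched by the first hunk recommends bounding generation length with `max_new_tokens` rather than relying on `self.config.max_length`. A minimal sketch of that recommended call, not part of the diff; the gpt2 checkpoint and prompt are illustrative assumptions.

from transformers import AutoModelForCausalLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("gpt2")
model = AutoModelForCausalLM.from_pretrained("gpt2")

inputs = tokenizer("The quick brown fox", return_tensors="pt")
# Passing max_new_tokens explicitly avoids the warning above and the
# fallback to self.config.max_length (deprecated per the warning text).
output_ids = model.generate(**inputs, max_new_tokens=20)
print(tokenizer.decode(output_ids[0], skip_special_tokens=True))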
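The second hunk's docstring says the `input_ids` indices are obtained from the tokenizer via `PreTrainedTokenizer.encode` / `__call__`. A minimal sketch of that, not part of the diff; it assumes the class referenced is transformers' FSMTTokenizer and uses the facebook/wmt19-en-ru checkpoint as an illustrative example.

from transformers import FSMTTokenizer

tokenizer = FSMTTokenizer.from_pretrained("facebook/wmt19-en-ru")
# __call__ returns a dict whose "input_ids" are the token indices in the vocabulary.
encoded = tokenizer("Machine learning is great, isn't it?", return_tensors="pt")
print(encoded["input_ids"])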