Patch release: v4.21.2
LysandreJik committed Aug 24, 2022
1 parent c5f7df8 commit b487096
Showing 18 changed files with 3 additions and 32 deletions.
2 changes: 1 addition & 1 deletion setup.py
@@ -399,7 +399,7 @@ def run(self):

setup(
name="transformers",
- version="4.21.1", # expected format is one of x.y.z.dev0, or x.y.z.rc1 or x.y.z (no to dashes, yes to dots)
+ version="4.21.2", # expected format is one of x.y.z.dev0, or x.y.z.rc1 or x.y.z (no to dashes, yes to dots)
author="The Hugging Face team (past and future) with the help of all our contributors (https://github.com/huggingface/transformers/graphs/contributors)",
author_email="transformers@huggingface.co",
description="State-of-the-art Machine Learning for JAX, PyTorch and TensorFlow",
2 changes: 1 addition & 1 deletion src/transformers/__init__.py
@@ -22,7 +22,7 @@
# to defer the actual importing for when the objects are requested. This way `import transformers` provides the names
# in the namespace without actually importing anything (and especially none of the backends).

- __version__ = "4.21.1"
+ __version__ = "4.21.2"

from typing import TYPE_CHECKING

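For context, a quick way to confirm the version bump above once the patch is installed — a minimal sketch, assuming a standard `pip install --upgrade transformers==4.21.2`:

```python
import transformers

# __version__ is the string bumped in the diff above
print(transformers.__version__)  # expected: 4.21.2
```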
5 changes: 0 additions & 5 deletions src/transformers/benchmark/benchmark_utils.py
@@ -79,7 +79,6 @@ def separate_process_wrapper_fn(func: Callable[[], None], do_multi_processing: b
measurements it is important that the function is executed in a separate process
Args:
- `func`: (`callable`): function() -> ... generic function which will be executed in its own separate process
- `do_multi_processing`: (`bool`) Whether to run function on separate process or not
"""
@@ -210,7 +209,6 @@ def measure_peak_memory_cpu(function: Callable[[], None], interval=0.5, device_i
https://github.com/pythonprofilers/memory_profiler/blob/895c4ac7a08020d66ae001e24067da6dcea42451/memory_profiler.py#L239
Args:
- `function`: (`callable`): function() -> ... function without any arguments to measure for which to measure
the peak memory
@@ -228,7 +226,6 @@ def get_cpu_memory(process_id: int) -> int:
measures current cpu memory usage of a given `process_id`
Args:
- `process_id`: (`int`) process_id for which to measure memory
Returns
@@ -336,7 +333,6 @@ def start_memory_tracing(
https://psutil.readthedocs.io/en/latest/#psutil.Process.memory_info
Args:
- `modules_to_trace`: (None, string, list/tuple of string) if None, all events are recorded if string or list
of strings: only events from the listed module/sub-module will be recorded (e.g. 'fairseq' or
'transformers.models.gpt2.modeling_gpt2')
@@ -483,7 +479,6 @@ def stop_memory_tracing(
Stop memory tracing cleanly and return a summary of the memory trace if a trace is given.
Args:
`memory_trace` (optional output of start_memory_tracing, default: None):
memory trace to convert in summary
`ignore_released_memory` (boolean, default: None):
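The excerpts above come from the benchmark memory-tracing helpers. A minimal usage sketch, importing straight from the module touched by this diff (the GPT-2 module name is just the example given in the docstring, and the `summary.total` access assumes the usual `MemorySummary` layout):

```python
from transformers.benchmark.benchmark_utils import (
    start_memory_tracing,
    stop_memory_tracing,
)

# Trace allocations only for frames originating in the GPT-2 modeling module
trace = start_memory_tracing("transformers.models.gpt2.modeling_gpt2")

# ... run the code whose memory usage should be profiled ...

# Returns a summary of the trace (None if no trace was collected)
summary = stop_memory_tracing(trace)
if summary is not None:
    print(summary.total)
```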
1 change: 0 additions & 1 deletion src/transformers/generation_flax_utils.py
@@ -208,7 +208,6 @@ def generate(
post](https://huggingface.co/blog/how-to-generate).
Parameters:
input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):
The sequence used as a prompt for the generation.
max_length (`int`, *optional*, defaults to `model.config.max_length`):
5 changes: 0 additions & 5 deletions src/transformers/generation_tf_utils.py
@@ -418,7 +418,6 @@ def generate(
post](https://huggingface.co/blog/how-to-generate).
Parameters:
input_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`, `(batch_size, sequence_length,
feature_dim)` or `(batch_size, num_channels, height, width)`, *optional*):
The sequence used as a prompt for the generation or as model inputs to the encoder. If `None` the
@@ -1336,7 +1335,6 @@ def _generate(
post](https://huggingface.co/blog/how-to-generate).
Parameters:
input_ids (`tf.Tensor` of `dtype=tf.int32` and shape `(batch_size, sequence_length)`, *optional*):
The sequence used as a prompt for the generation. If `None` the method initializes it with
`bos_token_id` and a batch size of 1.
@@ -2069,7 +2067,6 @@ def greedy_search(
Generates sequences for models with a language modeling head using greedy decoding.
Parameters:
input_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`):
The sequence used as a prompt for the generation.
logits_processor (`TFLogitsProcessorList`, *optional*):
@@ -2322,7 +2319,6 @@ def sample(
Generates sequences for models with a language modeling head using multinomial sampling.
Parameters:
input_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`):
The sequence used as a prompt for the generation.
logits_processor (`TFLogitsProcessorList`, *optional*):
@@ -2599,7 +2595,6 @@ def beam_search(
Generates sequences for models with a language modeling head using beam search with multinomial sampling.
Parameters:
input_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`):
The sequence used as a prompt for the generation.
max_length (`int`, *optional*, defaults to 20):
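The TensorFlow docstrings above describe `generate` and its decoding strategies. A hedged sketch of the sampling path (the `gpt2` checkpoint and the prompt text are only illustrative choices):

```python
from transformers import AutoTokenizer, TFAutoModelForCausalLM

tokenizer = AutoTokenizer.from_pretrained("gpt2")  # example checkpoint
model = TFAutoModelForCausalLM.from_pretrained("gpt2")

# `input_ids` is the (batch_size, sequence_length) prompt described above
input_ids = tokenizer("The quick brown fox", return_tensors="tf").input_ids

# do_sample=True selects the multinomial sampling path documented above
output_ids = model.generate(input_ids, do_sample=True, top_k=50, max_length=30)
print(tokenizer.decode(output_ids[0], skip_special_tokens=True))
```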
5 changes: 0 additions & 5 deletions src/transformers/generation_utils.py
@@ -1555,7 +1555,6 @@ def greedy_search(
used for text-decoder, text-to-text, speech-to-text, and vision-to-text models.
Parameters:
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
The sequence used as a prompt for the generation.
logits_processor (`LogitsProcessorList`, *optional*):
@@ -1789,7 +1788,6 @@ def sample(
can be used for text-decoder, text-to-text, speech-to-text, and vision-to-text models.
Parameters:
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
The sequence used as a prompt for the generation.
logits_processor (`LogitsProcessorList`, *optional*):
@@ -2046,7 +2044,6 @@ def beam_search(
can be used for text-decoder, text-to-text, speech-to-text, and vision-to-text models.
Parameters:
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
The sequence used as a prompt for the generation.
beam_scorer (`BeamScorer`):
@@ -2355,7 +2352,6 @@ def beam_sample(
sampling** and can be used for text-decoder, text-to-text, speech-to-text, and vision-to-text models.
Parameters:
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
The sequence used as a prompt for the generation.
beam_scorer (`BeamScorer`):
@@ -2672,7 +2668,6 @@ def group_beam_search(
decoding** and can be used for text-decoder, text-to-text, speech-to-text, and vision-to-text models.
Parameters:
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
The sequence used as a prompt for the generation.
beam_scorer (`BeamScorer`):
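Similarly for the PyTorch side: a minimal sketch of beam search through the public `generate` entry point (again with `gpt2` as a stand-in checkpoint):

```python
from transformers import AutoTokenizer, AutoModelForCausalLM

tokenizer = AutoTokenizer.from_pretrained("gpt2")  # example checkpoint
model = AutoModelForCausalLM.from_pretrained("gpt2")

input_ids = tokenizer("The quick brown fox", return_tensors="pt").input_ids

# num_beams > 1 with do_sample=False routes generation through beam_search
output_ids = model.generate(input_ids, num_beams=4, max_length=30, early_stopping=True)
print(tokenizer.decode(output_ids[0], skip_special_tokens=True))
```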
3 changes: 1 addition & 2 deletions src/transformers/modelcard.py
@@ -86,8 +86,7 @@ class ModelCard:
Note: A model card can be loaded and saved to disk.
- Parameters:
- """
+ Parameters:"""

def __init__(self, **kwargs):
warnings.warn(
1 change: 0 additions & 1 deletion src/transformers/models/auto/auto_factory.py
@@ -544,7 +544,6 @@ class _LazyAutoMapping(OrderedDict):
" A mapping config to object (model or tokenizer for instance) that will load keys and values when it is accessed.
Args:
- config_mapping: The map model type to config class
- model_mapping: The map model type to model (or tokenizer) class
"""
1 change: 0 additions & 1 deletion src/transformers/models/flaubert/tokenization_flaubert.py
@@ -130,7 +130,6 @@ def _tokenize(self, text, bypass_tokenizer=False):
- Install with `pip install sacremoses`
Args:
- bypass_tokenizer: Allow users to preprocess and tokenize the sentences externally (default = False)
(bool). If True, we only apply BPE.
1 change: 0 additions & 1 deletion src/transformers/models/fsmt/tokenization_fsmt.py
@@ -354,7 +354,6 @@ def _tokenize(self, text, lang="en", bypass_tokenizer=False):
- Install with `pip install sacremoses`
Args:
- lang: ISO language code (default = 'en') (string). Languages should belong of the model supported
languages. However, we don't enforce it.
- bypass_tokenizer: Allow users to preprocess and tokenize the sentences externally (default = False)
1 change: 0 additions & 1 deletion src/transformers/models/perceiver/modeling_perceiver.py
@@ -1960,7 +1960,6 @@ def build_position_encoding(
Builds the position encoding.
Args:
- out_channels: refers to the number of channels of the position encodings.
- project_pos_dim: if specified, will project the position encodings to this dimension.
1 change: 0 additions & 1 deletion src/transformers/models/tapex/tokenization_tapex.py
@@ -1421,7 +1421,6 @@ def truncate_table_rows(
):
"""
Args:
table_content:
{"header": xxx, "rows": xxx, "id" (Optionally): xxx}
1 change: 0 additions & 1 deletion src/transformers/models/transfo_xl/modeling_transfo_xl.py
@@ -523,7 +523,6 @@ def resize_token_embeddings(self, new_num_tokens: Optional[int] = None, layer: O
weights embeddings afterwards if the model class has a *tie_weights()* method.
Arguments:
new_num_tokens: (*optional*) int:
New number of tokens in the embedding matrix. Increasing the size will add newly initialized vectors at
the end. Reducing the size will remove vectors from the end. If not provided or None: does nothing and
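The excerpt above documents Transformer-XL's `resize_token_embeddings`. A hedged sketch of the generic call (note that Transformer-XL uses adaptive embeddings, so the optional `layer` argument selects which embedding layer is resized; `transfo-xl-wt103` is just the usual example checkpoint):

```python
from transformers import TransfoXLLMHeadModel

model = TransfoXLLMHeadModel.from_pretrained("transfo-xl-wt103")  # example checkpoint

# Grow the embedding matrix; new vectors are appended, randomly initialized,
# at the end (reducing the size would instead drop vectors from the end).
model.resize_token_embeddings(new_num_tokens=model.config.vocab_size + 8)
```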
1 change: 0 additions & 1 deletion src/transformers/models/xlm/tokenization_xlm.py
@@ -791,7 +791,6 @@ def _tokenize(self, text, lang="en", bypass_tokenizer=False):
externally, and set `bypass_tokenizer=True` to bypass the tokenizer.
Args:
- lang: ISO language code (default = 'en') (string). Languages should belong of the model supported
languages. However, we don't enforce it.
- bypass_tokenizer: Allow users to preprocess and tokenize the sentences externally (default = False)
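The tokenizer excerpts above (Flaubert, FSMT, XLM) all rely on Moses pre-tokenization via `sacremoses`. A small sketch of the user-facing flow, assuming `pip install sacremoses` and the `xlm-mlm-en-2048` checkpoint purely as an example:

```python
from transformers import XLMTokenizer

tokenizer = XLMTokenizer.from_pretrained("xlm-mlm-en-2048")  # example checkpoint

# Moses pre-tokenization followed by BPE; the language defaults to "en"
tokens = tokenizer.tokenize("Hello, how are you?")
print(tokens)
```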
1 change: 0 additions & 1 deletion src/transformers/testing_utils.py
@@ -1285,7 +1285,6 @@ def pytest_terminal_summary_main(tr, id):
there.
Args:
- tr: `terminalreporter` passed from `conftest.py`
- id: unique id like `tests` or `examples` that will be incorporated into the final reports filenames - this is
needed as some jobs have multiple runs of pytest, so we can't have them overwrite each other.
1 change: 0 additions & 1 deletion src/transformers/trainer_pt_utils.py
@@ -377,7 +377,6 @@ class DistributedTensorGatherer:
For some reason, that's not going to roll their boat. This class is there to solve that problem.
Args:
world_size (`int`):
The number of processes used in the distributed training.
num_samples (`int`):
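The `DistributedTensorGatherer` docstring above explains how predictions are gathered across processes during distributed evaluation. A hedged sketch of how the pieces fit together (toy values; in real use `world_size` and `num_samples` come from the distributed run, and the loop body is only outlined in comments):

```python
from transformers.trainer_pt_utils import DistributedTensorGatherer

gatherer = DistributedTensorGatherer(world_size=2, num_samples=100)

# In the evaluation loop, each step contributes its gathered (numpy) chunk:
#     gatherer.add_arrays(predictions_from_this_step)
# After the loop, finalize() truncates the padded result back to num_samples:
#     predictions = gatherer.finalize()
```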
1 change: 0 additions & 1 deletion src/transformers/trainer_utils.py
@@ -337,7 +337,6 @@ def speed_metrics(split, start_time, num_samples=None, num_steps=None):
should be run immediately after the operation to be measured has completed.
Args:
- split: name to prefix metric (like train, eval, test...)
- start_time: operation start time
- num_samples: number of samples processed
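The `speed_metrics` excerpt above documents the timing helper; a minimal sketch using the signature shown in the hunk header:

```python
import time

from transformers.trainer_utils import speed_metrics

start_time = time.time()
# ... run the operation being measured, e.g. an evaluation loop ...
metrics = speed_metrics("eval", start_time, num_samples=1000)
print(metrics)  # runtime and samples-per-second entries prefixed with "eval"
```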
2 changes: 0 additions & 2 deletions src/transformers/utils/notebook.py
@@ -120,7 +120,6 @@ def update(self, value: int, force_update: bool = False, comment: str = None):
The main method to update the progress bar to `value`.
Args:
value (`int`):
The value to use. Must be between 0 and `total`.
force_update (`bool`, *optional*, defaults to `False`):
@@ -204,7 +203,6 @@ class NotebookTrainingTracker(NotebookProgressBar):
An object tracking the updates of an ongoing training with progress bars and a nice table reporting metrics.
Args:
num_steps (`int`): The number of steps during training. column_names (`List[str]`, *optional*):
The list of column names for the metrics table (will be inferred from the first call to
[`~utils.notebook.NotebookTrainingTracker.write_line`] if not set).
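The notebook excerpts above document `NotebookProgressBar.update`; a hedged sketch for a Jupyter environment (the bar renders through IPython display, and treating `total` as the first constructor argument is an assumption):

```python
from transformers.utils.notebook import NotebookProgressBar

progress_bar = NotebookProgressBar(100)  # total number of steps
for step in range(100):
    # `value` must be between 0 and `total`; `comment` is shown next to the bar
    progress_bar.update(step + 1, comment=f"step {step + 1}")
```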
