From c23cbdff4c097d3f3039999827a675cf8f06a32e Mon Sep 17 00:00:00 2001
From: Sylvain Gugger <35901082+sgugger@users.noreply.github.com>
Date: Thu, 11 Aug 2022 10:35:47 -0400
Subject: [PATCH] Fix docstrings with last version of hf-doc-builder styler (#18581)

* Fix docstrings with last version of hf-doc-builder styler

* Remove empty Parameter block
---
 src/transformers/benchmark/benchmark_utils.py | 5 -----
 src/transformers/generation_flax_utils.py | 1 -
 src/transformers/generation_tf_utils.py | 5 -----
 src/transformers/generation_utils.py | 5 -----
 src/transformers/modelcard.py | 2 --
 src/transformers/models/auto/auto_factory.py | 1 -
 src/transformers/models/flaubert/tokenization_flaubert.py | 1 -
 src/transformers/models/fsmt/tokenization_fsmt.py | 1 -
 src/transformers/models/perceiver/modeling_perceiver.py | 1 -
 src/transformers/models/tapex/tokenization_tapex.py | 1 -
 src/transformers/models/transfo_xl/modeling_transfo_xl.py | 1 -
 src/transformers/models/xlm/tokenization_xlm.py | 1 -
 src/transformers/testing_utils.py | 1 -
 src/transformers/trainer_pt_utils.py | 1 -
 src/transformers/trainer_utils.py | 1 -
 src/transformers/utils/notebook.py | 2 --
 16 files changed, 30 deletions(-)

diff --git a/src/transformers/benchmark/benchmark_utils.py b/src/transformers/benchmark/benchmark_utils.py
index 36fe5eb116cbef..79740805807185 100644
--- a/src/transformers/benchmark/benchmark_utils.py
+++ b/src/transformers/benchmark/benchmark_utils.py
@@ -79,7 +79,6 @@ def separate_process_wrapper_fn(func: Callable[[], None], do_multi_processing: b
 measurements it is important that the function is executed in a separate process
 Args:
-
 - `func`: (`callable`): function() -> ... generic function which will be executed in its own separate process
 - `do_multi_processing`: (`bool`) Whether to run function on separate process or not
@@ -210,7 +209,6 @@ def measure_peak_memory_cpu(function: Callable[[], None], interval=0.5, device_i
 https://github.com/pythonprofilers/memory_profiler/blob/895c4ac7a08020d66ae001e24067da6dcea42451/memory_profiler.py#L239
 Args:
-
 - `function`: (`callable`): function() -> ... function without any arguments to measure for which to measure the peak memory
@@ -228,7 +226,6 @@ def get_cpu_memory(process_id: int) -> int:
 measures current cpu memory usage of a given `process_id`
 Args:
-
 - `process_id`: (`int`) process_id for which to measure memory
 Returns
@@ -336,7 +333,6 @@ def start_memory_tracing(
 https://psutil.readthedocs.io/en/latest/#psutil.Process.memory_info
 Args:
-
 - `modules_to_trace`: (None, string, list/tuple of string) if None, all events are recorded if string or list of strings: only events from the listed module/sub-module will be recorded (e.g. 'fairseq' or 'transformers.models.gpt2.modeling_gpt2')
@@ -483,7 +479,6 @@ def stop_memory_tracing(
 Stop memory tracing cleanly and return a summary of the memory trace if a trace is given.
 Args:
-
 `memory_trace` (optional output of start_memory_tracing, default: None):
 memory trace to convert in summary
 `ignore_released_memory` (boolean, default: None):
diff --git a/src/transformers/generation_flax_utils.py b/src/transformers/generation_flax_utils.py
index 2f80c7fcf27e96..fd26a605c48bac 100644
--- a/src/transformers/generation_flax_utils.py
+++ b/src/transformers/generation_flax_utils.py
@@ -208,7 +208,6 @@ def generate(
 post](https://huggingface.co/blog/how-to-generate).
 Parameters:
-
 input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):
 The sequence used as a prompt for the generation.
 max_length (`int`, *optional*, defaults to `model.config.max_length`):
diff --git a/src/transformers/generation_tf_utils.py b/src/transformers/generation_tf_utils.py
index a3d26b789c646e..6c8da54835ac92 100644
--- a/src/transformers/generation_tf_utils.py
+++ b/src/transformers/generation_tf_utils.py
@@ -418,7 +418,6 @@ def generate(
 post](https://huggingface.co/blog/how-to-generate).
 Parameters:
-
 input_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`, `(batch_size, sequence_length, feature_dim)` or `(batch_size, num_channels, height, width)`, *optional*):
 The sequence used as a prompt for the generation or as model inputs to the encoder. If `None` the
@@ -1336,7 +1335,6 @@ def _generate(
 post](https://huggingface.co/blog/how-to-generate).
 Parameters:
-
 input_ids (`tf.Tensor` of `dtype=tf.int32` and shape `(batch_size, sequence_length)`, *optional*):
 The sequence used as a prompt for the generation. If `None` the method initializes it with `bos_token_id` and a batch size of 1.
@@ -2070,7 +2068,6 @@ def greedy_search(
 Generates sequences for models with a language modeling head using greedy decoding.
 Parameters:
-
 input_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`):
 The sequence used as a prompt for the generation.
 logits_processor (`TFLogitsProcessorList`, *optional*):
@@ -2323,7 +2320,6 @@ def sample(
 Generates sequences for models with a language modeling head using multinomial sampling.
 Parameters:
-
 input_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`):
 The sequence used as a prompt for the generation.
 logits_processor (`TFLogitsProcessorList`, *optional*):
@@ -2600,7 +2596,6 @@ def beam_search(
 Generates sequences for models with a language modeling head using beam search with multinomial sampling.
 Parameters:
-
 input_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`):
 The sequence used as a prompt for the generation.
 max_length (`int`, *optional*, defaults to 20):
diff --git a/src/transformers/generation_utils.py b/src/transformers/generation_utils.py
index bb9330de37f0cf..8f6dc6a383a774 100644
--- a/src/transformers/generation_utils.py
+++ b/src/transformers/generation_utils.py
@@ -1555,7 +1555,6 @@ def greedy_search(
 used for text-decoder, text-to-text, speech-to-text, and vision-to-text models.
 Parameters:
-
 input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
 The sequence used as a prompt for the generation.
 logits_processor (`LogitsProcessorList`, *optional*):
@@ -1789,7 +1788,6 @@ def sample(
 can be used for text-decoder, text-to-text, speech-to-text, and vision-to-text models.
 Parameters:
-
 input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
 The sequence used as a prompt for the generation.
 logits_processor (`LogitsProcessorList`, *optional*):
@@ -2046,7 +2044,6 @@ def beam_search(
 can be used for text-decoder, text-to-text, speech-to-text, and vision-to-text models.
 Parameters:
-
 input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
 The sequence used as a prompt for the generation.
 beam_scorer (`BeamScorer`):
@@ -2355,7 +2352,6 @@ def beam_sample(
 sampling** and can be used for text-decoder, text-to-text, speech-to-text, and vision-to-text models.
 Parameters:
-
 input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
 The sequence used as a prompt for the generation.
 beam_scorer (`BeamScorer`):
@@ -2672,7 +2668,6 @@ def group_beam_search(
 decoding** and can be used for text-decoder, text-to-text, speech-to-text, and vision-to-text models.
 Parameters:
-
 input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
 The sequence used as a prompt for the generation.
 beam_scorer (`BeamScorer`):
diff --git a/src/transformers/modelcard.py b/src/transformers/modelcard.py
index dc842c2abbf72c..6743c5624eaf3c 100644
--- a/src/transformers/modelcard.py
+++ b/src/transformers/modelcard.py
@@ -80,8 +80,6 @@ class ModelCard:
 Inioluwa Deborah Raji and Timnit Gebru for the proposal behind model cards. Link: https://arxiv.org/abs/1810.03993
 Note: A model card can be loaded and saved to disk.
-
- Parameters:
 """
 def __init__(self, **kwargs):
diff --git a/src/transformers/models/auto/auto_factory.py b/src/transformers/models/auto/auto_factory.py
index b412f14157f1c3..8d3fabda4706eb 100644
--- a/src/transformers/models/auto/auto_factory.py
+++ b/src/transformers/models/auto/auto_factory.py
@@ -563,7 +563,6 @@ class _LazyAutoMapping(OrderedDict):
 " A mapping config to object (model or tokenizer for instance) that will load keys and values when it is accessed.
 Args:
-
 - config_mapping: The map model type to config class
 - model_mapping: The map model type to model (or tokenizer) class
 """
diff --git a/src/transformers/models/flaubert/tokenization_flaubert.py b/src/transformers/models/flaubert/tokenization_flaubert.py
index 5d5ad2a657d1bc..911ef37dac5046 100644
--- a/src/transformers/models/flaubert/tokenization_flaubert.py
+++ b/src/transformers/models/flaubert/tokenization_flaubert.py
@@ -130,7 +130,6 @@ def _tokenize(self, text, bypass_tokenizer=False):
 - Install with `pip install sacremoses`
 Args:
-
 - bypass_tokenizer: Allow users to preprocess and tokenize the sentences externally (default = False) (bool). If True, we only apply BPE.
diff --git a/src/transformers/models/fsmt/tokenization_fsmt.py b/src/transformers/models/fsmt/tokenization_fsmt.py
index 34272e53cf0fcb..66d9819785483c 100644
--- a/src/transformers/models/fsmt/tokenization_fsmt.py
+++ b/src/transformers/models/fsmt/tokenization_fsmt.py
@@ -354,7 +354,6 @@ def _tokenize(self, text, lang="en", bypass_tokenizer=False):
 - Install with `pip install sacremoses`
 Args:
-
 - lang: ISO language code (default = 'en') (string). Languages should belong of the model supported languages. However, we don't enforce it.
 - bypass_tokenizer: Allow users to preprocess and tokenize the sentences externally (default = False)
diff --git a/src/transformers/models/perceiver/modeling_perceiver.py b/src/transformers/models/perceiver/modeling_perceiver.py
index b3a0beea3d3ca4..d069182f06c3c7 100755
--- a/src/transformers/models/perceiver/modeling_perceiver.py
+++ b/src/transformers/models/perceiver/modeling_perceiver.py
@@ -1960,7 +1960,6 @@ def build_position_encoding(
 Builds the position encoding.
 Args:
-
 - out_channels: refers to the number of channels of the position encodings.
 - project_pos_dim: if specified, will project the position encodings to this dimension.
diff --git a/src/transformers/models/tapex/tokenization_tapex.py b/src/transformers/models/tapex/tokenization_tapex.py
index 7c0725ffe7c108..555bf9fd2c6b9a 100644
--- a/src/transformers/models/tapex/tokenization_tapex.py
+++ b/src/transformers/models/tapex/tokenization_tapex.py
@@ -1398,7 +1398,6 @@ def truncate_table_rows(
 ):
 """
 Args:
-
 table_content:
 {"header": xxx, "rows": xxx, "id" (Optionally): xxx}
diff --git a/src/transformers/models/transfo_xl/modeling_transfo_xl.py b/src/transformers/models/transfo_xl/modeling_transfo_xl.py
index 75793466c7a8d1..257c45af03bbc0 100644
--- a/src/transformers/models/transfo_xl/modeling_transfo_xl.py
+++ b/src/transformers/models/transfo_xl/modeling_transfo_xl.py
@@ -523,7 +523,6 @@ def resize_token_embeddings(self, new_num_tokens: Optional[int] = None, layer: O
 weights embeddings afterwards if the model class has a *tie_weights()* method.
 Arguments:
-
 new_num_tokens: (*optional*) int:
 New number of tokens in the embedding matrix. Increasing the size will add newly initialized vectors at the end. Reducing the size will remove vectors from the end. If not provided or None: does nothing and
diff --git a/src/transformers/models/xlm/tokenization_xlm.py b/src/transformers/models/xlm/tokenization_xlm.py
index bd7b58eb053b0e..8bb021c5b96987 100644
--- a/src/transformers/models/xlm/tokenization_xlm.py
+++ b/src/transformers/models/xlm/tokenization_xlm.py
@@ -791,7 +791,6 @@ def _tokenize(self, text, lang="en", bypass_tokenizer=False):
 externally, and set `bypass_tokenizer=True` to bypass the tokenizer.
 Args:
-
 - lang: ISO language code (default = 'en') (string). Languages should belong of the model supported languages. However, we don't enforce it.
 - bypass_tokenizer: Allow users to preprocess and tokenize the sentences externally (default = False)
diff --git a/src/transformers/testing_utils.py b/src/transformers/testing_utils.py
index d21f353a60a8f5..2e99a76232c27c 100644
--- a/src/transformers/testing_utils.py
+++ b/src/transformers/testing_utils.py
@@ -1286,7 +1286,6 @@ def pytest_terminal_summary_main(tr, id):
 there.
 Args:
-
 - tr: `terminalreporter` passed from `conftest.py`
 - id: unique id like `tests` or `examples` that will be incorporated into the final reports filenames - this is needed as some jobs have multiple runs of pytest, so we can't have them overwrite each other.
diff --git a/src/transformers/trainer_pt_utils.py b/src/transformers/trainer_pt_utils.py
index e1ad471b07a9e0..57103b50d5a039 100644
--- a/src/transformers/trainer_pt_utils.py
+++ b/src/transformers/trainer_pt_utils.py
@@ -377,7 +377,6 @@ class DistributedTensorGatherer:
 For some reason, that's not going to roll their boat. This class is there to solve that problem.
 Args:
-
 world_size (`int`):
 The number of processes used in the distributed training.
 num_samples (`int`):
diff --git a/src/transformers/trainer_utils.py b/src/transformers/trainer_utils.py
index 579e5d1dc24ce4..a298fc1de5719e 100644
--- a/src/transformers/trainer_utils.py
+++ b/src/transformers/trainer_utils.py
@@ -337,7 +337,6 @@ def speed_metrics(split, start_time, num_samples=None, num_steps=None):
 should be run immediately after the operation to be measured has completed.
 Args:
-
 - split: name to prefix metric (like train, eval, test...)
 - start_time: operation start time
 - num_samples: number of samples processed
diff --git a/src/transformers/utils/notebook.py b/src/transformers/utils/notebook.py
index 8d81d76c4fd166..636cf785ea94ea 100644
--- a/src/transformers/utils/notebook.py
+++ b/src/transformers/utils/notebook.py
@@ -120,7 +120,6 @@ def update(self, value: int, force_update: bool = False, comment: str = None):
 The main method to update the progress bar to `value`.
 Args:
-
 value (`int`):
 The value to use. Must be between 0 and `total`.
 force_update (`bool`, *optional*, defaults to `False`):
@@ -204,7 +203,6 @@ class NotebookTrainingTracker(NotebookProgressBar):
 An object tracking the updates of an ongoing training with progress bars and a nice table reporting metrics.
 Args:
-
 num_steps (`int`): The number of steps during training.
 column_names (`List[str]`, *optional*):
 The list of column names for the metrics table (will be inferred from the first call to [`~utils.notebook.NotebookTrainingTracker.write_line`] if not set).
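
Every hunk above makes the same kind of change: the blank line that used to sit between an `Args:`/`Parameters:` header and the first entry is dropped, and in `modelcard.py` an empty `Parameters:` block is removed entirely. The sketch below is illustrative only; `speed_summary_before` and `speed_summary_after` are made-up helpers, not functions from the transformers codebase, and simply show the before/after shape of such a docstring.

# Illustrative sketch only: made-up functions showing the docstring layout
# this patch moves away from (before) and toward (after).


def speed_summary_before(split, start_time):
    """
    Computes speed metrics for a given split.

    Args:

    - split: name to prefix metric (like train, eval, test...)
    - start_time: operation start time
    """


def speed_summary_after(split, start_time):
    """
    Computes speed metrics for a given split.

    Args:
    - split: name to prefix metric (like train, eval, test...)
    - start_time: operation start time
    """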