From 575c9791445531a5b4a42af0f5028b92489c2669 Mon Sep 17 00:00:00 2001 From: Vasudev Gupta <7vasudevgupta@gmail.com> Date: Mon, 10 May 2021 14:18:21 +0530 Subject: [PATCH 01/41] Update community.md (#11654) --- docs/source/community.md | 1 + 1 file changed, 1 insertion(+) diff --git a/docs/source/community.md b/docs/source/community.md index 8ac15f4c889468..4c4af370a50102 100644 --- a/docs/source/community.md +++ b/docs/source/community.md @@ -55,3 +55,4 @@ This page regroups resources around 🤗 Transformers developed by the community | [Evaluate LUKE on Open Entity, an entity typing dataset](https://github.com/studio-ousia/luke/blob/master/notebooks/huggingface_open_entity.ipynb) | How to evaluate *LukeForEntityClassification* on the Open Entity dataset | [Ikuya Yamada](https://github.com/ikuyamada) |[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/studio-ousia/luke/blob/master/notebooks/huggingface_open_entity.ipynb) | | [Evaluate LUKE on TACRED, a relation extraction dataset](https://github.com/studio-ousia/luke/blob/master/notebooks/huggingface_tacred.ipynb) | How to evaluate *LukeForEntityPairClassification* on the TACRED dataset | [Ikuya Yamada](https://github.com/ikuyamada) |[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/studio-ousia/luke/blob/master/notebooks/huggingface_tacred.ipynb) | | [Evaluate LUKE on CoNLL-2003, an important NER benchmark](https://github.com/studio-ousia/luke/blob/master/notebooks/huggingface_conll_2003.ipynb) | How to evaluate *LukeForEntitySpanClassification* on the CoNLL-2003 dataset | [Ikuya Yamada](https://github.com/ikuyamada) |[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/studio-ousia/luke/blob/master/notebooks/huggingface_conll_2003.ipynb) | +| [Evaluate BigBird-Pegasus on PubMed dataset](https://github.com/vasudevgupta7/bigbird/blob/main/notebooks/bigbird_pegasus_evaluation.ipynb) | How to evaluate *BigBirdPegasusForConditionalGeneration* on PubMed dataset | [Vasudev Gupta](https://github.com/vasudevgupta7) | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/vasudevgupta7/bigbird/blob/main/notebooks/bigbird_pegasus_evaluation.ipynb) | From ef8d32c5eaa63bb724074fd011c05779ae426e93 Mon Sep 17 00:00:00 2001 From: Matt Date: Mon, 10 May 2021 14:28:04 +0100 Subject: [PATCH 02/41] Fix suggested by @bhadreshpsavani (#11660) --- .../tensorflow/text-classification/run_text_classification.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/tensorflow/text-classification/run_text_classification.py b/examples/tensorflow/text-classification/run_text_classification.py index ab4f005ee37485..32e020d7bff283 100644 --- a/examples/tensorflow/text-classification/run_text_classification.py +++ b/examples/tensorflow/text-classification/run_text_classification.py @@ -522,7 +522,7 @@ def preprocess_function(examples): # region Prediction losses # This section is outside the scope() because it's very quick to compute, but behaves badly inside it - if "label" in datasets["test"].features: + if "test" in datasets and "label" in datasets["test"].features: print("Computing prediction loss on test labels...") labels = datasets["test"]["label"] loss = float(loss_fn(labels, predictions).numpy()) From 05a930671ffbce2e1b530a2c1f5645bfb2cf4f7e Mon Sep 17 00:00:00 2001 From: Sylvain 
Gugger <35901082+sgugger@users.noreply.github.com> Date: Mon, 10 May 2021 10:58:30 -0400 Subject: [PATCH 03/41] Save scaler state dict when checkpointing (#11663) --- src/transformers/trainer.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/src/transformers/trainer.py b/src/transformers/trainer.py index e5312c8a2db652..fb9c37725a2b7f 100755 --- a/src/transformers/trainer.py +++ b/src/transformers/trainer.py @@ -1480,12 +1480,16 @@ def _save_checkpoint(self, model, trial, metrics=None): with warnings.catch_warnings(record=True) as caught_warnings: torch.save(self.lr_scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt")) reissue_pt_warnings(caught_warnings) + if self.use_amp: + torch.save(self.scaler.state_dict(), os.path.join(output_dir, "scaler.pt")) elif self.is_world_process_zero() and not self.deepspeed: # deepspeed.save_checkpoint above saves model/optim/sched torch.save(self.optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt")) with warnings.catch_warnings(record=True) as caught_warnings: torch.save(self.lr_scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt")) reissue_pt_warnings(caught_warnings) + if self.use_amp: + torch.save(self.scaler.state_dict(), os.path.join(output_dir, "scaler.pt")) # Determine the new best metric / best model checkpoint if metrics is not None and self.args.metric_for_best_model is not None: @@ -1569,6 +1573,8 @@ def _load_optimizer_and_scheduler(self, checkpoint): with warnings.catch_warnings(record=True) as caught_warnings: self.lr_scheduler.load_state_dict(torch.load(os.path.join(checkpoint, "scheduler.pt"))) reissue_pt_warnings(caught_warnings) + if self.use_amp and os.path.isfile(os.path.join(checkpoint, "scaler.pt")): + self.scaler.load_state_dict(torch.load(os.path.join(checkpoint, "scaler.pt"))) def hyperparameter_search( self, From dcb0e61430e3a38ae6b89d11c3bef5a82aec6019 Mon Sep 17 00:00:00 2001 From: Patrick von Platen Date: Mon, 10 May 2021 17:38:17 +0100 Subject: [PATCH 04/41] push (#11667) --- src/transformers/models/auto/tokenization_auto.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/transformers/models/auto/tokenization_auto.py b/src/transformers/models/auto/tokenization_auto.py index deb78cc21d2de8..b9221c83307dca 100644 --- a/src/transformers/models/auto/tokenization_auto.py +++ b/src/transformers/models/auto/tokenization_auto.py @@ -63,6 +63,7 @@ BertConfig, BertGenerationConfig, BigBirdConfig, + BigBirdPegasusConfig, BlenderbotConfig, BlenderbotSmallConfig, CamembertConfig, @@ -275,6 +276,7 @@ (Wav2Vec2Config, (Wav2Vec2CTCTokenizer, None)), (GPTNeoConfig, (GPT2Tokenizer, GPT2TokenizerFast)), (LukeConfig, (LukeTokenizer, None)), + (BigBirdPegasusConfig, (PegasusTokenizer, PegasusTokenizerFast)), ] ) From 9120ae7d66e3f76f375fd7c941c721ca9164f581 Mon Sep 17 00:00:00 2001 From: Pavel Soriano Date: Mon, 10 May 2021 19:28:10 +0200 Subject: [PATCH 05/41] Fixes NoneType exception when topk is larger than one coupled with a small context in the Question-Answering pipeline (#11628) * added fix to decode function. 
added test to qa pipeline tests * completed topk docstring * fixed formatting with black * applied style_doc to fix line length --- .../pipelines/question_answering.py | 21 ++++++++++--- tests/test_pipelines_question_answering.py | 31 ++++++++++++++++++- 2 files changed, 46 insertions(+), 6 deletions(-) diff --git a/src/transformers/pipelines/question_answering.py b/src/transformers/pipelines/question_answering.py index 0008f78c58b1be..d04fcfe108fed0 100644 --- a/src/transformers/pipelines/question_answering.py +++ b/src/transformers/pipelines/question_answering.py @@ -177,7 +177,8 @@ def __call__(self, *args, **kwargs): One or several context(s) associated with the question(s) (must be used in conjunction with the :obj:`question` argument). topk (:obj:`int`, `optional`, defaults to 1): - The number of answers to return (will be chosen by order of likelihood). + The number of answers to return (will be chosen by order of likelihood). Note that we return less than + topk answers if there are not enough options available within the context. doc_stride (:obj:`int`, `optional`, defaults to 128): If the context is too long to fit with the question for the model, it will be split in several chunks with some overlap. This argument controls the size of that overlap. @@ -341,7 +342,9 @@ def __call__(self, *args, **kwargs): # Mask CLS start_[0] = end_[0] = 0.0 - starts, ends, scores = self.decode(start_, end_, kwargs["topk"], kwargs["max_answer_len"]) + starts, ends, scores = self.decode( + start_, end_, kwargs["topk"], kwargs["max_answer_len"], undesired_tokens + ) if not self.tokenizer.is_fast: char_to_word = np.array(example.char_to_word_offset) @@ -403,7 +406,9 @@ def __call__(self, *args, **kwargs): return all_answers[0] return all_answers - def decode(self, start: np.ndarray, end: np.ndarray, topk: int, max_answer_len: int) -> Tuple: + def decode( + self, start: np.ndarray, end: np.ndarray, topk: int, max_answer_len: int, undesired_tokens: np.ndarray + ) -> Tuple: """ Take the output of any :obj:`ModelForQuestionAnswering` and will generate probabilities for each span to be the actual answer. @@ -417,6 +422,7 @@ def decode(self, start: np.ndarray, end: np.ndarray, topk: int, max_answer_len: end (:obj:`np.ndarray`): Individual end probabilities for each token. topk (:obj:`int`): Indicates how many possible answer span(s) to extract from the model output. max_answer_len (:obj:`int`): Maximum size of the answer to extract from the model's output. 
+ undesired_tokens (:obj:`np.ndarray`): Mask determining tokens that can be part of the answer """ # Ensure we have batch axis if start.ndim == 1: @@ -441,8 +447,13 @@ def decode(self, start: np.ndarray, end: np.ndarray, topk: int, max_answer_len: idx = np.argpartition(-scores_flat, topk)[0:topk] idx_sort = idx[np.argsort(-scores_flat[idx])] - start, end = np.unravel_index(idx_sort, candidates.shape)[1:] - return start, end, candidates[0, start, end] + starts, ends = np.unravel_index(idx_sort, candidates.shape)[1:] + desired_spans = np.isin(starts, undesired_tokens.nonzero()) & np.isin(ends, undesired_tokens.nonzero()) + starts = starts[desired_spans] + ends = ends[desired_spans] + scores = candidates[0, starts, ends] + + return starts, ends, scores def span_to_answer(self, text: str, start: int, end: int) -> Dict[str, Union[str, int]]: """ diff --git a/tests/test_pipelines_question_answering.py b/tests/test_pipelines_question_answering.py index 978559f2eb5f36..128a4d51cd5bdf 100644 --- a/tests/test_pipelines_question_answering.py +++ b/tests/test_pipelines_question_answering.py @@ -15,7 +15,8 @@ import unittest from transformers.data.processors.squad import SquadExample -from transformers.pipelines import Pipeline, QuestionAnsweringArgumentHandler +from transformers.pipelines import Pipeline, QuestionAnsweringArgumentHandler, pipeline +from transformers.testing_utils import slow from .test_pipelines_common import CustomInputPipelineCommonMixin @@ -50,6 +51,34 @@ class QAPipelineTests(CustomInputPipelineCommonMixin, unittest.TestCase): }, ] + def get_pipelines(self): + question_answering_pipelines = [ + pipeline( + task=self.pipeline_task, + model=model, + tokenizer=model, + framework="pt", + **self.pipeline_loading_kwargs, + ) + for model in self.small_models + ] + return question_answering_pipelines + + @slow + def test_high_topk_small_context(self): + self.pipeline_running_kwargs.update({"topk": 20}) + valid_inputs = [ + {"question": "Where was HuggingFace founded ?", "context": "Paris"}, + ] + nlps = self.get_pipelines() + output_keys = {"score", "answer", "start", "end"} + for nlp in nlps: + result = nlp(valid_inputs, **self.pipeline_running_kwargs) + self.assertIsInstance(result, dict) + + for key in output_keys: + self.assertIn(key, result) + def _test_pipeline(self, nlp: Pipeline): output_keys = {"score", "answer", "start", "end"} valid_inputs = [ From 024cd19bb7c188a0e4aa681d248ad9f47587ddab Mon Sep 17 00:00:00 2001 From: Julien Plu Date: Tue, 11 May 2021 11:42:21 +0200 Subject: [PATCH 06/41] Add MacOS TF version (#11674) Co-authored-by: Julien Plu --- src/transformers/file_utils.py | 1 + 1 file changed, 1 insertion(+) diff --git a/src/transformers/file_utils.py b/src/transformers/file_utils.py index 6e24ee022d47e2..2559ce1d7b3120 100644 --- a/src/transformers/file_utils.py +++ b/src/transformers/file_utils.py @@ -89,6 +89,7 @@ "tf-nightly-gpu", "intel-tensorflow", "tensorflow-rocm", + "tensorflow-macos", ) _tf_version = None # For the metadata, we have to look for both tensorflow and tensorflow-cpu From 64232bc0df7e28f91bdad2b29fca1808089e3dfd Mon Sep 17 00:00:00 2001 From: Jonathan Chang <31893406+cccntu@users.noreply.github.com> Date: Tue, 11 May 2021 19:58:38 +0800 Subject: [PATCH 07/41] Add --text_column to run_summarization_no_trainer (#11673) --- .../run_summarization_no_trainer.py | 19 +++++++++++++++---- 1 file changed, 15 insertions(+), 4 deletions(-) diff --git a/examples/pytorch/summarization/run_summarization_no_trainer.py 
b/examples/pytorch/summarization/run_summarization_no_trainer.py index 7bd2edd6dd6534..ab204907d4c739 100644 --- a/examples/pytorch/summarization/run_summarization_no_trainer.py +++ b/examples/pytorch/summarization/run_summarization_no_trainer.py @@ -184,6 +184,12 @@ def parse_args(): default=None, help="Pretrained tokenizer name or path if not the same as model_name", ) + parser.add_argument( + "--text_column", + type=str, + default=None, + help="The name of the column in the datasets containing the full texts (for summarization).", + ) parser.add_argument( "--summary_column", type=str, @@ -371,9 +377,14 @@ def main(): # Get the column names for input/target. dataset_columns = summarization_name_mapping.get(args.dataset_name, None) - text_column_name = dataset_columns[0] if dataset_columns is not None else column_names[0] - - padding = "max_length" if args.pad_to_max_length else False + if args.text_column is None: + text_column = dataset_columns[0] if dataset_columns is not None else column_names[0] + else: + text_column = args.text_column + if text_column not in column_names: + raise ValueError( + f"--text_column' value '{args.text_column}' needs to be one of: {', '.join(column_names)}" + ) if args.summary_column is None: summary_column = dataset_columns[1] if dataset_columns is not None else column_names[1] else: @@ -388,7 +399,7 @@ def main(): padding = "max_length" if args.pad_to_max_length else False def preprocess_function(examples): - inputs = examples[text_column_name] + inputs = examples[text_column] targets = examples[summary_column] inputs = [prefix + inp for inp in inputs] model_inputs = tokenizer(inputs, max_length=args.max_source_length, padding=padding, truncation=True) From 901153c61e39fc01961dfef3613c4e529c595476 Mon Sep 17 00:00:00 2001 From: nxznm <55944993+nxznm@users.noreply.github.com> Date: Tue, 11 May 2021 20:12:02 +0800 Subject: [PATCH 08/41] Fix docstring of description about input_ids (#11672) --- src/transformers/models/distilbert/modeling_distilbert.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/transformers/models/distilbert/modeling_distilbert.py b/src/transformers/models/distilbert/modeling_distilbert.py index b30b3db90738b7..b3cb1a93cced3a 100755 --- a/src/transformers/models/distilbert/modeling_distilbert.py +++ b/src/transformers/models/distilbert/modeling_distilbert.py @@ -588,7 +588,7 @@ def __init__(self, config): self.init_weights() - @add_start_docstrings_to_model_forward(DISTILBERT_INPUTS_DOCSTRING.format("batch_size, num_choices")) + @add_start_docstrings_to_model_forward(DISTILBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( tokenizer_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, From b3429ab678d0c782b0a1cc6df76f848b5c013f91 Mon Sep 17 00:00:00 2001 From: Matt Date: Tue, 11 May 2021 15:49:34 +0100 Subject: [PATCH 09/41] Grammar and style edits for the frontpage README (#11679) * Grammar and style edits for the frontpage README * Going all-in on em-dashes because you only live once * Update README.md Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> --- README.md | 48 ++++++++++++++++++++++++------------------------ 1 file changed, 24 insertions(+), 24 deletions(-) diff --git a/README.md b/README.md index 37f1a71c3c8618..fb5b8a62570752 100644 --- a/README.md +++ b/README.md @@ -41,15 +41,15 @@ limitations under the License.

State-of-the-art Natural Language Processing for Jax, PyTorch and TensorFlow -🤗 Transformers provides thousands of pretrained models to perform tasks on texts such as classification, information extraction, question answering, summarization, translation, text generation, etc in 100+ languages. Its aim is to make cutting-edge NLP easier to use for everyone. +🤗 Transformers provides thousands of pretrained models to perform tasks on texts such as classification, information extraction, question answering, summarization, translation, text generation and more in over 100 languages. Its aim is to make cutting-edge NLP easier to use for everyone. -🤗 Transformers provides APIs to quickly download and use those pretrained models on a given text, fine-tune them on your own datasets then share them with the community on our [model hub](https://huggingface.co/models). At the same time, each python module defining an architecture can be used as a standalone and modified to enable quick research experiments. +🤗 Transformers provides APIs to quickly download and use those pretrained models on a given text, fine-tune them on your own datasets and then share them with the community on our [model hub](https://huggingface.co/models). At the same time, each python module defining an architecture is fully standalone and can be modified to enable quick research experiments. -🤗 Transformers is backed by the three most popular deep learning libraries, [Jax](https://jax.readthedocs.io/en/latest/), [PyTorch](https://pytorch.org/) and [TensorFlow](https://www.tensorflow.org/), with a seamless integration between them, allowing you to train your models with one then load it for inference with the other. +🤗 Transformers is backed by the three most popular deep learning libraries — [Jax](https://jax.readthedocs.io/en/latest/), [PyTorch](https://pytorch.org/) and [TensorFlow](https://www.tensorflow.org/) — with a seamless integration between them. It's straightforward to train your models with one before loading them for inference with the other. ## Online demos -You can test most of our models directly on their pages from the [model hub](https://huggingface.co/models). We also offer [private model hosting, versioning, & an inference API](https://huggingface.co/pricing) to use those models. +You can test most of our models directly on their pages from the [model hub](https://huggingface.co/models). We also offer [private model hosting, versioning, & an inference API](https://huggingface.co/pricing) for public and private models. Here are a few examples: - [Masked word completion with BERT](https://huggingface.co/bert-base-uncased?text=Paris+is+the+%5BMASK%5D+of+France) @@ -64,20 +64,20 @@ Here are a few examples: ## Quick tour -To immediately use a model on a given text, we provide the `pipeline` API. Pipelines group together a pretrained model with the preprocessing that was used during that model training. Here is how to quickly use a pipeline to classify positive versus negative texts +To immediately use a model on a given text, we provide the `pipeline` API. Pipelines group together a pretrained model with the preprocessing that was used during that model's training. 
Here is how to quickly use a pipeline to classify positive versus negative texts: ```python >>> from transformers import pipeline # Allocate a pipeline for sentiment-analysis >>> classifier = pipeline('sentiment-analysis') ->>> classifier('We are very happy to include pipeline into the transformers repository.') -[{'label': 'POSITIVE', 'score': 0.9978193640708923}] +>>> classifier('We are very happy to introduce pipeline to the transformers repository.') +[{'label': 'POSITIVE', 'score': 0.9996980428695679}] ``` -The second line of code downloads and caches the pretrained model used by the pipeline, the third line evaluates it on the given text. Here the answer is "positive" with a confidence of 99.8%. +The second line of code downloads and caches the pretrained model used by the pipeline, while the third evaluates it on the given text. Here the answer is "positive" with a confidence of 99.97%. -This is another example of pipeline used for that can extract question answers from some context: +Many NLP tasks have a pre-trained `pipeline` ready to go. For example, we can easily extract question answers given context: ``` python >>> from transformers import pipeline @@ -86,15 +86,15 @@ This is another example of pipeline used for that can extract question answers f >>> question_answerer = pipeline('question-answering') >>> question_answerer({ ... 'question': 'What is the name of the repository ?', -... 'context': 'Pipeline have been included in the huggingface/transformers repository' +... 'context': 'Pipeline has been included in the huggingface/transformers repository' ... }) -{'score': 0.5135612454720828, 'start': 35, 'end': 59, 'answer': 'huggingface/transformers'} +{'score': 0.30970096588134766, 'start': 34, 'end': 58, 'answer': 'huggingface/transformers'} ``` -On top of the answer, the pretrained model used here returned its confidence score, along with the start position and its end position in the tokenized sentence. You can learn more about the tasks supported by the `pipeline` API in [this tutorial](https://huggingface.co/transformers/task_summary.html). +In addition to the answer, the pretrained model used here returned its confidence score, along with the start position and end position of the answer in the tokenized sentence. You can learn more about the tasks supported by the `pipeline` API in [this tutorial](https://huggingface.co/transformers/task_summary.html). -To download and use any of the pretrained models on your given task, you just need to use those three lines of codes (PyTorch version): +To download and use any of the pretrained models on your given task, all it takes is three lines of code. Here is the PyTorch version: ```python >>> from transformers import AutoTokenizer, AutoModel @@ -104,7 +104,7 @@ To download and use any of the pretrained models on your given task, you just ne >>> inputs = tokenizer("Hello world!", return_tensors="pt") >>> outputs = model(**inputs) ``` -or for TensorFlow: +And here is the equivalent code for TensorFlow: ```python >>> from transformers import AutoTokenizer, TFAutoModel @@ -115,9 +115,9 @@ or for TensorFlow: >>> outputs = model(**inputs) ``` -The tokenizer is responsible for all the preprocessing the pretrained model expects, and can be called directly on one (or list) of texts (as we can see on the fourth line of both code examples). It will output a dictionary you can directly pass to your model (which is done on the fifth line). 
+The tokenizer is responsible for all the preprocessing the pretrained model expects, and can be called directly on a single string (as in the above examples) or a list. It will output a dictionary that you can use in downstream code or simply directly pass to your model using the ** argument unpacking operator. -The model itself is a regular [Pytorch `nn.Module`](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) or a [TensorFlow `tf.keras.Model`](https://www.tensorflow.org/api_docs/python/tf/keras/Model) (depending on your backend) which you can use normally. For instance, [this tutorial](https://huggingface.co/transformers/training.html) explains how to integrate such a model in classic PyTorch or TensorFlow training loop, or how to use our `Trainer` API to quickly fine-tune the on a new dataset. +The model itself is a regular [Pytorch `nn.Module`](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) or a [TensorFlow `tf.keras.Model`](https://www.tensorflow.org/api_docs/python/tf/keras/Model) (depending on your backend) which you can use normally. [This tutorial](https://huggingface.co/transformers/training.html) explains how to integrate such a model into a classic PyTorch or TensorFlow training loop, or how to use our `Trainer` API to quickly fine-tune on a new dataset. ## Why should I use transformers? @@ -135,16 +135,16 @@ The model itself is a regular [Pytorch `nn.Module`](https://pytorch.org/docs/sta 1. Choose the right framework for every part of a model's lifetime: - Train state-of-the-art models in 3 lines of code. - Move a single model between TF2.0/PyTorch frameworks at will. - - Seamlessly pick the right framework for training, evaluation, production. + - Seamlessly pick the right framework for training, evaluation and production. 1. Easily customize a model or an example to your needs: - - Examples for each architecture to reproduce the results by the official authors of said architecture. - - Expose the models internal as consistently as possible. + - We provide examples for each architecture to reproduce the results published by its original authors. + - Model internals are exposed as consistently as possible. - Model files can be used independently of the library for quick experiments. ## Why shouldn't I use transformers? -- This library is not a modular toolbox of building blocks for neural nets. The code in the model files is not refactored with additional abstractions on purpose, so that researchers can quickly iterate on each of the models without diving in additional abstractions/files. +- This library is not a modular toolbox of building blocks for neural nets. The code in the model files is not refactored with additional abstractions on purpose, so that researchers can quickly iterate on each of the models without diving into additional abstractions/files. - The training API is not intended to work on any model but is optimized to work with the models provided by the library. For generic machine learning loops, you should use another library. - While we strive to present as many use cases as possible, the scripts in our [examples folder](https://github.com/huggingface/transformers/tree/master/examples) are just that: examples. It is expected that they won't work out-of-the box on your specific problem and that you will be required to change a few lines of code to adapt them to your needs. 
@@ -159,7 +159,7 @@ You should install 🤗 Transformers in a [virtual environment](https://docs.pyt First, create a virtual environment with the version of Python you're going to use and activate it. Then, you will need to install at least one of Flax, PyTorch or TensorFlow. -Please refer to [TensorFlow installation page](https://www.tensorflow.org/install/), [PyTorch installation page](https://pytorch.org/get-started/locally/#start-locally) regarding the specific install command for your platform and/or [Flax installation page](https://github.com/google/flax#quick-install). +Please refer to [TensorFlow installation page](https://www.tensorflow.org/install/), [PyTorch installation page](https://pytorch.org/get-started/locally/#start-locally) and/or [Flax installation page](https://github.com/google/flax#quick-install) regarding the specific install command for your platform. When one of those backends has been installed, 🤗 Transformers can be installed using pip as follows: @@ -181,7 +181,7 @@ conda install -c huggingface transformers Follow the installation pages of Flax, PyTorch or TensorFlow to see how to install them with conda. -## Models architectures +## Model architectures **[All the model checkpoints](https://huggingface.co/models)** provided by 🤗 Transformers are seamlessly integrated from the huggingface.co [model hub](https://huggingface.co) where they are uploaded directly by [users](https://huggingface.co/users) and [organizations](https://huggingface.co/organizations). @@ -249,9 +249,9 @@ Min, Patrick Lewis, Ledell Wu, Sergey Edunov, Danqi Chen, and Wen-tau Yih. 1. **[XLSR-Wav2Vec2](https://huggingface.co/transformers/model_doc/xlsr_wav2vec2.html)** (from Facebook AI) released with the paper [Unsupervised Cross-Lingual Representation Learning For Speech Recognition](https://arxiv.org/abs/2006.13979) by Alexis Conneau, Alexei Baevski, Ronan Collobert, Abdelrahman Mohamed, Michael Auli. 1. Want to contribute a new model? We have added a **detailed guide and templates** to guide you in the process of adding a new model. You can find them in the [`templates`](./templates) folder of the repository. Be sure to check the [contributing guidelines](./CONTRIBUTING.md) and contact the maintainers or open an issue to collect feedbacks before starting your PR. -To check if each model has an implementation in Flax, PyTorch or TensorFlow, or has an associated tokenizer backed by the 🤗 Tokenizers library, refer to [this table](https://huggingface.co/transformers/index.html#bigtable) +To check if each model has an implementation in Flax, PyTorch or TensorFlow, or has an associated tokenizer backed by the 🤗 Tokenizers library, refer to [this table](https://huggingface.co/transformers/index.html#bigtable). -These implementations have been tested on several datasets (see the example scripts) and should match the performances of the original implementations. You can find more details on the performances in the Examples section of the [documentation](https://huggingface.co/transformers/examples.html). +These implementations have been tested on several datasets (see the example scripts) and should match the performance of the original implementations. You can find more details on performance in the Examples section of the [documentation](https://huggingface.co/transformers/examples.html). 
## Learn more From a135f595368581fb91f13a523be778b902ce0e07 Mon Sep 17 00:00:00 2001 From: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> Date: Tue, 11 May 2021 11:30:34 -0400 Subject: [PATCH 10/41] Auto modelcard (#11599) * Autogenerate model cards from the Trainer * ModelCard deprecated * Fix test * Style * Apply suggestions from code review Co-authored-by: Patrick von Platen * Address review comments * Quality * With all metadata * Metadata * Post-merge conflict mess * Data args and all examples * Default license and languages when possible Co-authored-by: Patrick von Platen --- examples/pytorch/language-modeling/run_clm.py | 11 +- examples/pytorch/language-modeling/run_mlm.py | 11 +- examples/pytorch/language-modeling/run_plm.py | 11 +- examples/pytorch/multiple-choice/run_swag.py | 9 +- examples/pytorch/question-answering/run_qa.py | 11 +- .../question-answering/run_qa_beam_search.py | 11 +- .../summarization/run_summarization.py | 11 +- .../pytorch/text-classification/run_glue.py | 9 +- .../pytorch/token-classification/run_ner.py | 11 +- .../pytorch/translation/run_translation.py | 15 +- src/transformers/modelcard.py | 416 ++++++++++++++++++ src/transformers/pipelines/__init__.py | 19 +- src/transformers/trainer.py | 59 ++- tests/test_trainer.py | 1 - 14 files changed, 564 insertions(+), 41 deletions(-) diff --git a/examples/pytorch/language-modeling/run_clm.py b/examples/pytorch/language-modeling/run_clm.py index fdf0479095bad9..2ce18d2a81c952 100755 --- a/examples/pytorch/language-modeling/run_clm.py +++ b/examples/pytorch/language-modeling/run_clm.py @@ -447,7 +447,16 @@ def group_texts(examples): trainer.save_metrics("eval", metrics) if training_args.push_to_hub: - trainer.push_to_hub() + kwargs = {"finetuned_from": model_args.model_name_or_path, "tags": "text-generation"} + if data_args.dataset_name is not None: + kwargs["dataset_tags"] = data_args.dataset_name + if data_args.dataset_config_name is not None: + kwargs["dataset_args"] = data_args.dataset_config_name + kwargs["dataset"] = f"{data_args.dataset_name} {data_args.dataset_config_name}" + else: + kwargs["dataset"] = data_args.dataset_name + + trainer.push_to_hub(**kwargs) def _mp_fn(index): diff --git a/examples/pytorch/language-modeling/run_mlm.py b/examples/pytorch/language-modeling/run_mlm.py index 928d68c8f01be3..b5c7ad92c5da23 100755 --- a/examples/pytorch/language-modeling/run_mlm.py +++ b/examples/pytorch/language-modeling/run_mlm.py @@ -476,7 +476,16 @@ def group_texts(examples): trainer.save_metrics("eval", metrics) if training_args.push_to_hub: - trainer.push_to_hub() + kwargs = {"finetuned_from": model_args.model_name_or_path, "tags": "fill-mask"} + if data_args.dataset_name is not None: + kwargs["dataset_tags"] = data_args.dataset_name + if data_args.dataset_config_name is not None: + kwargs["dataset_args"] = data_args.dataset_config_name + kwargs["dataset"] = f"{data_args.dataset_name} {data_args.dataset_config_name}" + else: + kwargs["dataset"] = data_args.dataset_name + + trainer.push_to_hub(**kwargs) def _mp_fn(index): diff --git a/examples/pytorch/language-modeling/run_plm.py b/examples/pytorch/language-modeling/run_plm.py index 2dea89f4d06285..458b2c1d43c626 100755 --- a/examples/pytorch/language-modeling/run_plm.py +++ b/examples/pytorch/language-modeling/run_plm.py @@ -452,7 +452,16 @@ def group_texts(examples): trainer.save_metrics("eval", metrics) if training_args.push_to_hub: - trainer.push_to_hub() + kwargs = {"finetuned_from": model_args.model_name_or_path, "tags": 
"language-modeling"} + if data_args.dataset_name is not None: + kwargs["dataset_tags"] = data_args.dataset_name + if data_args.dataset_config_name is not None: + kwargs["dataset_args"] = data_args.dataset_config_name + kwargs["dataset"] = f"{data_args.dataset_name} {data_args.dataset_config_name}" + else: + kwargs["dataset"] = data_args.dataset_name + + trainer.push_to_hub(**kwargs) def _mp_fn(index): diff --git a/examples/pytorch/multiple-choice/run_swag.py b/examples/pytorch/multiple-choice/run_swag.py index e0d9e0571ef4f7..9999cb25d124ff 100755 --- a/examples/pytorch/multiple-choice/run_swag.py +++ b/examples/pytorch/multiple-choice/run_swag.py @@ -428,7 +428,14 @@ def compute_metrics(eval_predictions): trainer.save_metrics("eval", metrics) if training_args.push_to_hub: - trainer.push_to_hub() + trainer.push_to_hub( + finetuned_from=model_args.model_name_or_path, + tags="multiple-choice", + dataset_tags="swag", + dataset_args="regular", + dataset="SWAG", + language="en", + ) def _mp_fn(index): diff --git a/examples/pytorch/question-answering/run_qa.py b/examples/pytorch/question-answering/run_qa.py index 07f7c28ba6538c..54b1d6919f4e33 100755 --- a/examples/pytorch/question-answering/run_qa.py +++ b/examples/pytorch/question-answering/run_qa.py @@ -601,7 +601,16 @@ def compute_metrics(p: EvalPrediction): trainer.save_metrics("predict", metrics) if training_args.push_to_hub: - trainer.push_to_hub() + kwargs = {"finetuned_from": model_args.model_name_or_path, "tags": "question-answering"} + if data_args.dataset_name is not None: + kwargs["dataset_tags"] = data_args.dataset_name + if data_args.dataset_config_name is not None: + kwargs["dataset_args"] = data_args.dataset_config_name + kwargs["dataset"] = f"{data_args.dataset_name} {data_args.dataset_config_name}" + else: + kwargs["dataset"] = data_args.dataset_name + + trainer.push_to_hub(**kwargs) def _mp_fn(index): diff --git a/examples/pytorch/question-answering/run_qa_beam_search.py b/examples/pytorch/question-answering/run_qa_beam_search.py index 9da18ac5fd2b91..320785230e393a 100755 --- a/examples/pytorch/question-answering/run_qa_beam_search.py +++ b/examples/pytorch/question-answering/run_qa_beam_search.py @@ -640,7 +640,16 @@ def compute_metrics(p: EvalPrediction): trainer.save_metrics("predict", metrics) if training_args.push_to_hub: - trainer.push_to_hub() + kwargs = {"finetuned_from": model_args.model_name_or_path, "tags": "question-answering"} + if data_args.dataset_name is not None: + kwargs["dataset_tags"] = data_args.dataset_name + if data_args.dataset_config_name is not None: + kwargs["dataset_args"] = data_args.dataset_config_name + kwargs["dataset"] = f"{data_args.dataset_name} {data_args.dataset_config_name}" + else: + kwargs["dataset"] = data_args.dataset_name + + trainer.push_to_hub(**kwargs) def _mp_fn(index): diff --git a/examples/pytorch/summarization/run_summarization.py b/examples/pytorch/summarization/run_summarization.py index d049482ca8c2f0..4ceec8944692b7 100755 --- a/examples/pytorch/summarization/run_summarization.py +++ b/examples/pytorch/summarization/run_summarization.py @@ -583,7 +583,16 @@ def compute_metrics(eval_preds): writer.write("\n".join(predictions)) if training_args.push_to_hub: - trainer.push_to_hub() + kwargs = {"finetuned_from": model_args.model_name_or_path, "tags": "summarization"} + if data_args.dataset_name is not None: + kwargs["dataset_tags"] = data_args.dataset_name + if data_args.dataset_config_name is not None: + kwargs["dataset_args"] = data_args.dataset_config_name + 
kwargs["dataset"] = f"{data_args.dataset_name} {data_args.dataset_config_name}" + else: + kwargs["dataset"] = data_args.dataset_name + + trainer.push_to_hub(**kwargs) return results diff --git a/examples/pytorch/text-classification/run_glue.py b/examples/pytorch/text-classification/run_glue.py index 3e49f743f3d25e..79120e2ba12312 100755 --- a/examples/pytorch/text-classification/run_glue.py +++ b/examples/pytorch/text-classification/run_glue.py @@ -516,7 +516,14 @@ def compute_metrics(p: EvalPrediction): writer.write(f"{index}\t{item}\n") if training_args.push_to_hub: - trainer.push_to_hub() + kwargs = {"finetuned_from": model_args.model_name_or_path, "tags": "text-classification"} + if data_args.task_name is not None: + kwargs["language"] = "en" + kwargs["dataset_tags"] = "glue" + kwargs["dataset_args"] = data_args.task_name + kwargs["dataset"] = f"GLUE {data_args.task_name.upper()}" + + trainer.push_to_hub(**kwargs) def _mp_fn(index): diff --git a/examples/pytorch/token-classification/run_ner.py b/examples/pytorch/token-classification/run_ner.py index 08434e554b2861..70936c8544ac54 100755 --- a/examples/pytorch/token-classification/run_ner.py +++ b/examples/pytorch/token-classification/run_ner.py @@ -491,7 +491,16 @@ def compute_metrics(p): writer.write(" ".join(prediction) + "\n") if training_args.push_to_hub: - trainer.push_to_hub() + kwargs = {"finetuned_from": model_args.model_name_or_path, "tags": "token-classification"} + if data_args.dataset_name is not None: + kwargs["dataset_tags"] = data_args.dataset_name + if data_args.dataset_config_name is not None: + kwargs["dataset_args"] = data_args.dataset_config_name + kwargs["dataset"] = f"{data_args.dataset_name} {data_args.dataset_config_name}" + else: + kwargs["dataset"] = data_args.dataset_name + + trainer.push_to_hub(**kwargs) def _mp_fn(index): diff --git a/examples/pytorch/translation/run_translation.py b/examples/pytorch/translation/run_translation.py index c6d83b30a15a1a..c525f6289dca60 100755 --- a/examples/pytorch/translation/run_translation.py +++ b/examples/pytorch/translation/run_translation.py @@ -575,7 +575,20 @@ def compute_metrics(eval_preds): writer.write("\n".join(predictions)) if training_args.push_to_hub: - trainer.push_to_hub() + kwargs = {"finetuned_from": model_args.model_name_or_path, "tags": "translation"} + if data_args.dataset_name is not None: + kwargs["dataset_tags"] = data_args.dataset_name + if data_args.dataset_config_name is not None: + kwargs["dataset_args"] = data_args.dataset_config_name + kwargs["dataset"] = f"{data_args.dataset_name} {data_args.dataset_config_name}" + else: + kwargs["dataset"] = data_args.dataset_name + + languages = [l for l in [data_args.source_lang, data_args.target_lang] if l is not None] + if len(languages) > 0: + kwargs["language"] = languages + + trainer.push_to_hub(**kwargs) return results diff --git a/src/transformers/modelcard.py b/src/transformers/modelcard.py index 97fdf1903ae6c8..ea92a2c2915835 100644 --- a/src/transformers/modelcard.py +++ b/src/transformers/modelcard.py @@ -18,7 +18,15 @@ import copy import json import os +import warnings +from dataclasses import dataclass +from pathlib import Path +from typing import Any, Dict, List, Optional, Union +import requests +from huggingface_hub import HfApi + +from . 
import __version__ from .file_utils import ( CONFIG_NAME, MODEL_CARD_NAME, @@ -26,9 +34,14 @@ WEIGHTS_NAME, cached_path, hf_bucket_url, + is_datasets_available, + is_offline_mode, is_remote_url, + is_tokenizers_available, + is_torch_available, ) from .models.auto.configuration_auto import ALL_PRETRAINED_CONFIG_ARCHIVE_MAP +from .training_args import ParallelMode from .utils import logging @@ -49,6 +62,9 @@ class ModelCard: """ def __init__(self, **kwargs): + warnings.warn( + "The class `ModelCard` is deprecated and will be removed in version 5 of Transformers", FutureWarning + ) # Recommended attributes from https://arxiv.org/abs/1810.03993 (see papers) self.model_details = kwargs.pop("model_details", {}) self.intended_use = kwargs.pop("intended_use", {}) @@ -218,3 +234,403 @@ def to_json_file(self, json_file_path): """Save this instance to a json file.""" with open(json_file_path, "w", encoding="utf-8") as writer: writer.write(self.to_json_string()) + + +AUTOGENERATED_COMMENT = """ + +""" + + +TASK_TAG_TO_NAME_MAPPING = { + "fill-mask": "Masked Language Modeling", + "multiple-choice": "Multiple Choice", + "question-answering": "Question Answering", + "summarization": "Summarization", + "text-classification": "Text Classification", + "text-generation": "Causal Language Modeling", + "text2text-generation": "Sequence-to-sequence Language Modeling", + "token-classification": "Token Classification", + "translation": "Translation", + "zero-shot-classification": "Zero Shot Classification", +} + + +METRIC_TAGS = [ + "accuracy", + "bleu", + "f1", + "matthews_correlation", + "pearsonr", + "precision", + "recall", + "rouge", + "sacrebleu", + "spearmanr", +] + + +def _listify(obj): + if obj is None: + return [] + elif isinstance(obj, str): + return [obj] + else: + return obj + + +def _list_possibilities(name, tags): + if tags is None: + return "" + if isinstance(tags, str): + tags = [tags] + if len(tags) == 0: + return "" + name_tags = [f"- {tag}" for tag in tags] + return f"{name}:\n" + "\n".join(name_tags) + "\n" + + +def infer_metric_tags_from_eval_results(eval_results): + if eval_results is None: + return {} + result = {} + for key in eval_results.keys(): + if key.lower().replace(" ", "_") in METRIC_TAGS: + result[key.lower().replace(" ", "_")] = key + elif key.lower() == "rouge1": + result["rouge"] = key + return result + + +@dataclass +class TrainingSummary: + model_name: str + language: Optional[Union[str, List[str]]] = None + license: Optional[str] = None + tags: Optional[Union[str, List[str]]] = None + finetuned_from: Optional[str] = None + dataset: Optional[Union[str, List[str]]] = None + dataset_tags: Optional[Union[str, List[str]]] = None + dataset_args: Optional[Union[str, List[str]]] = None + eval_results: Optional[Dict[str, float]] = None + eval_lines: Optional[List[str]] = None + hyperparameters: Optional[Dict[str, Any]] = None + + def __post_init__(self): + # Infer default license from the checkpoint used, if possible. 
+ if self.license is None and not is_offline_mode() and self.finetuned_from is not None: + try: + model_info = HfApi().model_info(self.finetuned_from) + for tag in model_info.tags: + if tag.startswith("license:"): + self.license = tag[8:] + except requests.exceptions.HTTPError: + pass + + def create_model_index(self, metric_mapping): + model_index = f"model-index:\n- name: {self.model_name}\n" + + # Dataset mapping tag -> name + dataset_names = _listify(self.dataset) + dataset_tags = _listify(self.dataset_tags) + dataset_args = _listify(self.dataset_args) + if len(dataset_args) < len(dataset_tags): + dataset_args = dataset_args + [None] * (len(dataset_tags) - len(dataset_args)) + dataset_mapping = {tag: name for tag, name in zip(dataset_tags, dataset_names)} + dataset_arg_mapping = {tag: arg for tag, arg in zip(dataset_tags, dataset_args)} + + task_mapping = { + tag: TASK_TAG_TO_NAME_MAPPING[tag] for tag in _listify(self.tags) if tag in TASK_TAG_TO_NAME_MAPPING + } + + if len(task_mapping) == 0 and len(dataset_mapping) == 0: + return model_index + if len(task_mapping) == 0: + task_mapping = {None: None} + if len(dataset_mapping) == 0: + dataset_mapping = {None: None} + all_possibilities = [(task_tag, ds_tag) for task_tag in task_mapping for ds_tag in dataset_mapping] + + model_index += " results:\n" + for task_tag, ds_tag in all_possibilities: + result = "" + if task_tag is not None: + result += f" - task:\n name: {task_mapping[task_tag]}\n type: {task_tag}\n" + if ds_tag is not None: + prefix = " - " if task_tag is None else " " + result += f"{prefix}dataset:\n name: {dataset_mapping[ds_tag]}\n type: {ds_tag}\n" + if dataset_arg_mapping[ds_tag] is not None: + result += f" args: {dataset_arg_mapping[ds_tag]}\n" + if len(metric_mapping) > 0: + result += " metrics:\n" + for metric_tag, metric_name in metric_mapping.items(): + value = self.eval_results[metric_name] + result += f" - name: {metric_name}\n type: {metric_tag}\n value: {value}\n" + + model_index += result + + return model_index + + def to_model_card(self): + model_card = "" + + metric_mapping = infer_metric_tags_from_eval_results(self.eval_results) + + # Metadata + metadata = "" + metadata += _list_possibilities("language", self.language) + if self.license is not None: + metadata += f"license: {self.license}\n" + metadata += _list_possibilities("tags", self.tags) + metadata += _list_possibilities("datasets", self.dataset_tags) + metadata += _list_possibilities("metrics", list(metric_mapping.keys())) + metadata += "\n" + self.create_model_index(metric_mapping) + if len(metadata) > 0: + model_card = f"---\n{metadata}---\n" + + # Now the model card for realsies. + model_card += AUTOGENERATED_COMMENT + + model_card += f"\n# {self.model_name}\n\n" + + if self.finetuned_from is None: + model_card += "This model was trained from scratch on " + else: + model_card += f"This model is a fine-tuned version of [{self.finetuned_from}](https://huggingface.co/{self.finetuned_from}) on " + + if self.dataset is None: + model_card += "an unkown dataset." + else: + if isinstance(self.dataset, str): + model_card += f"the {self.dataset} dataset." + else: + model_card += ( + ", ".join([f"the {ds}" for ds in self.dataset[:-1]]) + f" and the {self.dataset[-1]} datasets." 
+ ) + + if self.eval_results is not None: + model_card += "\nIt achieves the following results on the evaluation set:\n" + model_card += "\n".join([f"- {name}: {_maybe_round(value)}" for name, value in self.eval_results.items()]) + model_card += "\n" + + model_card += "\n## Model description\n\nMore information needed\n" + model_card += "\n## Intended uses & limitations\n\nMore information needed\n" + model_card += "\n## Training and evaluation data\n\nMore information needed\n" + + model_card += "\n## Training procedure\n" + model_card += "\n### Training hyperparameters\n" + if self.hyperparameters is not None: + model_card += "\nThe following hyperparameters were used during training:\n" + model_card += "\n".join([f"- {name}: {value}" for name, value in self.hyperparameters.items()]) + model_card += "\n" + else: + model_card += "\nMore information needed\n" + + if self.eval_lines is not None: + model_card += "\n### Training results\n\n" + model_card += make_markdown_table(self.eval_lines) + model_card += "\n" + + model_card += "\n### Framework versions\n\n" + model_card += f"- Transformers {__version__}\n" + if is_torch_available(): + import torch + + model_card += f"- Pytorch {torch.__version__}\n" + if is_datasets_available(): + import datasets + + model_card += f"- Datasets {datasets.__version__}\n" + if is_tokenizers_available(): + import tokenizers + + model_card += f"- Tokenizers {tokenizers.__version__}\n" + + return model_card + + @classmethod + def from_trainer( + cls, + trainer, + language=None, + license=None, + tags=None, + model_name=None, + finetuned_from=None, + dataset_tags=None, + dataset=None, + dataset_args=None, + ): + # TODO (Sylvain) Add a default for `pipeline-tag` inferred from the model. + if model_name is None: + model_name = Path(trainer.args.output_dir).name + + _, eval_lines, eval_results = parse_log_history(trainer.state.log_history) + hyperparameters = extract_hyperparameters_from_trainer(trainer) + + return cls( + language=language, + license=license, + tags=tags, + model_name=model_name, + finetuned_from=finetuned_from, + dataset_tags=dataset_tags, + dataset=dataset, + dataset_args=dataset_args, + eval_results=eval_results, + eval_lines=eval_lines, + hyperparameters=hyperparameters, + ) + + +def parse_log_history(log_history): + """ + Parse the `log_history` of a Trainer to get the intermediate and final evaluation results. 
+ """ + idx = 0 + while idx < len(log_history) and "train_runtime" not in log_history[idx]: + idx += 1 + + # If there are no training logs + if idx == len(log_history): + idx -= 1 + while idx >= 0 and "eval_loss" not in log_history[idx]: + idx -= 1 + + if idx > 0: + return None, None, log_history[idx] + else: + return None, None, None + + # From now one we can assume we have training logs: + train_log = log_history[idx] + lines = [] + training_loss = "No log" + for i in range(idx): + if "loss" in log_history[i]: + training_loss = log_history[i]["loss"] + if "eval_loss" in log_history[i]: + metrics = log_history[i].copy() + _ = metrics.pop("total_flos", None) + epoch = metrics.pop("epoch", None) + step = metrics.pop("step", None) + _ = metrics.pop("eval_runtime", None) + _ = metrics.pop("eval_samples_per_second", None) + values = {"Training Loss": training_loss, "Epoch": epoch, "Step": step} + for k, v in metrics.items(): + if k == "eval_loss": + values["Validation Loss"] = v + else: + splits = k.split("_") + name = " ".join([part.capitalize() for part in splits[1:]]) + values[name] = v + lines.append(values) + + idx = len(log_history) - 1 + while idx >= 0 and "eval_loss" not in log_history[idx]: + idx -= 1 + + if idx > 0: + eval_results = {} + for key, value in log_history[idx].items(): + if key.startswith("eval_"): + key = key[5:] + if key not in ["runtime", "samples_per_second", "epoch", "step"]: + camel_cased_key = " ".join([part.capitalize() for part in key.split("_")]) + eval_results[camel_cased_key] = value + return train_log, lines, eval_results + else: + return train_log, lines, None + + +def _maybe_round(v, decimals=4): + if isinstance(v, float) and len(str(v).split(".")) > 1 and len(str(v).split(".")[1]) > decimals: + return f"{v:.{decimals}f}" + return str(v) + + +def _regular_table_line(values, col_widths): + values_with_space = [f"| {v}" + " " * (w - len(v) + 1) for v, w in zip(values, col_widths)] + return "".join(values_with_space) + "|\n" + + +def _second_table_line(col_widths): + values = ["|:" + "-" * w + ":" for w in col_widths] + return "".join(values) + "|\n" + + +def make_markdown_table(lines): + """ + Create a nice Markdown table from the results in `lines`. 
+ """ + if lines is None or len(lines) == 0: + return "" + col_widths = {key: len(str(key)) for key in lines[0].keys()} + for line in lines: + for key, value in line.items(): + if col_widths[key] < len(_maybe_round(value)): + col_widths[key] = len(_maybe_round(value)) + + table = _regular_table_line(list(lines[0].keys()), list(col_widths.values())) + table += _second_table_line(list(col_widths.values())) + for line in lines: + table += _regular_table_line([_maybe_round(v) for v in line.values()], list(col_widths.values())) + return table + + +_TRAINING_ARGS_KEYS = [ + "learning_rate", + "train_batch_size", + "eval_batch_size", + "seed", +] + + +def extract_hyperparameters_from_trainer(trainer): + hyperparameters = {k: getattr(trainer.args, k) for k in _TRAINING_ARGS_KEYS} + + if trainer.args.parallel_mode not in [ParallelMode.NOT_PARALLEL, ParallelMode.NOT_DISTRIBUTED]: + hyperparameters["distributed_type"] = ( + "multi-GPU" if trainer.args.parallel_mode == ParallelMode.DISTRIBUTED else trainer.args.parallel_mode.value + ) + if trainer.args.world_size > 1: + hyperparameters["num_devices"] = trainer.args.world_size + if trainer.args.gradient_accumulation_steps > 1: + hyperparameters["gradient_accumulation_steps"] = trainer.args.gradient_accumulation_steps + + total_train_batch_size = ( + trainer.args.train_batch_size * trainer.args.world_size * trainer.args.gradient_accumulation_steps + ) + if total_train_batch_size != hyperparameters["train_batch_size"]: + hyperparameters["total_train_batch_size"] = total_train_batch_size + total_eval_batch_size = trainer.args.eval_batch_size * trainer.args.world_size + if total_eval_batch_size != hyperparameters["eval_batch_size"]: + hyperparameters["total_eval_batch_size"] = total_eval_batch_size + + if trainer.args.adafactor: + hyperparameters["optimizer"] = "Adafactor" + else: + hyperparameters[ + "optimizer" + ] = f"Adam with betas=({trainer.args.adam_beta1},{trainer.args.adam_beta2}) and epsilon={trainer.args.adam_epsilon}" + + hyperparameters["lr_scheduler_type"] = trainer.args.lr_scheduler_type.value + if trainer.args.warmup_ratio != 0.0: + hyperparameters["lr_scheduler_warmup_ratio"] = trainer.args.warmup_ratio + if trainer.args.warmup_steps != 0.0: + hyperparameters["lr_scheduler_warmup_steps"] = trainer.args.warmup_steps + if trainer.args.max_steps != -1: + hyperparameters["training_steps"] = trainer.args.max_steps + else: + hyperparameters["num_epochs"] = trainer.args.num_train_epochs + + if trainer.args.fp16: + if trainer.use_amp: + hyperparameters["mixed_precision_training"] = "Native AMP" + elif trainer._use_apex: + hyperparameters["mixed_precision_training"] = f"Apex, opt level {trainer.args.fp16_opt_level}" + + if trainer.args.label_smoothing_factor != 0.0: + hyperparameters["label_smoothing_factor"] = trainer.args.label_smoothing_factor + + return hyperparameters diff --git a/src/transformers/pipelines/__init__.py b/src/transformers/pipelines/__init__.py index 09b8e58a91664d..67061060aad0f8 100755 --- a/src/transformers/pipelines/__init__.py +++ b/src/transformers/pipelines/__init__.py @@ -22,7 +22,6 @@ from ..configuration_utils import PretrainedConfig from ..feature_extraction_utils import PreTrainedFeatureExtractor from ..file_utils import is_tf_available, is_torch_available -from ..modelcard import ModelCard from ..models.auto.configuration_auto import AutoConfig from ..models.auto.feature_extraction_auto import FEATURE_EXTRACTOR_MAPPING, AutoFeatureExtractor from ..models.auto.tokenization_auto import TOKENIZER_MAPPING, AutoTokenizer 
@@ -384,12 +383,6 @@ def pipeline( model = get_default_model(targeted_task, framework, task_options) model_name = model if isinstance(model, str) else None - modelcard = None - # Try to infer modelcard from model or config name (if provided as str) - if isinstance(model, str): - modelcard = model - elif isinstance(config, str): - modelcard = config # Infer the framework form the model if framework is None: @@ -404,10 +397,6 @@ def pipeline( if isinstance(config, str): config = AutoConfig.from_pretrained(config, revision=revision, _from_pipeline=task, **model_kwargs) - # Instantiate modelcard if needed - if isinstance(modelcard, str): - modelcard = ModelCard.from_pretrained(modelcard, revision=revision, _from_pipeline=task) - # Instantiate model if needed if isinstance(model, str): # Handle transparent TF/PT model conversion @@ -504,10 +493,4 @@ def pipeline( if feature_extractor is not None: kwargs["feature_extractor"] = feature_extractor - return task_class( - model=model, - modelcard=modelcard, - framework=framework, - task=task, - **kwargs, - ) + return task_class(model=model, framework=framework, task=task, **kwargs) diff --git a/src/transformers/trainer.py b/src/transformers/trainer.py index fb9c37725a2b7f..934b55d0c09139 100755 --- a/src/transformers/trainer.py +++ b/src/transformers/trainer.py @@ -74,6 +74,7 @@ is_torch_tpu_available, is_training_run_on_sagemaker, ) +from .modelcard import TrainingSummary from .modeling_utils import PreTrainedModel, unwrap_model from .optimization import Adafactor, AdamW, get_scheduler from .tokenization_utils_base import PreTrainedTokenizerBase @@ -2381,25 +2382,49 @@ def floating_point_ops(self, inputs: Dict[str, Union[torch.Tensor, Any]]): else: return 0 + def create_model_card( + self, + language: Optional[str] = None, + license: Optional[str] = None, + tags: Optional[str] = None, + model_name: Optional[str] = None, + finetuned_from: Optional[str] = None, + dataset_tags: Optional[Union[str, List[str]]] = None, + dataset: Optional[Union[str, List[str]]] = None, + dataset_args: Optional[Union[str, List[str]]] = None, + ): + training_summary = TrainingSummary.from_trainer( + self, + language=language, + license=license, + tags=tags, + model_name=model_name, + finetuned_from=finetuned_from, + dataset_tags=dataset_tags, + dataset=dataset, + dataset_args=dataset_args, + ) + model_card = training_summary.to_model_card() + with open(os.path.join(self.args.output_dir, "README.md"), "w") as f: + f.write(model_card) + def push_to_hub( self, - save_directory: Optional[str] = None, repo_name: Optional[str] = None, repo_url: Optional[str] = None, commit_message: Optional[str] = "add model", organization: Optional[str] = None, private: bool = None, use_auth_token: Optional[Union[bool, str]] = None, + **kwargs, ): """ Upload `self.model` to the 🤗 model hub. Parameters: - save_directory (:obj:`str` or :obj:`os.PathLike`): - Folder containing the model weights and config. Will default to :obj:`self.args.output_dir`. repo_name (:obj:`str`, `optional`): - Repository name for your model or tokenizer in the hub. If not specified, the repository name will be - the stem of :obj:`save_directory`. + Repository name for your model or tokenizer in the hub. If not specified and :obj:`repo_url` is not + specified either, will default to the stem of :obj:`self.args.output_dir`. repo_url (:obj:`str`, `optional`): Specify this in case you want to push to an existing repository in the hub. 
If unspecified, a new repository will be created in your namespace (unless you specify an :obj:`organization`) with @@ -2415,6 +2440,8 @@ def push_to_hub( The token to use as HTTP bearer authorization for remote files. If :obj:`True`, will use the token generated when running :obj:`transformers-cli login` (stored in :obj:`~/.huggingface`). Will default to :obj:`True` if :obj:`repo_url` is not specified. + kwargs: + Additional keyword arguments passed along to :meth:`~transformers.Trainer.create_model_card`. Returns: The url of the commit of your model in the given repository. @@ -2426,15 +2453,23 @@ def push_to_hub( raise ValueError( "The `upload_model_to_hub` method only works for models that inherit from `PushToHubMixin` models." ) - if save_directory is None: - save_directory = self.args.output_dir - # To avoid pushing all checkpoints, we just copy all the files in save_directory in a tmp dir. + if repo_url is None and repo_name is None: + repo_name = Path(self.args.output_dir).name + + if repo_name is not None: + model_name = repo_name + elif repo_url is not None: + model_name = repo_url.split("/")[-1] + else: + model_name = None + self.create_model_card(model_name=model_name, **kwargs) + with tempfile.TemporaryDirectory() as tmp_dir: - for f in os.listdir(save_directory): - fname = os.path.join(save_directory, f) - if os.path.isfile(fname): - shutil.copy(fname, os.path.join(tmp_dir, f)) + shutil.copy(os.path.join(self.args.output_dir, "README.md"), os.path.join(tmp_dir, "README.md")) + unwrap_model(self.model).save_pretrained(tmp_dir) + if self.tokenizer is not None: + self.tokenizer.save_pretrained(tmp_dir) return unwrap_model(self.model)._push_to_hub( save_directory=tmp_dir, diff --git a/tests/test_trainer.py b/tests/test_trainer.py index c040333a83bc5e..eca71a39fb71ca 100644 --- a/tests/test_trainer.py +++ b/tests/test_trainer.py @@ -1168,7 +1168,6 @@ def tearDownClass(cls): def test_push_to_hub(self): with tempfile.TemporaryDirectory() as tmp_dir: trainer = get_regression_trainer(output_dir=tmp_dir) - trainer.save_model() url = trainer.push_to_hub(repo_name="test-trainer", use_auth_token=self._token) # Extract repo_name from the url From d9b286272c7d20d2aff74a0ef28384071d0a18f5 Mon Sep 17 00:00:00 2001 From: Julien Plu Date: Tue, 11 May 2021 18:01:03 +0200 Subject: [PATCH 11/41] Fix TF Roberta for mixed precision training (#11675) --- src/transformers/models/roberta/modeling_tf_roberta.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/transformers/models/roberta/modeling_tf_roberta.py b/src/transformers/models/roberta/modeling_tf_roberta.py index e0b54e52ceafb3..6439d010412cf9 100644 --- a/src/transformers/models/roberta/modeling_tf_roberta.py +++ b/src/transformers/models/roberta/modeling_tf_roberta.py @@ -541,7 +541,9 @@ def call( # Since we are adding it to the raw scores before the softmax, this is # effectively the same as removing these entirely. 
extended_attention_mask = tf.cast(extended_attention_mask, dtype=embedding_output.dtype) - extended_attention_mask = tf.multiply(tf.subtract(1.0, extended_attention_mask), -10000.0) + one_cst = tf.constant(1.0, dtype=embedding_output.dtype) + ten_thousand_cst = tf.constant(-10000.0, dtype=embedding_output.dtype) + extended_attention_mask = tf.multiply(tf.subtract(one_cst, extended_attention_mask), ten_thousand_cst) # Prepare head mask if needed # 1.0 in head_mask indicate we keep the head From f13f1f8fb8a89d6405b49b0981c5350ebd52430c Mon Sep 17 00:00:00 2001 From: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> Date: Tue, 11 May 2021 12:02:48 -0400 Subject: [PATCH 12/41] Test checkpointing (#11682) * Add test and see where CI is unhappy * Load with strict=False --- src/transformers/trainer.py | 13 ++++++++++++- tests/test_modeling_common.py | 7 +++++++ 2 files changed, 19 insertions(+), 1 deletion(-) diff --git a/src/transformers/trainer.py b/src/transformers/trainer.py index 934b55d0c09139..8d79fe14ec9229 100755 --- a/src/transformers/trainer.py +++ b/src/transformers/trainer.py @@ -1059,7 +1059,18 @@ def train( # We load the model state dict on the CPU to avoid an OOM error. state_dict = torch.load(os.path.join(resume_from_checkpoint, WEIGHTS_NAME), map_location="cpu") # If the model is on the GPU, it still works! - self.model.load_state_dict(state_dict) + load_result = self.model.load_state_dict(state_dict, strict=False) + if len(load_result.missing_keys) != 0: + if load_result.missing_keys == self.model._keys_to_ignore_on_save: + self.model.tie_weights() + else: + logger.warn( + f"There were missing keys in the checkpoint model loaded: {load_result.missing_keys}." + ) + if len(load_result.unexpected_keys) != 0: + logger.warn( + f"There were unexpected keys in the checkpoint model loaded: {load_result.unexpected_keys}." + ) # If model was re-initialized, put it on the right device and update self.model_wrapped if model_reloaded: diff --git a/tests/test_modeling_common.py b/tests/test_modeling_common.py index 19469075adca8c..00b8080ff908b0 100755 --- a/tests/test_modeling_common.py +++ b/tests/test_modeling_common.py @@ -177,6 +177,13 @@ def test_save_load__keys_to_ignore_on_save(self): for k in _keys_to_ignore_on_save: self.assertNotIn(k, state_dict_saved) + # Test we can load the state dict in the model, necessary for the checkpointing API in Trainer. 
+ load_result = model.load_state_dict(state_dict_saved, strict=False) + self.assertTrue( + len(load_result.missing_keys) == 0 or load_result.missing_keys == model._keys_to_ignore_on_save + ) + self.assertTrue(len(load_result.unexpected_keys) == 0) + def _mock_init_weights(self, module): if hasattr(module, "weight") and module.weight is not None: module.weight.data.fill_(3) From 4ce6bcc31095ddb8d4cdd79831217f200c53e801 Mon Sep 17 00:00:00 2001 From: Marc van Zee Date: Tue, 11 May 2021 20:02:59 +0200 Subject: [PATCH 13/41] Adds Flax BERT finetuning example on GLUE (#11564) * Adds Flax BERT finetuning example * fix traced jax tensor type * Use Optax losses and learning schedulers * Add 1GPU training results * merge into master & make style * fix input * del file * Fix bug in loss and add torch runs * finish bert flax fine-tune * Update examples/flax/text-classification/README.md * Update examples/flax/text-classification/run_flax_glue.py * add requirements * finalize * finalize Co-authored-by: Patrick von Platen Co-authored-by: Patrick von Platen --- examples/flax/text-classification/README.md | 96 ++++ .../flax/text-classification/requirements.txt | 5 + .../flax/text-classification/run_flax_glue.py | 517 ++++++++++++++++++ 3 files changed, 618 insertions(+) create mode 100644 examples/flax/text-classification/README.md create mode 100644 examples/flax/text-classification/requirements.txt create mode 100755 examples/flax/text-classification/run_flax_glue.py diff --git a/examples/flax/text-classification/README.md b/examples/flax/text-classification/README.md new file mode 100644 index 00000000000000..cdb0c905c7957a --- /dev/null +++ b/examples/flax/text-classification/README.md @@ -0,0 +1,96 @@ + + +# Text classification examples + +## GLUE tasks + +Based on the script [`run_flax_glue.py`](https://github.com/huggingface/transformers/blob/master/examples/flax/text-classification/run_flax_glue.py). + +Fine-tuning the library models for sequence classification on the GLUE benchmark: [General Language Understanding +Evaluation](https://gluebenchmark.com/). This script can fine-tune any of the models on the [hub](https://huggingface.co/models). + +GLUE is made up of a total of 9 different tasks. Here is how to run the script on one of them: + +```bash +export TASK_NAME=mrpc + +python run_flax_glue.py \ + --model_name_or_path bert-base-cased \ + --task_name $TASK_NAME \ + --max_length 128 \ + --learning_rate 2e-5 \ + --num_train_epochs 3 \ + --per_device_train_batch_size 4 \ + --output_dir /tmp/$TASK_NAME/ +``` + +where task name can be one of cola, mnli, mnli-mm, mrpc, qnli, qqp, rte, sst2, stsb, wnli. + +Using the command above, the script will train for 3 epochs and run eval after each epoch. +Metrics and hyperparameters are stored in Tensorflow event files in `---output_dir`. +You can see the results by running `tensorboard` in that directory: + +```bash +$ tensorboard --logdir . +``` + +### Accuracy Evaluation + +We train five replicas and report mean accuracy and stdev on the dev set below. 
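To make the aggregation concrete, here is a minimal, self-contained sketch of how the per-task mean and standard deviation can be computed from five per-seed dev-set scores. The accuracy values in it are invented placeholders, not the numbers reported in the table that follows.

```python
# Purely illustrative: aggregate dev-set accuracy over five seeded runs (seeds 1-5).
# The per-seed values are made-up placeholders.
import statistics

per_seed_accuracy = {1: 0.910, 2: 0.921, 3: 0.915, 4: 0.909, 5: 0.918}

mean_acc = statistics.mean(per_seed_accuracy.values())
stdev_acc = statistics.stdev(per_seed_accuracy.values())
print(f"Acc (avg/5runs): {100 * mean_acc:.2f}, Stdev: {100 * stdev_acc:.2f}")
```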
+We use the settings as in the command above (with an exception for MRPC and
+WNLI, which are tiny and where we used 5 epochs instead of 3), and we use a total
+train batch size of 32 (we train on 8 Cloud v3 TPUs, so a per-device batch size of 4).
+
+On the tasks other than MRPC and WNLI we train for 3 epochs because this is the standard,
+but looking at the training curves of some of them (e.g., SST-2, STS-B), it appears the models
+are undertrained and we could get better results when training longer.
+
+In the Tensorboard results linked below, the random seed of each model is equal to the ID of the run. So in order to reproduce run 1, run the command above with `--seed=1`. The best run used random seed 2, which is the default in the script. The results of all runs are in [this Google Sheet](https://docs.google.com/spreadsheets/d/1zKL_xn32HwbxkFMxB3ftca-soTHAuBFgIhYhOhCnZ4E/edit?usp=sharing).
+
+
+| Task  | Metric                 | Acc (best run) | Acc (avg/5runs) | Stdev     | Metrics                                                                        |
+|-------|------------------------|----------------|-----------------|-----------|--------------------------------------------------------------------------------|
+| CoLA  | Matthew's corr         | 59.57          | 58.04           | 1.81      | [tensorboard.dev](https://tensorboard.dev/experiment/f4OvQpWtRq6CvddpxGBd0A/)  |
+| SST-2 | Accuracy               | 92.43          | 91.79           | 0.59      | [tensorboard.dev](https://tensorboard.dev/experiment/BYFwa49MRTaLIn93DgAEtA/)  |
+| MRPC  | F1/Accuracy            | 89.50/84.8     | 88.70/84.02     | 0.56/0.48 | [tensorboard.dev](https://tensorboard.dev/experiment/9ZWH5xwXRS6zEEUE4RaBhQ/)  |
+| STS-B | Pearson/Spearman corr. | 90.00/88.71    | 89.09/88.61     | 0.51/0.07 | [tensorboard.dev](https://tensorboard.dev/experiment/mUlI5B9QQ0WGEJip7p3Tng/)  |
+| QQP   | Accuracy/F1            | 90.88/87.64    | 90.75/87.53     | 0.11/0.13 | [tensorboard.dev](https://tensorboard.dev/experiment/pO6h75L3SvSXSWRcgljXKA/)  |
+| MNLI  | Matched acc.           | 84.06          | 83.88           | 0.16      | [tensorboard.dev](https://tensorboard.dev/experiment/LKwaOH18RMuo7nJkESrpKg/)  |
+| QNLI  | Accuracy               | 91.01          | 90.86           | 0.18      | [tensorboard.dev](https://tensorboard.dev/experiment/qesXxNcaQhmKxPmbw1sOoA/)  |
+| RTE   | Accuracy               | 66.80          | 65.27           | 1.07      | [tensorboard.dev](https://tensorboard.dev/experiment/Z84xC0r6RjyzT4SLqiAbzQ/)  |
+| WNLI  | Accuracy               | 39.44          | 32.96           | 5.85      | [tensorboard.dev](https://tensorboard.dev/experiment/gV73w9v0RIKrqVw32PZbAQ/)  |
+
+Some of these results are significantly different from the ones reported on the test set of the GLUE benchmark on the
+website. For QQP and WNLI, please refer to [FAQ #12](https://gluebenchmark.com/faq) on the website.
+
+### Runtime evaluation
+
+We also ran each task once on a single V100 GPU, 8 V100 GPUs, and 8 Cloud v3 TPUs and report the
+overall training time below. For comparison we ran Pytorch's [run_glue.py](https://github.com/huggingface/transformers/blob/master/examples/pytorch/text-classification/run_glue.py) on a single GPU (last column).
+ + +| Task | 8 TPU | 8 GPU | 1 GPU | 1 GPU (Pytorch) | +|-------|---------|---------|------------|-----------------| +| CoLA | 1m 46s | 1m 26s | 3m 6s | 4m 6s | +| SST-2 | 5m 30s | 6m 28s | 22m 6s | 34m 37s | +| MRPC | 1m 32s | 1m 14s | 2m 17s | 2m 56s | +| STS-B | 1m 33s | 1m 12s | 2m 11s | 2m 48s | +| QQP | 24m 40s | 31m 48s | 1h 20m 15s | 2h 54m | +| MNLI | 26m 30s | 33m 55s | 2h 7m 30s | 3u 7m 6s | +| QNLI | 8m | 9m 40s | 34m 20s | 49m 8s | +| RTE | 1m 21s | 55s | 1m 8s | 1m 16s | +| WNLI | 1m 12s | 48s | 38s | 36s | diff --git a/examples/flax/text-classification/requirements.txt b/examples/flax/text-classification/requirements.txt new file mode 100644 index 00000000000000..f428e9cccbe12d --- /dev/null +++ b/examples/flax/text-classification/requirements.txt @@ -0,0 +1,5 @@ +datasets >= 1.1.3 +jax>=0.2.8 +jaxlib>=0.1.59 +git+https://github.com/google/flax.git +git+https://github.com/deepmind/optax.git diff --git a/examples/flax/text-classification/run_flax_glue.py b/examples/flax/text-classification/run_flax_glue.py new file mode 100755 index 00000000000000..217b7bdc382463 --- /dev/null +++ b/examples/flax/text-classification/run_flax_glue.py @@ -0,0 +1,517 @@ +#!/usr/bin/env python +# coding=utf-8 +# Copyright 2021 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" Finetuning a 🤗 Flax Transformers model for sequence classification on GLUE.""" +import argparse +import logging +import os +import random +import time +from itertools import chain +from typing import Any, Callable, Dict, Tuple + +import datasets +from datasets import load_dataset, load_metric + +import jax +import jax.numpy as jnp +import optax +import transformers +from flax import linen as nn +from flax import struct, traverse_util +from flax.jax_utils import replicate, unreplicate +from flax.metrics import tensorboard +from flax.training import train_state +from flax.training.common_utils import get_metrics, onehot, shard, shard_prng_key +from transformers import AutoConfig, AutoTokenizer, FlaxAutoModelForSequenceClassification, PretrainedConfig + + +logger = logging.getLogger(__name__) + +Array = Any +Dataset = datasets.arrow_dataset.Dataset +PRNGKey = Any + + +task_to_keys = { + "cola": ("sentence", None), + "mnli": ("premise", "hypothesis"), + "mrpc": ("sentence1", "sentence2"), + "qnli": ("question", "sentence"), + "qqp": ("question1", "question2"), + "rte": ("sentence1", "sentence2"), + "sst2": ("sentence", None), + "stsb": ("sentence1", "sentence2"), + "wnli": ("sentence1", "sentence2"), +} + + +def parse_args(): + parser = argparse.ArgumentParser(description="Finetune a transformers model on a text classification task") + parser.add_argument( + "--task_name", + type=str, + default=None, + help="The name of the glue task to train on.", + choices=list(task_to_keys.keys()), + ) + parser.add_argument( + "--train_file", type=str, default=None, help="A csv or a json file containing the training data." 
+ ) + parser.add_argument( + "--validation_file", type=str, default=None, help="A csv or a json file containing the validation data." + ) + parser.add_argument( + "--max_length", + type=int, + default=128, + help=( + "The maximum total input sequence length after tokenization. Sequences longer than this will be truncated," + " sequences shorter will be padded." + ), + ) + parser.add_argument( + "--model_name_or_path", + type=str, + help="Path to pretrained model or model identifier from huggingface.co/models.", + required=True, + ) + parser.add_argument( + "--use_slow_tokenizer", + action="store_true", + help="If passed, will use a slow tokenizer (not backed by the 🤗 Tokenizers library).", + ) + parser.add_argument( + "--per_device_train_batch_size", + type=int, + default=8, + help="Batch size (per device) for the training dataloader.", + ) + parser.add_argument( + "--per_device_eval_batch_size", + type=int, + default=8, + help="Batch size (per device) for the evaluation dataloader.", + ) + parser.add_argument( + "--learning_rate", + type=float, + default=5e-5, + help="Initial learning rate (after the potential warmup period) to use.", + ) + parser.add_argument("--weight_decay", type=float, default=0.0, help="Weight decay to use.") + parser.add_argument("--num_train_epochs", type=int, default=3, help="Total number of training epochs to perform.") + parser.add_argument( + "--max_train_steps", + type=int, + default=None, + help="Total number of training steps to perform. If provided, overrides num_train_epochs.", + ) + parser.add_argument( + "--gradient_accumulation_steps", + type=int, + default=1, + help="Number of updates steps to accumulate before performing a backward/update pass.", + ) + parser.add_argument( + "--num_warmup_steps", type=int, default=0, help="Number of steps for the warmup in the lr scheduler." + ) + parser.add_argument("--output_dir", type=str, default=None, help="Where to store the final model.") + parser.add_argument("--seed", type=int, default=2, help="A seed for reproducible training.") + args = parser.parse_args() + + # Sanity checks + if args.task_name is None and args.train_file is None and args.validation_file is None: + raise ValueError("Need either a task name or a training/validation file.") + else: + if args.train_file is not None: + extension = args.train_file.split(".")[-1] + assert extension in ["csv", "json"], "`train_file` should be a csv or a json file." + if args.validation_file is not None: + extension = args.validation_file.split(".")[-1] + assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file." + + if args.output_dir is not None: + os.makedirs(args.output_dir, exist_ok=True) + + return args + + +def create_train_state( + model: FlaxAutoModelForSequenceClassification, + learning_rate_fn: Callable[[int], float], + is_regression: bool, + num_labels: int, +) -> train_state.TrainState: + """Create initial training state.""" + + class TrainState(train_state.TrainState): + """Train state with an Optax optimizer. + + The two functions below differ depending on whether the task is classification + or regression. + + Args: + logits_fn: Applied to last layer to obtain the logits. + loss_fn: Function to compute the loss. + """ + + logits_fn: Callable = struct.field(pytree_node=False) + loss_fn: Callable = struct.field(pytree_node=False) + + # Creates a multi-optimizer consisting of two "Adam with weight decay" optimizers. 
+ def adamw(weight_decay): + return optax.adamw(learning_rate=learning_rate_fn, b1=0.9, b2=0.999, eps=1e-6, weight_decay=weight_decay) + + def traverse(fn): + def mask(data): + flat = traverse_util.flatten_dict(data) + return traverse_util.unflatten_dict({k: fn(k, v) for k, v in flat.items()}) + + return mask + + # We use Optax's "masking" functionality to create a multi-optimizer, one + # with weight decay and the other without. Note masking means the optimizer + # will ignore these paths. + decay_path = lambda p: not any(x in p for x in ["bias", "LayerNorm.weight"]) # noqa: E731 + + tx = optax.chain( + optax.masked(adamw(0.0), mask=traverse(lambda path, _: decay_path(path))), + optax.masked(adamw(0.01), mask=traverse(lambda path, _: not decay_path(path))), + ) + + if is_regression: + + def mse_loss(logits, labels): + return jnp.mean((logits[..., 0] - labels) ** 2) + + return TrainState.create( + apply_fn=model.__call__, + params=model.params, + tx=tx, + logits_fn=lambda logits: logits[..., 0], + loss_fn=mse_loss, + ) + else: # Classification. + + def cross_entropy_loss(logits, labels): + logits = nn.log_softmax(logits) + xentropy = optax.softmax_cross_entropy(logits, onehot(labels, num_classes=num_labels)) + return jnp.mean(xentropy) + + return TrainState.create( + apply_fn=model.__call__, + params=model.params, + tx=tx, + logits_fn=lambda logits: logits.argmax(-1), + loss_fn=cross_entropy_loss, + ) + + +def create_learning_rate_fn( + train_ds_size: int, train_batch_size: int, num_train_epochs: int, num_warmup_steps: int, learning_rate: float +) -> Callable[[int], jnp.array]: + """Returns a linear warmup, linear_decay learning rate function.""" + steps_per_epoch = train_ds_size // train_batch_size + num_train_steps = steps_per_epoch * num_train_epochs + warmup_fn = optax.linear_schedule(init_value=0.0, end_value=learning_rate, transition_steps=num_warmup_steps) + decay_fn = optax.linear_schedule( + init_value=learning_rate, end_value=0, transition_steps=num_train_steps - num_warmup_steps + ) + schedule_fn = optax.join_schedules(schedules=[warmup_fn, decay_fn], boundaries=[num_warmup_steps]) + return schedule_fn + + +def glue_train_data_collator(rng: PRNGKey, dataset: Dataset, batch_size: int): + """Returns shuffled batches of size `batch_size` from truncated `train dataset`, sharded over all local devices.""" + steps_per_epoch = len(dataset) // batch_size + perms = jax.random.permutation(rng, len(dataset)) + perms = perms[: steps_per_epoch * batch_size] # Skip incomplete batch. + perms = perms.reshape((steps_per_epoch, batch_size)) + + for perm in perms: + batch = dataset[perm] + batch = {k: jnp.array(v) for k, v in batch.items()} + batch = shard(batch) + + yield batch + + +def glue_eval_data_collator(dataset: Dataset, batch_size: int): + """Returns batches of size `batch_size` from `eval dataset`, sharded over all local devices.""" + for i in range(len(dataset) // batch_size): + batch = dataset[i * batch_size : (i + 1) * batch_size] + batch = {k: jnp.array(v) for k, v in batch.items()} + batch = shard(batch) + + yield batch + + +def main(): + args = parse_args() + + # Make one log on every process with the configuration for debugging. + logging.basicConfig( + format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", + datefmt="%m/%d/%Y %H:%M:%S", + level=logging.INFO, + ) + # Setup logging, we only want one process per machine to log things on the screen. 
+ logger.setLevel(logging.INFO if jax.process_index() == 0 else logging.ERROR) + if jax.process_index() == 0: + datasets.utils.logging.set_verbosity_warning() + transformers.utils.logging.set_verbosity_info() + else: + datasets.utils.logging.set_verbosity_error() + transformers.utils.logging.set_verbosity_error() + + # Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below) + # or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub). + + # For CSV/JSON files, this script will use as labels the column called 'label' and as pair of sentences the + # sentences in columns called 'sentence1' and 'sentence2' if such column exists or the first two columns not named + # label if at least two columns are provided. + + # If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this + # single column. You can easily tweak this behavior (see below) + + # In distributed training, the load_dataset function guarantee that only one local process can concurrently + # download the dataset. + if args.task_name is not None: + # Downloading and loading a dataset from the hub. + raw_datasets = load_dataset("glue", args.task_name) + else: + # Loading the dataset from local csv or json file. + data_files = {} + if args.train_file is not None: + data_files["train"] = args.train_file + if args.validation_file is not None: + data_files["validation"] = args.validation_file + extension = (args.train_file if args.train_file is not None else args.valid_file).split(".")[-1] + raw_datasets = load_dataset(extension, data_files=data_files) + # See more about loading any type of standard or custom dataset at + # https://huggingface.co/docs/datasets/loading_datasets.html. + + # Labels + if args.task_name is not None: + is_regression = args.task_name == "stsb" + if not is_regression: + label_list = raw_datasets["train"].features["label"].names + num_labels = len(label_list) + else: + num_labels = 1 + else: + # Trying to have good defaults here, don't hesitate to tweak to your needs. + is_regression = raw_datasets["train"].features["label"].dtype in ["float32", "float64"] + if is_regression: + num_labels = 1 + else: + # A useful fast method: + # https://huggingface.co/docs/datasets/package_reference/main_classes.html#datasets.Dataset.unique + label_list = raw_datasets["train"].unique("label") + label_list.sort() # Let's sort it for determinism + num_labels = len(label_list) + + # Load pretrained model and tokenizer + config = AutoConfig.from_pretrained(args.model_name_or_path, num_labels=num_labels, finetuning_task=args.task_name) + tokenizer = AutoTokenizer.from_pretrained(args.model_name_or_path, use_fast=not args.use_slow_tokenizer) + model = FlaxAutoModelForSequenceClassification.from_pretrained(args.model_name_or_path, config=config) + + # Preprocessing the datasets + if args.task_name is not None: + sentence1_key, sentence2_key = task_to_keys[args.task_name] + else: + # Again, we try to have some nice defaults but don't hesitate to tweak to your use case. 
+ non_label_column_names = [name for name in raw_datasets["train"].column_names if name != "label"] + if "sentence1" in non_label_column_names and "sentence2" in non_label_column_names: + sentence1_key, sentence2_key = "sentence1", "sentence2" + else: + if len(non_label_column_names) >= 2: + sentence1_key, sentence2_key = non_label_column_names[:2] + else: + sentence1_key, sentence2_key = non_label_column_names[0], None + + # Some models have set the order of the labels to use, so let's make sure we do use it. + label_to_id = None + if ( + model.config.label2id != PretrainedConfig(num_labels=num_labels).label2id + and args.task_name is not None + and not is_regression + ): + # Some have all caps in their config, some don't. + label_name_to_id = {k.lower(): v for k, v in model.config.label2id.items()} + if list(sorted(label_name_to_id.keys())) == list(sorted(label_list)): + logger.info( + f"The configuration of the model provided the following label correspondence: {label_name_to_id}. " + "Using it!" + ) + label_to_id = {i: label_name_to_id[label_list[i]] for i in range(num_labels)} + else: + logger.warning( + "Your model seems to have been trained with labels, but they don't match the dataset: ", + f"model labels: {list(sorted(label_name_to_id.keys()))}, dataset labels: {list(sorted(label_list))}." + "\nIgnoring the model labels as a result.", + ) + elif args.task_name is None: + label_to_id = {v: i for i, v in enumerate(label_list)} + + def preprocess_function(examples): + # Tokenize the texts + texts = ( + (examples[sentence1_key],) if sentence2_key is None else (examples[sentence1_key], examples[sentence2_key]) + ) + result = tokenizer(*texts, padding="max_length", max_length=args.max_length, truncation=True) + + if "label" in examples: + if label_to_id is not None: + # Map labels to IDs (not necessary for GLUE tasks) + result["labels"] = [label_to_id[l] for l in examples["label"]] + else: + # In all cases, rename the column to labels because the model will expect that. 
+ result["labels"] = examples["label"] + return result + + processed_datasets = raw_datasets.map( + preprocess_function, batched=True, remove_columns=raw_datasets["train"].column_names + ) + + train_dataset = processed_datasets["train"] + eval_dataset = processed_datasets["validation_matched" if args.task_name == "mnli" else "validation"] + + # Log a few random samples from the training set: + for index in random.sample(range(len(train_dataset)), 3): + logger.info(f"Sample {index} of the training set: {train_dataset[index]}.") + + # Define a summary writer + summary_writer = tensorboard.SummaryWriter(args.output_dir) + summary_writer.hparams(vars(args)) + + def write_metric(train_metrics, eval_metrics, train_time, step): + summary_writer.scalar("train_time", train_time, step) + + train_metrics = get_metrics(train_metrics) + for key, vals in train_metrics.items(): + tag = f"train_{key}" + for i, val in enumerate(vals): + summary_writer.scalar(tag, val, step - len(vals) + i + 1) + + for metric_name, value in eval_metrics.items(): + summary_writer.scalar(f"eval_{metric_name}", value, step) + + num_epochs = int(args.num_train_epochs) + rng = jax.random.PRNGKey(args.seed) + + train_batch_size = args.per_device_train_batch_size * jax.local_device_count() + eval_batch_size = args.per_device_eval_batch_size * jax.local_device_count() + + learning_rate_fn = create_learning_rate_fn( + len(train_dataset), train_batch_size, args.num_train_epochs, args.num_warmup_steps, args.learning_rate + ) + + state = create_train_state(model, learning_rate_fn, is_regression, num_labels=num_labels) + + # define step functions + def train_step( + state: train_state.TrainState, batch: Dict[str, Array], dropout_rng: PRNGKey + ) -> Tuple[train_state.TrainState, float]: + """Trains model with an optimizer (both in `state`) on `batch`, returning a pair `(new_state, loss)`.""" + targets = batch.pop("labels") + + def loss_fn(params): + logits = state.apply_fn(**batch, params=params, dropout_rng=dropout_rng, train=True)[0] + loss = state.loss_fn(logits, targets) + return loss, logits + + grad_fn = jax.value_and_grad(loss_fn, has_aux=True) + (loss, logits), grad = grad_fn(state.params) + grad = jax.lax.pmean(grad, "batch") + new_state = state.apply_gradients(grads=grad) + metrics = jax.lax.pmean({"loss": loss, "learning_rate": learning_rate_fn(state.step)}, axis_name="batch") + return new_state, metrics + + p_train_step = jax.pmap(train_step, axis_name="batch", donate_argnums=(0,)) + + def eval_step(state, batch): + logits = state.apply_fn(**batch, params=state.params, train=False)[0] + return state.logits_fn(logits) + + p_eval_step = jax.pmap(eval_step, axis_name="batch") + + if args.task_name is not None: + metric = load_metric("glue", args.task_name) + else: + metric = load_metric("accuracy") + + logger.info(f"===== Starting training ({num_epochs} epochs) =====") + train_time = 0 + + for epoch in range(1, num_epochs + 1): + logger.info(f"Epoch {epoch}") + logger.info(" Training...") + + # make sure weights are replicated on each device + state = replicate(state) + + train_start = time.time() + train_metrics = [] + rng, input_rng, dropout_rng = jax.random.split(rng, 3) + + # train + for batch in glue_train_data_collator(input_rng, train_dataset, train_batch_size): + dropout_rngs = shard_prng_key(dropout_rng) + state, metrics = p_train_step(state, batch, dropout_rngs) + train_metrics.append(metrics) + train_time += time.time() - train_start + logger.info(f" Done! 
Training metrics: {unreplicate(metrics)}") + + logger.info(" Evaluating...") + rng, input_rng = jax.random.split(rng) + + # evaluate + for batch in glue_eval_data_collator(eval_dataset, eval_batch_size): + labels = batch.pop("labels") + predictions = p_eval_step(state, batch) + metric.add_batch(predictions=chain(*predictions), references=chain(*labels)) + + # evaluate also on leftover examples (not divisible by batch_size) + num_leftover_samples = len(eval_dataset) % eval_batch_size + + # make sure leftover batch is evaluated on one device + if num_leftover_samples > 0 and jax.process_index() == 0: + # put weights on single device + state = unreplicate(state) + + # take leftover samples + batch = eval_dataset[-num_leftover_samples:] + batch = {k: jnp.array(v) for k, v in batch.items()} + + labels = batch.pop("labels") + predictions = eval_step(state, batch) + metric.add_batch(predictions=predictions, references=labels) + + eval_metric = metric.compute() + logger.info(f" Done! Eval metrics: {eval_metric}") + + cur_step = epoch * (len(train_dataset) // train_batch_size) + write_metric(train_metrics, eval_metric, train_time, cur_step) + + # save last checkpoint + if jax.process_index() == 0: + params = jax.device_get(jax.tree_map(lambda x: x[0], state.params)) + model.save_pretrained(args.output_dir, params=params) + + +if __name__ == "__main__": + main() From 8719afa1adc004011a34a34e374b819b5963f23b Mon Sep 17 00:00:00 2001 From: Suraj Patil Date: Wed, 12 May 2021 13:48:15 +0530 Subject: [PATCH 14/41] CLIP (#11445) * begin second draft * fix import, style * add loss * fix embeds, logits_scale, and projection * fix imports * add conversion script * add feature_extractor and processor * style * add tests for tokenizer, extractor and processor * add vision model tests * add weight init * add more tests * fix save_load test * model output, dosstrings, causal mask * config doc * add clip model tests * return dict * bigin integration test * add integration tests * fix-copies * fix init * Clip => CLIP * fix module name * docs * fix doc * output_dim => projection_dim * fix checkpoint names * remoe fast tokenizer file * fix conversion script * fix tests, quality * put causal mask on device * Apply suggestions from code review Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> * fix attribute test * style * address sylvains comments * style * fix docstrings * add qucik_gelu in activations, docstrings * clean-up attention test * fix act fun * fix config * fix torchscript tests * even batch_size * remove comment * fix ouput tu_tuple * fix save load tests * fix add tokens test * add fast tokenizer * update copyright * new processor API * fix docs * docstrings * docs * fix doc * fix doc * fix tokenizer * fix import in doc example * Apply suggestions from code review Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> * check types of config * valhalla => openai * load image using url * fix test * typo Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> --- README.md | 1 + docs/source/index.rst | 97 +- docs/source/model_doc/clip.rst | 154 +++ src/transformers/__init__.py | 36 + src/transformers/activations.py | 5 + src/transformers/convert_slow_tokenizer.py | 24 + src/transformers/models/__init__.py | 1 + .../models/auto/configuration_auto.py | 4 + src/transformers/models/auto/modeling_auto.py | 3 + src/transformers/models/clip/__init__.py | 82 ++ .../models/clip/configuration_clip.py | 282 ++++++ 
.../convert_clip_original_pytorch_to_hf.py | 148 +++ .../models/clip/feature_extraction_clip.py | 156 +++ src/transformers/models/clip/modeling_clip.py | 956 ++++++++++++++++++ .../models/clip/processing_clip.py | 171 ++++ .../models/clip/tokenization_clip.py | 371 +++++++ .../models/clip/tokenization_clip_fast.py | 168 +++ src/transformers/utils/dummy_pt_objects.py | 39 + .../utils/dummy_tokenizers_objects.py | 9 + .../utils/dummy_vision_objects.py | 10 + tests/test_feature_extraction_clip.py | 229 +++++ tests/test_modeling_clip.py | 561 ++++++++++ tests/test_processor_clip.py | 177 ++++ tests/test_tokenization_clip.py | 207 ++++ utils/check_repo.py | 2 + 25 files changed, 3848 insertions(+), 45 deletions(-) create mode 100644 docs/source/model_doc/clip.rst create mode 100644 src/transformers/models/clip/__init__.py create mode 100644 src/transformers/models/clip/configuration_clip.py create mode 100644 src/transformers/models/clip/convert_clip_original_pytorch_to_hf.py create mode 100644 src/transformers/models/clip/feature_extraction_clip.py create mode 100755 src/transformers/models/clip/modeling_clip.py create mode 100644 src/transformers/models/clip/processing_clip.py create mode 100644 src/transformers/models/clip/tokenization_clip.py create mode 100644 src/transformers/models/clip/tokenization_clip_fast.py create mode 100644 tests/test_feature_extraction_clip.py create mode 100644 tests/test_modeling_clip.py create mode 100644 tests/test_processor_clip.py create mode 100644 tests/test_tokenization_clip.py diff --git a/README.md b/README.md index fb5b8a62570752..87b3b07fdbde9f 100644 --- a/README.md +++ b/README.md @@ -200,6 +200,7 @@ Current number of checkpoints: ![](https://img.shields.io/endpoint?url=https://h 1. **[BlenderbotSmall](https://huggingface.co/transformers/model_doc/blenderbot_small.html)** (from Facebook) released with the paper [Recipes for building an open-domain chatbot](https://arxiv.org/abs/2004.13637) by Stephen Roller, Emily Dinan, Naman Goyal, Da Ju, Mary Williamson, Yinhan Liu, Jing Xu, Myle Ott, Kurt Shuster, Eric M. Smith, Y-Lan Boureau, Jason Weston. 1. **[BORT](https://huggingface.co/transformers/model_doc/bort.html)** (from Alexa) released with the paper [Optimal Subarchitecture Extraction For BERT](https://arxiv.org/abs/2010.10499) by Adrian de Wynter and Daniel J. Perry. 1. **[CamemBERT](https://huggingface.co/transformers/model_doc/camembert.html)** (from Inria/Facebook/Sorbonne) released with the paper [CamemBERT: a Tasty French Language Model](https://arxiv.org/abs/1911.03894) by Louis Martin*, Benjamin Muller*, Pedro Javier Ortiz Suárez*, Yoann Dupont, Laurent Romary, Éric Villemonte de la Clergerie, Djamé Seddah and Benoît Sagot. +1. **[CLIP](https://huggingface.co/transformers/model_doc/camembert.html)** from (OpenAI) released with the paper [Learning Transferable Visual Models From Natural Language Supervision](https://arxiv.org/abs/2103.00020) by Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, Gretchen Krueger, Ilya Sutskever. 1. **[ConvBERT](https://huggingface.co/transformers/model_doc/convbert.html)** (from YituTech) released with the paper [ConvBERT: Improving BERT with Span-based Dynamic Convolution](https://arxiv.org/abs/2008.02496) by Zihang Jiang, Weihao Yu, Daquan Zhou, Yunpeng Chen, Jiashi Feng, Shuicheng Yan. 1. 
**[CPM](https://huggingface.co/transformers/model_doc/cpm.html)** (from Tsinghua University) released with the paper [CPM: A Large-scale Generative Chinese Pre-trained Language Model](https://arxiv.org/abs/2012.00413) by Zhengyan Zhang, Xu Han, Hao Zhou, Pei Ke, Yuxian Gu, Deming Ye, Yujia Qin, Yusheng Su, Haozhe Ji, Jian Guan, Fanchao Qi, Xiaozhi Wang, Yanan Zheng, Guoyang Zeng, Huanqi Cao, Shengqi Chen, Daixuan Li, Zhenbo Sun, Zhiyuan Liu, Minlie Huang, Wentao Han, Jie Tang, Juanzi Li, Xiaoyan Zhu, Maosong Sun. 1. **[CTRL](https://huggingface.co/transformers/model_doc/ctrl.html)** (from Salesforce) released with the paper [CTRL: A Conditional Transformer Language Model for Controllable Generation](https://arxiv.org/abs/1909.05858) by Nitish Shirish Keskar*, Bryan McCann*, Lav R. Varshney, Caiming Xiong and Richard Socher. diff --git a/docs/source/index.rst b/docs/source/index.rst index ea1d047afcb525..1fac89a4821bac 100644 --- a/docs/source/index.rst +++ b/docs/source/index.rst @@ -114,142 +114,146 @@ conversion utilities for the following models: 11. :doc:`CamemBERT ` (from Inria/Facebook/Sorbonne) released with the paper `CamemBERT: a Tasty French Language Model `__ by Louis Martin*, Benjamin Muller*, Pedro Javier Ortiz Suárez*, Yoann Dupont, Laurent Romary, Éric Villemonte de la Clergerie, Djamé Seddah and Benoît Sagot. -12. :doc:`ConvBERT ` (from YituTech) released with the paper `ConvBERT: Improving BERT with +12. :doc:`CLIP ` from (OpenAI) released with the paper `Learning Transferable Visual Models From + Natural Language Supervision `__ by Alec Radford, Jong Wook Kim, Chris Hallacy, + Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, Gretchen + Krueger, Ilya Sutskever. +13. :doc:`ConvBERT ` (from YituTech) released with the paper `ConvBERT: Improving BERT with Span-based Dynamic Convolution `__ by Zihang Jiang, Weihao Yu, Daquan Zhou, Yunpeng Chen, Jiashi Feng, Shuicheng Yan. -13. :doc:`CPM ` (from Tsinghua University) released with the paper `CPM: A Large-scale Generative +14. :doc:`CPM ` (from Tsinghua University) released with the paper `CPM: A Large-scale Generative Chinese Pre-trained Language Model `__ by Zhengyan Zhang, Xu Han, Hao Zhou, Pei Ke, Yuxian Gu, Deming Ye, Yujia Qin, Yusheng Su, Haozhe Ji, Jian Guan, Fanchao Qi, Xiaozhi Wang, Yanan Zheng, Guoyang Zeng, Huanqi Cao, Shengqi Chen, Daixuan Li, Zhenbo Sun, Zhiyuan Liu, Minlie Huang, Wentao Han, Jie Tang, Juanzi Li, Xiaoyan Zhu, Maosong Sun. -14. :doc:`CTRL ` (from Salesforce) released with the paper `CTRL: A Conditional Transformer Language +15. :doc:`CTRL ` (from Salesforce) released with the paper `CTRL: A Conditional Transformer Language Model for Controllable Generation `__ by Nitish Shirish Keskar*, Bryan McCann*, Lav R. Varshney, Caiming Xiong and Richard Socher. -15. :doc:`DeBERTa ` (from Microsoft) released with the paper `DeBERTa: Decoding-enhanced BERT with +16. :doc:`DeBERTa ` (from Microsoft) released with the paper `DeBERTa: Decoding-enhanced BERT with Disentangled Attention `__ by Pengcheng He, Xiaodong Liu, Jianfeng Gao, Weizhu Chen. -16. :doc:`DeBERTa-v2 ` (from Microsoft) released with the paper `DeBERTa: Decoding-enhanced BERT +17. :doc:`DeBERTa-v2 ` (from Microsoft) released with the paper `DeBERTa: Decoding-enhanced BERT with Disentangled Attention `__ by Pengcheng He, Xiaodong Liu, Jianfeng Gao, Weizhu Chen. -17. :doc:`DeiT ` (from Facebook) released with the paper `Training data-efficient image transformers & +18. 
:doc:`DeiT ` (from Facebook) released with the paper `Training data-efficient image transformers & distillation through attention `__ by Hugo Touvron, Matthieu Cord, Matthijs Douze, Francisco Massa, Alexandre Sablayrolles, Hervé Jégou. -18. :doc:`DialoGPT ` (from Microsoft Research) released with the paper `DialoGPT: Large-Scale +19. :doc:`DialoGPT ` (from Microsoft Research) released with the paper `DialoGPT: Large-Scale Generative Pre-training for Conversational Response Generation `__ by Yizhe Zhang, Siqi Sun, Michel Galley, Yen-Chun Chen, Chris Brockett, Xiang Gao, Jianfeng Gao, Jingjing Liu, Bill Dolan. -19. :doc:`DistilBERT ` (from HuggingFace), released together with the paper `DistilBERT, a +20. :doc:`DistilBERT ` (from HuggingFace), released together with the paper `DistilBERT, a distilled version of BERT: smaller, faster, cheaper and lighter `__ by Victor Sanh, Lysandre Debut and Thomas Wolf. The same method has been applied to compress GPT2 into `DistilGPT2 `__, RoBERTa into `DistilRoBERTa `__, Multilingual BERT into `DistilmBERT `__ and a German version of DistilBERT. -20. :doc:`DPR ` (from Facebook) released with the paper `Dense Passage Retrieval for Open-Domain +21. :doc:`DPR ` (from Facebook) released with the paper `Dense Passage Retrieval for Open-Domain Question Answering `__ by Vladimir Karpukhin, Barlas Oğuz, Sewon Min, Patrick Lewis, Ledell Wu, Sergey Edunov, Danqi Chen, and Wen-tau Yih. -21. :doc:`ELECTRA ` (from Google Research/Stanford University) released with the paper `ELECTRA: +22. :doc:`ELECTRA ` (from Google Research/Stanford University) released with the paper `ELECTRA: Pre-training text encoders as discriminators rather than generators `__ by Kevin Clark, Minh-Thang Luong, Quoc V. Le, Christopher D. Manning. -22. :doc:`FlauBERT ` (from CNRS) released with the paper `FlauBERT: Unsupervised Language Model +23. :doc:`FlauBERT ` (from CNRS) released with the paper `FlauBERT: Unsupervised Language Model Pre-training for French `__ by Hang Le, Loïc Vial, Jibril Frej, Vincent Segonne, Maximin Coavoux, Benjamin Lecouteux, Alexandre Allauzen, Benoît Crabbé, Laurent Besacier, Didier Schwab. -23. :doc:`Funnel Transformer ` (from CMU/Google Brain) released with the paper `Funnel-Transformer: +24. :doc:`Funnel Transformer ` (from CMU/Google Brain) released with the paper `Funnel-Transformer: Filtering out Sequential Redundancy for Efficient Language Processing `__ by Zihang Dai, Guokun Lai, Yiming Yang, Quoc V. Le. -24. :doc:`GPT ` (from OpenAI) released with the paper `Improving Language Understanding by Generative +25. :doc:`GPT ` (from OpenAI) released with the paper `Improving Language Understanding by Generative Pre-Training `__ by Alec Radford, Karthik Narasimhan, Tim Salimans and Ilya Sutskever. -25. :doc:`GPT-2 ` (from OpenAI) released with the paper `Language Models are Unsupervised Multitask +26. :doc:`GPT-2 ` (from OpenAI) released with the paper `Language Models are Unsupervised Multitask Learners `__ by Alec Radford*, Jeffrey Wu*, Rewon Child, David Luan, Dario Amodei** and Ilya Sutskever**. -26. :doc:`GPT Neo ` (from EleutherAI) released in the repository `EleutherAI/gpt-neo +27. :doc:`GPT Neo ` (from EleutherAI) released in the repository `EleutherAI/gpt-neo `__ by Sid Black, Stella Biderman, Leo Gao, Phil Wang and Connor Leahy. -27. :doc:`I-BERT ` (from Berkeley) released with the paper `I-BERT: Integer-only BERT Quantization +28. 
:doc:`I-BERT ` (from Berkeley) released with the paper `I-BERT: Integer-only BERT Quantization `__ by Sehoon Kim, Amir Gholami, Zhewei Yao, Michael W. Mahoney, Kurt Keutzer -28. :doc:`LayoutLM ` (from Microsoft Research Asia) released with the paper `LayoutLM: Pre-training +29. :doc:`LayoutLM ` (from Microsoft Research Asia) released with the paper `LayoutLM: Pre-training of Text and Layout for Document Image Understanding `__ by Yiheng Xu, Minghao Li, Lei Cui, Shaohan Huang, Furu Wei, Ming Zhou. -29. :doc:`LED ` (from AllenAI) released with the paper `Longformer: The Long-Document Transformer +30. :doc:`LED ` (from AllenAI) released with the paper `Longformer: The Long-Document Transformer `__ by Iz Beltagy, Matthew E. Peters, Arman Cohan. -30. :doc:`Longformer ` (from AllenAI) released with the paper `Longformer: The Long-Document +31. :doc:`Longformer ` (from AllenAI) released with the paper `Longformer: The Long-Document Transformer `__ by Iz Beltagy, Matthew E. Peters, Arman Cohan. -31. :doc:`LUKE ` (from Studio Ousia) released with the paper `LUKE: Deep Contextualized Entity +32. :doc:`LUKE ` (from Studio Ousia) released with the paper `LUKE: Deep Contextualized Entity Representations with Entity-aware Self-attention `__ by Ikuya Yamada, Akari Asai, Hiroyuki Shindo, Hideaki Takeda, Yuji Matsumoto. -32. :doc:`LXMERT ` (from UNC Chapel Hill) released with the paper `LXMERT: Learning Cross-Modality +33. :doc:`LXMERT ` (from UNC Chapel Hill) released with the paper `LXMERT: Learning Cross-Modality Encoder Representations from Transformers for Open-Domain Question Answering `__ by Hao Tan and Mohit Bansal. -33. :doc:`M2M100 ` (from Facebook) released with the paper `Beyond English-Centric Multilingual +34. :doc:`M2M100 ` (from Facebook) released with the paper `Beyond English-Centric Multilingual Machine Translation `__ by by Angela Fan, Shruti Bhosale, Holger Schwenk, Zhiyi Ma, Ahmed El-Kishky, Siddharth Goyal, Mandeep Baines, Onur Celebi, Guillaume Wenzek, Vishrav Chaudhary, Naman Goyal, Tom Birch, Vitaliy Liptchinsky, Sergey Edunov, Edouard Grave, Michael Auli, Armand Joulin. -34. :doc:`MarianMT ` Machine translation models trained using `OPUS `__ data by +35. :doc:`MarianMT ` Machine translation models trained using `OPUS `__ data by Jörg Tiedemann. The `Marian Framework `__ is being developed by the Microsoft Translator Team. -35. :doc:`MBart ` (from Facebook) released with the paper `Multilingual Denoising Pre-training for +36. :doc:`MBart ` (from Facebook) released with the paper `Multilingual Denoising Pre-training for Neural Machine Translation `__ by Yinhan Liu, Jiatao Gu, Naman Goyal, Xian Li, Sergey Edunov, Marjan Ghazvininejad, Mike Lewis, Luke Zettlemoyer. -36. :doc:`MBart-50 ` (from Facebook) released with the paper `Multilingual Translation with Extensible +37. :doc:`MBart-50 ` (from Facebook) released with the paper `Multilingual Translation with Extensible Multilingual Pretraining and Finetuning `__ by Yuqing Tang, Chau Tran, Xian Li, Peng-Jen Chen, Naman Goyal, Vishrav Chaudhary, Jiatao Gu, Angela Fan. -37. :doc:`Megatron-BERT ` (from NVIDIA) released with the paper `Megatron-LM: Training +38. :doc:`Megatron-BERT ` (from NVIDIA) released with the paper `Megatron-LM: Training Multi-Billion Parameter Language Models Using Model Parallelism `__ by Mohammad Shoeybi, Mostofa Patwary, Raul Puri, Patrick LeGresley, Jared Casper and Bryan Catanzaro. -38. :doc:`Megatron-GPT2 ` (from NVIDIA) released with the paper `Megatron-LM: Training +39. 
:doc:`Megatron-GPT2 ` (from NVIDIA) released with the paper `Megatron-LM: Training Multi-Billion Parameter Language Models Using Model Parallelism `__ by Mohammad Shoeybi, Mostofa Patwary, Raul Puri, Patrick LeGresley, Jared Casper and Bryan Catanzaro. -39. :doc:`MPNet ` (from Microsoft Research) released with the paper `MPNet: Masked and Permuted +40. :doc:`MPNet ` (from Microsoft Research) released with the paper `MPNet: Masked and Permuted Pre-training for Language Understanding `__ by Kaitao Song, Xu Tan, Tao Qin, Jianfeng Lu, Tie-Yan Liu. -40. :doc:`MT5 ` (from Google AI) released with the paper `mT5: A massively multilingual pre-trained +41. :doc:`MT5 ` (from Google AI) released with the paper `mT5: A massively multilingual pre-trained text-to-text transformer `__ by Linting Xue, Noah Constant, Adam Roberts, Mihir Kale, Rami Al-Rfou, Aditya Siddhant, Aditya Barua, Colin Raffel. -41. :doc:`Pegasus ` (from Google) released with the paper `PEGASUS: Pre-training with Extracted +42. :doc:`Pegasus ` (from Google) released with the paper `PEGASUS: Pre-training with Extracted Gap-sentences for Abstractive Summarization `__> by Jingqing Zhang, Yao Zhao, Mohammad Saleh and Peter J. Liu. -42. :doc:`ProphetNet ` (from Microsoft Research) released with the paper `ProphetNet: Predicting +43. :doc:`ProphetNet ` (from Microsoft Research) released with the paper `ProphetNet: Predicting Future N-gram for Sequence-to-Sequence Pre-training `__ by Yu Yan, Weizhen Qi, Yeyun Gong, Dayiheng Liu, Nan Duan, Jiusheng Chen, Ruofei Zhang and Ming Zhou. -43. :doc:`Reformer ` (from Google Research) released with the paper `Reformer: The Efficient +44. :doc:`Reformer ` (from Google Research) released with the paper `Reformer: The Efficient Transformer `__ by Nikita Kitaev, Łukasz Kaiser, Anselm Levskaya. -44. :doc:`RoBERTa ` (from Facebook), released together with the paper a `Robustly Optimized BERT +45. :doc:`RoBERTa ` (from Facebook), released together with the paper a `Robustly Optimized BERT Pretraining Approach `__ by Yinhan Liu, Myle Ott, Naman Goyal, Jingfei Du, Mandar Joshi, Danqi Chen, Omer Levy, Mike Lewis, Luke Zettlemoyer, Veselin Stoyanov. -45. :doc:`SpeechToTextTransformer ` (from Facebook), released together with the paper +46. :doc:`SpeechToTextTransformer ` (from Facebook), released together with the paper `fairseq S2T: Fast Speech-to-Text Modeling with fairseq `__ by Changhan Wang, Yun Tang, Xutai Ma, Anne Wu, Dmytro Okhonko, Juan Pino. -46. :doc:`SqueezeBert ` released with the paper `SqueezeBERT: What can computer vision teach NLP +47. :doc:`SqueezeBert ` released with the paper `SqueezeBERT: What can computer vision teach NLP about efficient neural networks? `__ by Forrest N. Iandola, Albert E. Shaw, Ravi Krishna, and Kurt W. Keutzer. -47. :doc:`T5 ` (from Google AI) released with the paper `Exploring the Limits of Transfer Learning with a +48. :doc:`T5 ` (from Google AI) released with the paper `Exploring the Limits of Transfer Learning with a Unified Text-to-Text Transformer `__ by Colin Raffel and Noam Shazeer and Adam Roberts and Katherine Lee and Sharan Narang and Michael Matena and Yanqi Zhou and Wei Li and Peter J. Liu. -48. :doc:`TAPAS ` (from Google AI) released with the paper `TAPAS: Weakly Supervised Table Parsing via +49. :doc:`TAPAS ` (from Google AI) released with the paper `TAPAS: Weakly Supervised Table Parsing via Pre-training `__ by Jonathan Herzig, Paweł Krzysztof Nowak, Thomas Müller, Francesco Piccinno and Julian Martin Eisenschlos. -49. 
:doc:`Transformer-XL ` (from Google/CMU) released with the paper `Transformer-XL: +50. :doc:`Transformer-XL ` (from Google/CMU) released with the paper `Transformer-XL: Attentive Language Models Beyond a Fixed-Length Context `__ by Zihang Dai*, Zhilin Yang*, Yiming Yang, Jaime Carbonell, Quoc V. Le, Ruslan Salakhutdinov. -50. :doc:`Vision Transformer (ViT) ` (from Google AI) released with the paper `An Image is Worth 16x16 +51. :doc:`Vision Transformer (ViT) ` (from Google AI) released with the paper `An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale `__ by Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, Jakob Uszkoreit, Neil Houlsby. -51. :doc:`Wav2Vec2 ` (from Facebook AI) released with the paper `wav2vec 2.0: A Framework for +52. :doc:`Wav2Vec2 ` (from Facebook AI) released with the paper `wav2vec 2.0: A Framework for Self-Supervised Learning of Speech Representations `__ by Alexei Baevski, Henry Zhou, Abdelrahman Mohamed, Michael Auli. -52. :doc:`XLM ` (from Facebook) released together with the paper `Cross-lingual Language Model +53. :doc:`XLM ` (from Facebook) released together with the paper `Cross-lingual Language Model Pretraining `__ by Guillaume Lample and Alexis Conneau. -53. :doc:`XLM-ProphetNet ` (from Microsoft Research) released with the paper `ProphetNet: +54. :doc:`XLM-ProphetNet ` (from Microsoft Research) released with the paper `ProphetNet: Predicting Future N-gram for Sequence-to-Sequence Pre-training `__ by Yu Yan, Weizhen Qi, Yeyun Gong, Dayiheng Liu, Nan Duan, Jiusheng Chen, Ruofei Zhang and Ming Zhou. -54. :doc:`XLM-RoBERTa ` (from Facebook AI), released together with the paper `Unsupervised +55. :doc:`XLM-RoBERTa ` (from Facebook AI), released together with the paper `Unsupervised Cross-lingual Representation Learning at Scale `__ by Alexis Conneau*, Kartikay Khandelwal*, Naman Goyal, Vishrav Chaudhary, Guillaume Wenzek, Francisco Guzmán, Edouard Grave, Myle Ott, Luke Zettlemoyer and Veselin Stoyanov. -55. :doc:`XLNet ` (from Google/CMU) released with the paper `​XLNet: Generalized Autoregressive +56. :doc:`XLNet ` (from Google/CMU) released with the paper `​XLNet: Generalized Autoregressive Pretraining for Language Understanding `__ by Zhilin Yang*, Zihang Dai*, Yiming Yang, Jaime Carbonell, Ruslan Salakhutdinov, Quoc V. Le. -56. :doc:`XLSR-Wav2Vec2 ` (from Facebook AI) released with the paper `Unsupervised +57. :doc:`XLSR-Wav2Vec2 ` (from Facebook AI) released with the paper `Unsupervised Cross-Lingual Representation Learning For Speech Recognition `__ by Alexis Conneau, Alexei Baevski, Ronan Collobert, Abdelrahman Mohamed, Michael Auli. @@ -284,6 +288,8 @@ Flax), PyTorch, and/or TensorFlow. +-----------------------------+----------------+----------------+-----------------+--------------------+--------------+ | BlenderbotSmall | ✅ | ❌ | ✅ | ✅ | ❌ | +-----------------------------+----------------+----------------+-----------------+--------------------+--------------+ +| CLIP | ✅ | ✅ | ✅ | ❌ | ❌ | ++-----------------------------+----------------+----------------+-----------------+--------------------+--------------+ | CTRL | ✅ | ❌ | ✅ | ✅ | ❌ | +-----------------------------+----------------+----------------+-----------------+--------------------+--------------+ | CamemBERT | ✅ | ✅ | ✅ | ✅ | ❌ | @@ -461,6 +467,7 @@ Flax), PyTorch, and/or TensorFlow. 
model_doc/blenderbot_small model_doc/bort model_doc/camembert + model_doc/clip model_doc/convbert model_doc/cpm model_doc/ctrl diff --git a/docs/source/model_doc/clip.rst b/docs/source/model_doc/clip.rst new file mode 100644 index 00000000000000..2692680cabea3d --- /dev/null +++ b/docs/source/model_doc/clip.rst @@ -0,0 +1,154 @@ +.. + Copyright 2021 The HuggingFace Team. All rights reserved. + + Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with + the License. You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on + an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the + specific language governing permissions and limitations under the License. + +CLIP +----------------------------------------------------------------------------------------------------------------------- + +Overview +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +The CLIP model was proposed in `Learning Transferable Visual Models From Natural Language Supervision +`__ by Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, +Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, Gretchen Krueger, Ilya Sutskever. CLIP +(Contrastive Language-Image Pre-Training) is a neural network trained on a variety of (image, text) pairs. It can be +instructed in natural language to predict the most relevant text snippet, given an image, without directly optimizing +for the task, similarly to the zero-shot capabilities of GPT-2 and 3. + +The abstract from the paper is the following: + +*State-of-the-art computer vision systems are trained to predict a fixed set of predetermined object categories. This +restricted form of supervision limits their generality and usability since additional labeled data is needed to specify +any other visual concept. Learning directly from raw text about images is a promising alternative which leverages a +much broader source of supervision. We demonstrate that the simple pre-training task of predicting which caption goes +with which image is an efficient and scalable way to learn SOTA image representations from scratch on a dataset of 400 +million (image, text) pairs collected from the internet. After pre-training, natural language is used to reference +learned visual concepts (or describe new ones) enabling zero-shot transfer of the model to downstream tasks. We study +the performance of this approach by benchmarking on over 30 different existing computer vision datasets, spanning tasks +such as OCR, action recognition in videos, geo-localization, and many types of fine-grained object classification. The +model transfers non-trivially to most tasks and is often competitive with a fully supervised baseline without the need +for any dataset specific training. For instance, we match the accuracy of the original ResNet-50 on ImageNet zero-shot +without needing to use any of the 1.28 million training examples it was trained on. We release our code and pre-trained +model weights at this https URL.* + +Usage +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +CLIP is a multi-modal vision and language model. 
It can be used for image-text similarity and for zero-shot image +classification. CLIP uses a ViT like transformer to get visual features and a causal language model to get the text +features. Both the text and visual features are then projected to a latent space with identical dimension. The dot +product between the projected image and text features is then used as a similar score. + +To feed images to the Transformer encoder, each image is split into a sequence of fixed-size non-overlapping patches, +which are then linearly embedded. A [CLS] token is added to serve as representation of an entire image. The authors +also add absolute position embeddings, and feed the resulting sequence of vectors to a standard Transformer encoder. +The :class:`~transformers.CLIPFeatureExtractor` can be used to resize (or rescale) and normalize images for the model. + +The :class:`~transformers.CLIPTokenizer` is used to encode the text. The :class:`~transformers.CLIPProcessor` wraps +:class:`~transformers.CLIPFeatureExtractor` and :class:`~transformers.CLIPTokenizer` into a single instance to both +encode the text and prepare the images. The following example shows how to get the image-text similarity scores using +:class:`~transformers.CLIPProcessor` and :class:`~transformers.CLIPModel`. + + +.. code-block:: + + >>> import torch + >>> from PIL import Image + >>> import requests + + >>> from transformers import CLIPProcessor, CLIPModel + + >>> model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32") + >>> processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32") + + >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" + >>> image = Image.open(requests.get(url, stream=True).raw) + + >>> inputs = processor(text=["a photo of a cat", "a photo of a dog"], images=image, return_tensors="pt", padding=True) + + >>> outputs = model(**inputs) + >>> logits_per_image = outputs.logits_per_image # this is the image-text similarity score + >>> probs = logits_per_image.softmax(dim=1) # we can take the softmax to get the label probabilities + + +This model was contributed by `valhalla `__. The original code can be found `here +`__. + +CLIPConfig +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. autoclass:: transformers.CLIPConfig + :members: from_text_vision_configs + + +CLIPTextConfig +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. autoclass:: transformers.CLIPTextConfig + :members: + + +CLIPVisionConfig +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. autoclass:: transformers.CLIPVisionConfig + :members: + + + +CLIPTokenizer +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. autoclass:: transformers.CLIPTokenizer + :members: build_inputs_with_special_tokens, get_special_tokens_mask, + create_token_type_ids_from_sequences, save_vocabulary + +CLIPTokenizerFast +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. autoclass:: transformers.CLIPTokenizerFast + :members: + + +CLIPFeatureExtractor +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. 
autoclass:: transformers.CLIPFeatureExtractor + :members: + + +CLIPProcessor +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. autoclass:: transformers.CLIPProcessor + :members: + + + +CLIPModel +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. autoclass:: transformers.CLIPModel + :members: forward, get_text_features, get_image_features + + +CLIPTextModel +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. autoclass:: transformers.CLIPTextModel + :members: forward + + +CLIPVisionModel +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. autoclass:: transformers.CLIPVisionModel + :members: forward diff --git a/src/transformers/__init__.py b/src/transformers/__init__.py index 6843b110a05186..f89c3c43283801 100755 --- a/src/transformers/__init__.py +++ b/src/transformers/__init__.py @@ -166,6 +166,13 @@ "BlenderbotSmallTokenizer", ], "models.camembert": ["CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "CamembertConfig"], + "models.clip": [ + "CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP", + "CLIPConfig", + "CLIPTextConfig", + "CLIPTokenizer", + "CLIPVisionConfig", + ], "models.convbert": ["CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConvBertConfig", "ConvBertTokenizer"], "models.cpm": ["CpmTokenizer"], "models.ctrl": ["CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP", "CTRLConfig", "CTRLTokenizer"], @@ -315,6 +322,7 @@ # tokenizers-backed objects if is_tokenizers_available(): # Fast tokenizers + _import_structure["models.clip"].append("CLIPTokenizerFast") _import_structure["models.convbert"].append("ConvBertTokenizerFast") _import_structure["models.albert"].append("AlbertTokenizerFast") _import_structure["models.bart"].append("BartTokenizerFast") @@ -390,6 +398,8 @@ # Vision-specific objects if is_vision_available(): _import_structure["image_utils"] = ["ImageFeatureExtractionMixin"] + _import_structure["models.clip"].append("CLIPFeatureExtractor") + _import_structure["models.clip"].append("CLIPProcessor") _import_structure["models.deit"].append("DeiTFeatureExtractor") _import_structure["models.vit"].append("ViTFeatureExtractor") else: @@ -498,6 +508,7 @@ "AutoModelWithLMHead", ] ) + _import_structure["models.bart"].extend( [ "BART_PRETRAINED_MODEL_ARCHIVE_LIST", @@ -588,6 +599,15 @@ "CamembertModel", ] ) + _import_structure["models.clip"].extend( + [ + "CLIP_PRETRAINED_MODEL_ARCHIVE_LIST", + "CLIPModel", + "CLIPPreTrainedModel", + "CLIPTextModel", + "CLIPVisionModel", + ] + ) _import_structure["models.convbert"].extend( [ "CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST", @@ -1566,6 +1586,13 @@ BlenderbotSmallTokenizer, ) from .models.camembert import CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, CamembertConfig + from .models.clip import ( + CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, + CLIPConfig, + CLIPTextConfig, + CLIPTokenizer, + CLIPVisionConfig, + ) from .models.convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertTokenizer from .models.cpm import CpmTokenizer from .models.ctrl import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRLConfig, CTRLTokenizer @@ -1715,6 +1742,7 @@ from .models.bert import BertTokenizerFast from .models.big_bird import BigBirdTokenizerFast from .models.camembert import CamembertTokenizerFast + from .models.clip import CLIPTokenizerFast from .models.convbert import ConvBertTokenizerFast from 
.models.deberta import DebertaTokenizerFast from .models.distilbert import DistilBertTokenizerFast @@ -1763,6 +1791,7 @@ if is_vision_available(): from .image_utils import ImageFeatureExtractionMixin + from .models.clip import CLIPFeatureExtractor, CLIPProcessor from .models.deit import DeiTFeatureExtractor from .models.vit import ViTFeatureExtractor else: @@ -1936,6 +1965,13 @@ CamembertForTokenClassification, CamembertModel, ) + from .models.clip import ( + CLIP_PRETRAINED_MODEL_ARCHIVE_LIST, + CLIPModel, + CLIPPreTrainedModel, + CLIPTextModel, + CLIPVisionModel, + ) from .models.convbert import ( CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST, ConvBertForMaskedLM, diff --git a/src/transformers/activations.py b/src/transformers/activations.py index deade8c8685356..f60c64206266f2 100644 --- a/src/transformers/activations.py +++ b/src/transformers/activations.py @@ -52,6 +52,10 @@ def gelu_fast(x): return 0.5 * x * (1.0 + torch.tanh(x * 0.7978845608 * (1.0 + 0.044715 * x * x))) +def quick_gelu(x): + return x * torch.sigmoid(1.702 * x) + + def _silu_python(x): """ See Gaussian Error Linear Units (Hendrycks et al., https://arxiv.org/abs/1606.08415) where the SiLU (Sigmoid Linear @@ -85,6 +89,7 @@ def linear_act(x): "tanh": torch.tanh, "gelu_new": gelu_new, "gelu_fast": gelu_fast, + "quick_gelu": quick_gelu, "mish": mish, "linear": linear_act, "sigmoid": torch.sigmoid, diff --git a/src/transformers/convert_slow_tokenizer.py b/src/transformers/convert_slow_tokenizer.py index 002878492a0c16..252990f01d117d 100644 --- a/src/transformers/convert_slow_tokenizer.py +++ b/src/transformers/convert_slow_tokenizer.py @@ -701,6 +701,29 @@ def post_processor(self): ) +class CLIPConverter(Converter): + def converted(self) -> Tokenizer: + vocab = self.original_tokenizer.encoder + merges = list(self.original_tokenizer.bpe_ranks.keys()) + + tokenizer = Tokenizer( + BPE( + vocab=vocab, + merges=merges, + dropout=None, + continuing_subword_prefix="", + end_of_word_suffix="", + fuse_unk=False, + ) + ) + + tokenizer.pre_tokenizer = pre_tokenizers.ByteLevel(add_prefix_space=self.original_tokenizer.add_prefix_space) + tokenizer.decoder = decoders.ByteLevel() + tokenizer.post_processor = processors.ByteLevel(trim_offsets=False) + + return tokenizer + + SLOW_TO_FAST_CONVERTERS = { "AlbertTokenizer": AlbertConverter, "BartTokenizer": RobertaConverter, @@ -708,6 +731,7 @@ def post_processor(self): "BertTokenizer": BertConverter, "BigBirdTokenizer": BigBirdConverter, "CamembertTokenizer": CamembertConverter, + "CLIPTokenizer": CLIPConverter, "ConvBertTokenizer": BertConverter, "DebertaTokenizer": DebertaConverter, "DistilBertTokenizer": BertConverter, diff --git a/src/transformers/models/__init__.py b/src/transformers/models/__init__.py index 7fd6d63acdc6c0..297ff6ae4f8909 100644 --- a/src/transformers/models/__init__.py +++ b/src/transformers/models/__init__.py @@ -30,6 +30,7 @@ blenderbot, blenderbot_small, camembert, + clip, convbert, cpm, ctrl, diff --git a/src/transformers/models/auto/configuration_auto.py b/src/transformers/models/auto/configuration_auto.py index e3c78dd34040cd..7b37b4e6303a26 100644 --- a/src/transformers/models/auto/configuration_auto.py +++ b/src/transformers/models/auto/configuration_auto.py @@ -33,6 +33,7 @@ BlenderbotSmallConfig, ) from ..camembert.configuration_camembert import CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, CamembertConfig +from ..clip.configuration_clip import CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, CLIPConfig from ..convbert.configuration_convbert import 
CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig from ..ctrl.configuration_ctrl import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRLConfig from ..deberta.configuration_deberta import DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, DebertaConfig @@ -90,6 +91,7 @@ (key, value) for pretrained_map in [ # Add archive maps here + CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP, DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP, @@ -144,6 +146,7 @@ CONFIG_MAPPING = OrderedDict( [ # Add configs here + ("clip", CLIPConfig), ("bigbird_pegasus", BigBirdPegasusConfig), ("deit", DeiTConfig), ("luke", LukeConfig), @@ -204,6 +207,7 @@ MODEL_NAMES_MAPPING = OrderedDict( [ # Add full (and cased) model names here + ("clip", "CLIP"), ("bigbird_pegasus", "BigBirdPegasus"), ("deit", "DeiT"), ("luke", "LUKE"), diff --git a/src/transformers/models/auto/modeling_auto.py b/src/transformers/models/auto/modeling_auto.py index f28b8466676c08..ae82405e09c03e 100644 --- a/src/transformers/models/auto/modeling_auto.py +++ b/src/transformers/models/auto/modeling_auto.py @@ -81,6 +81,7 @@ CamembertForTokenClassification, CamembertModel, ) +from ..clip.modeling_clip import CLIPModel from ..convbert.modeling_convbert import ( ConvBertForMaskedLM, ConvBertForMultipleChoice, @@ -299,6 +300,7 @@ BlenderbotConfig, BlenderbotSmallConfig, CamembertConfig, + CLIPConfig, ConvBertConfig, CTRLConfig, DebertaConfig, @@ -352,6 +354,7 @@ MODEL_MAPPING = OrderedDict( [ # Base model mapping + (CLIPConfig, CLIPModel), (BigBirdPegasusConfig, BigBirdPegasusModel), (DeiTConfig, DeiTModel), (LukeConfig, LukeModel), diff --git a/src/transformers/models/clip/__init__.py b/src/transformers/models/clip/__init__.py new file mode 100644 index 00000000000000..1f58953266a018 --- /dev/null +++ b/src/transformers/models/clip/__init__.py @@ -0,0 +1,82 @@ +# flake8: noqa +# There's no way to ignore "F401 '...' imported but unused" warnings in this +# module, but to preserve other warnings. So, don't check this module at all. + +# Copyright 2021 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
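With ``CLIPConfig`` registered in ``CONFIG_MAPPING`` and ``(CLIPConfig, CLIPModel)`` added to ``MODEL_MAPPING`` above, the generic Auto classes dispatch to the new CLIP classes. A minimal sketch (illustrative only, using the default config values rather than a released checkpoint):

.. code-block::

    >>> from transformers import AutoModel, CLIPConfig, CLIPModel

    >>> # AutoModel resolves the config's model_type ("clip") through MODEL_MAPPING
    >>> config = CLIPConfig(projection_dim=512, text_config_dict={}, vision_config_dict={})
    >>> model = AutoModel.from_config(config)
    >>> isinstance(model, CLIPModel)
    True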
+from typing import TYPE_CHECKING + +from ...file_utils import _BaseLazyModule, is_tokenizers_available, is_torch_available, is_vision_available + + +_import_structure = { + "configuration_clip": ["CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP", "CLIPConfig", "CLIPTextConfig", "CLIPVisionConfig"], + "tokenization_clip": ["CLIPTokenizer"], +} + +if is_tokenizers_available(): + _import_structure["tokenization_clip_fast"] = ["CLIPTokenizerFast"] + +if is_vision_available(): + _import_structure["feature_extraction_clip"] = ["CLIPFeatureExtractor"] + _import_structure["processing_clip"] = ["CLIPProcessor"] + +if is_torch_available(): + _import_structure["modeling_clip"] = [ + "CLIP_PRETRAINED_MODEL_ARCHIVE_LIST", + "CLIPModel", + "CLIPPreTrainedModel", + "CLIPTextModel", + "CLIPVisionModel", + ] + + +if TYPE_CHECKING: + from .configuration_clip import CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, CLIPConfig, CLIPTextConfig, CLIPVisionConfig + from .tokenization_clip import CLIPTokenizer + + if is_tokenizers_available(): + from .tokenization_clip_fast import CLIPTokenizerFast + + if is_vision_available(): + from .feature_extraction_clip import CLIPFeatureExtractor + from .processing_clip import CLIPProcessor + + if is_torch_available(): + from .modeling_clip import ( + CLIP_PRETRAINED_MODEL_ARCHIVE_LIST, + CLIPModel, + CLIPPreTrainedModel, + CLIPTextModel, + CLIPVisionModel, + ) + + +else: + import importlib + import os + import sys + + class _LazyModule(_BaseLazyModule): + """ + Module class that surfaces all objects but only performs associated imports when the objects are requested. + """ + + __file__ = globals()["__file__"] + __path__ = [os.path.dirname(__file__)] + + def _get_module(self, module_name: str): + return importlib.import_module("." + module_name, self.__name__) + + sys.modules[__name__] = _LazyModule(__name__, _import_structure) diff --git a/src/transformers/models/clip/configuration_clip.py b/src/transformers/models/clip/configuration_clip.py new file mode 100644 index 00000000000000..849b5d906c99d3 --- /dev/null +++ b/src/transformers/models/clip/configuration_clip.py @@ -0,0 +1,282 @@ +# coding=utf-8 +# Copyright 2021 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" CLIP model configuration """ + +import copy + +from ...configuration_utils import PretrainedConfig +from ...utils import logging + + +logger = logging.get_logger(__name__) + +CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP = { + "openai/clip-vit-base-patch32": "https://huggingface.co/openai/clip-vit-base-patch32/resolve/main/config.json", + # See all CLIP models at https://huggingface.co/models?filter=clip +} + + +class CLIPTextConfig(PretrainedConfig): + r""" + This is the configuration class to store the configuration of a :class:`~transformers.CLIPModel`. It is used to + instantiate an CLIP model according to the specified arguments, defining the model architecture. 
Instantiating a + configuration with the defaults will yield a similar configuration to that of the CLIP + `openai/clip-vit-base-patch32 `__ architecture. + + Configuration objects inherit from :class:`~transformers.PretrainedConfig` and can be used to control the model + outputs. Read the documentation from :class:`~transformers.PretrainedConfig` for more information. + + + Args: + vocab_size (:obj:`int`, `optional`, defaults to 49408): + Vocabulary size of the CLIP text model. Defines the number of different tokens that can be represented by + the :obj:`inputs_ids` passed when calling :class:`~transformers.CLIPModel`. + hidden_size (:obj:`int`, `optional`, defaults to 512): + Dimensionality of the encoder layers and the pooler layer. + intermediate_size (:obj:`int`, `optional`, defaults to 2048): + Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder. + num_hidden_layers (:obj:`int`, `optional`, defaults to 12): + Number of hidden layers in the Transformer encoder. + num_attention_heads (:obj:`int`, `optional`, defaults to 8): + Number of attention heads for each attention layer in the Transformer encoder. + max_position_embeddings (:obj:`int`, `optional`, defaults to 77): + The maximum sequence length that this model might ever be used with. Typically set this to something large + just in case (e.g., 512 or 1024 or 2048). + hidden_act (:obj:`str` or :obj:`function`, `optional`, defaults to :obj:`"quick_gelu"`): + The non-linear activation function (function or string) in the encoder and pooler. If string, + :obj:`"gelu"`, :obj:`"relu"`, :obj:`"selu"` and :obj:`"gelu_new"` :obj:`"quick_gelu"` are supported. + layer_norm_eps (:obj:`float`, `optional`, defaults to 1e-5): + The epsilon used by the layer normalization layers. + attention_dropout (:obj:`float`, `optional`, defaults to 0.0): + The dropout ratio for the attention probabilities. + dropout (:obj:`float`, `optional`, defaults to 0.0): + The dropout probabilitiy for all fully connected layers in the embeddings, encoder, and pooler. + initializer_range (:obj:`float`, `optional`, defaults to 0.02): + The standard deviation of the truncated_normal_initializer for initializing all weight matrices. + initializer_factor (:obj:`float`, `optional`, defaults to 1): + A factor for initializing all weight matrices (should be kept to 1, used internally for initialization + testing). + gradient_checkpointing (:obj:`bool`, `optional`, defaults to :obj:`False`): + If True, use gradient checkpointing to save memory at the expense of slower backward pass. 
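The default ``hidden_act`` above is ``"quick_gelu"``, the sigmoid-based GELU approximation added to ``ACT2FN`` earlier in this patch. A quick numerical comparison against the exact GELU, for illustration only:

.. code-block::

    >>> import torch
    >>> import torch.nn.functional as F
    >>> from transformers.activations import ACT2FN

    >>> x = torch.linspace(-3.0, 3.0, steps=7)
    >>> quick = ACT2FN["quick_gelu"](x)  # x * sigmoid(1.702 * x)
    >>> exact = F.gelu(x)                # exact GELU, for reference
    >>> torch.allclose(quick, exact, atol=0.03)  # close, but not identical
    True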
+ + Example:: + + >>> from transformers import CLIPTextModel, CLIPTextConfig + + >>> # Initializing a CLIPTextModel with openai/clip-vit-base-patch32 style configuration + >>> configuration = CLIPTextConfig() + + >>> # Initializing a CLIPTextConfig from the openai/clip-vit-base-patch32 style configuration + >>> model = CLIPTextModel(configuration) + + >>> # Accessing the model configuration + >>> configuration = model.config + """ + model_type = "clip_text_model" + + def __init__( + self, + vocab_size=49408, + hidden_size=512, + intermediate_size=2048, + num_hidden_layers=12, + num_attention_heads=8, + max_position_embeddings=77, + hidden_act="quick_gelu", + layer_norm_eps=1e-5, + dropout=0.0, + attention_dropout=0.0, + initializer_range=0.02, + initializer_factor=1.0, + pad_token_id=1, + bos_token_id=0, + eos_token_id=2, + gradient_checkpointing=False, + **kwargs + ): + super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs) + + self.vocab_size = vocab_size + self.hidden_size = hidden_size + self.intermediate_size = intermediate_size + self.dropout = dropout + self.num_hidden_layers = num_hidden_layers + self.num_attention_heads = num_attention_heads + self.max_position_embeddings = max_position_embeddings + self.layer_norm_eps = layer_norm_eps + self.hidden_act = hidden_act + self.initializer_range = initializer_range + self.initializer_factor = initializer_factor + self.attention_dropout = attention_dropout + self.gradient_checkpointing = gradient_checkpointing + + +class CLIPVisionConfig(PretrainedConfig): + r""" + This is the configuration class to store the configuration of a :class:`~transformers.CLIPModel`. It is used to + instantiate an CLIP model according to the specified arguments, defining the model architecture. Instantiating a + configuration with the defaults will yield a similar configuration to that of the CLIP + `openai/clip-vit-base-patch32 `__ architecture. + + Configuration objects inherit from :class:`~transformers.PretrainedConfig` and can be used to control the model + outputs. Read the documentation from :class:`~transformers.PretrainedConfig` for more information. + + + Args: + hidden_size (:obj:`int`, `optional`, defaults to 768): + Dimensionality of the encoder layers and the pooler layer. + intermediate_size (:obj:`int`, `optional`, defaults to 3072): + Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder. + num_hidden_layers (:obj:`int`, `optional`, defaults to 12): + Number of hidden layers in the Transformer encoder. + num_attention_heads (:obj:`int`, `optional`, defaults to 12): + Number of attention heads for each attention layer in the Transformer encoder. + image_size (:obj:`int`, `optional`, defaults to 224): + The size (resolution) of each image. + patch_size (:obj:`int`, `optional`, defaults to 32): + The size (resolution) of each patch. + hidden_act (:obj:`str` or :obj:`function`, `optional`, defaults to :obj:`"quick_gelu"`): + The non-linear activation function (function or string) in the encoder and pooler. If string, + :obj:`"gelu"`, :obj:`"relu"`, :obj:`"selu"` and :obj:`"gelu_new"` :obj:`"quick_gelu"` are supported. + layer_norm_eps (:obj:`float`, `optional`, defaults to 1e-5): + The epsilon used by the layer normalization layers. + dropout (:obj:`float`, `optional`, defaults to 0.0): + The dropout probabilitiy for all fully connected layers in the embeddings, encoder, and pooler. 
+ attention_dropout (:obj:`float`, `optional`, defaults to 0.0): + The dropout ratio for the attention probabilities. + initializer_range (:obj:`float`, `optional`, defaults to 0.02): + The standard deviation of the truncated_normal_initializer for initializing all weight matrices. + initializer_factor (:obj:`float`, `optional`, defaults to 1): + A factor for initializing all weight matrices (should be kept to 1, used internally for initialization + testing). + gradient_checkpointing (:obj:`bool`, `optional`, defaults to :obj:`False`): + If True, use gradient checkpointing to save memory at the expense of slower backward pass. + + Example:: + + >>> from transformers import CLIPVisionModel, CLIPVisionConfig + + >>> # Initializing a CLIPVisionModel with openai/clip-vit-base-patch32 style configuration + >>> configuration = CLIPVisionConfig() + + >>> # Initializing a CLIPVisionModel model from the openai/clip-vit-base-patch32 style configuration + >>> model = CLIPVisionModel(configuration) + + >>> # Accessing the model configuration + >>> configuration = model.config + """ + + model_type = "clip_vision_model" + + def __init__( + self, + hidden_size=768, + intermediate_size=3072, + num_hidden_layers=12, + num_attention_heads=12, + image_size=224, + patch_size=32, + hidden_act="quick_gelu", + layer_norm_eps=1e-5, + dropout=0.0, + attention_dropout=0.0, + initializer_range=0.02, + initializer_factor=1.0, + gradient_checkpointing=False, + **kwargs + ): + super().__init__(**kwargs) + + self.hidden_size = hidden_size + self.intermediate_size = intermediate_size + self.dropout = dropout + self.num_hidden_layers = num_hidden_layers + self.num_attention_heads = num_attention_heads + self.patch_size = patch_size + self.image_size = image_size + self.initializer_range = initializer_range + self.initializer_factor = initializer_factor + self.attention_dropout = attention_dropout + self.layer_norm_eps = layer_norm_eps + self.hidden_act = hidden_act + self.gradient_checkpointing = gradient_checkpointing + + +class CLIPConfig(PretrainedConfig): + r""" + :class:`~transformers.CLIPConfig` is the configuration class to store the configuration of a + :class:`~transformers.CLIPModel`. It is used to instantiate CLIP model according to the specified arguments, + defining the text model and vision model configs. + + Configuration objects inherit from :class:`~transformers.PretrainedConfig` and can be used to control the model + outputs. Read the documentation from :class:`~transformers.PretrainedConfig` for more information. + + Args: + projection_dim: (:obj:`int`, `optional`, defaults to 512): + Dimentionality of text and vision projection layers. + kwargs (`optional`): + Dictionary of keyword arguments. Notably: + + - **text_config** (:class:`~transformers.CLIPTextConfig`, `optional`) -- An instance of a configuration + object that defines the text model config. + - **vision_config** (:class:`~transformers.CLIPVisionConfig`, `optional`) -- An instance of a + configuration object that defines the vision model config. + """ + + model_type = "clip" + is_composition = True + + def __init__(self, text_config_dict=None, vision_config_dict=None, projection_dim=512, **kwargs): + super().__init__(text_config_dict=text_config_dict, vision_config_dict=vision_config_dict, **kwargs) + + if text_config_dict is None: + text_config_dict = {} + logger.info("text_config_dict is None. 
Initializing the CLIPTextConfig with default values.") + + if vision_config_dict is None: + vision_config_dict = {} + logger.info("vision_config_dict is None. initializing the CLIPVisionConfig with default values.") + + self.text_config = CLIPTextConfig(**text_config_dict) + self.vision_config = CLIPVisionConfig(**vision_config_dict) + + self.projection_dim = projection_dim + self.initializer_factor = 1.0 + + @classmethod + def from_text_vision_configs(cls, text_config: CLIPTextConfig, vision_config: CLIPVisionConfig, **kwargs): + r""" + Instantiate a :class:`~transformers.CLIPConfig` (or a derived class) from clip text model configuration and + clip vision model configuration. + + Returns: + :class:`CLIPConfig`: An instance of a configuration object + """ + + return cls(text_config_dict=text_config.to_dict(), vision_config_dict=vision_config.to_dict(), **kwargs) + + def to_dict(self): + """ + Serializes this instance to a Python dictionary. Override the default + :meth:`~transformers.PretrainedConfig.to_dict`. + + Returns: + :obj:`Dict[str, any]`: Dictionary of all the attributes that make up this configuration instance, + """ + output = copy.deepcopy(self.__dict__) + output["text_config"] = self.text_config.to_dict() + output["vision_config"] = self.vision_config.to_dict() + output["model_type"] = self.__class__.model_type + return output diff --git a/src/transformers/models/clip/convert_clip_original_pytorch_to_hf.py b/src/transformers/models/clip/convert_clip_original_pytorch_to_hf.py new file mode 100644 index 00000000000000..fdd4c148a94083 --- /dev/null +++ b/src/transformers/models/clip/convert_clip_original_pytorch_to_hf.py @@ -0,0 +1,148 @@ +# coding=utf-8 +# Copyright 2021 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
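The composite config above can also be built from explicit sub-configs with ``from_text_vision_configs``; ``to_dict`` then nests both sub-configs under ``text_config`` and ``vision_config``. A short sketch:

.. code-block::

    >>> from transformers import CLIPConfig, CLIPTextConfig, CLIPVisionConfig

    >>> text_config = CLIPTextConfig(hidden_size=512, num_hidden_layers=12)
    >>> vision_config = CLIPVisionConfig(hidden_size=768, patch_size=32)

    >>> # compose the two sub-configs into a single CLIPConfig
    >>> config = CLIPConfig.from_text_vision_configs(text_config, vision_config, projection_dim=512)
    >>> config.to_dict()["text_config"]["hidden_size"]
    512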
+ +import argparse + +import torch + +from clip import load +from transformers import CLIPConfig, CLIPModel + + +def copy_attn_layer(hf_attn_layer, pt_attn_layer): + q_proj, k_proj, v_proj = pt_attn_layer.in_proj_weight.chunk(3, dim=0) + q_proj_bias, k_proj_bias, v_proj_bias = pt_attn_layer.in_proj_bias.chunk(3, dim=0) + + out_proj_weights = pt_attn_layer.out_proj.weight + out_proj_bias = pt_attn_layer.out_proj.bias + + hf_attn_layer.q_proj.weight.data = q_proj + hf_attn_layer.q_proj.bias.data = q_proj_bias + + hf_attn_layer.k_proj.weight.data = k_proj + hf_attn_layer.k_proj.bias.data = k_proj_bias + + hf_attn_layer.v_proj.weight.data = v_proj + hf_attn_layer.v_proj.bias.data = v_proj_bias + + hf_attn_layer.out_proj.weight = out_proj_weights + hf_attn_layer.out_proj.bias = out_proj_bias + + +def copy_mlp(hf_mlp, pt_mlp): + copy_linear(hf_mlp.fc1, pt_mlp.c_fc) + copy_linear(hf_mlp.fc2, pt_mlp.c_proj) + + +def copy_linear(hf_linear, pt_linear): + hf_linear.weight = pt_linear.weight + hf_linear.bias = pt_linear.bias + + +def copy_layer(hf_layer, pt_layer): + # copy layer norms + copy_linear(hf_layer.layer_norm1, pt_layer.ln_1) + copy_linear(hf_layer.layer_norm2, pt_layer.ln_2) + + # copy MLP + copy_mlp(hf_layer.mlp, pt_layer.mlp) + + # copy attn + copy_attn_layer(hf_layer.self_attn, pt_layer.attn) + + +def copy_layers(hf_layers, pt_layers): + for hf_layer, pt_layer in zip(hf_layers, pt_layers): + copy_layer(hf_layer, pt_layer) + + +def copy_encoder(hf_encoder, pt_model): + # copy embeds + hf_encoder.embeddings.token_embedding.weight = pt_model.token_embedding.weight + hf_encoder.embeddings.position_embedding.weight.data = pt_model.positional_embedding + + # copy layer norm + copy_linear(hf_encoder.final_layer_norm, pt_model.ln_final) + + # copy hidden layers + copy_layers(hf_encoder.encoder.layers, pt_model.transformer.resblocks) + + +def copy_text_model_and_projection(hf_model, pt_model): + # copy projection + hf_model.text_projection.weight.data = pt_model.text_projection.data.T + + # copy text encoder + copy_encoder(hf_model.text_model, pt_model) + + +def copy_vison_model_and_projection(hf_model, pt_model): + # copy projection + hf_model.visual_projection.weight.data = pt_model.visual.proj.data.T + + # copy layer norms + copy_linear(hf_model.vision_model.pre_layrnorm, pt_model.visual.ln_pre) + copy_linear(hf_model.vision_model.post_layernorm, pt_model.visual.ln_post) + + # copy embeds + hf_model.vision_model.embeddings.patch_embedding.weight.data = pt_model.visual.conv1.weight.data + hf_model.vision_model.embeddings.class_embedding = pt_model.visual.class_embedding + hf_model.vision_model.embeddings.position_embedding.weight.data = pt_model.visual.positional_embedding.data + + # copy encoder + copy_layers(hf_model.vision_model.encoder.layers, pt_model.visual.transformer.resblocks) + + +@torch.no_grad() +def convert_clip_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None): + """ + Copy/paste/tweak model's weights to transformers design. 
+ """ + if config_path is not None: + config = CLIPConfig.from_pretrained(config_path) + else: + config = CLIPConfig(projection_dim=512, text_config={}, vision_config={}) + + hf_model = CLIPModel(config).eval() + + pt_model, _ = load(checkpoint_path, jit=False) + pt_model = pt_model.eval() + + copy_text_model_and_projection(hf_model, pt_model) + copy_vison_model_and_projection(hf_model, pt_model) + hf_model.logit_scale = pt_model.logit_scale + + input_ids = torch.arange(0, 77).unsqueeze(0) + pixel_values = torch.randn(1, 3, 224, 224) + + hf_logits_per_image, hf_logits_per_text = hf_model( + input_ids=input_ids, pixel_values=pixel_values, return_dict=True + )[1:3] + pt_logits_per_image, pt_logits_per_text = pt_model(pixel_values, input_ids) + + assert torch.allclose(hf_logits_per_image, pt_logits_per_image, atol=1e-3) + assert torch.allclose(hf_logits_per_text, pt_logits_per_text, atol=1e-3) + + hf_model.save_pretrained(pytorch_dump_folder_path) + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") + parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint") + parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert") + args = parser.parse_args() + + convert_clip_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path) diff --git a/src/transformers/models/clip/feature_extraction_clip.py b/src/transformers/models/clip/feature_extraction_clip.py new file mode 100644 index 00000000000000..d28252625356f9 --- /dev/null +++ b/src/transformers/models/clip/feature_extraction_clip.py @@ -0,0 +1,156 @@ +# coding=utf-8 +# Copyright 2021 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Feature extractor class for CLIP.""" + +from typing import List, Optional, Union + +import numpy as np +from PIL import Image + +from ...feature_extraction_utils import BatchFeature, FeatureExtractionMixin +from ...file_utils import TensorType +from ...image_utils import ImageFeatureExtractionMixin, is_torch_tensor +from ...utils import logging + + +logger = logging.get_logger(__name__) + + +class CLIPFeatureExtractor(FeatureExtractionMixin, ImageFeatureExtractionMixin): + r""" + Constructs a CLIP feature extractor. + + This feature extractor inherits from :class:`~transformers.FeatureExtractionMixin` which contains most of the main + methods. Users should refer to this superclass for more information regarding those methods. + + Args: + do_resize (:obj:`bool`, `optional`, defaults to :obj:`True`): + Whether to resize the input to a certain :obj:`size`. + size (:obj:`int`, `optional`, defaults to 224): + Resize the input to the given size. Only has an effect if :obj:`do_resize` is set to :obj:`True`. + resample (:obj:`int`, `optional`, defaults to :obj:`PIL.Image.BICUBIC`): + An optional resampling filter. 
This can be one of :obj:`PIL.Image.NEAREST`, :obj:`PIL.Image.BOX`, + :obj:`PIL.Image.BILINEAR`, :obj:`PIL.Image.HAMMING`, :obj:`PIL.Image.BICUBIC` or :obj:`PIL.Image.LANCZOS`. + Only has an effect if :obj:`do_resize` is set to :obj:`True`. + do_center_crop (:obj:`bool`, `optional`, defaults to :obj:`True`): + Whether to crop the input at the center. If the input size is smaller than :obj:`crop_size` along any edge, + the image is padded with 0's and then center cropped. + crop_size (:obj:`int`, `optional`, defaults to 224): + Desired output size when applying center-cropping. Only has an effect if :obj:`do_center_crop` is set to + :obj:`True`. + do_normalize (:obj:`bool`, `optional`, defaults to :obj:`True`): + Whether or not to normalize the input with :obj:`image_mean` and :obj:`image_std`. + image_mean (:obj:`List[int]`, defaults to :obj:`[0.485, 0.456, 0.406]`): + The sequence of means for each channel, to be used when normalizing images. + image_std (:obj:`List[int]`, defaults to :obj:`[0.229, 0.224, 0.225]`): + The sequence of standard deviations for each channel, to be used when normalizing images. + """ + + model_input_names = ["pixel_values"] + + def __init__( + self, + do_resize=True, + size=224, + resample=Image.BICUBIC, + do_center_crop=True, + crop_size=224, + do_normalize=True, + image_mean=None, + image_std=None, + **kwargs + ): + super().__init__(**kwargs) + self.do_resize = do_resize + self.size = size + self.resample = resample + self.do_center_crop = do_center_crop + self.crop_size = crop_size + self.do_normalize = do_normalize + self.image_mean = image_mean if image_mean is not None else [0.48145466, 0.4578275, 0.40821073] + self.image_std = image_std if image_std is not None else [0.26862954, 0.26130258, 0.27577711] + + def __call__( + self, + images: Union[ + Image.Image, np.ndarray, "torch.Tensor", List[Image.Image], List[np.ndarray], List["torch.Tensor"] # noqa + ], + return_tensors: Optional[Union[str, TensorType]] = None, + **kwargs + ) -> BatchFeature: + """ + Main method to prepare for the model one or several image(s). + + .. warning:: + + NumPy arrays and PyTorch tensors are converted to PIL images when resizing, so the most efficient is to pass + PIL images. + + Args: + images (:obj:`PIL.Image.Image`, :obj:`np.ndarray`, :obj:`torch.Tensor`, :obj:`List[PIL.Image.Image]`, :obj:`List[np.ndarray]`, :obj:`List[torch.Tensor]`): + The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch + tensor. In case of a NumPy array/PyTorch tensor, each image should be of shape (C, H, W), where C is a + number of channels, H and W are image height and width. + + return_tensors (:obj:`str` or :class:`~transformers.file_utils.TensorType`, `optional`, defaults to :obj:`'np'`): + If set, will return tensors of a particular framework. Acceptable values are: + + * :obj:`'tf'`: Return TensorFlow :obj:`tf.constant` objects. + * :obj:`'pt'`: Return PyTorch :obj:`torch.Tensor` objects. + * :obj:`'np'`: Return NumPy :obj:`np.ndarray` objects. + * :obj:`'jax'`: Return JAX :obj:`jnp.ndarray` objects. + + Returns: + :class:`~transformers.BatchFeature`: A :class:`~transformers.BatchFeature` with the following fields: + + - **pixel_values** -- Pixel values to be fed to a model. 
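A minimal usage sketch for the feature extractor defined above, with a randomly generated PIL image standing in for real data:

.. code-block::

    >>> import numpy as np
    >>> from PIL import Image
    >>> from transformers import CLIPFeatureExtractor

    >>> feature_extractor = CLIPFeatureExtractor()  # resize, center-crop to 224x224 and normalize
    >>> image = Image.fromarray(np.uint8(np.random.rand(480, 640, 3) * 255))

    >>> inputs = feature_extractor(images=image, return_tensors="pt")
    >>> inputs["pixel_values"].shape
    torch.Size([1, 3, 224, 224])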
+ """ + # Input type checking for clearer error + valid_images = False + + # Check that images has a valid type + if isinstance(images, (Image.Image, np.ndarray)) or is_torch_tensor(images): + valid_images = True + elif isinstance(images, (list, tuple)): + if len(images) == 0 or isinstance(images[0], (Image.Image, np.ndarray)) or is_torch_tensor(images[0]): + valid_images = True + + if not valid_images: + raise ValueError( + "Images must of type `PIL.Image.Image`, `np.ndarray` or `torch.Tensor` (single example)," + "`List[PIL.Image.Image]`, `List[np.ndarray]` or `List[torch.Tensor]` (batch of examples)." + ) + + is_batched = bool( + isinstance(images, (list, tuple)) + and (isinstance(images[0], (Image.Image, np.ndarray)) or is_torch_tensor(images[0])) + ) + + if not is_batched: + images = [images] + + # transformations (resizing + center cropping + normalization) + if self.do_resize and self.size is not None and self.resample is not None: + images = [self.resize(image=image, size=self.size, resample=self.resample) for image in images] + if self.do_center_crop and self.crop_size is not None: + images = [self.center_crop(image, self.crop_size) for image in images] + if self.do_normalize: + images = [self.normalize(image=image, mean=self.image_mean, std=self.image_std) for image in images] + + # return as BatchFeature + data = {"pixel_values": images} + encoded_inputs = BatchFeature(data=data, tensor_type=return_tensors) + + return encoded_inputs diff --git a/src/transformers/models/clip/modeling_clip.py b/src/transformers/models/clip/modeling_clip.py new file mode 100755 index 00000000000000..6a2c0f42632929 --- /dev/null +++ b/src/transformers/models/clip/modeling_clip.py @@ -0,0 +1,956 @@ +# coding=utf-8 +# Copyright 2021 The OpenAI Team Authors and The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" PyTorch CLIP model. """ + + +from typing import Any, Optional, Tuple + +import torch +import torch.nn.functional as F +import torch.utils.checkpoint +from torch import nn + +from ...activations import ACT2FN +from ...file_utils import ( + ModelOutput, + add_start_docstrings, + add_start_docstrings_to_model_forward, + replace_return_docstrings, +) +from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling +from ...modeling_utils import PreTrainedModel +from ...utils import logging +from .configuration_clip import CLIPConfig, CLIPTextConfig, CLIPVisionConfig + + +logger = logging.get_logger(__name__) + + +CLIP_PRETRAINED_MODEL_ARCHIVE_LIST = [ + "openai/clip-vit-base-patch32", + # See all CLIP models at https://huggingface.co/models?filter=clip +] + + +# Copied from transformers.models.bart.modeling_bart._expand_mask +def _expand_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None): + """ + Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`. 
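The resulting additive mask can be illustrated with a tiny example (a sketch of the same arithmetic, not a call to the function itself):

.. code-block::

    >>> import torch

    >>> mask = torch.tensor([[1, 1, 0]])  # one sequence of length 3, last position is padding
    >>> inverted = 1.0 - mask[:, None, None, :].float()  # [bsz, 1, 1, src_len]
    >>> additive = inverted.masked_fill(inverted.bool(), torch.finfo(torch.float32).min)
    >>> # 0.0 where attention is allowed, a very large negative value on padded keys, so adding
    >>> # this to the attention logits removes the padded positions after the softmax
    >>> additive.shape
    torch.Size([1, 1, 1, 3])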
+ """ + bsz, src_len = mask.size() + tgt_len = tgt_len if tgt_len is not None else src_len + + expanded_mask = mask[:, None, None, :].expand(bsz, 1, tgt_len, src_len).to(dtype) + + inverted_mask = 1.0 - expanded_mask + + return inverted_mask.masked_fill(inverted_mask.bool(), torch.finfo(dtype).min) + + +# contrastive loss function, adapted from +# https://sachinruk.github.io/blog/pytorch/pytorch%20lightning/loss%20function/gpu/2021/03/07/CLIP.html +def contrastive_loss(logits: torch.Tensor, dim: int) -> torch.Tensor: + neg_ce = torch.diag(F.log_softmax(logits, dim=dim)) + return -neg_ce.mean() + + +def clip_loss(similarity: torch.Tensor) -> torch.Tensor: + caption_loss = contrastive_loss(similarity, dim=0) + image_loss = contrastive_loss(similarity, dim=1) + return (caption_loss + image_loss) / 2.0 + + +class CLIPOutput(ModelOutput): + """ + Args: + loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when :obj:`return_loss` is :obj:`True`): + Contrastive loss for image-text similarity. + logits_per_image:(:obj:`torch.FloatTensor` of shape :obj:`(image_batch_size, text_batch_size)`): + The scaled dot product scores between :obj:`image_embeds` and :obj:`text_embeds`. This represents the + image-text similarity scores. + logits_per_text:(:obj:`torch.FloatTensor` of shape :obj:`(text_batch_size, image_batch_size)`): + The scaled dot product scores between :obj:`text_embeds` and :obj:`image_embeds`. This represents the + text-image similarity scores. + text_embeds(:obj:`torch.FloatTensor` of shape :obj:`(batch_size, output_dim`): + The text embeddings obtained by applying the projection layer to the pooled output of + :class:`~transformers.CLIPTextModel`. + image_embeds(:obj:`torch.FloatTensor` of shape :obj:`(batch_size, output_dim`): + The image embeddings obtained by applying the projection layer to the pooled output of + :class:`~transformers.CLIPVisionModel`. + text_model_output(:obj:`BaseModelOutputWithPooling`): + The output of the :class:`~transformers.CLIPTextModel`. + vision_model_output(:obj:`BaseModelOutputWithPooling`): + The output of the :class:`~transformers.CLIPVisionModel`. 
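``clip_loss`` above is a symmetric cross-entropy over the similarity matrix: for a batch of N matching (image, text) pairs, the target for row ``i`` is column ``i``. An equivalent sketch using ``F.cross_entropy``:

.. code-block::

    >>> import torch
    >>> import torch.nn.functional as F

    >>> similarity = torch.randn(4, 4)  # stand-in for logits_per_text on a batch of 4 pairs
    >>> labels = torch.arange(4)        # pair i matches pair i

    >>> # averaging the two directions matches clip_loss(similarity)
    >>> loss = (F.cross_entropy(similarity, labels) + F.cross_entropy(similarity.t(), labels)) / 2.0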
+ """ + + loss: Optional[torch.FloatTensor] = None + logits_per_image: torch.FloatTensor = None + logits_per_text: torch.FloatTensor = None + text_embeds: torch.FloatTensor = None + image_embeds: torch.FloatTensor = None + text_model_output: BaseModelOutputWithPooling = None + vision_model_output: BaseModelOutputWithPooling = None + + def to_tuple(self) -> Tuple[Any]: + return tuple( + self[k] if k not in ["text_model_output", "vision_model_output"] else getattr(self, k).to_tuple() + for k in self.keys() + ) + + +class CLIPVisionEmbeddings(nn.Module): + def __init__(self, config: CLIPVisionConfig): + super().__init__() + self.config = config + self.embed_dim = config.hidden_size + self.image_size = config.image_size + self.patch_size = config.patch_size + + self.class_embedding = nn.Parameter(torch.randn(self.embed_dim)) + + self.patch_embedding = nn.Conv2d( + in_channels=3, out_channels=self.embed_dim, kernel_size=self.patch_size, stride=self.patch_size, bias=False + ) + + self.num_patches = (self.image_size // self.patch_size) ** 2 + self.num_positions = self.num_patches + 1 + self.position_embedding = nn.Embedding(self.num_positions, self.embed_dim) + self.register_buffer("position_ids", torch.arange(self.num_positions).expand((1, -1))) + + def forward(self, pixel_values): + batch_size = pixel_values.shape[0] + patch_embeds = self.patch_embedding(pixel_values) # shape = [*, width, grid, grid] + patch_embeds = patch_embeds.flatten(2).transpose(1, 2) + + class_embeds = self.class_embedding.expand(batch_size, 1, -1) + embeddings = torch.cat([class_embeds, patch_embeds], dim=1) + embeddings = embeddings + self.position_embedding(self.position_ids) + return embeddings + + +class CLIPTextEmbeddings(nn.Module): + def __init__(self, config: CLIPTextConfig): + super().__init__() + embed_dim = config.hidden_size + + self.token_embedding = nn.Embedding(config.vocab_size, embed_dim) + self.position_embedding = nn.Embedding(config.max_position_embeddings, embed_dim) + + # position_ids (1, len position emb) is contiguous in memory and exported when serialized + self.register_buffer("position_ids", torch.arange(config.max_position_embeddings).expand((1, -1))) + + def forward(self, input_ids=None, position_ids=None, inputs_embeds=None): + seq_length = input_ids.shape[-1] if input_ids is not None else inputs_embeds.shape[-2] + + if position_ids is None: + position_ids = self.position_ids[:, :seq_length] + + if inputs_embeds is None: + inputs_embeds = self.token_embedding(input_ids) + + position_embeddings = self.position_embedding(position_ids) + embeddings = inputs_embeds + position_embeddings + + return embeddings + + +class CLIPAttention(nn.Module): + """Multi-headed attention from 'Attention Is All You Need' paper""" + + def __init__(self, config): + super().__init__() + self.config = config + self.embed_dim = config.hidden_size + self.num_heads = config.num_attention_heads + self.head_dim = self.embed_dim // self.num_heads + assert ( + self.head_dim * self.num_heads == self.embed_dim + ), f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`: {self.num_heads})." 
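+        # Queries are pre-scaled by 1 / sqrt(head_dim) (standard scaled dot-product attention),
+        # so the batched matmul of queries and keys below directly yields the attention logits.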
+ self.scale = self.head_dim ** -0.5 + self.dropout = config.attention_dropout + + self.k_proj = nn.Linear(self.embed_dim, self.embed_dim) + self.v_proj = nn.Linear(self.embed_dim, self.embed_dim) + self.q_proj = nn.Linear(self.embed_dim, self.embed_dim) + self.out_proj = nn.Linear(self.embed_dim, self.embed_dim) + + def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int): + return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous() + + def forward( + self, + hidden_states: torch.Tensor, + attention_mask: Optional[torch.Tensor] = None, + causal_attention_mask: Optional[torch.Tensor] = None, + output_attentions: bool = False, + ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: + """Input shape: Batch x Time x Channel""" + + bsz, tgt_len, embed_dim = hidden_states.size() + + # get query proj + query_states = self.q_proj(hidden_states) * self.scale + key_states = self._shape(self.k_proj(hidden_states), -1, bsz) + value_states = self._shape(self.v_proj(hidden_states), -1, bsz) + + proj_shape = (bsz * self.num_heads, -1, self.head_dim) + query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape) + key_states = key_states.view(*proj_shape) + value_states = value_states.view(*proj_shape) + + src_len = key_states.size(1) + attn_weights = torch.bmm(query_states, key_states.transpose(1, 2)) + + if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len): + raise ValueError( + f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is {attn_weights.size()}" + ) + + # apply the causal_attention_mask first + if causal_attention_mask is not None: + if causal_attention_mask.size() != (bsz, 1, tgt_len, src_len): + raise ValueError( + f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {causal_attention_mask.size()}" + ) + attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + causal_attention_mask + attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) + + if attention_mask is not None: + if attention_mask.size() != (bsz, 1, tgt_len, src_len): + raise ValueError( + f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {causal_attention_mask.size()}" + ) + attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask + attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) + + attn_weights = F.softmax(attn_weights, dim=-1) + + if output_attentions: + # this operation is a bit akward, but it's required to + # make sure that attn_weights keeps its gradient. 
+ # In order to do so, attn_weights have to reshaped + # twice and have to be reused in the following + attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len) + else: + attn_weights_reshaped = None + + attn_probs = F.dropout(attn_weights, p=self.dropout, training=self.training) + + attn_output = torch.bmm(attn_probs, value_states) + + if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim): + raise ValueError( + f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is {attn_output.size()}" + ) + + attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim) + attn_output = attn_output.transpose(1, 2) + attn_output = attn_output.reshape(bsz, tgt_len, embed_dim) + + attn_output = self.out_proj(attn_output) + + return attn_output, attn_weights_reshaped + + +class CLIPMLP(nn.Module): + def __init__(self, config): + super().__init__() + self.config = config + self.activation_fn = ACT2FN[config.hidden_act] + self.fc1 = nn.Linear(config.hidden_size, config.intermediate_size) + self.fc2 = nn.Linear(config.intermediate_size, config.hidden_size) + + def forward(self, hidden_states): + hidden_states = self.fc1(hidden_states) + hidden_states = self.activation_fn(hidden_states) + hidden_states = self.fc2(hidden_states) + return hidden_states + + +class CLIPEncoderLayer(nn.Module): + def __init__(self, config: CLIPConfig): + super().__init__() + self.embed_dim = config.hidden_size + self.self_attn = CLIPAttention(config) + self.layer_norm1 = nn.LayerNorm(self.embed_dim) + self.mlp = CLIPMLP(config) + self.layer_norm2 = nn.LayerNorm(self.embed_dim) + + def forward( + self, + hidden_states: torch.Tensor, + attention_mask: torch.Tensor, + causal_attention_mask: torch.Tensor, + output_attentions: bool = False, + ): + """ + Args: + hidden_states (:obj:`torch.FloatTensor`): input to the layer of shape :obj:`(seq_len, batch, embed_dim)` + attention_mask (:obj:`torch.FloatTensor`): attention mask of size + :obj:`(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. + layer_head_mask (:obj:`torch.FloatTensor`): mask for attention heads in a given layer of size + :obj:`(config.encoder_attention_heads,)`. + output_attentions (:obj:`bool`, `optional`): + Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under + returned tensors for more detail. + """ + residual = hidden_states + + hidden_states = self.layer_norm1(hidden_states) + hidden_states, attn_weights = self.self_attn( + hidden_states=hidden_states, + attention_mask=attention_mask, + causal_attention_mask=causal_attention_mask, + output_attentions=output_attentions, + ) + hidden_states = residual + hidden_states + + residual = hidden_states + hidden_states = self.layer_norm2(hidden_states) + hidden_states = self.mlp(hidden_states) + hidden_states = residual + hidden_states + + outputs = (hidden_states,) + + if output_attentions: + outputs += (attn_weights,) + + return outputs + + +class CLIPPreTrainedModel(PreTrainedModel): + """ + An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained + models. 
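The encoder layer defined above is a standard pre-LayerNorm Transformer block (LayerNorm, attention, residual, then LayerNorm, MLP, residual). A small smoke test, with tiny hypothetical dimensions chosen only for illustration:

.. code-block::

    >>> import torch
    >>> from transformers import CLIPTextConfig
    >>> from transformers.models.clip.modeling_clip import CLIPEncoderLayer

    >>> config = CLIPTextConfig(hidden_size=64, intermediate_size=128, num_attention_heads=4)
    >>> layer = CLIPEncoderLayer(config)

    >>> hidden_states = torch.randn(2, 7, 64)  # [batch, seq_len, hidden_size]
    >>> out = layer(hidden_states, attention_mask=None, causal_attention_mask=None)
    >>> out[0].shape  # the residual block preserves the hidden size
    torch.Size([2, 7, 64])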
+ """ + + config_class = CLIPConfig + base_model_prefix = "clip" + _keys_to_ignore_on_load_missing = [r"position_ids"] + + def _init_weights(self, module): + """Initialize the weights""" + factor = self.config.initializer_factor + if isinstance(module, CLIPTextEmbeddings): + module.token_embedding.weight.data.normal_(mean=0.0, std=factor * 0.02) + module.position_embedding.weight.data.normal_(mean=0.0, std=factor * 0.02) + elif isinstance(module, CLIPVisionEmbeddings): + factor = self.config.initializer_factor + nn.init.normal_(module.class_embedding, mean=0.0, std=module.embed_dim ** -0.5 * factor) + nn.init.normal_(module.patch_embedding.weight, std=module.config.initializer_range * factor) + nn.init.normal_(module.position_embedding.weight, std=module.config.initializer_range * factor) + elif isinstance(module, CLIPAttention): + factor = self.config.initializer_factor + in_proj_std = (module.embed_dim ** -0.5) * ((2 * module.config.num_hidden_layers) ** -0.5) * factor + out_proj_std = (module.embed_dim ** -0.5) * factor + nn.init.normal_(module.q_proj.weight, std=in_proj_std) + nn.init.normal_(module.k_proj.weight, std=in_proj_std) + nn.init.normal_(module.v_proj.weight, std=in_proj_std) + nn.init.normal_(module.out_proj.weight, std=out_proj_std) + elif isinstance(module, CLIPMLP): + factor = self.config.initializer_factor + in_proj_std = ( + (module.config.hidden_size ** -0.5) * ((2 * module.config.num_hidden_layers) ** -0.5) * factor + ) + fc_std = (2 * module.config.hidden_size) ** -0.5 * factor + nn.init.normal_(module.fc1.weight, std=fc_std) + nn.init.normal_(module.fc2.weight, std=in_proj_std) + elif isinstance(module, CLIPModel): + nn.init.normal_( + module.text_projection.weight, + std=module.text_embed_dim ** -0.5 * self.config.initializer_factor, + ) + nn.init.normal_( + module.visual_projection.weight, + std=module.vision_embed_dim ** -0.5 * self.config.initializer_factor, + ) + + if isinstance(module, nn.LayerNorm): + module.bias.data.zero_() + module.weight.data.fill_(1.0) + if isinstance(module, nn.Linear) and module.bias is not None: + module.bias.data.zero_() + + +CLIP_START_DOCSTRING = r""" + This model is a PyTorch `torch.nn.Module `_ subclass. Use + it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and + behavior. + + Parameters: + config (:class:`~transformers.CLIPConfig`): Model configuration class with all the parameters of the model. + Initializing with a config file does not load the weights associated with the model, only the + configuration. Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model + weights. +""" + +CLIP_TEXT_INPUTS_DOCSTRING = r""" + Args: + input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`): + Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide + it. + + Indices can be obtained using :class:`~transformers.CLIPTokenizer`. See + :meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__` for + details. + + `What are input IDs? <../glossary.html#input-ids>`__ + attention_mask (:obj:`torch.Tensor` of shape :obj:`(batch_size, sequence_length)`, `optional`): + Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``: + + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. + + `What are attention masks? 
<../glossary.html#attention-mask>`__ + position_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`): + Indices of positions of each input sequence tokens in the position embeddings. Selected in the range ``[0, + config.max_position_embeddings - 1]``. + + `What are position IDs? <../glossary.html#position-ids>`_ + output_attentions (:obj:`bool`, `optional`): + Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned + tensors for more detail. + output_hidden_states (:obj:`bool`, `optional`): + Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors for + more detail. + return_dict (:obj:`bool`, `optional`): + Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple. +""" + +CLIP_VISION_INPUTS_DOCSTRING = r""" + Args: + pixel_values (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, num_channels, height, width)`): + Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using + :class:`~transformers.CLIPFeatureExtractor`. See :meth:`transformers.CLIPFeatureExtractor.__call__` for + details. + output_attentions (:obj:`bool`, `optional`): + Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned + tensors for more detail. + output_hidden_states (:obj:`bool`, `optional`): + Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors for + more detail. + return_dict (:obj:`bool`, `optional`): + Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple. +""" + +CLIP_INPUTS_DOCSTRING = r""" + Args: + input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`): + Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide + it. + + Indices can be obtained using :class:`~transformers.CLIPTokenizer`. See + :meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__` for + details. + + `What are input IDs? <../glossary.html#input-ids>`__ + attention_mask (:obj:`torch.Tensor` of shape :obj:`(batch_size, sequence_length)`, `optional`): + Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``: + + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. + + `What are attention masks? <../glossary.html#attention-mask>`__ + position_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`): + Indices of positions of each input sequence tokens in the position embeddings. Selected in the range ``[0, + config.max_position_embeddings - 1]``. + + `What are position IDs? <../glossary.html#position-ids>`_ + pixel_values (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, num_channels, height, width)`): + Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using + :class:`~transformers.CLIPFeatureExtractor`. See :meth:`transformers.CLIPFeatureExtractor.__call__` for + details. + return_loss (:obj:`bool`, `optional`): + Whether or not to return the contrastive loss. + output_attentions (:obj:`bool`, `optional`): + Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned + tensors for more detail. 
+ output_hidden_states (:obj:`bool`, `optional`): + Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors for + more detail. + return_dict (:obj:`bool`, `optional`): + Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple. +""" + + +class CLIPEncoder(nn.Module): + """ + Transformer encoder consisting of :obj:`config.num_hidden_layers` self attention layers. Each layer is a + :class:`~transformers.CLIPEncoderLayer`. + + Args: + config: CLIPConfig + embed_tokens (torch.nn.Embedding): output embedding + """ + + def __init__(self, config: CLIPConfig): + super().__init__() + self.config = config + self.layers = nn.ModuleList([CLIPEncoderLayer(config) for _ in range(config.num_hidden_layers)]) + + def forward( + self, + inputs_embeds, + attention_mask=None, + causal_attention_mask=None, + output_attentions=None, + output_hidden_states=None, + return_dict=None, + ): + r""" + Args: + inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`): + Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded + representation. This is useful if you want more control over how to convert :obj:`input_ids` indices + into associated vectors than the model's internal embedding lookup matrix. + attention_mask (:obj:`torch.Tensor` of shape :obj:`(batch_size, sequence_length)`, `optional`): + Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``: + + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. + + `What are attention masks? <../glossary.html#attention-mask>`__ + causal_attention_mask (:obj:`torch.Tensor` of shape :obj:`(batch_size, sequence_length)`, `optional`): + Causal mask for the text model. Mask values selected in ``[0, 1]``: + + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. + + `What are attention masks? <../glossary.html#attention-mask>`__ + output_attentions (:obj:`bool`, `optional`): + Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under + returned tensors for more detail. + output_hidden_states (:obj:`bool`, `optional`): + Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors + for more detail. + return_dict (:obj:`bool`, `optional`): + Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple. 
+ """ + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + encoder_states = () if output_hidden_states else None + all_attentions = () if output_attentions else None + + hidden_states = inputs_embeds + for idx, encoder_layer in enumerate(self.layers): + if output_hidden_states: + encoder_states = encoder_states + (hidden_states,) + if getattr(self.config, "gradient_checkpointing", False) and self.training: + + def create_custom_forward(module): + def custom_forward(*inputs): + return module(*inputs, output_attentions) + + return custom_forward + + layer_outputs = torch.utils.checkpoint.checkpoint( + create_custom_forward(encoder_layer), + hidden_states, + attention_mask, + causal_attention_mask, + ) + else: + layer_outputs = encoder_layer( + hidden_states, + attention_mask, + causal_attention_mask, + output_attentions=output_attentions, + ) + + hidden_states = layer_outputs[0] + + if output_attentions: + all_attentions = all_attentions + (layer_outputs[1],) + + if output_hidden_states: + encoder_states = encoder_states + (hidden_states,) + + if not return_dict: + return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None) + return BaseModelOutput( + last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions + ) + + +class CLIPTextTransformer(nn.Module): + def __init__(self, config: CLIPTextConfig): + super().__init__() + self.config = config + embed_dim = config.hidden_size + self.embeddings = CLIPTextEmbeddings(config) + self.encoder = CLIPEncoder(config) + self.final_layer_norm = nn.LayerNorm(embed_dim) + + @add_start_docstrings_to_model_forward(CLIP_TEXT_INPUTS_DOCSTRING) + @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=CLIPTextConfig) + def forward( + self, + input_ids=None, + attention_mask=None, + position_ids=None, + output_attentions=None, + output_hidden_states=None, + return_dict=None, + ): + r""" + Returns: + + """ + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + if input_ids is None: + raise ValueError("You have to specify either input_ids") + + input_shape = input_ids.size() + input_ids = input_ids.view(-1, input_shape[-1]) + + hidden_states = self.embeddings(input_ids=input_ids, position_ids=position_ids) + + bsz, seq_len = input_shape + # CLIP's text model uses causal mask, prepare it here. 
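+        # (the causal mask lets each text token attend only to itself and earlier tokens; see the reference implementation linked below)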
+ # https://github.com/openai/CLIP/blob/cfcffb90e69f37bf2ff1e988237a0fbe41f33c04/clip/model.py#L324 + causal_attention_mask = self._build_causal_attention_mask(bsz, seq_len).to(hidden_states.device) + # expand attention_mask + if attention_mask is not None: + # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] + attention_mask = _expand_mask(attention_mask, hidden_states.dtype) + + encoder_outputs = self.encoder( + inputs_embeds=hidden_states, + attention_mask=attention_mask, + causal_attention_mask=causal_attention_mask, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + last_hidden_state = encoder_outputs[0] + last_hidden_state = self.final_layer_norm(last_hidden_state) + + # text_embeds.shape = [batch_size, n_ctx, transformer.width] + # take features from the eot embedding (eot_token is the highest number in each sequence) + pooled_output = last_hidden_state[torch.arange(last_hidden_state.shape[0]), input_ids.argmax(dim=-1)] + + if not return_dict: + return (last_hidden_state, pooled_output) + encoder_outputs[1:] + + return BaseModelOutputWithPooling( + last_hidden_state=last_hidden_state, + pooler_output=pooled_output, + hidden_states=encoder_outputs.hidden_states, + attentions=encoder_outputs.attentions, + ) + + def _build_causal_attention_mask(self, bsz, seq_len): + # lazily create causal attention mask, with full attention between the vision tokens + # pytorch uses additive attention mask; fill with -inf + mask = torch.empty(bsz, seq_len, seq_len) + mask.fill_(float("-inf")) + mask.triu_(1) # zero out the lower diagonal + mask = mask.unsqueeze(1) # expand mask + return mask + + +class CLIPTextModel(CLIPPreTrainedModel): + config_class = CLIPTextConfig + + def __init__(self, config: CLIPTextConfig): + super().__init__(config) + self.text_model = CLIPTextTransformer(config) + self.init_weights() + + def get_input_embeddings(self) -> nn.Module: + return self.text_model.embeddings.token_embedding + + def set_input_embeddings(self, value): + self.text_model.embeddings.token_embedding = value + + @add_start_docstrings_to_model_forward(CLIP_TEXT_INPUTS_DOCSTRING) + @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=CLIPTextConfig) + def forward( + self, + input_ids=None, + attention_mask=None, + position_ids=None, + output_attentions=None, + output_hidden_states=None, + return_dict=None, + ): + r""" + Returns: + + """ + return self.text_model( + input_ids=input_ids, + attention_mask=attention_mask, + position_ids=position_ids, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + +class CLIPVisionTransformer(nn.Module): + def __init__(self, config: CLIPVisionConfig): + super().__init__() + self.config = config + embed_dim = config.hidden_size + + self.embeddings = CLIPVisionEmbeddings(config) + self.pre_layrnorm = nn.LayerNorm(embed_dim) + self.encoder = CLIPEncoder(config) + self.post_layernorm = nn.LayerNorm(embed_dim) + + @add_start_docstrings_to_model_forward(CLIP_VISION_INPUTS_DOCSTRING) + @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=CLIPVisionConfig) + def forward( + self, + pixel_values=None, + output_attentions=None, + output_hidden_states=None, + return_dict=None, + ): + r""" + Returns: + + """ + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None 
else self.config.output_hidden_states + ) + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + if pixel_values is None: + raise ValueError("You have to specify pixel_values") + + hidden_states = self.embeddings(pixel_values) + hidden_states = self.pre_layrnorm(hidden_states) + + encoder_outputs = self.encoder( + inputs_embeds=hidden_states, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + last_hidden_state = encoder_outputs[0] + pooled_output = last_hidden_state[:, 0, :] + pooled_output = self.post_layernorm(pooled_output) + + if not return_dict: + return (last_hidden_state, pooled_output) + encoder_outputs[1:] + + return BaseModelOutputWithPooling( + last_hidden_state=last_hidden_state, + pooler_output=pooled_output, + hidden_states=encoder_outputs.hidden_states, + attentions=encoder_outputs.attentions, + ) + + +class CLIPVisionModel(CLIPPreTrainedModel): + config_class = CLIPVisionConfig + + def __init__(self, config: CLIPVisionConfig): + super().__init__(config) + self.vision_model = CLIPVisionTransformer(config) + self.init_weights() + + def get_input_embeddings(self) -> nn.Module: + return self.vision_model.embeddings.patch_embedding + + @add_start_docstrings_to_model_forward(CLIP_VISION_INPUTS_DOCSTRING) + @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=CLIPVisionConfig) + def forward( + self, + pixel_values=None, + output_attentions=None, + output_hidden_states=None, + return_dict=None, + ): + r""" + Returns: + + """ + return self.vision_model( + pixel_values=pixel_values, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + +@add_start_docstrings(CLIP_START_DOCSTRING) +class CLIPModel(CLIPPreTrainedModel): + config_class = CLIPConfig + + def __init__(self, config: CLIPConfig): + super().__init__(config) + + if not isinstance(config.text_config, CLIPTextConfig): + raise ValueError( + f"config.text_config is expected to be of type CLIPTextConfig but is of type {type(config.text_config)}." + ) + + if not isinstance(config.vision_config, CLIPVisionConfig): + raise ValueError( + f"config.vision_config is expected to be of type CLIPVisionConfig but is of type {type(config.vision_config)}." + ) + + text_config = config.text_config + vision_config = config.vision_config + + self.projection_dim = config.projection_dim + self.text_embed_dim = text_config.hidden_size + self.vision_embed_dim = vision_config.hidden_size + + self.text_model = CLIPTextTransformer(text_config) + self.vision_model = CLIPVisionTransformer(vision_config) + + self.visual_projection = nn.Linear(self.vision_embed_dim, self.projection_dim, bias=False) + self.text_projection = nn.Linear(self.text_embed_dim, self.projection_dim, bias=False) + self.logit_scale = nn.Parameter(torch.ones([])) + + self.init_weights() + + @add_start_docstrings_to_model_forward(CLIP_TEXT_INPUTS_DOCSTRING) + def get_text_features( + self, + input_ids=None, + attention_mask=None, + position_ids=None, + output_attentions=None, + output_hidden_states=None, + return_dict=None, + ): + r""" + Returns: + text_features (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, output_dim`): The text embeddings + obtained by applying the projection layer to the pooled output of :class:`~transformers.CLIPTextModel`. 
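+
+        Example (a minimal usage sketch; it assumes the ``openai/clip-vit-base-patch32`` checkpoint referenced
+        elsewhere in this patch)::
+
+            >>> from transformers import CLIPTokenizer, CLIPModel
+
+            >>> model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
+            >>> tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-base-patch32")
+
+            >>> inputs = tokenizer(["a photo of a cat", "a photo of a dog"], padding=True, return_tensors="pt")
+            >>> text_features = model.get_text_features(**inputs)  # shape (batch_size, projection_dim)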
+ """ + text_outputs = self.text_model( + input_ids=input_ids, + attention_mask=attention_mask, + position_ids=position_ids, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + pooled_output = text_outputs[1] + text_features = self.text_projection(pooled_output) + + return text_features + + @add_start_docstrings_to_model_forward(CLIP_VISION_INPUTS_DOCSTRING) + def get_image_features( + self, + pixel_values=None, + output_attentions=None, + output_hidden_states=None, + return_dict=None, + ): + r""" + Returns: + image_features (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, output_dim`): The image embeddings + obtained by applying the projection layer to the pooled output of :class:`~transformers.CLIPVisionModel`. + """ + vision_outputs = self.vision_model( + pixel_values=pixel_values, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + pooled_output = vision_outputs[1] # pooled_output + image_features = self.visual_projection(pooled_output) + + return image_features + + @add_start_docstrings_to_model_forward(CLIP_INPUTS_DOCSTRING) + @replace_return_docstrings(output_type=CLIPOutput, config_class=CLIPConfig) + def forward( + self, + input_ids=None, + pixel_values=None, + attention_mask=None, + position_ids=None, + return_loss=None, + output_attentions=None, + output_hidden_states=None, + return_dict=None, + ): + r""" + Returns: + + """ + return_dict = return_dict if return_dict is not None else self.config.return_dict + vision_outputs = self.vision_model( + pixel_values=pixel_values, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + text_outputs = self.text_model( + input_ids=input_ids, + attention_mask=attention_mask, + position_ids=position_ids, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + image_embeds = vision_outputs[1] + image_embeds = self.visual_projection(image_embeds) + + text_embeds = text_outputs[1] + text_embeds = self.text_projection(text_embeds) + + # normalized features + image_embeds = image_embeds / image_embeds.norm(dim=-1, keepdim=True) + text_embeds = text_embeds / text_embeds.norm(dim=-1, keepdim=True) + + # cosine similarity as logits + logit_scale = self.logit_scale.exp() + logits_per_text = torch.matmul(text_embeds, image_embeds.t()) * logit_scale + logits_per_image = logits_per_text.T + + loss = None + if return_loss: + loss = clip_loss(logits_per_text) + + if not return_dict: + output = (logits_per_image, logits_per_text, text_embeds, image_embeds, text_outputs, vision_outputs) + return ((loss,) + output) if loss is not None else output + + return CLIPOutput( + loss=loss, + logits_per_image=logits_per_image, + logits_per_text=logits_per_text, + text_embeds=text_embeds, + image_embeds=image_embeds, + text_model_output=text_outputs, + vision_model_output=vision_outputs, + ) diff --git a/src/transformers/models/clip/processing_clip.py b/src/transformers/models/clip/processing_clip.py new file mode 100644 index 00000000000000..e75199f2b2253c --- /dev/null +++ b/src/transformers/models/clip/processing_clip.py @@ -0,0 +1,171 @@ +# coding=utf-8 +# Copyright 2021 The HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +Image/Text processor class for CLIP +""" +from ...tokenization_utils_base import BatchEncoding +from .feature_extraction_clip import CLIPFeatureExtractor +from .tokenization_clip import CLIPTokenizer + + +class CLIPProcessor: + r""" + Constructs a CLIP processor which wraps a CLIP feature extractor and a CLIP tokenizer into a single processor. + + :class:`~transformers.CLIPProcessor` offers all the functionalities of :class:`~transformers.CLIPFeatureExtractor` + and :class:`~transformers.CLIPTokenizer`. See the :meth:`~transformers.CLIPProcessor.__call__` and + :meth:`~transformers.CLIPProcessor.decode` for more information. + + Args: + feature_extractor (:class:`~transformers.CLIPFeatureExtractor`): + The feature extractor is a required input. + tokenizer (:class:`~transformers.CLIPTokenizer`): + The tokenizer is a required input. + """ + + def __init__(self, feature_extractor, tokenizer): + if not isinstance(feature_extractor, CLIPFeatureExtractor): + raise ValueError( + f"`feature_extractor` has to be of type CLIPFeatureExtractor, but is {type(feature_extractor)}" + ) + if not isinstance(tokenizer, CLIPTokenizer): + raise ValueError(f"`tokenizer` has to be of type CLIPTokenizer, but is {type(tokenizer)}") + + self.feature_extractor = feature_extractor + self.tokenizer = tokenizer + self.current_processor = self.feature_extractor + + def save_pretrained(self, save_directory): + """ + Save a CLIP feature extractor object and CLIP tokenizer object to the directory ``save_directory``, so that it + can be re-loaded using the :func:`~transformers.CLIPProcessor.from_pretrained` class method. + + .. note:: + + This class method is simply calling :meth:`~transformers.PreTrainedFeatureExtractor.save_pretrained` and + :meth:`~transformers.tokenization_utils_base.PreTrainedTokenizer.save_pretrained`. Please refer to the + docstrings of the methods above for more information. + + Args: + save_directory (:obj:`str` or :obj:`os.PathLike`): + Directory where the feature extractor JSON file and the tokenizer files will be saved (directory will + be created if it does not exist). + """ + + self.feature_extractor.save_pretrained(save_directory) + self.tokenizer.save_pretrained(save_directory) + + @classmethod + def from_pretrained(cls, pretrained_model_name_or_path, **kwargs): + r""" + Instantiate a :class:`~transformers.CLIPProcessor` from a pretrained CLIP processor. + + .. note:: + + This class method is simply calling CLIPFeatureExtractor's + :meth:`~transformers.PreTrainedFeatureExtractor.from_pretrained` and CLIPTokenizer's + :meth:`~transformers.tokenization_utils_base.PreTrainedTokenizer.from_pretrained`. Please refer to the + docstrings of the methods above for more information. + + Args: + pretrained_model_name_or_path (:obj:`str` or :obj:`os.PathLike`): + This can be either: + + - a string, the `model id` of a pretrained feature_extractor hosted inside a model repo on + huggingface.co. Valid model ids can be located at the root-level, like ``clip-vit-base-patch32``, or + namespaced under a user or organization name, like ``openai/clip-vit-base-patch32``. 
+ - a path to a `directory` containing a feature extractor file saved using the + :meth:`~transformers.PreTrainedFeatureExtractor.save_pretrained` method, e.g., + ``./my_model_directory/``. + - a path or url to a saved feature extractor JSON `file`, e.g., + ``./my_model_directory/preprocessor_config.json``. + + **kwargs + Additional keyword arguments passed along to both :class:`~transformers.PreTrainedFeatureExtractor` and + :class:`~transformers.PreTrainedTokenizer` + """ + feature_extractor = CLIPFeatureExtractor.from_pretrained(pretrained_model_name_or_path, **kwargs) + tokenizer = CLIPTokenizer.from_pretrained(pretrained_model_name_or_path, **kwargs) + + return cls(feature_extractor=feature_extractor, tokenizer=tokenizer) + + def __call__(self, text=None, images=None, return_tensors=None, **kwargs): + """ + Main method to prepare for the model one or several sequences(s) and image(s). This method forwards the + :obj:`text` and :obj:`kwargs` arguments to CLIPTokenizer's :meth:`~transformers.CLIPTokenizer.__call__` if + :obj:`text` is not :obj:`None` to encode the text. To prepare the image(s), this method forwards the + :obj:`images` and :obj:`kwrags` arguments to CLIPFeatureExtractor's + :meth:`~transformers.CLIPFeatureExtractor.__call__` if :obj:`images` is not :obj:`None`. Please refer to the + doctsring of the above two methods for more information. + + Args: + text (:obj:`str`, :obj:`List[str]`, :obj:`List[List[str]]`): + The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings + (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set + :obj:`is_split_into_words=True` (to lift the ambiguity with a batch of sequences). + images (:obj:`PIL.Image.Image`, :obj:`np.ndarray`, :obj:`torch.Tensor`, :obj:`List[PIL.Image.Image]`, :obj:`List[np.ndarray]`, :obj:`List[torch.Tensor]`): + The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch + tensor. In case of a NumPy array/PyTorch tensor, each image should be of shape (C, H, W), where C is a + number of channels, H and W are image height and width. + + return_tensors (:obj:`str` or :class:`~transformers.file_utils.TensorType`, `optional`): + If set, will return tensors of a particular framework. Acceptable values are: + + * :obj:`'tf'`: Return TensorFlow :obj:`tf.constant` objects. + * :obj:`'pt'`: Return PyTorch :obj:`torch.Tensor` objects. + * :obj:`'np'`: Return NumPy :obj:`np.ndarray` objects. + * :obj:`'jax'`: Return JAX :obj:`jnp.ndarray` objects. + + Returns: + :class:`~transformers.BatchEncoding`: A :class:`~transformers.BatchEncoding` with the following fields: + + - **input_ids** -- List of token ids to be fed to a model. Returned when :obj:`text` is not :obj:`None`. + - **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when + :obj:`return_attention_mask=True` or if `"attention_mask"` is in :obj:`self.model_input_names` and if + :obj:`text` is not :obj:`None`). + - **pixel_values** -- Pixel values to be fed to a model. Returned when :obj:`images` is not :obj:`None`. + """ + + if text is None and images is None: + raise ValueError("You have to specify either text or images. 
Both cannot be none.") + + if text is not None: + encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs) + + if images is not None: + image_features = self.feature_extractor(images, return_tensors=return_tensors, **kwargs) + + if text is not None and images is not None: + encoding["pixel_values"] = image_features.pixel_values + return encoding + elif text is not None: + return encoding + else: + return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors) + + def batch_decode(self, *args, **kwargs): + """ + This method forwards all its arguments to CLIPTokenizer's + :meth:`~transformers.PreTrainedTokenizer.batch_decode`. Please refer to the docstring of this method for more + information. + """ + return self.tokenizer.batch_decode(*args, **kwargs) + + def decode(self, *args, **kwargs): + """ + This method forwards all its arguments to CLIPTokenizer's :meth:`~transformers.PreTrainedTokenizer.decode`. + Please refer to the docstring of this method for more information. + """ + return self.tokenizer.decode(*args, **kwargs) diff --git a/src/transformers/models/clip/tokenization_clip.py b/src/transformers/models/clip/tokenization_clip.py new file mode 100644 index 00000000000000..39eed99e3ac832 --- /dev/null +++ b/src/transformers/models/clip/tokenization_clip.py @@ -0,0 +1,371 @@ +# coding=utf-8 +# Copyright 2021 The Open AI Team Authors and The HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Tokenization classes for CLIP.""" + +import json +import os +from functools import lru_cache +from typing import List, Optional, Tuple + +import regex as re +from transformers.models.bert.tokenization_bert import BasicTokenizer + +from ...tokenization_utils import AddedToken, PreTrainedTokenizer +from ...utils import logging + + +logger = logging.get_logger(__name__) + +VOCAB_FILES_NAMES = { + "vocab_file": "vocab.json", + "merges_file": "merges.txt", +} + +PRETRAINED_VOCAB_FILES_MAP = { + "vocab_file": { + "openai/clip-vit-base-patch32": "https://huggingface.co/openai/clip-vit-base-patch32/resolve/main/vocab.json", + }, + "merges_file": { + "openai/clip-vit-base-patch32": "https://huggingface.co/openai/clip-vit-base-patch32/resolve/main/merges.txt", + }, +} + +PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = { + "openai/clip-vit-base-patch32": 77, +} + + +PRETRAINED_INIT_CONFIGURATION = { + "openai/clip-vit-base-patch32": {"do_lower_case": True}, +} + + +@lru_cache() +def bytes_to_unicode(): + """ + Returns list of utf-8 byte and a mapping to unicode strings. We specifically avoids mapping to whitespace/control + characters the bpe code barfs on. + + The reversible bpe codes work on unicode strings. This means you need a large # of unicode characters in your vocab + if you want to avoid UNKs. When you're at something like a 10B token dataset you end up needing around 5K for + decent coverage. This is a signficant percentage of your normal, say, 32K bpe vocab. To avoid that, we want lookup + tables between utf-8 bytes and unicode strings. 
+ """ + bs = ( + list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1)) + ) + cs = bs[:] + n = 0 + for b in range(2 ** 8): + if b not in bs: + bs.append(b) + cs.append(2 ** 8 + n) + n += 1 + cs = [chr(n) for n in cs] + return dict(zip(bs, cs)) + + +def get_pairs(word): + """ + Return set of symbol pairs in a word. + + Word is represented as tuple of symbols (symbols being variable-length strings). + """ + pairs = set() + prev_char = word[0] + for char in word[1:]: + pairs.add((prev_char, char)) + prev_char = char + return pairs + + +def whitespace_clean(text): + text = re.sub(r"\s+", " ", text) + text = text.strip() + return text + + +class CLIPTokenizer(PreTrainedTokenizer): + """ + Construct a CLIP tokenizer. Based on byte-level Byte-Pair-Encoding. + + This tokenizer has been trained to treat spaces like parts of the tokens (a bit like sentencepiece) so a word will + be encoded differently whether it is at the beginning of the sentence (without space) or not: + + + You can get around that behavior by passing ``add_prefix_space=True`` when instantiating this tokenizer or when you + call it on some text, but since the model was not pretrained this way, it might yield a decrease in performance. + + .. note:: + + When used with ``is_split_into_words=True``, this tokenizer will add a space before each word (even the first + one). + + This tokenizer inherits from :class:`~transformers.PreTrainedTokenizer` which contains most of the main methods. + Users should refer to this superclass for more information regarding those methods. + + Args: + vocab_file (:obj:`str`): + Path to the vocabulary file. + merges_file (:obj:`str`): + Path to the merges file. + errors (:obj:`str`, `optional`, defaults to :obj:`"replace"`): + Paradigm to follow when decoding bytes to UTF-8. See `bytes.decode + `__ for more information. + unk_token (:obj:`str`, `optional`, defaults to :obj:`<|endoftext|>`): + The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this + token instead. + bos_token (:obj:`str`, `optional`, defaults to :obj:`<|endoftext|>`): + The beginning of sequence token. + eos_token (:obj:`str`, `optional`, defaults to :obj:`<|endoftext|>`): + The end of sequence token. + add_prefix_space (:obj:`bool`, `optional`, defaults to :obj:`False`): + Whether or not to add an initial space to the input. This allows to treat the leading word just as any + other word. (CLIP tokenizer detect beginning of words by the preceding space). 
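+
+    Example (a minimal sketch; it assumes the ``openai/clip-vit-base-patch32`` vocabulary files listed above are
+    available on the Hub)::
+
+        >>> from transformers import CLIPTokenizer
+
+        >>> tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-base-patch32")
+        >>> inputs = tokenizer(["a photo of a cat", "a photo of a dog"], padding=True, return_tensors="pt")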
+ """ + + vocab_files_names = VOCAB_FILES_NAMES + pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP + max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES + model_input_names = ["input_ids", "attention_mask"] + + def __init__( + self, + vocab_file, + merges_file, + errors="replace", + unk_token="<|endoftext|>", + bos_token="<|startoftext|>", + eos_token="<|endoftext|>", + pad_token="<|endoftext|>", # hack to enable padding + add_prefix_space=False, + do_lower_case=True, + **kwargs + ): + bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token + eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token + unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token + + super().__init__( + errors=errors, + unk_token=unk_token, + bos_token=bos_token, + eos_token=eos_token, + pad_token=pad_token, + add_prefix_space=add_prefix_space, + do_lower_case=do_lower_case, + **kwargs, + ) + + try: + import ftfy + + self.fix_text = ftfy.fix_text + except ImportError: + logger.warning("ftfy or spacy is not installed using BERT BasicTokenizer instead of ftfy.") + self.nlp = BasicTokenizer(do_lower_case=True) + self.fix_text = None + + with open(vocab_file, encoding="utf-8") as vocab_handle: + self.encoder = json.load(vocab_handle) + self.decoder = {v: k for k, v in self.encoder.items()} + self.errors = errors # how to handle errors in decoding + self.byte_encoder = bytes_to_unicode() + self.byte_decoder = {v: k for k, v in self.byte_encoder.items()} + with open(merges_file, encoding="utf-8") as merges_handle: + bpe_merges = merges_handle.read().split("\n")[1 : 49152 - 256 - 2 + 1] + bpe_merges = [tuple(merge.split()) for merge in bpe_merges] + self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges)))) + self.cache = {"<|startoftext|>": "<|startoftext|>", "<|endoftext|>": "<|endoftext|>"} + self.add_prefix_space = add_prefix_space + + self.pat = re.compile( + r"""<\|startoftext\|>|<\|endoftext\|>|'s|'t|'re|'ve|'m|'ll|'d|[\p{L}]+|[\p{N}]|[^\s\p{L}\p{N}]+""", + re.IGNORECASE, + ) + + # Very ugly hack to enable padding + @property + def pad_token_id(self) -> Optional[int]: + """ + :obj:`Optional[int]`: Id of the padding token in the vocabulary. Returns :obj:`None` if the token has not been + set. + """ + return 0 + + @property + def vocab_size(self): + return len(self.encoder) + + def get_vocab(self): + return dict(self.encoder, **self.added_tokens_encoder) + + def build_inputs_with_special_tokens( + self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None + ) -> List[int]: + """ + Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and + adding special tokens. A CLIP sequence has the following format: + + - single sequence: ``<|startoftext|> X <|endoftext|>`` + + Pairs of sequences are not the expected use case, but they will be handled without a separator. + + Args: + token_ids_0 (:obj:`List[int]`): + List of IDs to which the special tokens will be added. + token_ids_1 (:obj:`List[int]`, `optional`): + Optional second list of IDs for sequence pairs. + + Returns: + :obj:`List[int]`: List of `input IDs <../glossary.html#input-ids>`__ with the appropriate special tokens. 
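+
+        Example (illustrative only; the concrete ids come from the loaded vocabulary)::
+
+            tokenizer.build_inputs_with_special_tokens([7, 8, 9])
+            # -> [tokenizer.bos_token_id, 7, 8, 9, tokenizer.eos_token_id]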
+ """ + if token_ids_1 is None: + return [self.bos_token_id] + token_ids_0 + [self.eos_token_id] + return [self.bos_token_id] + token_ids_0 + token_ids_1 + [self.eos_token_id] + + def get_special_tokens_mask( + self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False + ) -> List[int]: + """ + Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding + special tokens using the tokenizer ``prepare_for_model`` method. + + Args: + token_ids_0 (:obj:`List[int]`): + List of IDs. + token_ids_1 (:obj:`List[int]`, `optional`): + Optional second list of IDs for sequence pairs. + already_has_special_tokens (:obj:`bool`, `optional`, defaults to :obj:`False`): + Whether or not the token list is already formatted with special tokens for the model. + + Returns: + :obj:`List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token. + """ + + if already_has_special_tokens: + return super().get_special_tokens_mask( + token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True + ) + + if token_ids_1 is None: + return [1] + ([0] * len(token_ids_0)) + [1] + return [1] + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + [1] + + def bpe(self, token): + if token in self.cache: + return self.cache[token] + word = tuple(token[:-1]) + (token[-1] + "",) + pairs = get_pairs(word) + + if not pairs: + return token + "" + + while True: + bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf"))) + if bigram not in self.bpe_ranks: + break + first, second = bigram + new_word = [] + i = 0 + while i < len(word): + try: + j = word.index(first, i) + except ValueError: + new_word.extend(word[i:]) + break + else: + new_word.extend(word[i:j]) + i = j + + if word[i] == first and i < len(word) - 1 and word[i + 1] == second: + new_word.append(first + second) + i += 2 + else: + new_word.append(word[i]) + i += 1 + new_word = tuple(new_word) + word = new_word + if len(word) == 1: + break + else: + pairs = get_pairs(word) + word = " ".join(word) + self.cache[token] = word + return word + + def _tokenize(self, text): + """Tokenize a string.""" + bpe_tokens = [] + if self.fix_text is None: + text = " ".join(self.nlp.tokenize(text)) + else: + text = whitespace_clean(self.fix_text(text)).lower() + + for token in re.findall(self.pat, text): + token = "".join( + self.byte_encoder[b] for b in token.encode("utf-8") + ) # Maps all our bytes to unicode strings, avoiding controle tokens of the BPE (spaces in our case) + bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" ")) + return bpe_tokens + + def _convert_token_to_id(self, token): + """Converts a token (str) in an id using the vocab.""" + return self.encoder.get(token, self.encoder.get(self.unk_token)) + + def _convert_id_to_token(self, index): + """Converts an index (integer) in a token (str) using the vocab.""" + return self.decoder.get(index) + + def convert_tokens_to_string(self, tokens): + """Converts a sequence of tokens (string) in a single string.""" + text = "".join(tokens) + text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors).replace("", " ") + return text + + def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]: + if not os.path.isdir(save_directory): + logger.error("Vocabulary path ({}) should be a directory".format(save_directory)) + return + vocab_file = os.path.join( + save_directory, 
(filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] + ) + merge_file = os.path.join( + save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] + ) + + with open(vocab_file, "w", encoding="utf-8") as f: + f.write(json.dumps(self.encoder, ensure_ascii=False)) + + index = 0 + with open(merge_file, "w", encoding="utf-8") as writer: + writer.write("#version: 0.2\n") + for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]): + if index != token_index: + logger.warning( + "Saving vocabulary to {}: BPE merge indices are not consecutive." + " Please check that the tokenizer is not corrupted!".format(merge_file) + ) + index = token_index + writer.write(" ".join(bpe_tokens) + "\n") + index += 1 + + return vocab_file, merge_file + + def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs): + add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space) + if is_split_into_words or add_prefix_space: + text = " " + text + return (text, kwargs) diff --git a/src/transformers/models/clip/tokenization_clip_fast.py b/src/transformers/models/clip/tokenization_clip_fast.py new file mode 100644 index 00000000000000..a04dfd2f1a6b27 --- /dev/null +++ b/src/transformers/models/clip/tokenization_clip_fast.py @@ -0,0 +1,168 @@ +# coding=utf-8 +# Copyright 2021 The Open AI Team Authors and The HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Tokenization classes for OpenAI GPT.""" + + +import json +from typing import Optional, Tuple + +from tokenizers import pre_tokenizers + +from ...tokenization_utils_base import BatchEncoding +from ...tokenization_utils_fast import PreTrainedTokenizerFast +from ...utils import logging +from .tokenization_clip import CLIPTokenizer + + +logger = logging.get_logger(__name__) + +VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"} + +PRETRAINED_VOCAB_FILES_MAP = { + "vocab_file": { + "openai/clip-vit-base-patch32": "https://huggingface.co/openai/clip-vit-base-patch32/resolve/main/vocab.json", + }, + "merges_file": { + "openai/clip-vit-base-patch32": "https://huggingface.co/openai/clip-vit-base-patch32/resolve/main/merges.txt", + }, + "tokenizer_file": { + "openai/clip-vit-base-patch32": "https://huggingface.co/openai/clip-vit-base-patch32/resolve/main/tokenizer.json", + }, +} + +PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = { + "openai/clip-vit-base-patch32": 77, +} + + +class CLIPTokenizerFast(PreTrainedTokenizerFast): + """ + Construct a "fast" CLIP tokenizer (backed by HuggingFace's `tokenizers` library). Based on byte-level + Byte-Pair-Encoding. 
+ + This tokenizer has been trained to treat spaces like parts of the tokens (a bit like sentencepiece) so a word will + be encoded differently whether it is at the beginning of the sentence (without space) or not: + + :: + + >>> from transformers import CLIPTokenizerFast + >>> tokenizer = CLIPTokenizerFast.from_pretrained("openai/clip-vit-base-patch32") + >>> tokenizer("Hello world")['input_ids'] + [15496, 995] + >>> tokenizer(" Hello world")['input_ids'] + [18435, 995] + + You can get around that behavior by passing ``add_prefix_space=True`` when instantiating this tokenizer or when you + call it on some text, but since the model was not pretrained this way, it might yield a decrease in performance. + + .. note:: + + When used with ``is_split_into_words=True``, this tokenizer needs to be instantiated with + ``add_prefix_space=True``. + + This tokenizer inherits from :class:`~transformers.PreTrainedTokenizerFast` which contains most of the main + methods. Users should refer to this superclass for more information regarding those methods. + + Args: + vocab_file (:obj:`str`): + Path to the vocabulary file. + merges_file (:obj:`str`): + Path to the merges file. + errors (:obj:`str`, `optional`, defaults to :obj:`"replace"`): + Paradigm to follow when decoding bytes to UTF-8. See `bytes.decode + `__ for more information. + unk_token (:obj:`str`, `optional`, defaults to :obj:`<|endoftext|>`): + The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this + token instead. + bos_token (:obj:`str`, `optional`, defaults to :obj:`<|endoftext|>`): + The beginning of sequence token. + eos_token (:obj:`str`, `optional`, defaults to :obj:`<|endoftext|>`): + The end of sequence token. + add_prefix_space (:obj:`bool`, `optional`, defaults to :obj:`False`): + Whether or not to add an initial space to the input. This allows to treat the leading word just as any + other word. (CLIP tokenizer detect beginning of words by the preceding space). + trim_offsets (:obj:`bool`, `optional`, defaults to :obj:`True`): + Whether or not the post-processing step should trim offsets to avoid including whitespaces. + """ + + vocab_files_names = VOCAB_FILES_NAMES + pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP + max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES + model_input_names = ["input_ids", "attention_mask"] + slow_tokenizer_class = CLIPTokenizer + + def __init__( + self, + vocab_file, + merges_file, + tokenizer_file=None, + unk_token="<|endoftext|>", + bos_token="<|startoftext|>", + eos_token="<|endoftext|>", + pad_token="<|endoftext|>", # hack to enable padding + add_prefix_space=False, + **kwargs + ): + super().__init__( + vocab_file, + merges_file, + tokenizer_file=tokenizer_file, + unk_token=unk_token, + bos_token=bos_token, + eos_token=eos_token, + pad_token=pad_token, + add_prefix_space=add_prefix_space, + **kwargs, + ) + + pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__()) + if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space: + pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type")) + pre_tok_state["add_prefix_space"] = add_prefix_space + self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state) + + self.add_prefix_space = add_prefix_space + + # Very ugly hack to enable padding + @property + def pad_token_id(self) -> Optional[int]: + """ + :obj:`Optional[int]`: Id of the padding token in the vocabulary. Returns :obj:`None` if the token has not been + set. 
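+
+        Note: as a temporary workaround to enable padding (see the comment above), this currently always
+        returns ``0``.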
+ """ + return 0 + + def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding: + is_split_into_words = kwargs.get("is_split_into_words", False) + assert self.add_prefix_space or not is_split_into_words, ( + f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True " + "to use it with pretokenized inputs." + ) + + return super()._batch_encode_plus(*args, **kwargs) + + def _encode_plus(self, *args, **kwargs) -> BatchEncoding: + is_split_into_words = kwargs.get("is_split_into_words", False) + + assert self.add_prefix_space or not is_split_into_words, ( + f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True " + "to use it with pretokenized inputs." + ) + + return super()._encode_plus(*args, **kwargs) + + def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]: + files = self._tokenizer.model.save(save_directory, name=filename_prefix) + return tuple(files) diff --git a/src/transformers/utils/dummy_pt_objects.py b/src/transformers/utils/dummy_pt_objects.py index 158c7f7381d774..2a223a67fa4078 100644 --- a/src/transformers/utils/dummy_pt_objects.py +++ b/src/transformers/utils/dummy_pt_objects.py @@ -888,6 +888,45 @@ def from_pretrained(self, *args, **kwargs): requires_backends(self, ["torch"]) +CLIP_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class CLIPModel: + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + @classmethod + def from_pretrained(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class CLIPPreTrainedModel: + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + @classmethod + def from_pretrained(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class CLIPTextModel: + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + @classmethod + def from_pretrained(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class CLIPVisionModel: + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + @classmethod + def from_pretrained(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST = None diff --git a/src/transformers/utils/dummy_tokenizers_objects.py b/src/transformers/utils/dummy_tokenizers_objects.py index 04584349bb1318..92873c641ba1c6 100644 --- a/src/transformers/utils/dummy_tokenizers_objects.py +++ b/src/transformers/utils/dummy_tokenizers_objects.py @@ -56,6 +56,15 @@ def from_pretrained(self, *args, **kwargs): requires_backends(self, ["tokenizers"]) +class CLIPTokenizerFast: + def __init__(self, *args, **kwargs): + requires_backends(self, ["tokenizers"]) + + @classmethod + def from_pretrained(self, *args, **kwargs): + requires_backends(self, ["tokenizers"]) + + class ConvBertTokenizerFast: def __init__(self, *args, **kwargs): requires_backends(self, ["tokenizers"]) diff --git a/src/transformers/utils/dummy_vision_objects.py b/src/transformers/utils/dummy_vision_objects.py index c4f55df8e8b5a3..1798c9f73c8933 100644 --- a/src/transformers/utils/dummy_vision_objects.py +++ b/src/transformers/utils/dummy_vision_objects.py @@ -7,6 +7,16 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) +class CLIPFeatureExtractor: + def __init__(self, *args, **kwargs): + requires_backends(self, ["vision"]) + + +class CLIPProcessor: + def __init__(self, *args, **kwargs): + requires_backends(self, ["vision"]) + + class DeiTFeatureExtractor: def __init__(self, *args, **kwargs): 
requires_backends(self, ["vision"]) diff --git a/tests/test_feature_extraction_clip.py b/tests/test_feature_extraction_clip.py new file mode 100644 index 00000000000000..eac10af6f43a9c --- /dev/null +++ b/tests/test_feature_extraction_clip.py @@ -0,0 +1,229 @@ +# coding=utf-8 +# Copyright 2021 HuggingFace Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +import unittest + +import numpy as np + +from transformers.file_utils import is_torch_available, is_vision_available +from transformers.testing_utils import require_torch, require_vision + +from .test_feature_extraction_common import FeatureExtractionSavingTestMixin + + +if is_torch_available(): + import torch + +if is_vision_available(): + from PIL import Image + + from transformers import CLIPFeatureExtractor + + +class CLIPFeatureExtractionTester(unittest.TestCase): + def __init__( + self, + parent, + batch_size=7, + num_channels=3, + image_size=18, + min_resolution=30, + max_resolution=400, + do_resize=True, + size=20, + do_center_crop=True, + crop_size=18, + do_normalize=True, + image_mean=[0.48145466, 0.4578275, 0.40821073], + image_std=[0.26862954, 0.26130258, 0.27577711], + ): + self.parent = parent + self.batch_size = batch_size + self.num_channels = num_channels + self.image_size = image_size + self.min_resolution = min_resolution + self.max_resolution = max_resolution + self.do_resize = do_resize + self.size = size + self.do_center_crop = do_center_crop + self.crop_size = crop_size + self.do_normalize = do_normalize + self.image_mean = image_mean + self.image_std = image_std + + def prepare_feat_extract_dict(self): + return { + "do_resize": self.do_resize, + "size": self.size, + "do_center_crop": self.do_center_crop, + "crop_size": self.crop_size, + "do_normalize": self.do_normalize, + "image_mean": self.image_mean, + "image_std": self.image_std, + } + + def prepare_inputs(self, equal_resolution=False, numpify=False, torchify=False): + """This function prepares a list of PIL images, or a list of numpy arrays if one specifies numpify=True, + or a list of PyTorch tensors if one specifies torchify=True. 
+ """ + + assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time" + + if equal_resolution: + image_inputs = [] + for i in range(self.batch_size): + image_inputs.append( + np.random.randint( + 255, size=(self.num_channels, self.max_resolution, self.max_resolution), dtype=np.uint8 + ) + ) + else: + image_inputs = [] + for i in range(self.batch_size): + width, height = np.random.choice(np.arange(self.min_resolution, self.max_resolution), 2) + image_inputs.append(np.random.randint(255, size=(self.num_channels, width, height), dtype=np.uint8)) + + if not numpify and not torchify: + # PIL expects the channel dimension as last dimension + image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs] + + if torchify: + image_inputs = [torch.from_numpy(x) for x in image_inputs] + + return image_inputs + + +@require_torch +@require_vision +class CLIPFeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase): + + feature_extraction_class = CLIPFeatureExtractor if is_vision_available() else None + + def setUp(self): + self.feature_extract_tester = CLIPFeatureExtractionTester(self) + + @property + def feat_extract_dict(self): + return self.feature_extract_tester.prepare_feat_extract_dict() + + def test_feat_extract_properties(self): + feature_extractor = self.feature_extraction_class(**self.feat_extract_dict) + self.assertTrue(hasattr(feature_extractor, "do_resize")) + self.assertTrue(hasattr(feature_extractor, "size")) + self.assertTrue(hasattr(feature_extractor, "do_center_crop")) + self.assertTrue(hasattr(feature_extractor, "center_crop")) + self.assertTrue(hasattr(feature_extractor, "do_normalize")) + self.assertTrue(hasattr(feature_extractor, "image_mean")) + self.assertTrue(hasattr(feature_extractor, "image_std")) + + def test_batch_feature(self): + pass + + def test_call_pil(self): + # Initialize feature_extractor + feature_extractor = self.feature_extraction_class(**self.feat_extract_dict) + # create random PIL images + image_inputs = self.feature_extract_tester.prepare_inputs(equal_resolution=False) + for image in image_inputs: + self.assertIsInstance(image, Image.Image) + + # Test not batched input + encoded_images = feature_extractor(image_inputs[0], return_tensors="pt").pixel_values + self.assertEqual( + encoded_images.shape, + ( + 1, + self.feature_extract_tester.num_channels, + self.feature_extract_tester.crop_size, + self.feature_extract_tester.crop_size, + ), + ) + + # Test batched + encoded_images = feature_extractor(image_inputs, return_tensors="pt").pixel_values + self.assertEqual( + encoded_images.shape, + ( + self.feature_extract_tester.batch_size, + self.feature_extract_tester.num_channels, + self.feature_extract_tester.crop_size, + self.feature_extract_tester.crop_size, + ), + ) + + def test_call_numpy(self): + # Initialize feature_extractor + feature_extractor = self.feature_extraction_class(**self.feat_extract_dict) + # create random numpy tensors + image_inputs = self.feature_extract_tester.prepare_inputs(equal_resolution=False, numpify=True) + for image in image_inputs: + self.assertIsInstance(image, np.ndarray) + + # Test not batched input + encoded_images = feature_extractor(image_inputs[0], return_tensors="pt").pixel_values + self.assertEqual( + encoded_images.shape, + ( + 1, + self.feature_extract_tester.num_channels, + self.feature_extract_tester.crop_size, + self.feature_extract_tester.crop_size, + ), + ) + + # Test batched + encoded_images = feature_extractor(image_inputs, 
return_tensors="pt").pixel_values + self.assertEqual( + encoded_images.shape, + ( + self.feature_extract_tester.batch_size, + self.feature_extract_tester.num_channels, + self.feature_extract_tester.crop_size, + self.feature_extract_tester.crop_size, + ), + ) + + def test_call_pytorch(self): + # Initialize feature_extractor + feature_extractor = self.feature_extraction_class(**self.feat_extract_dict) + # create random PyTorch tensors + image_inputs = self.feature_extract_tester.prepare_inputs(equal_resolution=False, torchify=True) + for image in image_inputs: + self.assertIsInstance(image, torch.Tensor) + + # Test not batched input + encoded_images = feature_extractor(image_inputs[0], return_tensors="pt").pixel_values + self.assertEqual( + encoded_images.shape, + ( + 1, + self.feature_extract_tester.num_channels, + self.feature_extract_tester.crop_size, + self.feature_extract_tester.crop_size, + ), + ) + + # Test batched + encoded_images = feature_extractor(image_inputs, return_tensors="pt").pixel_values + self.assertEqual( + encoded_images.shape, + ( + self.feature_extract_tester.batch_size, + self.feature_extract_tester.num_channels, + self.feature_extract_tester.crop_size, + self.feature_extract_tester.crop_size, + ), + ) diff --git a/tests/test_modeling_clip.py b/tests/test_modeling_clip.py new file mode 100644 index 00000000000000..c5ab9416d152e0 --- /dev/null +++ b/tests/test_modeling_clip.py @@ -0,0 +1,561 @@ +# coding=utf-8 +# Copyright 2021 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" Testing suite for the PyTorch CLIP model. 
""" + + +import inspect +import os +import tempfile +import unittest + +import requests +from transformers.file_utils import is_torch_available, is_vision_available +from transformers.testing_utils import require_torch, require_vision, slow, torch_device + +from .test_configuration_common import ConfigTester +from .test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor, random_attention_mask + + +if is_torch_available(): + import torch + + from transformers import CLIPConfig, CLIPModel, CLIPTextConfig, CLIPTextModel, CLIPVisionConfig, CLIPVisionModel + from transformers.models.clip.modeling_clip import CLIP_PRETRAINED_MODEL_ARCHIVE_LIST + + +if is_vision_available(): + from PIL import Image + + from transformers import CLIPProcessor + + +class CLIPVisionModelTester: + def __init__( + self, + parent, + batch_size=12, + image_size=30, + patch_size=2, + num_channels=3, + is_training=True, + hidden_size=32, + num_hidden_layers=5, + num_attention_heads=4, + intermediate_size=37, + dropout=0.1, + attention_dropout=0.1, + initializer_range=0.02, + scope=None, + ): + self.parent = parent + self.batch_size = batch_size + self.image_size = image_size + self.patch_size = patch_size + self.num_channels = num_channels + self.is_training = is_training + self.hidden_size = hidden_size + self.num_hidden_layers = num_hidden_layers + self.num_attention_heads = num_attention_heads + self.intermediate_size = intermediate_size + self.dropout = dropout + self.attention_dropout = attention_dropout + self.initializer_range = initializer_range + self.scope = scope + + def prepare_config_and_inputs(self): + pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) + config = CLIPVisionConfig( + image_size=self.image_size, + patch_size=self.patch_size, + num_channels=self.num_channels, + hidden_size=self.hidden_size, + num_hidden_layers=self.num_hidden_layers, + num_attention_heads=self.num_attention_heads, + intermediate_size=self.intermediate_size, + dropout=self.dropout, + attention_dropout=self.attention_dropout, + initializer_range=self.initializer_range, + ) + + return config, pixel_values + + def create_and_check_model(self, config, pixel_values): + model = CLIPVisionModel(config=config) + model.to(torch_device) + model.eval() + result = model(pixel_values) + # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) + image_size = (self.image_size, self.image_size) + patch_size = (self.patch_size, self.patch_size) + num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) + self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, num_patches + 1, self.hidden_size)) + self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size)) + + def prepare_config_and_inputs_for_common(self): + config_and_inputs = self.prepare_config_and_inputs() + config, pixel_values = config_and_inputs + inputs_dict = {"pixel_values": pixel_values} + return config, inputs_dict + + +@require_torch +class CLIPVisionModelTest(ModelTesterMixin, unittest.TestCase): + """ + Here we also overwrite some of the tests of test_modeling_common.py, as CLIP does not use input_ids, inputs_embeds, + attention_mask and seq_length. 
+ """ + + all_model_classes = (CLIPVisionModel,) if is_torch_available() else () + + test_pruning = False + test_torchscript = False + test_resize_embeddings = False + test_head_masking = False + + def setUp(self): + self.model_tester = CLIPVisionModelTester(self) + self.config_tester = ConfigTester(self, config_class=CLIPVisionConfig, has_text_modality=False, hidden_size=37) + + def test_config(self): + self.config_tester.run_common_tests() + + def test_inputs_embeds(self): + # CLIP does not use inputs_embeds + pass + + def test_model_common_attributes(self): + config, _ = self.model_tester.prepare_config_and_inputs_for_common() + + for model_class in self.all_model_classes: + model = model_class(config) + self.assertIsInstance(model.get_input_embeddings(), (torch.nn.Module)) + x = model.get_output_embeddings() + self.assertTrue(x is None or isinstance(x, torch.nn.Linear)) + + def test_forward_signature(self): + config, _ = self.model_tester.prepare_config_and_inputs_for_common() + + for model_class in self.all_model_classes: + model = model_class(config) + signature = inspect.signature(model.forward) + # signature.parameters is an OrderedDict => so arg_names order is deterministic + arg_names = [*signature.parameters.keys()] + + expected_arg_names = ["pixel_values"] + self.assertListEqual(arg_names[:1], expected_arg_names) + + def test_model(self): + config_and_inputs = self.model_tester.prepare_config_and_inputs() + self.model_tester.create_and_check_model(*config_and_inputs) + + def test_attention_outputs(self): + config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() + config.return_dict = True + + # in CLIP, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token) + image_size = (self.model_tester.image_size, self.model_tester.image_size) + patch_size = (self.model_tester.patch_size, self.model_tester.patch_size) + num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) + seq_len = num_patches + 1 + + for model_class in self.all_model_classes: + inputs_dict["output_attentions"] = True + inputs_dict["output_hidden_states"] = False + config.return_dict = True + model = model_class(config) + model.to(torch_device) + model.eval() + with torch.no_grad(): + outputs = model(**self._prepare_for_class(inputs_dict, model_class)) + attentions = outputs.attentions + self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) + + # check that output_attentions also work using config + del inputs_dict["output_attentions"] + config.output_attentions = True + model = model_class(config) + model.to(torch_device) + model.eval() + with torch.no_grad(): + outputs = model(**self._prepare_for_class(inputs_dict, model_class)) + attentions = outputs.attentions + self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) + + out_len = len(outputs) + + # Check attention is always last and order is fine + inputs_dict["output_attentions"] = True + inputs_dict["output_hidden_states"] = True + model = model_class(config) + model.to(torch_device) + model.eval() + with torch.no_grad(): + outputs = model(**self._prepare_for_class(inputs_dict, model_class)) + + added_hidden_states = 1 + self.assertEqual(out_len + added_hidden_states, len(outputs)) + + self_attentions = outputs.attentions + + self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers) + + self.assertListEqual( + list(self_attentions[0].shape[-3:]), + [self.model_tester.num_attention_heads, seq_len, seq_len], + ) + + def 
test_hidden_states_output(self): + def check_hidden_states_output(inputs_dict, config, model_class): + model = model_class(config) + model.to(torch_device) + model.eval() + + with torch.no_grad(): + outputs = model(**self._prepare_for_class(inputs_dict, model_class)) + + hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states + + expected_num_layers = getattr( + self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1 + ) + self.assertEqual(len(hidden_states), expected_num_layers) + + # CLIP has a different seq_length + image_size = (self.model_tester.image_size, self.model_tester.image_size) + patch_size = (self.model_tester.patch_size, self.model_tester.patch_size) + num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) + seq_length = num_patches + 1 + + self.assertListEqual( + list(hidden_states[0].shape[-2:]), + [seq_length, self.model_tester.hidden_size], + ) + + config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() + + for model_class in self.all_model_classes: + inputs_dict["output_hidden_states"] = True + check_hidden_states_output(inputs_dict, config, model_class) + + # check that output_hidden_states also work using config + del inputs_dict["output_hidden_states"] + config.output_hidden_states = True + + check_hidden_states_output(inputs_dict, config, model_class) + + def test_training(self): + pass + + def test_training_gradient_checkpointing(self): + pass + + # skip this test as CLIPVisionModel has no base class and is + # not available in MODEL_MAPPING + def test_save_load_fast_init_from_base(self): + pass + + # skip this test as CLIPVisionModel has no base class and is + # not available in MODEL_MAPPING + def test_save_load_fast_init_to_base(self): + pass + + @slow + def test_model_from_pretrained(self): + for model_name in CLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: + model = CLIPVisionModel.from_pretrained(model_name) + self.assertIsNotNone(model) + + +class CLIPTextModelTester: + def __init__( + self, + parent, + batch_size=12, + seq_length=7, + is_training=True, + use_input_mask=True, + use_labels=True, + vocab_size=99, + hidden_size=32, + num_hidden_layers=5, + num_attention_heads=4, + intermediate_size=37, + dropout=0.1, + attention_dropout=0.1, + max_position_embeddings=512, + initializer_range=0.02, + scope=None, + ): + self.parent = parent + self.batch_size = batch_size + self.seq_length = seq_length + self.is_training = is_training + self.use_input_mask = use_input_mask + self.use_labels = use_labels + self.vocab_size = vocab_size + self.hidden_size = hidden_size + self.num_hidden_layers = num_hidden_layers + self.num_attention_heads = num_attention_heads + self.intermediate_size = intermediate_size + self.dropout = dropout + self.attention_dropout = attention_dropout + self.max_position_embeddings = max_position_embeddings + self.initializer_range = initializer_range + self.scope = scope + + def prepare_config_and_inputs(self): + input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) + + input_mask = None + if self.use_input_mask: + input_mask = random_attention_mask([self.batch_size, self.seq_length]) + + config = CLIPTextConfig( + vocab_size=self.vocab_size, + hidden_size=self.hidden_size, + num_hidden_layers=self.num_hidden_layers, + num_attention_heads=self.num_attention_heads, + intermediate_size=self.intermediate_size, + dropout=self.dropout, + attention_dropout=self.attention_dropout, + 
max_position_embeddings=self.max_position_embeddings, + initializer_range=self.initializer_range, + ) + + return config, input_ids, input_mask + + def create_and_check_model(self, config, input_ids, input_mask): + model = CLIPTextModel(config=config) + model.to(torch_device) + model.eval() + result = model(input_ids, attention_mask=input_mask) + result = model(input_ids) + self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) + self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size)) + + def prepare_config_and_inputs_for_common(self): + config_and_inputs = self.prepare_config_and_inputs() + config, input_ids, input_mask = config_and_inputs + inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask} + return config, inputs_dict + + +@require_torch +class CLIPTextModelTest(ModelTesterMixin, unittest.TestCase): + + all_model_classes = (CLIPTextModel,) if is_torch_available() else () + test_pruning = False + test_head_masking = False + + def setUp(self): + self.model_tester = CLIPTextModelTester(self) + self.config_tester = ConfigTester(self, config_class=CLIPTextConfig, hidden_size=37) + + def test_config(self): + self.config_tester.run_common_tests() + + def test_model(self): + config_and_inputs = self.model_tester.prepare_config_and_inputs() + self.model_tester.create_and_check_model(*config_and_inputs) + + def test_training(self): + pass + + def test_training_gradient_checkpointing(self): + pass + + def test_inputs_embeds(self): + # CLIP does not use inputs_embeds + pass + + # skip this test as CLIPTextModel has no base class and is + # not available in MODEL_MAPPING + def test_save_load_fast_init_from_base(self): + pass + + # skip this test as CLIPTextModel has no base class and is + # not available in MODEL_MAPPING + def test_save_load_fast_init_to_base(self): + pass + + @slow + def test_model_from_pretrained(self): + for model_name in CLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: + model = CLIPTextModel.from_pretrained(model_name) + self.assertIsNotNone(model) + + +class CLIPModelTester: + def __init__(self, parent, is_training=True): + self.parent = parent + self.text_model_tester = CLIPTextModelTester(parent) + self.vision_model_tester = CLIPVisionModelTester(parent) + self.is_training = is_training + + def prepare_config_and_inputs(self): + text_config, input_ids, attention_mask = self.text_model_tester.prepare_config_and_inputs() + vision_config, pixel_values = self.vision_model_tester.prepare_config_and_inputs() + + config = CLIPConfig.from_text_vision_configs(text_config, vision_config, projection_dim=64) + + return config, input_ids, attention_mask, pixel_values + + def create_and_check_model(self, config, input_ids, attention_mask, pixel_values): + model = CLIPModel(config).to(torch_device).eval() + result = model(input_ids, pixel_values, attention_mask) + self.parent.assertEqual( + result.logits_per_image.shape, (self.vision_model_tester.batch_size, self.text_model_tester.batch_size) + ) + self.parent.assertEqual( + result.logits_per_text.shape, (self.text_model_tester.batch_size, self.vision_model_tester.batch_size) + ) + + def prepare_config_and_inputs_for_common(self): + config_and_inputs = self.prepare_config_and_inputs() + config, input_ids, attention_mask, pixel_values = config_and_inputs + inputs_dict = { + "input_ids": input_ids, + "attention_mask": attention_mask, + "pixel_values": pixel_values, + "return_loss": True, + } + return config, inputs_dict + + +@require_torch +class 
CLIPModelTest(ModelTesterMixin, unittest.TestCase): + all_model_classes = (CLIPModel,) if is_torch_available() else () + test_head_masking = False + test_pruning = False + test_resize_embeddings = False + test_attention_outputs = False + + def setUp(self): + self.model_tester = CLIPModelTester(self) + + def test_model(self): + config_and_inputs = self.model_tester.prepare_config_and_inputs() + self.model_tester.create_and_check_model(*config_and_inputs) + + # hidden_states are tested in individual model tests + def test_hidden_states_output(self): + pass + + # input_embeds are tested in individual model tests + def test_inputs_embeds(self): + pass + + # tested in individual model tests + def test_retain_grad_hidden_states_attentions(self): + pass + + # CLIPModel does not have input/output embeddings + def test_model_common_attributes(self): + pass + + def _create_and_check_torchscript(self, config, inputs_dict): + if not self.test_torchscript: + return + + configs_no_init = _config_zero_init(config) # To be sure we have no Nan + configs_no_init.torchscript = True + configs_no_init.return_dict = False + for model_class in self.all_model_classes: + model = model_class(config=configs_no_init) + model.to(torch_device) + model.eval() + + try: + input_ids = inputs_dict["input_ids"] + pixel_values = inputs_dict["pixel_values"] # CLIP needs pixel_values + traced_model = torch.jit.trace(model, (input_ids, pixel_values)) + except RuntimeError: + self.fail("Couldn't trace module.") + + with tempfile.TemporaryDirectory() as tmp_dir_name: + pt_file_name = os.path.join(tmp_dir_name, "traced_model.pt") + + try: + torch.jit.save(traced_model, pt_file_name) + except Exception: + self.fail("Couldn't save module.") + + try: + loaded_model = torch.jit.load(pt_file_name) + except Exception: + self.fail("Couldn't load module.") + + model.to(torch_device) + model.eval() + + loaded_model.to(torch_device) + loaded_model.eval() + + model_state_dict = model.state_dict() + loaded_model_state_dict = loaded_model.state_dict() + + self.assertEqual(set(model_state_dict.keys()), set(loaded_model_state_dict.keys())) + + models_equal = True + for layer_name, p1 in model_state_dict.items(): + p2 = loaded_model_state_dict[layer_name] + if p1.data.ne(p2.data).sum() > 0: + models_equal = False + + self.assertTrue(models_equal) + + @slow + def test_model_from_pretrained(self): + for model_name in CLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: + model = CLIPModel.from_pretrained(model_name) + self.assertIsNotNone(model) + + +# We will verify our results on an image of cute cats +def prepare_img(): + url = "http://images.cocodataset.org/val2017/000000039769.jpg" + im = Image.open(requests.get(url, stream=True).raw) + return im + + +@require_vision +class CLIPModelIntegrationTest(unittest.TestCase): + @slow + def test_inference(self): + model_name = "openai/clip-vit-base-patch32" + model = CLIPModel.from_pretrained(model_name).to(torch_device) + processor = CLIPProcessor.from_pretrained(model_name) + + image = prepare_img() + inputs = processor( + text=["a photo of a cat", "a photo of a dog"], images=image, padding=True, return_tensors="pt" + ).to(torch_device) + + # forward pass + outputs = model(**inputs) + + # verify the logits + self.assertEqual( + outputs.logits_per_image.shape, + torch.Size((inputs.pixel_values.shape[0], inputs.input_ids.shape[0])), + ) + self.assertEqual( + outputs.logits_per_text.shape, + torch.Size((inputs.input_ids.shape[0], inputs.pixel_values.shape[0])), + ) + + expected_logits = torch.Tensor([[24.5056, 
18.8076]]).to(torch_device) + + self.assertTrue(torch.allclose(outputs.logits_per_image, expected_logits, atol=1e-3)) diff --git a/tests/test_processor_clip.py b/tests/test_processor_clip.py new file mode 100644 index 00000000000000..e8d7a73e537b67 --- /dev/null +++ b/tests/test_processor_clip.py @@ -0,0 +1,177 @@ +# Copyright 2021 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import json +import os +import shutil +import tempfile +import unittest + +import numpy as np +import pytest + +from transformers import CLIPTokenizer +from transformers.file_utils import FEATURE_EXTRACTOR_NAME, is_vision_available +from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES +from transformers.testing_utils import require_vision + + +if is_vision_available(): + from PIL import Image + + from transformers import CLIPFeatureExtractor, CLIPProcessor + + +@require_vision +class CLIPProcessorTest(unittest.TestCase): + def setUp(self): + self.tmpdirname = tempfile.mkdtemp() + + # fmt: off + vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "low", "er", "lowest", "newer", "wider", "", "<|endoftext|>"] + # fmt: on + vocab_tokens = dict(zip(vocab, range(len(vocab)))) + merges = ["#version: 0.2", "l o", "lo w", "e r", ""] + self.special_tokens_map = {"unk_token": ""} + + self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"]) + self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"]) + with open(self.vocab_file, "w", encoding="utf-8") as fp: + fp.write(json.dumps(vocab_tokens) + "\n") + with open(self.merges_file, "w", encoding="utf-8") as fp: + fp.write("\n".join(merges)) + + feature_extractor_map = { + "do_resize": True, + "size": 20, + "do_center_crop": True, + "crop_size": 18, + "do_normalize": True, + "image_mean": [0.48145466, 0.4578275, 0.40821073], + "image_std": [0.26862954, 0.26130258, 0.27577711], + } + self.feature_extractor_file = os.path.join(self.tmpdirname, FEATURE_EXTRACTOR_NAME) + with open(self.feature_extractor_file, "w", encoding="utf-8") as fp: + json.dump(feature_extractor_map, fp) + + def get_tokenizer(self, **kwargs): + return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs) + + def get_feature_extractor(self, **kwargs): + return CLIPFeatureExtractor.from_pretrained(self.tmpdirname, **kwargs) + + def tearDown(self): + shutil.rmtree(self.tmpdirname) + + def prepare_image_inputs(self): + """This function prepares a list of PIL images, or a list of numpy arrays if one specifies numpify=True, + or a list of PyTorch tensors if one specifies torchify=True. 
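The integration test above pins logits_per_image for openai/clip-vit-base-patch32 at roughly [[24.5056, 18.8076]]; in typical zero-shot use those image-to-text logits are pushed through a softmax to rank the candidate captions. A minimal sketch along the lines of that test, with the softmax step added here purely for illustration (it is not part of the test itself):

>>> import requests, torch
>>> from PIL import Image
>>> from transformers import CLIPModel, CLIPProcessor

>>> model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
>>> processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")

>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> inputs = processor(text=["a photo of a cat", "a photo of a dog"], images=image, padding=True, return_tensors="pt")

>>> with torch.no_grad():
...     outputs = model(**inputs)
>>> probs = outputs.logits_per_image.softmax(dim=-1)  # one row per image, one column per caption

With the logits checked by the test, essentially all of the probability mass lands on the cat caption.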
+ """ + + image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)] + + image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs] + + return image_inputs + + def test_save_load_pretrained_default(self): + tokenizer = self.get_tokenizer() + feature_extractor = self.get_feature_extractor() + + processor = CLIPProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor) + + processor.save_pretrained(self.tmpdirname) + processor = CLIPProcessor.from_pretrained(self.tmpdirname) + + self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab()) + self.assertIsInstance(processor.tokenizer, CLIPTokenizer) + + self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor.to_json_string()) + self.assertIsInstance(processor.feature_extractor, CLIPFeatureExtractor) + + def test_save_load_pretrained_additional_features(self): + processor = CLIPProcessor(tokenizer=self.get_tokenizer(), feature_extractor=self.get_feature_extractor()) + processor.save_pretrained(self.tmpdirname) + + tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)") + feature_extractor_add_kwargs = self.get_feature_extractor(do_normalize=False, padding_value=1.0) + + processor = CLIPProcessor.from_pretrained( + self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0 + ) + + self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab()) + self.assertIsInstance(processor.tokenizer, CLIPTokenizer) + + self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor_add_kwargs.to_json_string()) + self.assertIsInstance(processor.feature_extractor, CLIPFeatureExtractor) + + def test_feature_extractor(self): + feature_extractor = self.get_feature_extractor() + tokenizer = self.get_tokenizer() + + processor = CLIPProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor) + + image_input = self.prepare_image_inputs() + + input_feat_extract = feature_extractor(image_input, return_tensors="np") + input_processor = processor(images=image_input, return_tensors="np") + + for key in input_feat_extract.keys(): + self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2) + + def test_tokenizer(self): + feature_extractor = self.get_feature_extractor() + tokenizer = self.get_tokenizer() + + processor = CLIPProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor) + + input_str = "lower newer" + + encoded_processor = processor(text=input_str) + + encoded_tok = tokenizer(input_str) + + for key in encoded_tok.keys(): + self.assertListEqual(encoded_tok[key], encoded_processor[key]) + + def test_processor(self): + feature_extractor = self.get_feature_extractor() + tokenizer = self.get_tokenizer() + + processor = CLIPProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor) + + input_str = "lower newer" + image_input = self.prepare_image_inputs() + + inputs = processor(text=input_str, images=image_input) + + self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask", "pixel_values"]) + + # test if it raises when no input is passed + with pytest.raises(ValueError): + processor() + + def test_tokenizer_decode(self): + feature_extractor = self.get_feature_extractor() + tokenizer = self.get_tokenizer() + + processor = CLIPProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor) + + predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] + + decoded_processor = processor.batch_decode(predicted_ids) + 
decoded_tok = tokenizer.batch_decode(predicted_ids) + + self.assertListEqual(decoded_tok, decoded_processor) diff --git a/tests/test_tokenization_clip.py b/tests/test_tokenization_clip.py new file mode 100644 index 00000000000000..f7911d0f257275 --- /dev/null +++ b/tests/test_tokenization_clip.py @@ -0,0 +1,207 @@ +# coding=utf-8 +# Copyright 2021 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +import json +import os +import unittest + +from transformers import CLIPTokenizer, CLIPTokenizerFast +from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES +from transformers.testing_utils import require_tokenizers + +from .test_tokenization_common import TokenizerTesterMixin + + +@require_tokenizers +class CLIPTokenizationTest(TokenizerTesterMixin, unittest.TestCase): + + tokenizer_class = CLIPTokenizer + rust_tokenizer_class = CLIPTokenizerFast + from_pretrained_kwargs = {"add_prefix_space": True} + test_seq2seq = False + + def setUp(self): + super().setUp() + + # fmt: off + vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "low", "er", "lowest", "newer", "wider", "", "<|endoftext|>"] + # fmt: on + vocab_tokens = dict(zip(vocab, range(len(vocab)))) + merges = ["#version: 0.2", "l o", "lo w", "e r", ""] + self.special_tokens_map = {"unk_token": ""} + + self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"]) + self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"]) + with open(self.vocab_file, "w", encoding="utf-8") as fp: + fp.write(json.dumps(vocab_tokens) + "\n") + with open(self.merges_file, "w", encoding="utf-8") as fp: + fp.write("\n".join(merges)) + + def get_tokenizer(self, **kwargs): + kwargs.update(self.special_tokens_map) + return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs) + + def get_rust_tokenizer(self, **kwargs): + kwargs.update(self.special_tokens_map) + return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs) + + def get_input_output_texts(self, tokenizer): + input_text = "lower newer" + output_text = "lower newer " + return input_text, output_text + + def test_full_tokenizer(self): + tokenizer = CLIPTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map) + text = "lower newer" + bpe_tokens = ["lo", "w", "er", "n", "e", "w", "er"] + tokens = tokenizer.tokenize(text, add_prefix_space=True) + self.assertListEqual(tokens, bpe_tokens) + + input_tokens = tokens + [tokenizer.unk_token] + input_bpe_tokens = [10, 2, 12, 9, 3, 2, 12, 16] + self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens) + + def test_rust_and_python_full_tokenizers(self): + if not self.test_rust_tokenizer: + return + + tokenizer = self.get_tokenizer() + rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True) + + sequence = "lower newer" + + # Testing tokenization + tokens = tokenizer.tokenize(sequence, add_prefix_space=True) + rust_tokens = rust_tokenizer.tokenize(sequence) + self.assertListEqual(tokens, 
rust_tokens) + + # Testing conversion to ids without special tokens + ids = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=True) + rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False) + self.assertListEqual(ids, rust_ids) + + # Testing conversion to ids with special tokens + rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True) + ids = tokenizer.encode(sequence, add_prefix_space=True) + rust_ids = rust_tokenizer.encode(sequence) + self.assertListEqual(ids, rust_ids) + + # Testing the unknown token + input_tokens = tokens + [rust_tokenizer.unk_token] + input_bpe_tokens = [10, 2, 12, 9, 3, 2, 12, 16] + self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens) + + def test_pretokenized_inputs(self, *args, **kwargs): + # It's very difficult to mix/test pretokenization with byte-level + # And get both CLIP and Roberta to work at the same time (mostly an issue of adding a space before the string) + pass + + def test_padding(self, max_length=15): + for tokenizer, pretrained_name, kwargs in self.tokenizers_list: + with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"): + tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs) + + # Simple input + s = "This is a simple input" + s2 = ["This is a simple input 1", "This is a simple input 2"] + p = ("This is a simple input", "This is a pair") + p2 = [ + ("This is a simple input 1", "This is a simple input 2"), + ("This is a simple pair 1", "This is a simple pair 2"), + ] + + # Simple input tests + self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length") + + # Simple input + self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length") + + # Simple input + self.assertRaises( + ValueError, + tokenizer_r.batch_encode_plus, + s2, + max_length=max_length, + padding="max_length", + ) + + # Pair input + self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length") + + # Pair input + self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length") + + # Pair input + self.assertRaises( + ValueError, + tokenizer_r.batch_encode_plus, + p2, + max_length=max_length, + padding="max_length", + ) + + def test_add_tokens_tokenizer(self): + tokenizers = self.get_tokenizers(do_lower_case=False) + for tokenizer in tokenizers: + with self.subTest(f"{tokenizer.__class__.__name__}"): + vocab_size = tokenizer.vocab_size + all_size = len(tokenizer) + + self.assertNotEqual(vocab_size, 0) + + # We usually have added tokens from the start in tests because our vocab fixtures are + # smaller than the original vocabs - let's not assert this + # self.assertEqual(vocab_size, all_size) + + new_toks = ["aaaaa bbbbbb", "cccccccccdddddddd"] + added_toks = tokenizer.add_tokens(new_toks) + vocab_size_2 = tokenizer.vocab_size + all_size_2 = len(tokenizer) + + self.assertNotEqual(vocab_size_2, 0) + self.assertEqual(vocab_size, vocab_size_2) + self.assertEqual(added_toks, len(new_toks)) + self.assertEqual(all_size_2, all_size + len(new_toks)) + + tokens = tokenizer.encode("aaaaa bbbbbb low cccccccccdddddddd l", add_special_tokens=False) + + self.assertGreaterEqual(len(tokens), 4) + self.assertGreater(tokens[0], tokenizer.vocab_size - 1) + self.assertGreater(tokens[-2], tokenizer.vocab_size - 1) + + new_toks_2 = {"eos_token": ">>>>|||<||<<|<<", "pad_token": "<<<<<|||>|>>>>|>"} + added_toks_2 = 
tokenizer.add_special_tokens(new_toks_2) + vocab_size_3 = tokenizer.vocab_size + all_size_3 = len(tokenizer) + + self.assertNotEqual(vocab_size_3, 0) + self.assertEqual(vocab_size, vocab_size_3) + self.assertEqual(added_toks_2, len(new_toks_2)) + self.assertEqual(all_size_3, all_size_2 + len(new_toks_2)) + + tokens = tokenizer.encode( + ">>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l", add_special_tokens=False + ) + + self.assertGreaterEqual(len(tokens), 6) + self.assertGreater(tokens[0], tokenizer.vocab_size - 1) + self.assertGreater(tokens[0], tokens[1]) + self.assertGreater(tokens[-2], tokenizer.vocab_size - 1) + self.assertGreater(tokens[-2], tokens[-3]) + self.assertEqual(tokens[0], tokenizer.eos_token_id) + # padding is very hacky in CLIPTokenizer, pad_token_id is always 0 + # so skip this check + # self.assertEqual(tokens[-2], tokenizer.pad_token_id) diff --git a/utils/check_repo.py b/utils/check_repo.py index 0077fcc7e6be82..63d9db1194ded5 100644 --- a/utils/check_repo.py +++ b/utils/check_repo.py @@ -91,6 +91,8 @@ # should **not** be the rule. IGNORE_NON_AUTO_CONFIGURED = [ # models to ignore for model xxx mapping + "CLIPTextModel", + "CLIPVisionModel", "DPRReader", "DPRSpanPredictor", "FlaubertForQuestionAnswering", From f063c56d942737d2c7aac93895cd8310afd9c7a4 Mon Sep 17 00:00:00 2001 From: Suraj Patil Date: Wed, 12 May 2021 15:28:30 +0530 Subject: [PATCH 15/41] Fix clip docs (#11694) * fix doc url * fix example --- README.md | 2 +- docs/source/index.rst | 2 +- src/transformers/models/clip/configuration_clip.py | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/README.md b/README.md index 87b3b07fdbde9f..ffdf0db9e8a9ec 100644 --- a/README.md +++ b/README.md @@ -200,7 +200,7 @@ Current number of checkpoints: ![](https://img.shields.io/endpoint?url=https://h 1. **[BlenderbotSmall](https://huggingface.co/transformers/model_doc/blenderbot_small.html)** (from Facebook) released with the paper [Recipes for building an open-domain chatbot](https://arxiv.org/abs/2004.13637) by Stephen Roller, Emily Dinan, Naman Goyal, Da Ju, Mary Williamson, Yinhan Liu, Jing Xu, Myle Ott, Kurt Shuster, Eric M. Smith, Y-Lan Boureau, Jason Weston. 1. **[BORT](https://huggingface.co/transformers/model_doc/bort.html)** (from Alexa) released with the paper [Optimal Subarchitecture Extraction For BERT](https://arxiv.org/abs/2010.10499) by Adrian de Wynter and Daniel J. Perry. 1. **[CamemBERT](https://huggingface.co/transformers/model_doc/camembert.html)** (from Inria/Facebook/Sorbonne) released with the paper [CamemBERT: a Tasty French Language Model](https://arxiv.org/abs/1911.03894) by Louis Martin*, Benjamin Muller*, Pedro Javier Ortiz Suárez*, Yoann Dupont, Laurent Romary, Éric Villemonte de la Clergerie, Djamé Seddah and Benoît Sagot. -1. **[CLIP](https://huggingface.co/transformers/model_doc/camembert.html)** from (OpenAI) released with the paper [Learning Transferable Visual Models From Natural Language Supervision](https://arxiv.org/abs/2103.00020) by Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, Gretchen Krueger, Ilya Sutskever. +1. 
**[CLIP](https://huggingface.co/transformers/model_doc/clip.html)** from (OpenAI) released with the paper [Learning Transferable Visual Models From Natural Language Supervision](https://arxiv.org/abs/2103.00020) by Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, Gretchen Krueger, Ilya Sutskever. 1. **[ConvBERT](https://huggingface.co/transformers/model_doc/convbert.html)** (from YituTech) released with the paper [ConvBERT: Improving BERT with Span-based Dynamic Convolution](https://arxiv.org/abs/2008.02496) by Zihang Jiang, Weihao Yu, Daquan Zhou, Yunpeng Chen, Jiashi Feng, Shuicheng Yan. 1. **[CPM](https://huggingface.co/transformers/model_doc/cpm.html)** (from Tsinghua University) released with the paper [CPM: A Large-scale Generative Chinese Pre-trained Language Model](https://arxiv.org/abs/2012.00413) by Zhengyan Zhang, Xu Han, Hao Zhou, Pei Ke, Yuxian Gu, Deming Ye, Yujia Qin, Yusheng Su, Haozhe Ji, Jian Guan, Fanchao Qi, Xiaozhi Wang, Yanan Zheng, Guoyang Zeng, Huanqi Cao, Shengqi Chen, Daixuan Li, Zhenbo Sun, Zhiyuan Liu, Minlie Huang, Wentao Han, Jie Tang, Juanzi Li, Xiaoyan Zhu, Maosong Sun. 1. **[CTRL](https://huggingface.co/transformers/model_doc/ctrl.html)** (from Salesforce) released with the paper [CTRL: A Conditional Transformer Language Model for Controllable Generation](https://arxiv.org/abs/1909.05858) by Nitish Shirish Keskar*, Bryan McCann*, Lav R. Varshney, Caiming Xiong and Richard Socher. diff --git a/docs/source/index.rst b/docs/source/index.rst index 1fac89a4821bac..ad6f8360d89e96 100644 --- a/docs/source/index.rst +++ b/docs/source/index.rst @@ -114,7 +114,7 @@ conversion utilities for the following models: 11. :doc:`CamemBERT ` (from Inria/Facebook/Sorbonne) released with the paper `CamemBERT: a Tasty French Language Model `__ by Louis Martin*, Benjamin Muller*, Pedro Javier Ortiz Suárez*, Yoann Dupont, Laurent Romary, Éric Villemonte de la Clergerie, Djamé Seddah and Benoît Sagot. -12. :doc:`CLIP ` from (OpenAI) released with the paper `Learning Transferable Visual Models From +12. :doc:`CLIP ` from (OpenAI) released with the paper `Learning Transferable Visual Models From Natural Language Supervision `__ by Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, Gretchen Krueger, Ilya Sutskever. diff --git a/src/transformers/models/clip/configuration_clip.py b/src/transformers/models/clip/configuration_clip.py index 849b5d906c99d3..ba19563a19293b 100644 --- a/src/transformers/models/clip/configuration_clip.py +++ b/src/transformers/models/clip/configuration_clip.py @@ -71,7 +71,7 @@ class CLIPTextConfig(PretrainedConfig): gradient_checkpointing (:obj:`bool`, `optional`, defaults to :obj:`False`): If True, use gradient checkpointing to save memory at the expense of slower backward pass. 
- Example:: + Example:: >>> from transformers import CLIPTextModel, CLIPTextConfig From 6797cdc077782a8d9b94b57620b8b6a832791e80 Mon Sep 17 00:00:00 2001 From: Marc van Zee Date: Wed, 12 May 2021 14:52:52 +0200 Subject: [PATCH 16/41] Updates README and fixes bug (#11701) --- examples/flax/text-classification/README.md | 32 ++++++++++++------- .../flax/text-classification/run_flax_glue.py | 4 +-- 2 files changed, 23 insertions(+), 13 deletions(-) diff --git a/examples/flax/text-classification/README.md b/examples/flax/text-classification/README.md index cdb0c905c7957a..28267351013934 100644 --- a/examples/flax/text-classification/README.md +++ b/examples/flax/text-classification/README.md @@ -83,14 +83,24 @@ We also ran each task once on a single V100 GPU, 8 V100 GPUs, and 8 Cloud v3 TPU overall training time below. For comparison we ran Pytorch's [run_glue.py](https://github.com/huggingface/transformers/blob/master/examples/pytorch/text-classification/run_glue.py) on a single GPU (last column). -| Task | 8 TPU | 8 GPU | 1 GPU | 1 GPU (Pytorch) | -|-------|---------|---------|------------|-----------------| -| CoLA | 1m 46s | 1m 26s | 3m 6s | 4m 6s | -| SST-2 | 5m 30s | 6m 28s | 22m 6s | 34m 37s | -| MRPC | 1m 32s | 1m 14s | 2m 17s | 2m 56s | -| STS-B | 1m 33s | 1m 12s | 2m 11s | 2m 48s | -| QQP | 24m 40s | 31m 48s | 1h 20m 15s | 2h 54m | -| MNLI | 26m 30s | 33m 55s | 2h 7m 30s | 3u 7m 6s | -| QNLI | 8m | 9m 40s | 34m 20s | 49m 8s | -| RTE | 1m 21s | 55s | 1m 8s | 1m 16s | -| WNLI | 1m 12s | 48s | 38s | 36s | +| Task | TPU v3-8 | 8 GPU | 1 GPU | 1 GPU (Pytorch) | +|-------|-----------|------------|------------|-----------------| +| CoLA | 1m 46s | 1m 26s | 3m 6s | 4m 6s | +| SST-2 | 5m 30s | 6m 28s | 22m 6s | 34m 37s | +| MRPC | 1m 32s | 1m 14s | 2m 17s | 2m 56s | +| STS-B | 1m 33s | 1m 12s | 2m 11s | 2m 48s | +| QQP | 24m 40s | 31m 48s | 1h 20m 15s | 2h 54m | +| MNLI | 26m 30s | 33m 55s | 2h 7m 30s | 3h 7m 6s | +| QNLI | 8m | 9m 40s | 34m 20s | 49m 8s | +| RTE | 1m 21s | 55s | 1m 8s | 1m 16s | +| WNLI | 1m 12s | 48s | 38s | 36s | +|-------| +| **TOTAL** | 1h 13m | 1h 28m | 4h 34m | 6h 37m | +| **COST*** | $9.60 | $29.10 | $11.33 | $16.41 | + + +*All experiments are ran on Google Cloud Platform. Prices are on-demand prices +(not preemptible), obtained from the following tables: +[TPU pricing table](https://cloud.google.com/tpu/pricing), +[GPU pricing table](https://cloud.google.com/compute/gpus-pricing). GPU +experiments are ran without further optimizations besides JAX transformations. \ No newline at end of file diff --git a/examples/flax/text-classification/run_flax_glue.py b/examples/flax/text-classification/run_flax_glue.py index 217b7bdc382463..f405dd9fc767eb 100755 --- a/examples/flax/text-classification/run_flax_glue.py +++ b/examples/flax/text-classification/run_flax_glue.py @@ -473,8 +473,8 @@ def eval_step(state, batch): dropout_rngs = shard_prng_key(dropout_rng) state, metrics = p_train_step(state, batch, dropout_rngs) train_metrics.append(metrics) - train_time += time.time() - train_start - logger.info(f" Done! Training metrics: {unreplicate(metrics)}") + train_time += time.time() - train_start + logger.info(f" Done! 
Training metrics: {unreplicate(metrics)}") logger.info(" Evaluating...") rng, input_rng = jax.random.split(rng) From 77f4c46b501322e9bffb5416dfbf0397deefd7d8 Mon Sep 17 00:00:00 2001 From: Philip May Date: Wed, 12 May 2021 15:11:10 +0200 Subject: [PATCH 17/41] remove defaults to None if optional (#11703) --- examples/research_projects/wav2vec2/run_asr.py | 4 ++-- src/transformers/debug_utils.py | 2 +- src/transformers/modeling_tf_utils.py | 2 +- src/transformers/modeling_utils.py | 2 +- src/transformers/models/albert/tokenization_albert_fast.py | 4 ++-- .../models/big_bird/tokenization_big_bird_fast.py | 6 +++--- src/transformers/models/ibert/quant_modules.py | 6 +++--- src/transformers/models/mpnet/modeling_mpnet.py | 2 +- src/transformers/models/mpnet/tokenization_mpnet.py | 2 +- .../models/xlm_prophetnet/tokenization_xlm_prophetnet.py | 2 +- src/transformers/pipelines/text2text_generation.py | 4 ++-- 11 files changed, 18 insertions(+), 18 deletions(-) diff --git a/examples/research_projects/wav2vec2/run_asr.py b/examples/research_projects/wav2vec2/run_asr.py index 5e62cb504eb127..410d5c2d3a6229 100755 --- a/examples/research_projects/wav2vec2/run_asr.py +++ b/examples/research_projects/wav2vec2/run_asr.py @@ -144,7 +144,7 @@ class Orthography: Args: do_lower_case (:obj:`bool`, `optional`, defaults to :obj:`False`): Whether or not to accept lowercase input and lowercase the output when decoding. - vocab_file (:obj:`str`, `optional`, defaults to :obj:`None`): + vocab_file (:obj:`str`, `optional`): File containing the vocabulary. word_delimiter_token (:obj:`str`, `optional`, defaults to :obj:`"|"`): The token used for delimiting words; it needs to be in the vocabulary. @@ -152,7 +152,7 @@ class Orthography: Table to use with `str.translate()` when preprocessing text (e.g., "-" -> " "). words_to_remove (:obj:`Set[str]`, `optional`, defaults to :obj:`set()`): Words to remove when preprocessing text (e.g., "sil"). - untransliterator (:obj:`Callable[[str], str]`, `optional`, defaults to :obj:`None`): + untransliterator (:obj:`Callable[[str], str]`, `optional`): Function that untransliterates text back into native writing system. """ diff --git a/src/transformers/debug_utils.py b/src/transformers/debug_utils.py index 45384a80134ba1..537f897b49f845 100644 --- a/src/transformers/debug_utils.py +++ b/src/transformers/debug_utils.py @@ -118,7 +118,7 @@ class DebugUnderflowOverflow: How many frames back to record trace_batch_nums(:obj:`List[int]`, `optional`, defaults to ``[]``): Which batch numbers to trace (turns detection off) - abort_after_batch_num (:obj:`int`, `optional`, defaults to :obj:`None`): + abort_after_batch_num (:obj:`int`, `optional`): Whether to abort after a certain batch number has finished """ diff --git a/src/transformers/modeling_tf_utils.py b/src/transformers/modeling_tf_utils.py index 4bf12af5573cf1..16af519e2345ea 100644 --- a/src/transformers/modeling_tf_utils.py +++ b/src/transformers/modeling_tf_utils.py @@ -1128,7 +1128,7 @@ def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs): The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a git-based system for storing models and other artifacts on huggingface.co, so ``revision`` can be any identifier allowed by git. - mirror(:obj:`str`, `optional`, defaults to :obj:`None`): + mirror(:obj:`str`, `optional`): Mirror source to accelerate downloads in China. If you are from China and have an accessibility problem, you can set this option to resolve it. 
Note that we do not guarantee the timeliness or safety. Please refer to the mirror site for more information. diff --git a/src/transformers/modeling_utils.py b/src/transformers/modeling_utils.py index 4247f4c2a6dbd6..ca8ae2267109d7 100644 --- a/src/transformers/modeling_utils.py +++ b/src/transformers/modeling_utils.py @@ -975,7 +975,7 @@ def from_pretrained(cls, pretrained_model_name_or_path: Optional[Union[str, os.P The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a git-based system for storing models and other artifacts on huggingface.co, so ``revision`` can be any identifier allowed by git. - mirror(:obj:`str`, `optional`, defaults to :obj:`None`): + mirror(:obj:`str`, `optional`): Mirror source to accelerate downloads in China. If you are from China and have an accessibility problem, you can set this option to resolve it. Note that we do not guarantee the timeliness or safety. Please refer to the mirror site for more information. diff --git a/src/transformers/models/albert/tokenization_albert_fast.py b/src/transformers/models/albert/tokenization_albert_fast.py index cb817ddcc01fdb..9aa18317042dab 100644 --- a/src/transformers/models/albert/tokenization_albert_fast.py +++ b/src/transformers/models/albert/tokenization_albert_fast.py @@ -172,7 +172,7 @@ def build_inputs_with_special_tokens( Args: token_ids_0 (:obj:`List[int]`): List of IDs to which the special tokens will be added - token_ids_1 (:obj:`List[int]`, `optional`, defaults to :obj:`None`): + token_ids_1 (:obj:`List[int]`, `optional`): Optional second list of IDs for sequence pairs. Returns: @@ -201,7 +201,7 @@ def create_token_type_ids_from_sequences( Args: token_ids_0 (:obj:`List[int]`): List of ids. - token_ids_1 (:obj:`List[int]`, `optional`, defaults to :obj:`None`): + token_ids_1 (:obj:`List[int]`, `optional`): Optional second list of IDs for sequence pairs. Returns: diff --git a/src/transformers/models/big_bird/tokenization_big_bird_fast.py b/src/transformers/models/big_bird/tokenization_big_bird_fast.py index cbe2b741331659..e5b1e5bab0e285 100644 --- a/src/transformers/models/big_bird/tokenization_big_bird_fast.py +++ b/src/transformers/models/big_bird/tokenization_big_bird_fast.py @@ -152,7 +152,7 @@ def build_inputs_with_special_tokens( Args: token_ids_0 (:obj:`List[int]`): List of IDs to which the special tokens will be added - token_ids_1 (:obj:`List[int]`, `optional`, defaults to :obj:`None`): + token_ids_1 (:obj:`List[int]`, `optional`): Optional second list of IDs for sequence pairs. Returns: @@ -174,7 +174,7 @@ def get_special_tokens_mask( Args: token_ids_0 (:obj:`List[int]`): List of ids. - token_ids_1 (:obj:`List[int]`, `optional`, defaults to :obj:`None`): + token_ids_1 (:obj:`List[int]`, `optional`): Optional second list of IDs for sequence pairs. already_has_special_tokens (:obj:`bool`, `optional`, defaults to :obj:`False`): Set to True if the token list is already formatted with special tokens for the model @@ -212,7 +212,7 @@ def create_token_type_ids_from_sequences( Args: token_ids_0 (:obj:`List[int]`): List of ids. - token_ids_1 (:obj:`List[int]`, `optional`, defaults to :obj:`None`): + token_ids_1 (:obj:`List[int]`, `optional`): Optional second list of IDs for sequence pairs. 
Returns: diff --git a/src/transformers/models/ibert/quant_modules.py b/src/transformers/models/ibert/quant_modules.py index 065a3fef6144de..d1da18686abd37 100644 --- a/src/transformers/models/ibert/quant_modules.py +++ b/src/transformers/models/ibert/quant_modules.py @@ -124,7 +124,7 @@ class QuantAct(nn.Module): Momentum for updating the activation quantization range. per_channel (:obj:`bool`, `optional`, defaults to :obj:`False`): Whether to or not use channel-wise quantization. - channel_len (:obj:`int`, `optional`, defaults to :obj:`None`): + channel_len (:obj:`int`, `optional`): Specify the channel length when set the `per_channel` True. quant_mode (:obj:`bool`, `optional`, defaults to :obj:`False`): Whether or not the layer is quantized. @@ -755,9 +755,9 @@ class FixedPointMul(Function): Quantization bitwidth. z_scaling_factor (:obj:`torch.Tensor`): Scaling factor of the output tensor. - identity (:obj:`torch.Tensor`, `optional`, defaults to :obj:`None`): + identity (:obj:`torch.Tensor`, `optional`): Identity tensor, if exists. - identity_scaling_factor (:obj:`torch.Tensor`, `optional`, defaults to :obj:`None`): + identity_scaling_factor (:obj:`torch.Tensor`, `optional`): Scaling factor of the identity tensor `identity`, if exists. Returns: diff --git a/src/transformers/models/mpnet/modeling_mpnet.py b/src/transformers/models/mpnet/modeling_mpnet.py index f1327a87197620..90ba92242bc623 100644 --- a/src/transformers/models/mpnet/modeling_mpnet.py +++ b/src/transformers/models/mpnet/modeling_mpnet.py @@ -444,7 +444,7 @@ def forward(self, hidden_states): details. `What are input IDs? <../glossary.html#input-ids>`__ - attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`): + attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`): Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``: - 1 for tokens that are **not masked**, diff --git a/src/transformers/models/mpnet/tokenization_mpnet.py b/src/transformers/models/mpnet/tokenization_mpnet.py index 98af763ade64ae..7bbefb4946430d 100644 --- a/src/transformers/models/mpnet/tokenization_mpnet.py +++ b/src/transformers/models/mpnet/tokenization_mpnet.py @@ -235,7 +235,7 @@ def build_inputs_with_special_tokens( Args: token_ids_0 (:obj:`List[int]`): List of IDs to which the special tokens will be added - token_ids_1 (:obj:`List[int]`, `optional`, defaults to :obj:`None`): + token_ids_1 (:obj:`List[int]`, `optional`): Optional second list of IDs for sequence pairs. Returns: diff --git a/src/transformers/models/xlm_prophetnet/tokenization_xlm_prophetnet.py b/src/transformers/models/xlm_prophetnet/tokenization_xlm_prophetnet.py index 9c2d90914a6d8f..b2707f8dcb2a7f 100644 --- a/src/transformers/models/xlm_prophetnet/tokenization_xlm_prophetnet.py +++ b/src/transformers/models/xlm_prophetnet/tokenization_xlm_prophetnet.py @@ -290,7 +290,7 @@ def build_inputs_with_special_tokens( Args: token_ids_0 (:obj:`List[int]`): List of IDs to which the special tokens will be added - token_ids_1 (:obj:`List[int]`, `optional`, defaults to :obj:`None`): + token_ids_1 (:obj:`List[int]`, `optional`): Optional second list of IDs for sequence pairs. 
Returns: diff --git a/src/transformers/pipelines/text2text_generation.py b/src/transformers/pipelines/text2text_generation.py index 96aaf3d19fb84a..346f178bbc9201 100644 --- a/src/transformers/pipelines/text2text_generation.py +++ b/src/transformers/pipelines/text2text_generation.py @@ -295,10 +295,10 @@ def __call__( Whether or not to include the decoded texts in the outputs. clean_up_tokenization_spaces (:obj:`bool`, `optional`, defaults to :obj:`False`): Whether or not to clean up the potential extra spaces in the text output. - src_lang (:obj:`str`, `optional`, defaults to :obj:`None`): + src_lang (:obj:`str`, `optional`): The language of the input. Might be required for multilingual models. Will not have any effect for single pair translation models - tgt_lang (:obj:`str`, `optional`, defaults to :obj:`None`): + tgt_lang (:obj:`str`, `optional`): The language of the desired output. Might be required for multilingual models. Will not have any effect for single pair translation models generate_kwargs: From 5c1cda9d3cfc68fb1e17a865c2b4e7cf2e668c3d Mon Sep 17 00:00:00 2001 From: Suraj Patil Date: Wed, 12 May 2021 19:18:52 +0530 Subject: [PATCH 18/41] fix example in config doc (#11696) --- .../models/clip/configuration_clip.py | 15 +++++++-------- 1 file changed, 7 insertions(+), 8 deletions(-) diff --git a/src/transformers/models/clip/configuration_clip.py b/src/transformers/models/clip/configuration_clip.py index ba19563a19293b..261956e6b15249 100644 --- a/src/transformers/models/clip/configuration_clip.py +++ b/src/transformers/models/clip/configuration_clip.py @@ -164,7 +164,7 @@ class CLIPVisionConfig(PretrainedConfig): gradient_checkpointing (:obj:`bool`, `optional`, defaults to :obj:`False`): If True, use gradient checkpointing to save memory at the expense of slower backward pass. - Example:: + Example:: >>> from transformers import CLIPVisionModel, CLIPVisionConfig @@ -224,15 +224,14 @@ class CLIPConfig(PretrainedConfig): outputs. Read the documentation from :class:`~transformers.PretrainedConfig` for more information. Args: - projection_dim: (:obj:`int`, `optional`, defaults to 512): + text_config_dict (:obj:`dict`, `optional`): + Dictionary of configuration options used to initialize :class:`~transformers.CLIPTextConfig`. + vision_config_dict (:obj:`dict`, `optional`): + Dictionary of configuration options used to initialize :class:`~transformers.CLIPVisionConfig`. + projection_dim (:obj:`int`, `optional`, defaults to 512): Dimentionality of text and vision projection layers. kwargs (`optional`): - Dictionary of keyword arguments. Notably: - - - **text_config** (:class:`~transformers.CLIPTextConfig`, `optional`) -- An instance of a configuration - object that defines the text model config. - - **vision_config** (:class:`~transformers.CLIPVisionConfig`, `optional`) -- An instance of a - configuration object that defines the vision model config. + Dictionary of keyword arguments. 
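To make these options concrete, a short sketch of the two usual ways of composing a CLIPConfig — from plain keyword dicts, or from already-instantiated sub-configs as the model tester earlier in this patch series does (the hidden sizes below are illustrative values, not checkpoint defaults):

>>> from transformers import CLIPConfig, CLIPTextConfig, CLIPVisionConfig

>>> config = CLIPConfig(text_config_dict={"hidden_size": 256}, vision_config_dict={"hidden_size": 384}, projection_dim=512)

>>> text_config = CLIPTextConfig()
>>> vision_config = CLIPVisionConfig()
>>> config = CLIPConfig.from_text_vision_configs(text_config, vision_config, projection_dim=512)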
""" model_type = "clip" From fd6204b2a70d100800cb259a7fbddfc812631ed3 Mon Sep 17 00:00:00 2001 From: Patrick von Platen Date: Wed, 12 May 2021 15:52:54 +0100 Subject: [PATCH 19/41] [Lazy init] Force fall back to slow init for composite models (#11705) * fix encoder-decoder & RAG * finalize * Update src/transformers/models/encoder_decoder/modeling_encoder_decoder.py Co-authored-by: Lysandre Debut * Update src/transformers/models/rag/modeling_rag.py Co-authored-by: Lysandre Debut Co-authored-by: Patrick von Platen Co-authored-by: Lysandre Debut --- src/transformers/modeling_utils.py | 24 ++++++++++++------- .../modeling_encoder_decoder.py | 7 ++++++ src/transformers/models/rag/modeling_rag.py | 7 ++++++ 3 files changed, 30 insertions(+), 8 deletions(-) diff --git a/src/transformers/modeling_utils.py b/src/transformers/modeling_utils.py index ca8ae2267109d7..9ab8824067c54e 100644 --- a/src/transformers/modeling_utils.py +++ b/src/transformers/modeling_utils.py @@ -510,6 +510,12 @@ def get_output_embeddings(self) -> nn.Module: """ return None # Overwrite for models with output embeddings + def _init_weights(self, module): + """ + Initialize the weights. This method should be overridden by derived class. + """ + raise NotImplementedError(f"Make sure `_init_weigths` is implemented for {self.__class__}") + def tie_weights(self): """ Tie the weights between the input embeddings and the output embeddings. @@ -1205,7 +1211,7 @@ def from_pretrained(cls, pretrained_model_name_or_path: Optional[Union[str, os.P ) model, missing_keys, unexpected_keys, error_msgs = cls._load_state_dict_into_model( - model, state_dict, pretrained_model_name_or_path + model, state_dict, pretrained_model_name_or_path, _fast_init=_fast_init ) # make sure token embedding weights are still tied if needed @@ -1225,7 +1231,7 @@ def from_pretrained(cls, pretrained_model_name_or_path: Optional[Union[str, os.P return model @classmethod - def _load_state_dict_into_model(cls, model, state_dict, pretrained_model_name_or_path): + def _load_state_dict_into_model(cls, model, state_dict, pretrained_model_name_or_path, _fast_init=True): # Convert old format to new format if needed from a PyTorch state_dict old_keys = [] @@ -1273,12 +1279,14 @@ def _load_state_dict_into_model(cls, model, state_dict, pretrained_model_name_or for pat in cls._keys_to_ignore_on_load_unexpected: unexpected_keys = [k for k in unexpected_keys if re.search(pat, k) is None] - # tie unintialized modules - unintialized_modules = model.retrieve_modules_from_names( - missing_keys, add_prefix=add_prefix, remove_prefix=remove_prefix - ) - for module in unintialized_modules: - model._init_weights(module) + if _fast_init: + # retrieve unintialized modules and initialize + unintialized_modules = model.retrieve_modules_from_names( + missing_keys, add_prefix=add_prefix, remove_prefix=remove_prefix + ) + for module in unintialized_modules: + model._init_weights(module) + # copy state_dict so _load_from_state_dict can modify it metadata = getattr(state_dict, "_metadata", None) state_dict = state_dict.copy() diff --git a/src/transformers/models/encoder_decoder/modeling_encoder_decoder.py b/src/transformers/models/encoder_decoder/modeling_encoder_decoder.py index 3696cf9167b18d..b3bb1eb6036597 100644 --- a/src/transformers/models/encoder_decoder/modeling_encoder_decoder.py +++ b/src/transformers/models/encoder_decoder/modeling_encoder_decoder.py @@ -221,6 +221,13 @@ def get_output_embeddings(self): def set_output_embeddings(self, new_embeddings): return 
self.decoder.set_output_embeddings(new_embeddings) + @classmethod + def from_pretrained(cls, *args, **kwargs): + # At the moment fast initialization is not supported + # for composite models + kwargs["_fast_init"] = False + return super().from_pretrained(*args, **kwargs) + @classmethod def from_encoder_decoder_pretrained( cls, diff --git a/src/transformers/models/rag/modeling_rag.py b/src/transformers/models/rag/modeling_rag.py index 42c2e16d6ca795..8bbc754d14e825 100644 --- a/src/transformers/models/rag/modeling_rag.py +++ b/src/transformers/models/rag/modeling_rag.py @@ -232,6 +232,13 @@ class RagPreTrainedModel(PreTrainedModel): base_model_prefix = "rag" _keys_to_ignore_on_load_missing = [r"position_ids"] + @classmethod + def from_pretrained(cls, *args, **kwargs): + # At the moment fast initialization is not supported + # for composite models + kwargs["_fast_init"] = False + return super().from_pretrained(*args, **kwargs) + @classmethod def from_pretrained_question_encoder_generator( cls, From 64e78564a519cda2b4408803e2781c604e1e3bdd Mon Sep 17 00:00:00 2001 From: Lysandre Date: Wed, 12 May 2021 17:03:03 +0200 Subject: [PATCH 20/41] Release: v4.6.0 --- examples/pytorch/language-modeling/run_clm.py | 2 +- examples/pytorch/language-modeling/run_mlm.py | 2 +- examples/pytorch/language-modeling/run_plm.py | 2 +- examples/pytorch/multiple-choice/run_swag.py | 2 +- examples/pytorch/question-answering/run_qa.py | 2 +- examples/pytorch/question-answering/run_qa_beam_search.py | 2 +- .../question-answering/run_qa_beam_search_no_trainer.py | 3 +-- examples/pytorch/question-answering/run_qa_no_trainer.py | 3 +-- examples/pytorch/summarization/run_summarization.py | 2 +- examples/pytorch/text-classification/run_glue.py | 2 +- examples/pytorch/text-classification/run_xnli.py | 2 +- examples/pytorch/token-classification/run_ner.py | 2 +- examples/pytorch/translation/run_translation.py | 2 +- setup.py | 2 +- src/transformers/__init__.py | 2 +- 15 files changed, 15 insertions(+), 17 deletions(-) diff --git a/examples/pytorch/language-modeling/run_clm.py b/examples/pytorch/language-modeling/run_clm.py index 2ce18d2a81c952..a5c6a17f8aca6b 100755 --- a/examples/pytorch/language-modeling/run_clm.py +++ b/examples/pytorch/language-modeling/run_clm.py @@ -49,7 +49,7 @@ # Will error if the minimal version of Transformers is not installed. Remove at your own risks. -check_min_version("4.6.0.dev0") +check_min_version("4.6.0") logger = logging.getLogger(__name__) diff --git a/examples/pytorch/language-modeling/run_mlm.py b/examples/pytorch/language-modeling/run_mlm.py index b5c7ad92c5da23..d9214bed404997 100755 --- a/examples/pytorch/language-modeling/run_mlm.py +++ b/examples/pytorch/language-modeling/run_mlm.py @@ -48,7 +48,7 @@ # Will error if the minimal version of Transformers is not installed. Remove at your own risks. -check_min_version("4.6.0.dev0") +check_min_version("4.6.0") logger = logging.getLogger(__name__) MODEL_CONFIG_CLASSES = list(MODEL_FOR_MASKED_LM_MAPPING.keys()) diff --git a/examples/pytorch/language-modeling/run_plm.py b/examples/pytorch/language-modeling/run_plm.py index 458b2c1d43c626..f2751fc4c905f2 100755 --- a/examples/pytorch/language-modeling/run_plm.py +++ b/examples/pytorch/language-modeling/run_plm.py @@ -44,7 +44,7 @@ # Will error if the minimal version of Transformers is not installed. Remove at your own risks. 
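Both overrides in this patch encode the same rule: composite models stitch together independently pretrained sub-models, so the fast initialization path (which skips re-initializing modules reported as missing) is not safe for them, and they force the slow path before deferring to the base implementation. A minimal sketch of that idiom for any similar composite wrapper — the class here is hypothetical, not one added by the patch:

from transformers import PreTrainedModel


class MyCompositeModel(PreTrainedModel):
    base_model_prefix = "my_composite"

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        # force the slow, module-by-module initialization for composite checkpoints
        kwargs["_fast_init"] = False
        return super().from_pretrained(*args, **kwargs)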
-check_min_version("4.6.0.dev0") +check_min_version("4.6.0") logger = logging.getLogger(__name__) diff --git a/examples/pytorch/multiple-choice/run_swag.py b/examples/pytorch/multiple-choice/run_swag.py index 9999cb25d124ff..35890d0a746b98 100755 --- a/examples/pytorch/multiple-choice/run_swag.py +++ b/examples/pytorch/multiple-choice/run_swag.py @@ -46,7 +46,7 @@ # Will error if the minimal version of Transformers is not installed. Remove at your own risks. -check_min_version("4.6.0.dev0") +check_min_version("4.6.0") logger = logging.getLogger(__name__) diff --git a/examples/pytorch/question-answering/run_qa.py b/examples/pytorch/question-answering/run_qa.py index 54b1d6919f4e33..1a537836400e3e 100755 --- a/examples/pytorch/question-answering/run_qa.py +++ b/examples/pytorch/question-answering/run_qa.py @@ -46,7 +46,7 @@ # Will error if the minimal version of Transformers is not installed. Remove at your own risks. -check_min_version("4.6.0.dev0") +check_min_version("4.6.0") logger = logging.getLogger(__name__) diff --git a/examples/pytorch/question-answering/run_qa_beam_search.py b/examples/pytorch/question-answering/run_qa_beam_search.py index 320785230e393a..de57cc017a0913 100755 --- a/examples/pytorch/question-answering/run_qa_beam_search.py +++ b/examples/pytorch/question-answering/run_qa_beam_search.py @@ -45,7 +45,7 @@ # Will error if the minimal version of Transformers is not installed. Remove at your own risks. -check_min_version("4.6.0.dev0") +check_min_version("4.6.0") logger = logging.getLogger(__name__) diff --git a/examples/pytorch/question-answering/run_qa_beam_search_no_trainer.py b/examples/pytorch/question-answering/run_qa_beam_search_no_trainer.py index e1e97bece31f07..569e487f0384f4 100644 --- a/examples/pytorch/question-answering/run_qa_beam_search_no_trainer.py +++ b/examples/pytorch/question-answering/run_qa_beam_search_no_trainer.py @@ -50,8 +50,7 @@ # Will error if the minimal version of Transformers is not installed. Remove at your own risks. -check_min_version("4.5.0.dev0") - +check_min_version("4.6.0") logger = logging.getLogger(__name__) diff --git a/examples/pytorch/question-answering/run_qa_no_trainer.py b/examples/pytorch/question-answering/run_qa_no_trainer.py index de020adb0228e8..fc4ef11b8e14e9 100755 --- a/examples/pytorch/question-answering/run_qa_no_trainer.py +++ b/examples/pytorch/question-answering/run_qa_no_trainer.py @@ -52,8 +52,7 @@ # Will error if the minimal version of Transformers is not installed. Remove at your own risks. -check_min_version("4.5.0.dev0") - +check_min_version("4.6.0") logger = logging.getLogger(__name__) # You should update this to your particular problem to have better documentation of `model_type` diff --git a/examples/pytorch/summarization/run_summarization.py b/examples/pytorch/summarization/run_summarization.py index 4ceec8944692b7..948b8322167eee 100755 --- a/examples/pytorch/summarization/run_summarization.py +++ b/examples/pytorch/summarization/run_summarization.py @@ -46,7 +46,7 @@ # Will error if the minimal version of Transformers is not installed. Remove at your own risks. 
-check_min_version("4.6.0.dev0") +check_min_version("4.6.0") logger = logging.getLogger(__name__) diff --git a/examples/pytorch/text-classification/run_glue.py b/examples/pytorch/text-classification/run_glue.py index 79120e2ba12312..d4f4e148c43161 100755 --- a/examples/pytorch/text-classification/run_glue.py +++ b/examples/pytorch/text-classification/run_glue.py @@ -45,7 +45,7 @@ # Will error if the minimal version of Transformers is not installed. Remove at your own risks. -check_min_version("4.6.0.dev0") +check_min_version("4.6.0") task_to_keys = { "cola": ("sentence", None), diff --git a/examples/pytorch/text-classification/run_xnli.py b/examples/pytorch/text-classification/run_xnli.py index 21c071a812051b..c7b068a24268cd 100755 --- a/examples/pytorch/text-classification/run_xnli.py +++ b/examples/pytorch/text-classification/run_xnli.py @@ -45,7 +45,7 @@ # Will error if the minimal version of Transformers is not installed. Remove at your own risks. -check_min_version("4.6.0.dev0") +check_min_version("4.6.0") logger = logging.getLogger(__name__) diff --git a/examples/pytorch/token-classification/run_ner.py b/examples/pytorch/token-classification/run_ner.py index 70936c8544ac54..38fba61020281e 100755 --- a/examples/pytorch/token-classification/run_ner.py +++ b/examples/pytorch/token-classification/run_ner.py @@ -45,7 +45,7 @@ # Will error if the minimal version of Transformers is not installed. Remove at your own risks. -check_min_version("4.6.0.dev0") +check_min_version("4.6.0") logger = logging.getLogger(__name__) diff --git a/examples/pytorch/translation/run_translation.py b/examples/pytorch/translation/run_translation.py index c525f6289dca60..f89e33bda24c0d 100755 --- a/examples/pytorch/translation/run_translation.py +++ b/examples/pytorch/translation/run_translation.py @@ -49,7 +49,7 @@ # Will error if the minimal version of Transformers is not installed. Remove at your own risks. -check_min_version("4.6.0.dev0") +check_min_version("4.6.0") logger = logging.getLogger(__name__) diff --git a/setup.py b/setup.py index 0942a76f6c95cc..1cb946cde30da8 100644 --- a/setup.py +++ b/setup.py @@ -320,7 +320,7 @@ def run(self): setup( name="transformers", - version="4.6.0.dev0", # expected format is one of x.y.z.dev0, or x.y.z.rc1 or x.y.z (no to dashes, yes to dots) + version="4.6.0", # expected format is one of x.y.z.dev0, or x.y.z.rc1 or x.y.z (no to dashes, yes to dots) author="Thomas Wolf, Lysandre Debut, Victor Sanh, Julien Chaumond, Sam Shleifer, Patrick von Platen, Sylvain Gugger, Suraj Patil, Stas Bekman, Google AI Language Team Authors, Open AI team Authors, Facebook AI Authors, Carnegie Mellon University Authors", author_email="thomas@huggingface.co", description="State-of-the-art Natural Language Processing for TensorFlow 2.0 and PyTorch", diff --git a/src/transformers/__init__.py b/src/transformers/__init__.py index f89c3c43283801..ccc3e44b67f84a 100755 --- a/src/transformers/__init__.py +++ b/src/transformers/__init__.py @@ -22,7 +22,7 @@ # to defer the actual importing for when the objects are requested. This way `import transformers` provides the names # in the namespace without actually importing anything (and especially none of the backends). -__version__ = "4.6.0.dev0" +__version__ = "4.6.0" # Work around to update TensorFlow's absl.logging threshold which alters the # default Python logging output behavior when present. 
From d77eb0cf922105d85337409863090ebdb9981873 Mon Sep 17 00:00:00 2001 From: Lysandre Date: Wed, 12 May 2021 17:08:35 +0200 Subject: [PATCH 21/41] Docs for v4.7.0.dev0 --- .circleci/deploy.sh | 3 ++- docs/source/_static/js/custom.js | 5 +++-- examples/pytorch/language-modeling/run_clm.py | 2 +- examples/pytorch/language-modeling/run_mlm.py | 2 +- examples/pytorch/language-modeling/run_plm.py | 2 +- examples/pytorch/multiple-choice/run_swag.py | 2 +- examples/pytorch/question-answering/run_qa.py | 2 +- examples/pytorch/question-answering/run_qa_beam_search.py | 2 +- .../question-answering/run_qa_beam_search_no_trainer.py | 2 +- examples/pytorch/question-answering/run_qa_no_trainer.py | 2 +- examples/pytorch/summarization/run_summarization.py | 2 +- examples/pytorch/text-classification/run_glue.py | 2 +- examples/pytorch/text-classification/run_xnli.py | 2 +- examples/pytorch/token-classification/run_ner.py | 2 +- examples/pytorch/translation/run_translation.py | 2 +- setup.py | 2 +- src/transformers/__init__.py | 2 +- 17 files changed, 20 insertions(+), 18 deletions(-) diff --git a/.circleci/deploy.sh b/.circleci/deploy.sh index 11716e9df0ff76..f5542fb1332c3d 100755 --- a/.circleci/deploy.sh +++ b/.circleci/deploy.sh @@ -62,4 +62,5 @@ deploy_doc "c988db5" v4.4.0 deploy_doc "c5d6a28" v4.4.1 deploy_doc "6bc89ed" v4.4.2 deploy_doc "4906a29" v4.5.0 -deploy_doc "4bae96e" # v4.5.1 Latest stable release \ No newline at end of file +deploy_doc "4bae96e" v4.5.1 +deploy_doc "64e7856" # v4.6.0 Latest stable release \ No newline at end of file diff --git a/docs/source/_static/js/custom.js b/docs/source/_static/js/custom.js index 3b975a81f775a8..21e97714a8e8d0 100644 --- a/docs/source/_static/js/custom.js +++ b/docs/source/_static/js/custom.js @@ -1,10 +1,11 @@ // These two things need to be updated at each release for the version selector. // Last stable version -const stableVersion = "v4.5.1" +const stableVersion = "v4.6.0" // Dictionary doc folder to label. The last stable version should have an empty key. const versionMapping = { "master": "master", - "": "v4.5.0/v4.5.1 (stable)", + "": "v4.6.0 (stable)", + "v4.5.1": "v4.5.0/v4.5.1", "v4.4.2": "v4.4.0/v4.4.1/v4.4.2", "v4.3.3": "v4.3.0/v4.3.1/v4.3.2/v4.3.3", "v4.2.2": "v4.2.0/v4.2.1/v4.2.2", diff --git a/examples/pytorch/language-modeling/run_clm.py b/examples/pytorch/language-modeling/run_clm.py index a5c6a17f8aca6b..9d6e40c58a08bf 100755 --- a/examples/pytorch/language-modeling/run_clm.py +++ b/examples/pytorch/language-modeling/run_clm.py @@ -49,7 +49,7 @@ # Will error if the minimal version of Transformers is not installed. Remove at your own risks. -check_min_version("4.6.0") +check_min_version("4.7.0.dev0") logger = logging.getLogger(__name__) diff --git a/examples/pytorch/language-modeling/run_mlm.py b/examples/pytorch/language-modeling/run_mlm.py index d9214bed404997..9085e6fe0c8b23 100755 --- a/examples/pytorch/language-modeling/run_mlm.py +++ b/examples/pytorch/language-modeling/run_mlm.py @@ -48,7 +48,7 @@ # Will error if the minimal version of Transformers is not installed. Remove at your own risks. 
-check_min_version("4.6.0") +check_min_version("4.7.0.dev0") logger = logging.getLogger(__name__) MODEL_CONFIG_CLASSES = list(MODEL_FOR_MASKED_LM_MAPPING.keys()) diff --git a/examples/pytorch/language-modeling/run_plm.py b/examples/pytorch/language-modeling/run_plm.py index f2751fc4c905f2..38f57768edfb1c 100755 --- a/examples/pytorch/language-modeling/run_plm.py +++ b/examples/pytorch/language-modeling/run_plm.py @@ -44,7 +44,7 @@ # Will error if the minimal version of Transformers is not installed. Remove at your own risks. -check_min_version("4.6.0") +check_min_version("4.7.0.dev0") logger = logging.getLogger(__name__) diff --git a/examples/pytorch/multiple-choice/run_swag.py b/examples/pytorch/multiple-choice/run_swag.py index 35890d0a746b98..3c9bfce866d074 100755 --- a/examples/pytorch/multiple-choice/run_swag.py +++ b/examples/pytorch/multiple-choice/run_swag.py @@ -46,7 +46,7 @@ # Will error if the minimal version of Transformers is not installed. Remove at your own risks. -check_min_version("4.6.0") +check_min_version("4.7.0.dev0") logger = logging.getLogger(__name__) diff --git a/examples/pytorch/question-answering/run_qa.py b/examples/pytorch/question-answering/run_qa.py index 1a537836400e3e..57b0cb04e94955 100755 --- a/examples/pytorch/question-answering/run_qa.py +++ b/examples/pytorch/question-answering/run_qa.py @@ -46,7 +46,7 @@ # Will error if the minimal version of Transformers is not installed. Remove at your own risks. -check_min_version("4.6.0") +check_min_version("4.7.0.dev0") logger = logging.getLogger(__name__) diff --git a/examples/pytorch/question-answering/run_qa_beam_search.py b/examples/pytorch/question-answering/run_qa_beam_search.py index de57cc017a0913..e097b5bea74db5 100755 --- a/examples/pytorch/question-answering/run_qa_beam_search.py +++ b/examples/pytorch/question-answering/run_qa_beam_search.py @@ -45,7 +45,7 @@ # Will error if the minimal version of Transformers is not installed. Remove at your own risks. -check_min_version("4.6.0") +check_min_version("4.7.0.dev0") logger = logging.getLogger(__name__) diff --git a/examples/pytorch/question-answering/run_qa_beam_search_no_trainer.py b/examples/pytorch/question-answering/run_qa_beam_search_no_trainer.py index 569e487f0384f4..c4e6fab49bfb18 100644 --- a/examples/pytorch/question-answering/run_qa_beam_search_no_trainer.py +++ b/examples/pytorch/question-answering/run_qa_beam_search_no_trainer.py @@ -50,7 +50,7 @@ # Will error if the minimal version of Transformers is not installed. Remove at your own risks. -check_min_version("4.6.0") +check_min_version("4.7.0.dev0") logger = logging.getLogger(__name__) diff --git a/examples/pytorch/question-answering/run_qa_no_trainer.py b/examples/pytorch/question-answering/run_qa_no_trainer.py index fc4ef11b8e14e9..d0bb7457854865 100755 --- a/examples/pytorch/question-answering/run_qa_no_trainer.py +++ b/examples/pytorch/question-answering/run_qa_no_trainer.py @@ -52,7 +52,7 @@ # Will error if the minimal version of Transformers is not installed. Remove at your own risks. 
-check_min_version("4.6.0") +check_min_version("4.7.0.dev0") logger = logging.getLogger(__name__) # You should update this to your particular problem to have better documentation of `model_type` diff --git a/examples/pytorch/summarization/run_summarization.py b/examples/pytorch/summarization/run_summarization.py index 948b8322167eee..690dede77c840b 100755 --- a/examples/pytorch/summarization/run_summarization.py +++ b/examples/pytorch/summarization/run_summarization.py @@ -46,7 +46,7 @@ # Will error if the minimal version of Transformers is not installed. Remove at your own risks. -check_min_version("4.6.0") +check_min_version("4.7.0.dev0") logger = logging.getLogger(__name__) diff --git a/examples/pytorch/text-classification/run_glue.py b/examples/pytorch/text-classification/run_glue.py index d4f4e148c43161..453a488eaf40c0 100755 --- a/examples/pytorch/text-classification/run_glue.py +++ b/examples/pytorch/text-classification/run_glue.py @@ -45,7 +45,7 @@ # Will error if the minimal version of Transformers is not installed. Remove at your own risks. -check_min_version("4.6.0") +check_min_version("4.7.0.dev0") task_to_keys = { "cola": ("sentence", None), diff --git a/examples/pytorch/text-classification/run_xnli.py b/examples/pytorch/text-classification/run_xnli.py index c7b068a24268cd..6327c8f8d81a1b 100755 --- a/examples/pytorch/text-classification/run_xnli.py +++ b/examples/pytorch/text-classification/run_xnli.py @@ -45,7 +45,7 @@ # Will error if the minimal version of Transformers is not installed. Remove at your own risks. -check_min_version("4.6.0") +check_min_version("4.7.0.dev0") logger = logging.getLogger(__name__) diff --git a/examples/pytorch/token-classification/run_ner.py b/examples/pytorch/token-classification/run_ner.py index 38fba61020281e..81690186bc462b 100755 --- a/examples/pytorch/token-classification/run_ner.py +++ b/examples/pytorch/token-classification/run_ner.py @@ -45,7 +45,7 @@ # Will error if the minimal version of Transformers is not installed. Remove at your own risks. -check_min_version("4.6.0") +check_min_version("4.7.0.dev0") logger = logging.getLogger(__name__) diff --git a/examples/pytorch/translation/run_translation.py b/examples/pytorch/translation/run_translation.py index f89e33bda24c0d..84181ab1130d68 100755 --- a/examples/pytorch/translation/run_translation.py +++ b/examples/pytorch/translation/run_translation.py @@ -49,7 +49,7 @@ # Will error if the minimal version of Transformers is not installed. Remove at your own risks. 
-check_min_version("4.6.0") +check_min_version("4.7.0.dev0") logger = logging.getLogger(__name__) diff --git a/setup.py b/setup.py index 1cb946cde30da8..498107ac0c2d55 100644 --- a/setup.py +++ b/setup.py @@ -320,7 +320,7 @@ def run(self): setup( name="transformers", - version="4.6.0", # expected format is one of x.y.z.dev0, or x.y.z.rc1 or x.y.z (no to dashes, yes to dots) + version="4.7.0.dev0", # expected format is one of x.y.z.dev0, or x.y.z.rc1 or x.y.z (no to dashes, yes to dots) author="Thomas Wolf, Lysandre Debut, Victor Sanh, Julien Chaumond, Sam Shleifer, Patrick von Platen, Sylvain Gugger, Suraj Patil, Stas Bekman, Google AI Language Team Authors, Open AI team Authors, Facebook AI Authors, Carnegie Mellon University Authors", author_email="thomas@huggingface.co", description="State-of-the-art Natural Language Processing for TensorFlow 2.0 and PyTorch", diff --git a/src/transformers/__init__.py b/src/transformers/__init__.py index ccc3e44b67f84a..c034b29ca99959 100755 --- a/src/transformers/__init__.py +++ b/src/transformers/__init__.py @@ -22,7 +22,7 @@ # to defer the actual importing for when the objects are requested. This way `import transformers` provides the names # in the namespace without actually importing anything (and especially none of the backends). -__version__ = "4.6.0" +__version__ = "4.7.0.dev0" # Work around to update TensorFlow's absl.logging threshold which alters the # default Python logging output behavior when present. From fa84540e98a6af309c3007f64def5011db775a70 Mon Sep 17 00:00:00 2001 From: NielsRogge <48327001+NielsRogge@users.noreply.github.com> Date: Wed, 12 May 2021 17:46:02 +0200 Subject: [PATCH 22/41] Vit deit fixes (#11309) * Improve docs of DeiT and ViT, add community notebook * Add gitignore for test_samples * Add notebook with Trainer Co-authored-by: Lysandre Debut --- docs/source/community.md | 2 ++ .../models/deit/feature_extraction_deit.py | 9 ++++++--- src/transformers/models/deit/modeling_deit.py | 5 ++--- .../models/vit/feature_extraction_vit.py | 9 ++++++--- src/transformers/models/vit/modeling_vit.py | 5 ++--- tests/fixtures/tests_samples/.gitignore | 14 +++++++------- tests/fixtures/tests_samples/COCO/cats.png | Bin 0 -> 694498 bytes 7 files changed, 25 insertions(+), 19 deletions(-) create mode 100644 tests/fixtures/tests_samples/COCO/cats.png diff --git a/docs/source/community.md b/docs/source/community.md index 4c4af370a50102..8f979a601a9b9d 100644 --- a/docs/source/community.md +++ b/docs/source/community.md @@ -52,6 +52,8 @@ This page regroups resources around 🤗 Transformers developed by the community |[Fine-tune BART for summarization in two languages with Trainer class](https://github.com/elsanns/xai-nlp-notebooks/blob/master/fine_tune_bart_summarization_two_langs.ipynb) | How to fine-tune BART for summarization in two languages with Trainer class | [Eliza Szczechla](https://github.com/elsanns) | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/elsanns/xai-nlp-notebooks/blob/master/fine_tune_bart_summarization_two_langs.ipynb)| |[Evaluate Big Bird on Trivia QA](https://github.com/patrickvonplaten/notebooks/blob/master/Evaluating_Big_Bird_on_TriviaQA.ipynb) | How to evaluate BigBird on long document question answering on Trivia QA | [Patrick von Platen](https://github.com/patrickvonplaten) | [![Open In 
Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/patrickvonplaten/notebooks/blob/master/Evaluating_Big_Bird_on_TriviaQA.ipynb)| | [Create video captions using Wav2Vec2](https://github.com/Muennighoff/ytclipcc/blob/main/wav2vec_youtube_captions.ipynb) | How to create YouTube captions from any video by transcribing the audio with Wav2Vec | [Niklas Muennighoff](https://github.com/Muennighoff) |[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/Muennighoff/ytclipcc/blob/main/wav2vec_youtube_captions.ipynb) | +| [Fine-tune the Vision Transformer on CIFAR-10 using PyTorch Lightning](https://github.com/NielsRogge/Transformers-Tutorials/blob/master/VisionTransformer/Fine_tuning_the_Vision_Transformer_on_CIFAR_10_with_PyTorch_Lightning.ipynb) | How to fine-tune the Vision Transformer (ViT) on CIFAR-10 using HuggingFace Transformers, Datasets and PyTorch Lightning | [Niels Rogge](https://github.com/nielsrogge) |[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/NielsRogge/Transformers-Tutorials/blob/master/VisionTransformer/Fine_tuning_the_Vision_Transformer_on_CIFAR_10_with_PyTorch_Lightning.ipynb) | +| [Fine-tune the Vision Transformer on CIFAR-10 using the 🤗 Trainer](https://github.com/NielsRogge/Transformers-Tutorials/blob/master/VisionTransformer/Fine_tuning_the_Vision_Transformer_on_CIFAR_10_with_the_%F0%9F%A4%97_Trainer.ipynb) | How to fine-tune the Vision Transformer (ViT) on CIFAR-10 using HuggingFace Transformers, Datasets and the 🤗 Trainer | [Niels Rogge](https://github.com/nielsrogge) |[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/NielsRogge/Transformers-Tutorials/blob/master/VisionTransformer/Fine_tuning_the_Vision_Transformer_on_CIFAR_10_with_the_%F0%9F%A4%97_Trainer.ipynb) | | [Evaluate LUKE on Open Entity, an entity typing dataset](https://github.com/studio-ousia/luke/blob/master/notebooks/huggingface_open_entity.ipynb) | How to evaluate *LukeForEntityClassification* on the Open Entity dataset | [Ikuya Yamada](https://github.com/ikuyamada) |[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/studio-ousia/luke/blob/master/notebooks/huggingface_open_entity.ipynb) | | [Evaluate LUKE on TACRED, a relation extraction dataset](https://github.com/studio-ousia/luke/blob/master/notebooks/huggingface_tacred.ipynb) | How to evaluate *LukeForEntityPairClassification* on the TACRED dataset | [Ikuya Yamada](https://github.com/ikuyamada) |[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/studio-ousia/luke/blob/master/notebooks/huggingface_tacred.ipynb) | | [Evaluate LUKE on CoNLL-2003, an important NER benchmark](https://github.com/studio-ousia/luke/blob/master/notebooks/huggingface_conll_2003.ipynb) | How to evaluate *LukeForEntitySpanClassification* on the CoNLL-2003 dataset | [Ikuya Yamada](https://github.com/ikuyamada) |[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/studio-ousia/luke/blob/master/notebooks/huggingface_conll_2003.ipynb) | diff --git a/src/transformers/models/deit/feature_extraction_deit.py b/src/transformers/models/deit/feature_extraction_deit.py index aae149c40b3ee9..591630fff77701 100644 --- 
a/src/transformers/models/deit/feature_extraction_deit.py +++ b/src/transformers/models/deit/feature_extraction_deit.py @@ -38,8 +38,10 @@ class DeiTFeatureExtractor(FeatureExtractionMixin, ImageFeatureExtractionMixin): Args: do_resize (:obj:`bool`, `optional`, defaults to :obj:`True`): Whether to resize the input to a certain :obj:`size`. - size (:obj:`int`, `optional`, defaults to 256): - Resize the input to the given size. Only has an effect if :obj:`do_resize` is set to :obj:`True`. + size (:obj:`int` or :obj:`Tuple(int)`, `optional`, defaults to 256): + Resize the input to the given size. If a tuple is provided, it should be (width, height). If only an + integer is provided, then the input will be resized to (size, size). Only has an effect if :obj:`do_resize` + is set to :obj:`True`. resample (:obj:`int`, `optional`, defaults to :obj:`PIL.Image.BICUBIC`): An optional resampling filter. This can be one of :obj:`PIL.Image.NEAREST`, :obj:`PIL.Image.BOX`, :obj:`PIL.Image.BILINEAR`, :obj:`PIL.Image.HAMMING`, :obj:`PIL.Image.BICUBIC` or :obj:`PIL.Image.LANCZOS`. @@ -115,7 +117,8 @@ def __call__( Returns: :class:`~transformers.BatchFeature`: A :class:`~transformers.BatchFeature` with the following fields: - - **pixel_values** -- Pixel values to be fed to a model. + - **pixel_values** -- Pixel values to be fed to a model, of shape (batch_size, num_channels, height, + width). """ # Input type checking for clearer error valid_images = False diff --git a/src/transformers/models/deit/modeling_deit.py b/src/transformers/models/deit/modeling_deit.py index 602d5e26005b9f..f620e6b78845b2 100644 --- a/src/transformers/models/deit/modeling_deit.py +++ b/src/transformers/models/deit/modeling_deit.py @@ -417,9 +417,8 @@ def _init_weights(self, module): DEIT_INPUTS_DOCSTRING = r""" Args: pixel_values (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, num_channels, height, width)`): - Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using - :class:`~transformers.DeiTFeatureExtractor`. See :meth:`transformers.DeiTFeatureExtractor.__call__` for - details. + Pixel values. Pixel values can be obtained using :class:`~transformers.DeiTFeatureExtractor`. See + :meth:`transformers.DeiTFeatureExtractor.__call__` for details. head_mask (:obj:`torch.FloatTensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`): Mask to nullify selected heads of the self-attention modules. Mask values selected in ``[0, 1]``: diff --git a/src/transformers/models/vit/feature_extraction_vit.py b/src/transformers/models/vit/feature_extraction_vit.py index 50e5d3ba3da1a8..a5177a15b4b032 100644 --- a/src/transformers/models/vit/feature_extraction_vit.py +++ b/src/transformers/models/vit/feature_extraction_vit.py @@ -38,8 +38,10 @@ class ViTFeatureExtractor(FeatureExtractionMixin, ImageFeatureExtractionMixin): Args: do_resize (:obj:`bool`, `optional`, defaults to :obj:`True`): Whether to resize the input to a certain :obj:`size`. - size (:obj:`int`, `optional`, defaults to 224): - Resize the input to the given size. Only has an effect if :obj:`do_resize` is set to :obj:`True`. + size (:obj:`int` or :obj:`Tuple(int)`, `optional`, defaults to 224): + Resize the input to the given size. If a tuple is provided, it should be (width, height). If only an + integer is provided, then the input will be resized to (size, size). Only has an effect if :obj:`do_resize` + is set to :obj:`True`. 
resample (:obj:`int`, `optional`, defaults to :obj:`PIL.Image.BILINEAR`): An optional resampling filter. This can be one of :obj:`PIL.Image.NEAREST`, :obj:`PIL.Image.BOX`, :obj:`PIL.Image.BILINEAR`, :obj:`PIL.Image.HAMMING`, :obj:`PIL.Image.BICUBIC` or :obj:`PIL.Image.LANCZOS`. @@ -105,7 +107,8 @@ def __call__( Returns: :class:`~transformers.BatchFeature`: A :class:`~transformers.BatchFeature` with the following fields: - - **pixel_values** -- Pixel values to be fed to a model. + - **pixel_values** -- Pixel values to be fed to a model, of shape (batch_size, num_channels, height, + width). """ # Input type checking for clearer error valid_images = False diff --git a/src/transformers/models/vit/modeling_vit.py b/src/transformers/models/vit/modeling_vit.py index 3584813db62a38..0972a7b7bf3e84 100644 --- a/src/transformers/models/vit/modeling_vit.py +++ b/src/transformers/models/vit/modeling_vit.py @@ -403,9 +403,8 @@ def _init_weights(self, module): VIT_INPUTS_DOCSTRING = r""" Args: pixel_values (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, num_channels, height, width)`): - Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using - :class:`~transformers.ViTFeatureExtractor`. See :meth:`transformers.ViTFeatureExtractor.__call__` for - details. + Pixel values. Pixel values can be obtained using :class:`~transformers.ViTFeatureExtractor`. See + :meth:`transformers.ViTFeatureExtractor.__call__` for details. head_mask (:obj:`torch.FloatTensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`): Mask to nullify selected heads of the self-attention modules. Mask values selected in ``[0, 1]``: diff --git a/tests/fixtures/tests_samples/.gitignore b/tests/fixtures/tests_samples/.gitignore index 46ad771d4530a6..f5030eb61e7c0b 100644 --- a/tests/fixtures/tests_samples/.gitignore +++ b/tests/fixtures/tests_samples/.gitignore @@ -1,7 +1,7 @@ -*.* -cache* -temp* -!*.txt -!*.tsv -!*.json -!.gitignore \ No newline at end of file +*.* +cache* +temp* +!*.txt +!*.tsv +!*.json +!.gitignore \ No newline at end of file diff --git a/tests/fixtures/tests_samples/COCO/cats.png b/tests/fixtures/tests_samples/COCO/cats.png new file mode 100644 index 0000000000000000000000000000000000000000..a3b5225fc3cef5c492cc109aebe883f24941a156 GIT binary patch literal 694498 zcmV)NK)1h%P)~Mr5;{Q*+_g-twY>e?`J@1Xefm>+O+X`>;;D7r0KU6S~n34kt_V&n(>;9JV&!_2o zQUei%9T9tD@wWWw@o)eYLMpX(%pBs?s-wjZxBNPGhwAa^!A1YA_^DOy4@&|Hv0~F! 
[base85 payload of the GIT binary patch for tests/fixtures/tests_samples/COCO/cats.png omitted]
z9erSvN(NvSBoW0lO(ABbl&%iEpBpRLI|*E{1S%dfWN4*AZK)GKXry{bARIF(rdoCRsq#>+H7c>S#Ckgt;{+4V({Sg$*u%YAI^~S&9Ib zLMV__xFY2^Wl0v4iL97`FhW}BLrz&rvP#F%1TCuG*`RE_>Q!R|FD?;vyr~08$CcY2VaU%2S>@ zV+F_u05Av7Dyp!uS}3LVdzm2`6BMqCP(no)-sMnNRXzI$oZH2E)$a#Rv$f!(lcr&= zH7qDqrM}zmq7X_O%n2%U6x?X41U#bjAv=PqADt2e#aMm7nKr|@v<8GOoS<-Hq;s35 zyRsWL^E?uW!^I&p`x0E5O}^Bva*QQ7L@@)F=t*gu=AG8Zrl|tlo5L=-#;Plos*`;% z4^v55fZ$>Qx&$z~{>J+9c6cx4sPzF66+U5}Axmb%c%bVB#QE<0j+N_6hmV}?worPl zOfE?)^e$F*kz)Y>yF4$+%p)PxM3iz5asnBndn&}fZaKu6NX}DQEsy$skY>vNYAvFi!}JlLy6C8s&q!iB{P-g@P4R<>q9q zCc$$Ia|)}P&Nf5SbOqu(``R`DDaF05uO-i%Gjb^$wN*YBsZHY#g%HEI(RE_%x4YBD z>Zsobw-gde4Y;lr+lTwcHZD3+*wzOj?4$F<)>L|mXCMlL6+#fI;6V!GV-f-Zvy{YX zIZfwPOAucd4$Yc`hUZ7h6@^ekZlC*2eutQmZQI@+Pxz zbJcWR5j(#-MI@i8)7~iry-z{ zyBI-I%H_gE)h!nNFw|W;?e>T3?&_tZVY{=sa1Gv^dy$ZgvyEr@6s_EKP7*AMUA>(-Z| zPi@sGrCbQX&6f|4_M3Bqv1A|;D3r@F5s|5UNviBRq4VrXtAQhFrBl(NWM0m&7g{ki z*%hUH<_SsX;*{1tC166*!Ijo_A*3uOX3i-AU$_Vr3Q|hB$T24ppb)2h-*wCLVNcc? z%ndaAy`PjLJ#~`ZnAvtU$^y!VT-U7|_SWjTL>3ZUbfPkGu=l5(MoC%Wl54qp%6?E^Fo>cr3k5kY${d|5W5sC8?CiKl1L?{ zrL9XSMk)uw2r>c{2q+4eKxC9i6D?QuZgan>^)&RW?kEQoRAur+(i-T;hwJ5mn<)8Y zD^aM)ajc}y8KpJ*X zmI|2ntLiBzRm@w!F6WqWPlX#>?fNY%L@B6bN~Joe#%)d55X~X&w5sMQS}h?7CNHFx zYMsj*k zD?ZBzH;?|kw5H}W{i$!idcQpELO$7#KiHm!ql2ZY zc7O2u-)ic`2aXSGotItW+HO+);`hJy!=L(C6K{mgTrJLC`8FIJ3KHj651&6hc~y$0 zedH-}XQty<-hcCxAN!m*1rca)(4;a*zJDz_3yoODD7(Ioh(E| z448`|VU?!LF~-cfR@Mj!G3T5lP}g;|1W7Qm;Q8I*ph>So45-Cnj zspVMCsnRJnlt~E3IE9dw%Wl8lmzb2$b=}M^WEMG>470WN;&gZc{l3UkkrrL>x^Ci} z8q>u9O4mLlt@kp|E{wTuxwI{`DZ5-{Oj9mW%g)cfuIxDWq-91D*n*nfuxs^_5lR{r z2=7H{rSY?uoP=EFuJ?WvZY=--r6OnvH55gtNQjIaik8Y0G$@%lVlkRJAyqEUxu$89 zQscH?wF|*SvDDTUt`H{4kyuJ-2ra6JGAI-X0t#x>wXyr%wz8VIOr=p$Ow+*GU%P(E zyTMHe*`lbG@Y#n&dpyK3;TcjcL0em=V34-iRh7{$Kj^xf!}L&DmcUvRH-V&^fJ*7f zyKN!aw)JLnuhWl9Y8>BVW{}f7*@ab=ggLn}`nEgtL#9YsDxqc{=C-~ZT~1|CCZ(8b zxCye?Ry!{NN{lB;3&G3Hcu#kn0rq3GR!5&S;i8Lae>uU7nxhLww#w)L^PSW8k5|`H z_JXvn`Qgc(b@w!uws&u>yJd_j6dHW5Ev1kp5kgA7uC1zVCYt6cIj0!`bQmFNQCU?0 z_HK?TFN@Rx_N@Su?NN8Qolg7Wn5weRR~H9P0QrYU?7sY zSQHE9D&_}ORr`=i7LyC6R-}RpJ5O10l_pu!Qj!GVScbYWGN?JQu5lQKqt$gEPdscv zbmg2(M~S14K}x9=N<_>hh-*Tse%N*mao9jggoe~2W)?!AaQ$?rsnDwBxbbb>O4L3; z$O1@*G#e}ZJSnA=s!9n`WWbPfK$O5bMQ%D@ihj5qbSni0g`jdHQZvX+r516Xgz(ek zjxRm+@T?bx7g7=lE&>ZXKN(95&REe8Hs*a(3q`7AHO{_jMaD5Wp$N0fRAjq2ym#{O z=;qPAv)h;Kt-?IaLv3ZF<>usmx2{Q|V-J}mnVfS7p>5k~4qDZMY)q#l!1Y+y^=^D< zs+w>;!>+3AFm9!kVGfmU{1lt2b4j8)6Cwtu1UA~>FpXL+iCijqn4GOE?`LaN2(wWl zb@kc9bE9gDrcw(0EQk>jSQBUgFE53N~HHYJfnN*n;AvC{2x5nx(- zzB>m&Qc3|xh~wa`)gg|~4~K_qA6!nQZQJb_y0#6o6M)GFt+m!Vgpk9|s+Q0M(i4=7 zMGG*@!lfD52^oO_pfi>r^}8*aB|`=d00jvcAu}*zN^;TF=bML3rIDeOB2W;@S*8mn ziEtpxhw?|$R$+b{mvpZl4!dGoy&ANcX%XFvSx`P;k49>3OJFDGue2G#F> z<+U46U;o~#Z+_s>50-LTwavkmENTR z{@{y$RFP&Y?xz7}Uztjgh#^4ERvAN*nVFdwtkMPuQt6sz97iHjprm5LToSjM4b$UT>#BR);t$w2~{vsdu*(E+pWxsr%g?5m69GC6p{BrIapA zPAR37BLg6!#3mE(-C!%tAs_$|&Cacsi#!BdRg=uI%y~Z;Qw@c&jLNhnD=jqU%*-wY zX{%U-oVTnIL|xBdJGq;~8V*nJJz1}0vE#lMt&j+k)ery?5ts=;BQ!!aC@ho6IU!3) z0vNa`B*{{NvC0&frfF)b1`7*7BTe*Su~>B8F%qLdL;)xW6oNpK-tmS$i-|L(DrO*Hp&Op$1Mu1yYjHV9arvaV}ak@$xip z501?+4^lMqcy5fb;`&TQLRVsvst1~8morP|5SkfO#WZe(WFm5N z(y9@pgaqe@iIdPfO-<-cWfd2fTtO(UUCntn@y0_bX>HZ9mNUTJZGuCpIOjcpv=#Qz z>S`B5Ef<@32P&t5N2{^!sN{=tE?uxb@cFm$b88%rUXbL%vldHPbZXT!TQAr@oi6X_*FpdDCvfd?AH95va z{Hdou@W%UZNClt%;B)WXzlT+G%d8gH55M-cmwxU?fA0EPWqkWre)nH~_OoAjq= z_o<&t;nx5AAOFMWe`xioANjZd|Xb^&?kr`Eu{}ZMEy)dFtks-D&gn@BZ$kOYSfJ^uPNz z|AYV4^~;wZy>_*0tGC~N{YO6g+4tUfjp1B!Za2B#~;PL@F8L4Bi^0r$VG;xAfk# z3l4Rq$PN4DViD(r!laT-rJe7E5K__{JP?))E~bO^VVV1!SlZh87;=(Q<{Ycqj`1NB 
zl#Ch;04BgfNK*N6)TT*5ZW@+toudyWXD-@yE;uQvgc))%x`~A|VkIu70rLe^g9{QV zOz-Fo~iRNo7h3(JIY} z1z;t0PRVK$ikBocDupD03=km`V6R(|QmuqyhRCFApRll4Nr%XUn1e4!gi`4WrJ_KE z&q_iC^`dL~eORw9o%e4GbpWLRIg$a;r-+gll|CMQ4@@L6msr`QpK?{xFx&&8l)biw zi&SDEP+85+ZBbKlCB{fZ(#Qgc#Yj<6nnI3JE<@~<9kn>dVy5^8fR$L6QmiIHl?yq- zdB*#ydTNfL?{~GeDa3=rOZ#a)>-!}QO?A{Xmr@EST!<579UMvBLwwYQ$>aOls#sjJ zFq}G^nDiK-%wDHLZJUu{8nA}71}yAmF((rR#SrI0Ns0>mf^sLN^vRPHLXI)rFTnsI z2!mvT2dxCa6vI}+6_h)lywS%3sxY5govku*PM$DGv3HXYYGLcN?^mY22UJxY!jN+= zS=4qNb1IORdNmG787gw`{Vd2>llRj*SDPQ2(;O)x14T?BM=p`FR0<>OTq=Xc!hYIq z+}Iu*CQsJXd5)ddkzLNQl%ng7gx;K;FPF>xW~;Sc9vt3(aH6$6k_|$=?a$1@yF9UK zg^`{&HDAsNL&(cz+ixFQsj|mLuf)Q;>8?(VQ3qbnISa{_;jAvrZl1x~R9GnmC4L?Cda`ViLCaU+_oJmKl`)VJX+uk z<<7ZD;EHl5s4k`GrLLkI#!aj>7h$8w5H~ztNk=Rw0XudchlDAXm$StuT7)uq*iP{^fkRivUB{6eB5JG^V=c!m^ni5UO|N|AHaELc!TMJSn4l3Mg}3Xo>+gfg*kDC|m#z{03;p0H}q=B=^1 zPa!5Sy7|;6KmO{=-@X3q6NlaM55MuXo0l*D%x6FUt+&2cBz@-kQ-A!OFMs}{pFKOh z_wgV4!k2#iOE=aLYH@cu{%b$~Z+-1gzVz*vzwzSq_EJ!GsLRLVQlwgQlT3PAJ-glcEM;mgZThKZb=A5liq-01@AXQQ; z#yfFSF$0!>STeS3jg}Ken&YCa+}yWy9bJ*ij=^QfwWD7HA;iQ2 zfJjLq@_sBSnWnwq@DN-~ktakXA%cs_HX*UqQa}MH<2 zv#*!>kjFtO6?w{HjyVEqiB`)h#^^&|w>1};69GHK(F!G?z0bo!*cIB+du!{V#0Rby z3oByLiennJ);S@ei7^VTlADcgLT~`VDlJ*YFdE}C9gkrY3Y1b9P)kjc20uznRnz)V zcGF<1>cY=eS4;8_dMaI33vJI!6fDaCv_4R{%{eQ=u^+0cA)$njE~I<&$$BM3orCM|?9mGog>L8j_z8yE8~pJHlT>cE})vBMGIh;hN3fHsLI zWmmJSy3~X?)~yJ!2TaD+-e)eLYus$<-0l`^)j z=kbBSwq%Ji6&R$-F+dE!)R8JFj;p4dhB*NOrlvCect^>GQK!g=%F2qc&H-7RMJ_@J z%%y3X{WO*W3dq4oD4D@~2UviV5OvHDN~$Y4?fYeQXoTDhC-$&A9cC?JWtto^mh8Q^ zRhLT6kgZ0HS!5<5J#i?k7%P%f*kRL7F~->~h0#L$qJp#{o*StEU>>Ksu3emk5JCvY zGRA0(RZ2PMGMA>QhhgYySyg&)`w&@*3du6aaq5{OGFWYxb15a2n2>!8jc!v)Iq?OR zx|G5>0{|)94I8Nmr9e{qew0Q^g>BF?mmpw_UMT5O%v7d0bHM`0LIz+~VmtPzWXy3C zGNlk1q*f{9FZ{^o!@TV(J+*i=s&Oj5Yd`t?3*Y+E?|<&|pX~kS>tPdEzsd+r^8o z{I?_QZ(RFJH9lg*>yJG1p>KZU+vl6zgVU2?44?mzAFrD9{{5GthSy#`y?;7?=Ho9s z`oWuj^gCZ%tX8+*eB;MH`4ijS>73@G@yGt`pZ(wcAO4RY{pbtVj*oA@d*}GpW3Wwc ze)r|4UU;k$VK}?XwQz66FTQ(n`9m+<{oc2qS~r(3A0FPQuHMwjKRfTzM?d_@eaPQ@ z?G&F%<-hKT<){kAi49?%Uas7PUx83ULTOT-i?;gT5@4M?aZ=UyWosH*r?nPGmjoaV3 zdhq1-!L~T()3~4b?%ALf)|ERS%7LzDPPf$JBu(oqcMwJ9g%nCDftX{V%#wseSxTw2 zNhy_5y1EH2Xd$^2A}S4HA39O>p|2ZN3AJ}_lT!m0p(sKkN?Z_>MX5=0st@oBS2vU&rld*$#jt=$VCzWLJ~xX0wCt3h`1CDWCVH0 zMo{5wXp4*hGBISNjKBg5VG4qM?o3q`Wm%8KxjYP*ZK**}F3gApKof9^Ir{1lP9E0E zl+)dvuDO|u53pq~I{N`7A;2c&9cQ6rYw>_e?cE)z!N^96!*TGb?2&lU-r_<^EQJuU zj4XEryrKrlLi1h`F1Wem#1q5}7HeHg43}c&n8&8;I;!XRK*}Mzjz!L~88=>-33Vbx zQ;R%!Hp=K-gYFPj?)-%XeVt}}-yU63N0 zhUMTVG9ZKmiRdPllC0IuR7S=EAef@SZ615+iU19sAQx3~Y_zGeI0w837s!qbf^Eu` z(v~4J1;!C1BUXSq^H|EPM9iQBs$<69@6S8vyTHYZQfe!zR7ObNCn;r4Y;AQxeutbQ zZ7NeZdjiRzQPw30A!O0PNCYT=u@EFK9CHriLHo#VGbM$}Hi<)agJ7sofnIRQZDoTS ztq}mIbTe|^oTp2dj`q7v+gwHz=VzPCSC6CLKW84DH&IzNp6^Aiy*N0_5zTN^a&Q*& zQbazyw7hqEN^rRBK9z@;!`RJo5{tcxaTo(r*Hq8$!&}DK5@Rb3ghO;qBRVSHsW(w5 z=PN5$6r(mru^#*P04YN=2Hf-=9_0 zVdk27O-1$7y`~k&YM*BYiY`{x?$ewzhhc)|Fff+DT!It~fH{Q}GU=Q(#u9WaDH(?) 
zaaoZ`xdA4nku(UEVsu4jL|Ei{o~Pz${cy8w>xLSee5e7m5HyR5b!|DKE~SYOikcG$ z5DZu%#lip}3aG$l(7Tp%1C&T6;K)TvT4*tm2>?U@jhO{!R9RxqSrk#%^*qlhMPp3N zX&8oru-`Ce>c`S8>kNEydb{nqLJ5mv0Dy8qKk(5BtAR);m6;JZrIevu1T^KGbI4K| zQaC%$0!&+jfPUO5A!m-B#J&V7WsVLd`W%I+eMpH@mQ)COC^>o}m{X8SK+GxRgwaqJ@(q!?FYAS z-#_U;{N$$(Zmh39dE+1aqksJG{RjWEKm6)fS|k4YpZ~?b^B@1GANw~x@$}W3X9q*f zH>68r!O}kc^z|qH+1vls&-}y--}?INKk~_+*iHL8XK!xy55PbA^mDCxT-U;JDD-n>6w){9%$uD<^MYuRGO4<9~!4Gx>TeJu0( z8NRYSxc;r-!E;YOdbT-VAFp1&bFTvY!Y6+0t=sn>#@wjmNxdzWhuYoT+}<6uQ(d)L ze_VFZK-%0L*X^yl=WjL#(qNIUMKztfaoMy}3f|35RA^-k(Icds0RT%OM1`0r0Rckh zl7Ts;1ehb2!bPH#LlT+1DLr9OkaJW>OHf!@30$%-kO>I^xdJ7lou!%E~R8HQpzw*4q~kRD=iuh1t+dqh=!@VY zkg+fW5C{gyAdHw}Bq1g@St)||+E%HsED;gC_d?=4`=-$hECj`bP)aO0bH>7)0GV>m zYuoOp9t8-AesV@>DQUOgA_84V@C5}L01%M?4I?2jWCGSwN&sY(BwQ)P0GunSH4zpz zBngV;Ab~0n289Gf5I|UnuIOf%=d4U*5I`2GL8^q1+$kjr2QJZjZ=@0^rBwa6J3f4R zyE|=`au}wV1|iF`eI$)%&N*YMre1I^Aq0j5n5w4khf^i%9NH^auIz_r3rC`(1x7RKWd6bqluI?+1;FQfjO!C2h0F859~|)&@iJjDi^!#tvhaIwu}Wi5?yp zW7s`H7IGY{t|?ih;?k)y6~d}dJ`02tyUZxcz4=s7=~8LsV@{kw6QeJ!sUXG4lMk&? z5g-YZV{yvp7=W{lfk>;E9Eqfjz)XIO0@LcCx_kaEBv^GzQ3!>aQz!_rWDPA50l+v< zb#0M>h(btGS?7DLrM2zJ*?C=8+Q2xDwOlDpTv9He$PfU>Ac!~dq*h&nHq1eSG)*@L zoB~UwbV*Ufx$HUxNffLaznN515pwXZt!ybJXGn!oEVhL(7cJy8owrpDEPa@zV$YYC zoh)(Bfi*RisYB`uj&rJOA?CCky|2}Ys&s&geV=4O;%OL|tu#l! zS#)}wcG^^-KqzQrqhLiPGoA)Nlw!51ZOyw~kvbx!6d73OTo#?$?H?R0o;lk+PzGI^ zgw#Y7;#}1#@PQlya?BcpWE)Zpd8A_CBIb`lWT6Q)0m_sy`Kcr2oC`*fP$shT5tlUt zN+}5;3VWfFcY{(&kZPnIW1dP*oN8mo?a*~y;kZk(#C;`<=9N+^@+K>YSz6pmw8(Xe zA{0hs5?N5(=2!t*B#9z&iK#H5kt*a8V-!*+7CLL^3nYun?LuH|I>f)_y6u= zA5`t~6K}ow?&*VvSFc=p{`sfA@zt+YdM&Vd>$Uw8PY6ERnOXkh-}vS;m!CO)`tl>! zyW>Z1{_=nIPk;6cKhtl{fB9E`>3{LR{a^q3Z+{BTh< zhxOgv{hG^X9)I%NFMriOad5qS?EdEd!Eu-T@bM3R==tY9^3`vDcYXDm%)|A|$Gh{* z+u#3wS6j~ByWEfI=9Aany?5`u_cup}H(z@3Yd0P_c;oh+lXI?Gx!G=?yZ-3ihYyZa zohmi$do)Vsl5@twl7v7;21Jx332viEkm?4_t; zCN4Pz<|33L)T&4YS_7TMy)a-<08kVnx!J_rRR_5Q@BMPQLQumz#T>Mjf{=)ub3zC} zWXMXXVHlJ}AmQdDr51oWCao~WC=>x!I?(+}q)SW6vzA(GZwfU+M` zWlUSGb{kh$+NHe|TB~ky52VfoLO(jKmYlUDDkUD)hx=4^uBZY_Ewo4uoD+G4wgv5R*(MxS}$oGV762Tl3k+7&0bB<96iYSO?Er}BdY<*;* za}gs@<3`K?{bp=UyXv%$Cv(~&Ntd9lE{AQu$=NRLWeI~HdQJzb&fXWS zhB>d+3xp|zS?F$#i1MC73qfnqauFeUQ>)p}%-l30`KW0nkW=YnKt+dRI_Xx6?e<)2 z2>@2he!tOLW*mvuvzLq@G^Z45vziCaFl$uEAT-R=rmbyCMcFpt42)Q?loE0%1(*$j zNxU&iUtIg=oCT40QPtMy*nYolAwPI$^5wI|G zDTSnrG3U&PShCPo#u8)UaSqj@^>H4iZ4g)#CBR&Ob0I_^W`MI45}+#?ks_B1Z|Tv| zL1CQ6-J>^e{p3&nE>f~Jg+ZZh8f4#zkBzcd*fgI%m1T?{X0UqH(tE^?9~qImEB628um7`j za5GKur=ELG#B%)T!Mz>)&KJM>m;TDn?9Mm!(ZSbW{{tSDw+^qq|GhUq^O?{5-~Rr8 z@wp%Q+`~KXedV{l^!Uf09^T({-Rj-bJGX8fT(+j)?wrx-_#}eS6=`1#Y!=+=IGu(`ID>br_UZpV=kYZ-v%A#Sl)c|&cVUa{rmUV zmoF_6-JADMFOGk3cDG6Bi7%L=7XnO(IY(k3qKb$ZjF232W2+c~yjU?=v~w=bP-To7 z=0=(Q>`%u}2 zhyXC>%*@6ZDdiY<%or0Wsg;z>jHSfj&_p2Qf{B3(Nt8k;WJuY8R3T<1iDS@O`%p57 zS}+R=B?}=^i~>+8<#X!D#u5@IA}GKq7G#oAQ)JRIWFMkaf2z4mM*XSc{%ifc@Ie3q z@$t_8g=G`LP#PA>Ci5!LY7oO zUrJ%l)@ZAYCR+lR#4+#d)&i_TWM3XG7Y8Q~{H4RgGkB!Y&5DJ9kPN-RY- zZr&S%Qp(_CRa@tPL}Fwgp)v<4A|fkUQn|}Ql`Km}t1O~$o_*le%Fa^+;jCGQu>v?R zysyRMRPA>|FLX8*_IK)OIwW00)+&k>;phS0hNtD zR2eGf6ufU#CuL&VF-nQGb1CNt7?}bWtg3EGTZ2~#Z9l&Yw6|vICRs8SVQgJZIVUbm zD1b{0lCY+sWa%SRjb(6LVsydihKp9lb4kTpZ5M^70t{Hs<2{8S2uT`{3IHmCPRR#z zAY)z0y`QyF5{Spr>Q8)_UnC zbbhd{8QrX_RY?XoYBTuoaHXC&PYDY)IkE_;mE#nusP{}pX)OS=Lv~7lKv^CV;es+Q}Nos0Li zmXZ#TiE-pvfMk#vAR#jsnL%eJQj(IR-~tq{tT0+}U^fO)3n2<{;*xVlWXJ`W5wrk6 zC{0!Gb~}c5^Ho0aIpFvK{+EdcJ@O3nAutz8JxjQubj)i zg`fEgU;Lf_&l4a0;6MGPfB2XG!Y|xDe`u>`Pw%|cEX<#L|E&)_^X#wv(*N^gpLzb~ z3m>liVxr;I@4OPv$7etK?91;yc zyAMk*C{1;>f9bn_vTkq68+ahD+uYqgd;h)R^sW1+`v;p(KJ(P8Z@hPW?aJG4zGdsG 
z*7p4T{CK(E@Ap|jPRuD2A|et46b68bL2C^_oJ(!2V1!g8(uBjJm|E0{&}f0PLy|kl z79(X)LIX=50})sO93!kR;WR-BBd3(0A|tE}6*YsCl0pbMvlIrIqyoU~e5|Tk$tuR2 zV!jCGS}ePvKVu-J3>RW%6396zf%`ZL6auki03aa*qhJKgfk{lWmy#r~lF~)CRf{Q* zR-1meSSovD1u_vuAB?h+Rjq0$Lf}^G^>#W1!je}?DJ4sA!YV54b8%XmN-gIpFz4l}p69bT zc8lg<9`9efw%%^0oY>ajf)i9Dn#e(#G)@}=QCG{&1Z!=G&d*QVww)#qx=IXEXHHQ7 zmxOh-ER3=E-tAe-t~!_}PavD7n%(N>!nTH8-qDHZ&_wN;+^+|I2-Riu*? zO74W*kK59yIG5T4!Le!&Pv@RQSy0PcInYT{UB=WZsSC`R`)S^4tr-l0MT*0G7f@Jr zC8Q$F3@$S>N}-HCKR>sPHAWA^zHSzPHnSS0-q?Z|t+J4*q>AC(2wCPzi#qcll#EWx zMu%yVB+%$FWM*y@nw)A?jr+cB8l`ZUw^1ZemW9ko_nZ#xV&0yKmDD_^-Buqg@^m>D z-_**_dnE|~N&#Rom0n6}>*c&pZM}?Xo}H_!<+(ajtW!UC#?I4TSurP9)iq9wzTI$~ zYqp>a^pc!NtCVBRlE&z@mKftgTMlU&=H<~riiHt{)Wd#UwB0<6)|j9+1>`u!wq52J zwa5~%I3AElWTBwn&y`&n*`(kFC2fTBVNO9=IZnIAbZ#uG_R>C{^2F=a;V$;3=DFW9 zs;ryF&qi4Q@G-Cip{tnWehhUTWMZY%m;*!RIasT+qBwYK>yR=-;_Q}-8d8ovk&-dD zbIwKdoo?7yR!N^uAud!Er_c+Mh=>s4oI88Ss*f2NwID-KVwwZHB8r+?s*rW@A?GA3 zQ!~Zgw5f7s8=FcVhM{R1?`LFa>w35#WaK0SA!5wAkZ>_*RnN0G*1E9Orc|}soDW*H zMN&%XBA6kBpp?!r0YDLeh~Cdii(G(6dKXBlUEYuq+M507QpN}@jqq_oR52AsiaBas zxsn{FeM}4h2+~w3coY(Ix^i^sg=e4r?Qeed-~U_xlSX54{`!@hzx{i^7r9)2^v30; z-NktC^?RTCfMfAG@-Hd+{WkZ>n2Q9>i(;$rnC2?EBs9 zpZL%-=i~Y9J8wV!^yiL@SzcZI;a9)-Q=j?t!_&>2(wKdj+xWJjpMKW`j`LOzy0rIguCY_ zBhL@cAKZQC^{MY~-#>lk>SO)A)7y9NR7b0X<+xd{P><;>l)O5+^z4&QlTSBq-Yk{C z){gya5Uc%}f9laE?!ABKV5|KZ(v zcWZU&thi?{-FUD$d-%?6t|S7KQaG{@LP<%CDJ7G@)Kv&U3ay21R29b=8LEZjA!*g@ z!rU0$0YQQ!g%BPRS=BPrI891ZXC)**CsS16=$FC*(-t^Op7y(CwI+c%XC<{j>zor* zgn%N)7&0?)!LC^$V&C^wRVfHDXC=)XVv0peN+}pEr$kChO$3FRxd4ZfV@{R8P?(XB z%X(3d!<<=|dNJ+JKuVvJ)>;<8!dh$RXGLCU1;jq5T!>H+X<+0`E_-d2k{IVnl8{=B z(}e$h&HdBz_oBuH$VeFqAOIj11|UKzB3(%$*VR<=;Aa3hWVwe_0JIRAM2x{0J9t+J zNC*`(0a#R4NmZCrmI&6E;-i+zO0%|2o_#H}L1~rdaqOyE0|*90^d%HR+csjeC8;ur zP#^;mkwgLj1*!lr0x8+|y)ot@siPMAY|J!f8?;uE;9@gulp?B-`db7RH=n6DZ=1>euPWir)qP7RaxLcM* zVi}9CG$t1{F%X#ga5{Zp7dl~xQ06IBWDaMJ0lfb&B9;CnO@t$%U$FosVt3Ofg$!DZLk%LFtTQnp|5q z>|>=(hNeJ9%>A%$n##}4ScM?QP!6kVoOTliQc?ziWVSgQ=#8orTF9kv6ap^<=zN-p zs4;a8c^c=#iFwp2dNF$`aaBP-R;YVSso z!sX+`wtkGY-FCYnqRBg3)dgf>6iQ(Uv!77QEO46rqF&*Yt0MP$V|0~#OsP~=!v(z$L{xRs zyUa+b6r}9JT-9v|NhVf8AeWqDCLo~%O3Wn@mGY-o%864dJ`)px%i`j{x!|j++7Cn1 zG^yk{xs(}|CZG}`0Rq7kXHl7yc}~IS1OQ4X35bPHWn_fC^P~t+Two?IqV9-5D)0eF z;1iEM{?yHzC%5mwasB6i{@?uLKmOg%|L{lSkiPy0uU$KOwBMe7;tS9G#&7=WPyO^S zy!qPM=fCjT@4xZo`}gllxO8b@?%zL&ikx((x8HbVdGyT}e_(Gun$9ML!<%>BrLI{o zu5RByJ>Tt*o_PF`GH?5!mvzxmRdzSTl;_ExUas}?Pdz((w;e6y`GrTH{POR={zHH6 z6R*DZl_zgqd+^S^kAL{{tD`I5d+X)(@#@C)M|bz`_ilLl!;ioF4t(#GhoAk-r`~z% zTbK@Cef{@rt?Ksby|az1#GK}I0YC~}VX6y>f*Z`9_<)Q?5pzOhDP@YWltMye;FyzA$`JzR zoU<09q)f=npp=v$V__|oK}DIVuoVg!7s@K71hW9FQR-3*Q)la4-jP&5q7>kQk}s@Y zMX)3#l~NaY{$=14l7)OJ6+%E3+ zry491r;?;pf(&3OIViz}m$SRy*hO^3)KSWI7$gXX#NN+RApme80FY9|G*^w?_IqXo zCMh+iB2YF}9U}VBD~mBkEt`@vLKYYsyY?{@LKNiO%(+;p1kxBo)zl$)DBM+bDZJ~) zf`qcyw9u*xe(qX|VXk#I4ymqnVwmPxO6g*#O`UU=Qj(kngF>TGP4g_Qb|tX{w@*r` zs#YQRoTCsz5bbumb-V7f&neV^rm|j%*(Z~zXeU5nkvW!9B48@OCBx#dJ3l+Cbk)?| zJo}V#NJ$x+-Ik=9V-nJsx&BB(SZiH?rpXB*jHyye8KZZ(t=kx*AV$u>Bv9?hOF-b9IYll= zQlPwm=_3*%0&wP(A*T`(=bVU{g_1r7q15c1vZj>6B9F5#B#cr}kho+5m%L}pz!^Ak z0YpS9rIeiakVpy?H2RnU7(gk-%qf%LboF2<1+CUck39P5gNOI#alCY^ee&_=nQ?v4 ze&^fY`TP(4#6SC||ILs6_{U}hcH!pX(UtWTDVndo_|m=YoyVWQ`OY7m%xcg}@y$Q{ z<42EfY1Mq^wO4d!tbnk`<>AVq3u77Y`x`f}Tz~H1rQiF;=RW_1m%jH3)ik@X-G-}I zj+C9R(UqG|AAbL}cfp6JpSpba?zyoGANsp}eB|0uiVr^Vi4T7F<=Y>4^7B6QlYi^^ zCqG_fxP0sBmtKAQ((1?l=&S$iCx7yXU;fU^@7{a;!Cm>&Klg<{`r|)->SL^FB$CqFJ-Z#Jgt-t*@|Js*-|DQhh!jFIJrSH7{#H0Un z_6K+F{h)1+Z(ggO{?N0kzV)>iU%vC;ojY&#{aO6bb4QzaN7W0L;IS(=k4*cOFaF_F 
zaN|A9{gqoc@9nlHyFO=Vq!1~uY&*J9+se6F037H_lgJ1GN}Y{XBFwYkA_b;YthL4% z;R*xPQWIbSzL?@I2#1pT7$c%6M5zcRC&Ak|9@vUg)Jo0cSl5m7QETmEspli80!X)BbJBt&K`2!fQk7(@CWS{E<(Mc?;d zt2837K%mifjy)HNV#UG>s3It(324Yc2vu^?+Jpou5lC4I`NYUnN|9RU93d63CODr3 zjyV~vQj9@!iP15Y7(z~nSPRh=(PmoY?1U3JDN+KZ1c6Wl5HZd%S`vzipXSC^l#2qQ zSiB@rgp>=Xm77OZvNmB`HiYX~vQh$(gb*?IngJ1XCr*bGDdkB&R#hd0=!d=JB+G=9 zqYGLavRDhpw>|F8;yG*$d0Gpf)(KPzG>Z(XNmlznbS=fQ#;$|W$1xjH~xv84$k{>50OJL7Mq@0yt zMAW2+WIwx{vk*c`l%PNW3JpYt5D`HLBHrxwZQE9D69MKaNS$5U3Tmc(rNko8EcN7q z1^_8FO0>ck<1e6M7Nd>iFlrb?2iYaGsy<@@v0RR9=L_t&pt!Z$xb2DJE)|ME_ z>M^<@y1K48^6XMVOeIuO@7*jQb0J_75=j|mj+jqQPMA4zx_|bd09H*SwB8OoNwTqx z5P9}-*bazjwM3FWdx-*&<`QS;l$Hf)a5GRUf-{yP3PXvQIHyvQkXHgmhB?Iq8KIz+ zbs`28a?GZ#$6+t4x|CF?%y|@|&N-GawN1V2x27_Q!KG9QPf%PbQtFYpK!%t!P>xv; z5n##ySd`GrImWn)uF?ic#uTFSF-AlrqF7pn?B^&XGIK7#DAOQ9$xTyRCO>dmW<99+MCa1(X;{jdDSkN)V7e)a2L zZV&1wo_*rC|G`U-eW+btYyaq_Kk3@zjXPVKqs=Sw2d}?&>A7pa^E+Sq@UtWad%I9CW+O+Fm`>k)5oIiT=(#JmW*!SOi=h=@wwdv32;7W)&;WLjt^YH$i zC$Hap#D4J2dtVpY95(shomV{``n28M*{`oZef#$7&pmtP{rAsT%j0+6{u3FAAa??IfDJBi~i63nV)~KyL;z!?z+dXKl!=W-+ljd8+Ne- zKM|nB@%56^wZnrr?W~4c(cY;!CSA9FnnuEz^Ax;p41_t8M#07!iBwz+C6|~qlpYcn zUMhQ@ohH(ZJw(};pk=Z$F+&X2Co9DkzZUJ-yo zFsVpGAeBu?Fk#8EWW!`*VPXIwNB{{5R8UGr9iRXK*p!P@8VUfalu|A_0Ei+AG8K^s zsZe4v03jtPDHDv4XTU9yhFG~Egd&OprTW*bF2?W)_T;3L=Y~Iv@!}7d)BIwW(6bh?1BdJ~&-2>YU@QOpWdKp>(SDK9Gc% zMypf=Ik;1)4_x3Bb{V)f>wbP<^d(MFL@QL~R6i%yrRya1bDy{{i0n#=KBa^hNKsb1 z5JK>w016a>j6eYe#*(Y1@th|&2tg^wlm46(IVcKIYQY?HPQ`O%$Xa+8bPJ;wjb5ib z?MEl6%e)V)gr*b=1C&w-AZI3Gw-;4Y3yFbz2#(1IoI-Zp^4MgGF)O1+H)>^v$r~k< zQb>r*SeO_R7Z)7>T zf+RqZNf*KUk_L8}BF0j&;Ms%Kr{^bK*VbLN-|t87>bf5KeHK`uRG_vHeRNAGlFB&@ z0L;LJAw#ADlsT6YBe%A8&OrfBNJ8y{MK zoHJ^pcDo(OElHIb07WSP1uJchlz~qmBNs?f0uYpxaRL#2=^~?mI>h9qkQ%&m-fKx5Vc5LA~U5V5esl3;jV>CWecbHUS3)hY$%A5*Nr^1eaVgAaX9o81FW>9{=D6Kk&q* z_3DL>ed?e5^Iy4j>(*nBJ@$#`o_+J}_X1B=(c5pn_VnYAfAZrW`;FiJWjs2Mw0ifA z;7@egoNacm*{XTv<#S)gcfRqUIJI2feDChthez%C={YC!fydifpzJ z{Kt2j`I~Y}fVunGn({d33PWioX5MFC9L1R4+bo?~RAU&^4>Y)kjym-S)x# zS3dOIQ=8MEMf33V_WCeCe*N&UT7T=SFaGS$ef*X0eyxw;#{ZYC{|>e-z3#)nwbmDQ zI`QW4a(=JV>)b#i5FkL11VJ%Dlq|`VX^lZmHjd z-L2`XgL?DNee!?zjcO&3x0Nc& z-BGg87++3TmX7alPmdix*Y0JvH@CVIzkDL>?d&W6@P$(+l}c_6_m7`FbLZ~;tlje7 zON4f-i^8s{lv2L--dG>17hlQK>6 zI#x{#L=>54UX^S>w9kq*wBGl&vmYLode2-Y?grkpBer&1+hcP>p_MM`P$ zl~Ed5(o}^Ig#$lOiLtG)*I*n1etZRnt%#Aw<%%|2#{BKnO5l z-%7J-?U*?sLs}h7r|mo&)zj2Ss#OS4CyDbMk-T?-L!zv6F0~>eP1ESaI^Q?|8!2!! zL<-0Vl8^y~iRH+iSxB73!+Pq2OL99p+#^(iiPAg1z5z37nBGbgi#SH zkBXX5mQCok+f`MTld-ai!Nj|ut*XL)no~KQ9C##rJ_<5N7Axij(a8pl!!7LNdO~9 z2m#Wxn1l+VC!@92qsiD9Lq-oKhk24=s;o9%cwCK?B4d`s@)Jjnw zl}2VzBq6w_76Iki`w372VyrR92yDb-2wVY=Yc~xOP1Z-{qiaGe+WENjhB!-;orA-o zD8^M|l2%}va%s^fY%vOkz*-u@hQX4lAxiWOqLCORG!j@b1Au^MFCr3whE-pK-N&B2(CII3m&0RcPd;|$ zVm%tY^y(KboO`ye?biM2Gml=(boTN$zRjZp0yW&r4}7Q^KNuaLUbuN<^Wnpt)_h(a zOivv@wRw1O^Wj#<(9t8OYP1WTbnD*MnX|{syZh%JInt=fz5T77aXlQ4JMG0JvF%=w zYJ2L;smX9>ckjmfv9nK{fArP2-f~zjto6?yJ+b*9l3ID}vVP;`8%LH-RkZ8eC|4)? 
z*^$?N_|oFirT5;tN`*^1_M<=k$uIu?*FOEpKeN5JyBbILA6`ide}C)tpZ)w_nog@~ zQvMHr|2O}Sf9>;kuDzER>i+G`gy+kx{G}@oE}neuyVt*PY(4qR(@*{8|N8PP@7(;$ zfB9c;aq`Z~UqAEV=fCvD-+unZpAOY_H`^K=Ok2(3^Ur-~S{;=2?#j~Xrj+9+1`oEb zI2nBW#lKpXx0jY7rG*<;c6+^KVfpCm@7!sEyM6b@`myDerL<*=_4TE}c&poy<%Oc& z52k59|M*9rdFtt}f8(n!JoVgYGCXy3{r2vbE8SdozMhtycDn+aOb7jV7hC`+5>h*-HMNs`(-rId4CM3Qt4 zfiXl)fQX_ggi?ZU0eY&oV=zcL*%Z(xGQu+PeXTSi0M$T^s9Mw{V5K0BF)E`g3CgNU zSS8j;)c-%#_fN{O2#5d>gCHSA$$`40T8VWb)WU7R3{=pRMFJ9yI)q@AM(;6-c($1m z!JuR&XVhd2Xo8R_gNY8#&*Tl(rWDv%8#oFhsAw5xewHBwN5V*+nMuc)#2z>TFp2+|o%56)TZI^!%5#9F?(%W|Cs) zhPJpdn^JVg$Dow5)--iwfj~hpgyQS^!T;p__c%rfn zNXo|;g}rk{o;qLzan57HlQIXYE5A#zrwr8o(1(Ol<(UCU2nj*FkN^m<8nZ}1to_Uf zFo|f&K)ISk#w^Y1+L5JEH40HeJ?$y2l=>hJg@A|V@oKX+}4V!%|F|b9@krhFJFhQwpY#FMn%9_MCp(;lL z)N1AXhX)$;z(nsumvTbc4y6^Vz>K{^g)BS+f(ZyUG2ny4gCt3u#JZd+uSqFTs;;W6 z$f^)rp0czC|TwsHQq%cc^ zGG$d81Pq)K9%4{N2?h=<8UUb*o&hWerKptJY7>MbNB|PYEvwdN6wu; zG1_VF-{4-h7B{6hg5SONhe>~~W_9!R`~9Tdrp&7T(Rtl1;KuHKljkkdK7IViFb)sy z9Del0%UADTpO$_!uEyQn2L~I8#jhEiei}d8#?&keLJ729vB`~G7$1*gu-0k~gXCM3C z_kZ{D6Q8_(>s_rEUVrt=mmXXHwO{*ZfBkR#&7b(p&wS~NfB4?Rmw)P~|6&X)ckbRT zQfYUrNt3!fJbhw$d;873{hd$!)qnkWzVgQF*WUV%{+)m8@}qms{W@(-#D-Wk~{=!FEo%O4?e)HlZr*6N`3-j|khp)`dojP;!XvMdZC~nfby1ALH zCM%boc=xqC80h@^+Px`vR#wpHcDu!{S?J_C%Kl`E>RNxP_uNN6`OdW)AOGA(4mJnV zH%5JGy)!sCy0kps=^rXpbbEXI`$iXeTHHPu8ick@(I!I?TN$}nUH*ISDFO^>WQ~j_A{RtdR2%H1; z#U!YthN@3ta zTLp@=@f?+)7g1!iQiPZY5a`HJi4GD0Sx}gOL82fMVT{ZewZ%wIP*^|}7||(5wUD>o zrqL0)NftHh6SWg>ki|GUs1v?!-?>X5^_e*vip6jN(u{+Y5Y-(HpQZ`5= zjS*2LH-lGoL+nyR+Gqt_dafOj)K2ATBQcHQLySsCPsXt4P&pSoaP5>*5}9xEXcuDO^u`tqEgB^H-d&3Fh~>GL?8&3 zh?ocsDivaALJg5QM&OvvI6F;Uw6ZkI>#9anq>{37x!Fg~rd8hSw`w;46Q;f~x>Gx! zAO#lEx*-NDrJoYRn4|d6PAqz_4FwQj2t>P+!&aVGv8f=YSz7t9Jv^wR*U?8XsBjvl zB2uz1>sTlQ7zHK77|@FZ1qy_iL8XRM^i*2fq^gT4mfrfd>p$ymqeYac>Pk~B+` z@noQkX2Gd%fYBJ`SOSNdcR^#!ye;y`G(4D2n&{aHj1ov1sJdBuQV+YTF>z!KHi0_5 zR#Q69wGt50X!W3~5(2a`t!vEeuqoSlyP4Dq$&!+QY$)geDKxGTv0990H*@Bjg)u}h zp1pw1vzF&r2Tw+WF*Zqq^V7yCo*aCQWRj6l&qfq4usUD=-KQAcWcg zNaKB!0E8Z8T2E(291Id0tb>n8!8;DvL?1wefdVxg7==X;SP&Ud00;mPl?W&mV^jnT z5E_5t_|fIX#a_2Nc7v-|?(FX$E-tK&%jv)NEC0%u|KPX!Ej`{`dH>oEKK!v4E`R9! 
zy<0a|7tY_^+P(bPMbkB8c9JkRxYC(Sd7-s3jt9}Corb%MiyK>a5#qwa^6J7u*$k?~ zhcA5O^8K6pSKr&UT~kiR@4w9^E^Hs%fBK2Xo5|qx@wI23e)6U7ehiZ=HGK}N5Uvy zO4kR2WaQGN3y;2arR>hHuAWmjZhX6y9^2b{ubAg)U7kPn$(uK?=e@ABe5~4?vaIe6 z?p=852>Vteaph!tdwZWT&y%(i7>oudk3Pb%KOT$?s@0We&pvW-XK&o=A3bySk#FAr zs#w1F;P%P2?hDU6dH?pqUhmlL?cIH;FC4$Haqq521qcnm)HPG_5>*fuq_L|V1f#S; z*r&iTf}#QVgR*L;nMn|0uprjCjCEnMGL%Xg-}tO(H%*fmQ#UR{8vu!vRw@bykQf>f zi9YsKJ_0uefe-~?A3_eQ54Pqw^ZoF`jNpR@LQxcBi4>e}2n|P-;mmS_4tQ@K3mXF{ zJrSA4Iu}zgc$v5eQ5-NDoi=qyl9X9*4+dm38KV_N&*B*)^|C_$|F6CPihvjc%$%$R z0W^RW!6sA`+F+6-p(bb-eX9DbCjbgb3AB=|@~o@@!oXaRVSp;QA}^-Xsn)hCE3w!~ zbB@s%{z#0(DTDt%@ONQ5wj^AX<^2NCX8UF)%0*2+~S&7XukPZE8Tp zh)GsB=h}Kfrc+NYW-7AFxRvQ-;H!y>F*0g$J{qM)O`X|9E8GtuwY0takRXbSd73d- z`&DW3)^ISLPVS3uVyG7Tz2QM+yexI{{ee!3+U9C+njlIrWi2+FPIbEMtR4;z6}L6Y zRAh56uJ(InHJZB8$B@~sM+qTLtC4LoZIrSaRB*8gj9L+y3`{V_fL7D0E>!9Qdyqnz zsyZN}0|Qy>L{d`G*NS-JM;2Q_MKLw7ZJMdJMq(-XQos$gO{H3wI#(` zD}<^%OpNXnW)LhuT85wqt8$zd#(STdECkRB9dk1?D`E2UEo!T`X;qAJg2sE|-V$mquVEGx=#3IM>G5?=@2os4lBh}wxdapKse z`SxN`bUN)t=XAT>xq0K=$@K1n%{QjQtvS=K9M%oDQmvGlP8yxs7!#X$&rKw5n#Pc+ zTxAsax=eIZ`REMS;5SXEk+{TTY*KfRa;^dq)`rNrQ&pz|QH@@+;$1gwqy;ooBADN%3gO6W+`nSG# z^}$1+9P)J?y-r3Zv3jj-HYbzOSdJe%Qp@4abUYs6FMr~5gL=EHch`@s-r60u>`^0= zt({w`ZDksUlgZlh!XJF`i${-~IM}Ne=2w)NeCO>i{%e2pFMs8$?~3VqY+v~BbHD!2 z|30J@wKiY4{JFt)5gO9kojSR$lF(iZp5D809Z#Ko;$Sn=#s2crh0U$^QSx8@rN8z^ zU;B+ueEes(A568hFP=Jb^~%fdymb9n{^IAp`OPo=!Y}^f!-sp9pT3;st#5zxyJ@FW znBl$6mwxeI{rI!bz4#yet-n33cAtIx#oIUd*EJa+Qf!>#e#4>peEr5mJWrY@Yj zJgje*VN1{DE9d7e07hZ0R1+G77(&#BAo4gBE@R4wpj0H^G$B37r=c zA0YHi*7ychSj@7jsx(PC*)Q^(V+_t)#Bpl8_tvQKaFlA()~1z!A{sUmh0u7{YcH`k zNC*6wx~x!R>W&?0kkpS5?!xU$h^T zL+7Km*|@0^AIfr+W+oaQYlKjxqKrs%b!yVlq)c>H0!+2GPRb_MRHvCO%SkDtLiIFa zfFVg~w02FcG4-`75{KkDkP?G|Q?eX^RcHpgg-u)xlxN%fdqunDSgNYZi4s!cnoO%n zITb|b*=wDZP6{}Xagy7{RhEoO%%rZ{1fy~ai3yEcYIV8snA$;8*}B%&6iJ5&rghoG zF#@N??g}%4*1|kV!D9+6+Sf`Yb%+!t#sHOzVN@!VG))iV&=UubpiILYwXPwENbr%Y z9yHT#k#il96r^Sxy#<`=j(`t1g2s8(u9;gIRYM)yJgg4-w(wrnN?WB{fk~S&Hp6(> zbM3;ZUaPg!>`YYIN{)=B98CAu=2oQfN~0jT2xKdoW|Z(j;MCt<$xk!L^~1wBJzUS8 z|D{j-#l_>tTPt&CpSV1+kgXTWGzHNUub_h9?(K{;@*eCO}I{JmdYOV^z& zIzH5vN6n<}aHt4E0G4*%^3G4Z2bQczOK&7`O`M4Mx&);QM<$T4R}m=GL5XJ1K4MUc zwV9T~#$Xab35G-kU)dlb)zCC$04AK)n#tA8R4EmMsQn2k*_WZ#pcw=p07ceD414e) zAgMexM&WG81HMVLI6n<2%2E?5vJ~W?lwd>5RhrDcVk|-HsX!LAM^!=at^yI#B!rAG zs4AlpvT1NW01m5?lEf1>E%w2G&g+aL#O}IKmYaBQ%{!% zvU%hEh7GP?d=DR(%X~$ znF_~iT{m1}rKMqp2;3pd(J2w~bz2gnRa5yyg;-B|-7fo3Rn@*e?AS~i?h>5Oh zPHg6y5CpvUM384WCh6g1f{3g&kPue{qPZapk$rS9QC6eY3JFk~;CO%l7{NE3D$0x% zkx1Z0%Bs=E+-n9_n}oGSCNCOMp+ywniN+L_)L~K+A}Gpq*;E=t5I_NH00NOH5tsxt zKmc&SQ2-*&iM2uj1w^cnW;TipC7DeivKEA~OO2hlS{DW8d9yc~&pPB{6(9wjD9ed4 zN@=YFi$dz^HXujm*wryOO~$55aZV%2T8U(~avB&(C2XZe?4)V*j3Nk1K$tPriLtgB zj^|sg$#m42?;q?Bt1aHcE=|U}bKPR- zz02y;)5VGG#(7d@x=wn?gG4PVXaaCxA&dfq7!5EXwxf6-6Jx4+LRmJNPLd>vfji1X zSEFzUac?>rv1>qTH!Ucs#D}U55^Eo$fy!6j*Cw+Zed$@DAfZye))1joIx;gYMka7xR4uT!PJU_ zL;z+NFfp?++psP(Evl?qsHF;0Wc$^mXIcg5(ttJzhiJ7KxXM@q>@}cL2*9AA;t(}C zi3-7qF9aiC<4aSEi$tPuF?Lwe@#|@X#_ISFjGa6&v zlx}{0aeClAC+E&Q_2z3|%{%LMq*a!O1P4v2Ri>~*N*#{2$00eLw(Idd1BFVCzP5i; zL3-rqr~d5c|MsaDkDa=_D6vEel!f&;Suw-W1oCcCN9Q<_G4sch6ZLWkaPqOgefsRF zFZ|R0cb?pC_<#_!?dbt`QcVDWVm!nEz$_xfJ|fR1odgO{II@?DxoWZ`iU=^x_>m+Y zql=tmI*;TNa0FSvh%!o`5bz3}L?Gp34Vql2A~1wdfDXbTMnOU(R0M#GLWvfUz%lxW zz(Hv}vx3(YW1M+w5RpWVks|;IGZ_yY0G!4k=s5KO9FZs@RKbBT z&k8?BVPFveh<~Da{E+|>z~kM|`r2Ff_UEVRcoL=u_ePs1=NA_ik3G0|cWH6`-M8<~FD_+W{m#`lm3rdg z*7XaIoWyoKoQzg-vuM@uc-rmXeDBKWnWtZT{rg`Sj+T1u^=g8*-km)5)9d%H?=JV% zqZg9#XzzRPzTSG`)OYvB7y3`__}-O+hr{vx&p-dz-S-}5`Rbjkhk%9gjoW-bZLdB5 
z%O_5+og?r6+3)<*pZobgd-me-@BQ#w>uRYx zUFx+?-Zz7DA9@~>`KO;)Ut7HJgKxcb@Am%x^*{W3-}&~pN5cV!pJpLRXZr>f;-zcwLJod2`>7}QZ?jGFf7PPpW zzrQ{1Wh((DI}h;Kqus;Foh#UzNVUkXCGguc8BaO)qb$IS%Jm2S;_=5PY0V1y1jfftd#D? z7+LBjl*&SM7=mbR3E{9REh-W=nhrzR&Jr*T!J)`*8JRgrEJ>%Dv@_#vR@V+K29xsc zq^2wdLqz1gn^sdIVok;<0);Fh6gi483M)Va!RR@LK=gqcT|R({Ridg;2hWMI2wX=m zgju`QcM4gEg;-fqMxjLZ-bZU1fSD^bcq9p_rM6Bq3Mc@JsEANlKtLb@B!CmbAPB+< zV;^mbY6RZ1&xmr=O5to&EJz5zsi~&Ze$f>d5*BNV@wDm9^^39w1WRUyCShO^@e&iz zh*7iwh*~4FL)Pr0OJ*8h$ZUpQQYLW`074oQBqTyc7GWX+qzIj???YL|nx_ZzxkW|| zwyB*ef>Dwf)6_B5nShQuG6y3%&2)r=tlu=#a=xX7IW--Rk`;mLw2&6WDW_RoNgu^i z&q>3;GRs1jS+t}o{4^jNWyd;Eq$3g_u@X&8z=<(d+1fRt$XY8=Vi2pfMApj0=#A0L zlq5-2xx^SC4#E+|H;q^mII3FbSt~*uS02DBjXG&f8?O9BX`B&UopZphj?nm~6BQ#M zq9K)_B27jS6CO-Ur4&RC!OwMiE;tVaMrw$WqeArTvLp+U5vdAZS#42k)InluGw(es z6i{PdcXgVQ4n!qHu%tX&22vb?2d$iIP^;Q|(u#ek$cYFr8d5O?U{nIBw)?I^rPC~H zLhvZAuJR-`iJ~YvMnnuEG0x^Oh#+ne z$TTz!Ae7N)(!OzxRYj}cxGK$)UAIs5at=PLRO9y_xo~=aa61R+h~8}7v6v;Os?lVz zoweHC_a0n7a&|%PO!j%uWId+_)&AkRwIBQCzw)1c_!mE%&c(Bs1odS(u0b>-Sh&`J`GqXWG z1JPM9un0#W01?L$0?Yyo5D^K9C=v=pU~!~C2n94CAtB<-7kUbt-F@-P3DKlku(%dA0XX>a>|`1t4l!jNvia{GtRoOy9?aKCPCxA5|v z>Gy6v{Qe2Oy0m_BYcM$W;l-=_->!W8r4PLr(O=oRec|}2>kpf&2YW5-KJjDczV(%j zh1HXhk)-_ii>GWDCX?>YL5VB9y>k5IiAz8H!7HEsu}>#%63dZ|;oNEK!`;P|E%aGpEPkeJzQ9ryKL0l@Bh}-zCE%!kFVXn^JjkMbMI}wRZTWm?HQRYm1bv0 z_UCi!1YUV?``JsEA3d|Mv@E~->tDWb`J*@9U!Ko)7LUx`xq0jK(xu6E`Gv3j_Rs#( z&;Nzb|JDEGzxvM}KfU_!-c^0Na3=15JG{_x8H&;>pYF_aD4dH92?o`K_(3 zg`@qVbNtnJ-}sr2ef;i&I^h24^T*tHyL;Ryi|@X>dGSm#KX2GES74IeZu?>)kN@6p z|JNV+_~YC6?=CO2-gy1WlaGCBZ}LFpley(&FxtGkb+^BCaafw%L_cXpgGpj~AvBSv z?Ov}usOQ@2OtGx)Rn0gmkO&erN3ApqbJ6OUG#Tt4pu(z})*(Wo4x0u9w4xBC&>(_k zHOW;T;K5?j*K9x!+Gd&K%E{9UZ*0CI5{)9q;9RXylR|}HeN>1jf(R5rA!M08129X44ACcu5mPS&%!Z5s4_J~` zTJRv9%wel2Z75_E!XkDQmR8qRyvK-&W)1@cqLgX^f=NS&N!G5TPm697LuL#SrC`bE z7MNU2OMy~WGZPUiW^EbSp(oUcN^3i5MyX1H(8U0VlUQ4o)v;;k7^4foU@6P8-QiwK zwUk!VXe5f!R*9)11AvYQks&QaIDjxi1VITQN@OrWM8w5tj2Sjnh+$MVBBD?O&gQ=; z!Fd;zQl!+hs+){9i^_F}|!)Z6?8K6@84tP=`pM97puR z%xqv5@2XN;f(NYvvocZy<3p&&;~1kc8e()+-Eb(Nr?aZA!cl6x+gb$ay-APpi zZStebH{QQ?0ClgO&;gEu=yd+rrDt<%I%)Fka&i04HC?z#IZeCEkrqGuGk@pb`&&PE zY!&vl@5*qz-I$HL+ml@`^K3U-D*B4LewB>PGDR)qj39~L29LpwyZ4VgvT*UkPrdYe zw-@Ys>87K4v`7+W?#0G9?~O4Lgfua;4z)0fC>ACnq!|}PqW54>M1(nt7zHS-gi-Xw zp!JdjtSq8X=A^bV0q4aVmIea?Fae67Kn4^60Vpy@j=-}F3WikAh#(3ug81k;M3xX@ zpeQ7u0TD4GcoYxZCLLHNp(%p{tpp*=d|C)S$0`J7R){kjqo^>!=mS8Ez6{O)EiW(aZttExe){4g&%F2Ejk8OS9jjI@=>Gd}zx;N6 z>+^s9uN}G6|I&Z=kCwV~we>;IpPT!{rcd77xZR$+Tpz})w|I1A`Sn-d8r|vq%IE*> zx#ju);lKIMtR-qE-FA-#8z27EXJCJ8{BY~hr_S8lx^?`_r3ogx(y}h=OV6s2*n?{fP_?)kH2Kz8NC{C@%rPbux^^H?UkIsqQ+P!`L#q~FC{#LTI zx^b`E*x|=7{_K_aM;Tst@yO!`gS(sM{e1oO;bE!r=Ji(|o>)7#PIISP>#u+BU5xf) zPd@T5zVtiKJon<^!QSJKT-vy?d++Y%v7=|7d-laY`pWOa_=iVN{BK@5qNwTfhC}hfjR_`@gm@hi*v6 z7nd5@aq#|oI~xg|IQQ6-)9L11e^Qk%ug`bGWPD_KKI=Vk@bLc6fBN$`9$tUAd+jHl z`LTcgt$*6L-N88AyuG*Fg*rZv(aM8+yUWL{v8RyK-3Q+|e&oqs>+x@1|FwRvovQRO zUcYnqt@Ec&O(w=St>`T!yR9Xb(fsoK`8=Due)YlV;6bY|D)_ci6Bzi0`@Q+`Mh!?h zF(K9vgCd)zdAnV>$UIZaB9PY61+8?9QIqNR3-5jHe6KerlFau`6*;Xf&(9ZgbKRbZ zy!74IO}{r79EPR|!TT7LL=X{y6j2jtQY4@Nl_)J>5K-LBh8X}pXp3iLDTx3oD)j6_ zWFpNhVKOO#HkvdN2*nryL@Cl*2c0RUBuWTOEJ#`N5c5)vRFDZ(;F2C)bc zn2E$F4uUL^R6&L~)(|xz3Nonx*qXW|Nt}Y(vLQ00Eg?3IR~lG2L=9+2X;h>r3LBkP zHT&Q-qS6cyl~Oj%BUjN^5hO(Cd?Tn>Fh)^`P17)QjFE&w2nY%RI8>=kAW|>yOeamN zl^?pTG{>aM>bgV@y{ur*F;qTO^R1+52!RBI5J)Qx9EDIYnyOV6ImtwiFj*9j7&Qx_LP3NGz#0(L2vM3S3N;C9v=xg86pTt`%u=vUbk-n8AB|{bj0;T=VCKvu z!YDv-8W!?4B8Ct$Fjy!os7<1cF&+&o z6NCdtKp~DG=tVrRq{@eA6O*NBtert+tf{Mt2}OW?wAQ%BGjj+bOR`3zXo{kd*(u@> 
zy}||&5Dqcy$7va-X{itpjjko=hydV1P-LR74QUPz5lbHgq7Oj@sM(9sULr=Zwuk~v z(?CKYh9ogfU`2Y{Pn#IDcm@;%L&6e8BD<#XzLm5>^bIg6^4>dFPt-U8SV*{=wqz2U zdRqGW$kK|qs9c#B-53IsP0ZZVk}j=G{B>3t>2BcK zM^<}_@NjbT-UKG`Xn#0){Dr5i{_$V>i$DM9i*1!&f92JOZ4g*qYlbqB!t`5HKWEdP z?L%j-)_^&Hk!TG?6>6*u9@}ptm$WFrU*D2 zvH%B<$V!PqY=CFgI7(0qTB0C0#a&T;CLsS&`HB!-l(sRV6)|L5)gdTrLe(Uw22dlS zLID6q01_oi69ochmcX4iBC^x%SkH zPw#A8J$7X2Rx|Cc&E0+PjR({1OOHJFGe7%@|LXt#pB_1Sbn9WYy0(7$$+h8N`s!O> z{jtw{@(161W!j`mFJAcCyDuLqI`+&dQ>b76o!>B>mFJ$gG->vaEG+P2tE+ST`n?C- zFK%vENOLxaJe_P@+j(s5#cTV2a56KdiMIxao!m}#c6Uc%<=DC12fKN;zB4#nKRVa& zr1Jfyp|o({-WX>^|Hfwe;9h@zW#R1`-@g39Q(MK&-SN)luk>xxLXt%4jZM*}S#+>RaFa^ymKE zo!bW|7SH zT$=yT&DZZ-IN81N#*NM*+RpaiL2G}{FSYCCW7ETJ;p#*R&+LqUtKa$1*6z`i3hKC% zM<2a+_ZwR~S1+7??)%^Unj4WU+6S9ExV*nOe{y336uwVMTv_^=F}^VwtzLg{=OZ8f zm9M<~Yw7yC)p#{c?GL_u<&jJ4mrk7C+1i(hyL9Z-mD^EIWNAK2TJ^14FZc4rCoa8s z>(1M23;mtASC7N`v7^&EKG@oxQ3FT-6$I}V7v|>YItPPcrWF z0E!3_2#F8{oJP{5HJ}F6qGp{jff_dr02pfpv8o#pvDQ{8jiZyu3Xl*1B}OMf+A7gn ziG&asQAm?RanWm;Ax#kh5rqj*0f7LK0*MNd0ThbXTD)TfQIrZv<-CchvKEY@05vmU zQ@f%lV(=V&J1+)ws&7tOKON4&Z%9I9#L8Fog0_+gHQ3xuHF&brsF}Hc( zm?-PLUs2guVhxa?A)+XrkqODgNZP5hZG*zOlV<1Dm@hXZl#FH{d%P6&xoo3ilMXr)jY{c3zyEL~|S?5VQa%H7O zM?j@Kd!rykmnh31hSWF)LO}u@JOWroBMJgMXb!E?K>`ug!DrDafyCG;fr|_nB1050 z5(Sx%(}>olk*N-n7X28#cMXWCf@M;L7Kt&V{7NnJvN=V@#y8Vj~>5x`J`>fvfP>P zEx-4T+n45_|Lo^KePd_(`M>m&&tBHsw{Ks0=RTOCH2qkW!+U;O$Lwg(3)A)9B5N3D z&FQA45wwCL0i%`k5~FUslHF$ce0Dh6a|lCsAfTg$T@^b_0s4VgXu^mDh)f6|z#@cEa3tQ=QUM2zoq!&As3m9FV5fnJ1(X^~V*v!3g6t%+ z1!aI3If@`EMWO*mbvXlnl`=&1K{Pm_G>sxPL!O%$8i)=-2)qEW@(6+u6(T4Q3;-^; zs4x(SK%BL+90Lk7AkI=q(A-F|GjU0`|HD3uC1=+i>K7_ zW&iN@sT<8#e*PCf_0kXD6GiXe8BbO30dMW!I{x7gzqmhX>hb;KbK10(!uIZiL&v*B+zI*%nO7|p|D-ZYH zS~}nHFsgRkIJQPk9_FJJ2$^E**$q&&&QRxb@T1vSXS2-2W4w}a*c{(y5Dv(}mYAM9S;T+VNP{iQehNt~AZh3S#%T)lH^eQ_bB?$P!2?a|#ESLMViHg$6I&b!yH z?>_eU$7`6n>iU^S7V>^FsB0=ZD#B>@VSp+7*w#zj)W?@r+uix`^yWgpR&J~-Jh61* z;vGdNaxm&;nQ$17MkY-v z4lT(vDF&HU-^eTo9+;ZinnXgP6~r(}NmFF@+-iihQbso+BC8q@Eqa#Rdl1q@1ZuWt zAr@dpueGR7ZX-_==F{;g%Z=7~-LNr=bXqkIFtH|KZM}C^aR}5)*G7ZET<_RuvY%&( zi`e2G6eDq}D~aWCOf&TDo>Ud*G&7GP#S#OQ1X_V42%-!Ci6Q_n2!YU$q?AlTlNysN z+caJS&ho0pZenc;WJsB&X-E?S=tML7D1e%XbZ}wjo7T!wAaKnM-ImYOGt`nmbY1Kq_(Wyv?sIs7l2M>_vpdlL&|w zR8fGmpeR656a-xGjza`!X^RMe5CH;W6h$HkSYZT+sAyJGi2xP(5F!W&fg)syEHKma z{;0kZQH*gm21Y~>kQo99|56Q}o%`VFM@{jWyn?FJ0JNLKo^0S}axb?kE?S1pD`)}O3eYCedmmGWh2XB4y>E|46 zf8iVd_|fy9JF)is-~0Rj`Ny6-x&E2Qw>Nga|A$}cucS{OJ3Y7WTr&UNAAbF#pMKh; z+2(`6;wqAlnQs?Cx6?^}HVn59j}EBUwhKp&-F@}m;k~=iPbI>m7asKvuHL-P$&l@w zO)kAaVRUWWb*s}jIezvrqPYN`Kw-ac{_y2beDuk5uz4`u+a5kF#>d{hf1};ip>S?8 z-CsUAc(DD<6Zq<@Kg6z;MSFC)xc}Z;&D_ej-}(0PvE?jg&9FLl{`fmrUL)TezE?i| z+2?CBT8$PSw!i+voqz3@f9l~|ue$z|Qg$AH{QU3!^Z!MGKYMEZ)z{y;c=EZo-u>3< z#Zy<_y#MIqXYXwtf_?Pb?t6B1mp2!dRF^{9&gX7ie{b#R3%;4yR z2ZFENc+=YM!r~FT;94OBsTG^58V8_$XKpmy6&F@kmlj*aFl=L-Oa^7!t|$miQ+7Hg zG?To&R5^ctYuEb-01{%WC`4kwXfr$4Ul@Pd`wwpJ91afz!59;wCqWUJ!NPzuH0DQ-B7MM!kQf6Ca|9NI8BPNjK>=u`6=C$1 z25hC7MO8KRAyk@Fs*M3cX5nC!w$=)&pg1^BEHkiKS(PNIMnphh0U?E?00CKqxg~lZ zAfO;XA=DLJanq0hOJPjQP-UDE(h*Y9xwc(hR4%A2A1Mp~Et6waueZC66H6*cR6-FU z0-(`^LN4ScV4xIMp zgK|}@N%1t+oyFeH-hM~-lmQpK)kd4lWSMiGBO^h=O)FJpv(s-&sP1=K>Cm|}>1lLP z7E&5T7*|P=HP8;_IB8=u1XQE|Nkjs~h~N=~Yc$3*&gHH4csfn<#5bj}YNFJ@yUf@^ zTR$lYxs&87IGbdolBx;0NlU=Y+OnLe=)0X_I;lcr1PUm|q!T|*fK)6j_@c9Tu)kXr zg@o8iTFNVMRB69$nBw83zA<>Lb8$q;CKpBLxU9{@qq@A{s5Z7A0s;XFb zd=-pFtCR6`Y!a=6(j+Tg>~k`;cE2urNkNnGLTjOPzH(u`)h8E$V>5|_s;>PUCaEZe zN)WZRQ&+Qr+BMcBMAUd9LTe0qPlyq~U=k>TXr>4zt=!bIhAN~*XIdwtrR1Y46d6 zlSg-V?yMFaW7@ksN_e4hS=(yY$q#nQji-*BSHtZ1`L*52HwUi1 
z^z29f>fiWt$+7SJKY#JB|J~pC(z~~=zWMORV7#|-_9KDg{Bq~I?cccn_Qv(ytoOpD zGf#I8rgNP@9izR<3~7ogc$5f$1`~|7X%EMvYqzhyJ}N~U?is@_X=1sNxEPruSi{jX zB1i-alp7@yMdHkfP6Ut1s|+4`zid#jH^F|KF<~*5(ZKbnIC!N)Vb3qKJ?tv zhll&;o_Pd1eh(jh_7^^L_5JUyEV}Z&+lwao;oINe+jzLQBES9nfAFvTgvz4`ENQRG!s{r!LZ>+{D>efq6^d*?ME)IE%lzf`|7={uP=Y(^2x>G;jNvI zed_1`>;Ljs*Ru8blwbSaAD&o0(qC2!^Q-T_yLo^2o%2thPJ4d$m1{Pc8|&V^ufAuz z4n^br!lRdt9S(jA_vpb>U zL2or}FRkY--5R_X}zhizIf`%0=nP+<|~gr`TV`BcXrE% z$LEiPTkg#1Q&%P%JJW|t!E~O?mX8$gUAew~OXaWV&{=H9oY{XbKU`<|A)9KRUj4WMgN}dE9ts@6uyu#t&p6y->x}xo~gq z&BbGM{^F_IH}1@}FFhFESKK*zvb8@fwhj)~SLYW=Z|?3727YmF`Cwab?;p&Nu>Ru0 zcrXm&)1spgMuQy-#AWG5lU~tYSXk)KEd-8R+k>;0mbxo*&7^4+X8-==y*GEM)joOV z6baOIeXzeDLLh;vsuYqUh#!oDf*=qP03(706K7Q#0g!-102m|))f^x~Vs(rXn<@c9 z0Id~AcFqxzC8bb7V9!+zF`zLDv%~_6bB-7ZwPs<9kQ)ObTBTfF8>LfY8dq8c+@QC# z8d`mCu-)s9VqMe%8o;z&l@yt=Z8IKo5h_$5z6VI2Gn;4w#h8SsNqQBO?%%@=bKD_WX1ly2V0_sMU`1 zWmS)K>PhQ}h$^W{wizmALx@dfmKKkXCPQtVve7rGBGpW*G%*TvE3+J`-S3>)y!V%;%%=p7}-lcWTB zB#Eq2+r;1`3G74TGNL50jd+96yV~ezDDid7NoQGJho)?%X=cGqlrkZ3%N8-R%~Kye zQ%Fs^V;k_JmWwJ+8qsarKNwBd`b*1;Ppm$3zPC`ESUaXd*6NvjZa(?)#+BW4adoZe zCmn_aR9DLcx!>w0^$u0%vX-@(J%Q%o*4GksI{bjii3|h)fRS(z+)QhvMOjiB5k)9Ut<+35G>asi6;UDz z41f@M#u~v05<~!jX8e@!K`#*ja5iri0YU&!NElgv@P{C16GA}zliU}rD8v99(iSum zxP1QXbC*wK#p3+pqmJXF$L3%9{_9J5@A{4RCXJmcefMDW#FL+YBk;hd*i;nIG+adv(A#{GAK>D=EOpFCxj=5}{CMhExrKY8}WFZ}NR z@bq(!y!OhMf9%8Oru+9s)8zKSo6oQI%Z8>o-yUsh6m8OUND|hZ!y{9%e9vr{4mM`vg`o)#EUp;zcW#heZ zYhm@&@%5dD56a2Vkx#8Uedy*4{(hScpW96er_ZgpvO=fR!%xvpAVS?ZtdSa+y44hG^1`Pk3B za5%7ot*aXk?j7t6s;05ll+Ly1=ZsAWOj%A`Ifc-G#26yX9&AO3qP6u+qqXs_)})9z z03!k^6{3hp4KM*lMB$)_P=sPoF&a`rSgPoemujlp=#IiD_P>hm-yBWCtQ;x?9tl zGF8U_0bx`+?T>(a~>mk-8QY?=V6vaA_It3)aEJ`_nS@+gMNP+Jo*)?Qe}e!ttUW9bo? z6>2h+Hi$mBJRU|D$vUs}l-Xh`)7V$@2Rv+>bO?s}SQj<(P$Ys|$7bX!FB;Wn#Z;=Glxx#Vzh8 zN%^44v|U{+rvC21VAAcJ%i8^V6z7upv&%pK-py|wOuu#Z^iO7Ko8WM9`5ZV&6g<3r zW&hsr-~D$!^*i7Ee|_~2um6pI<2Uk);nizbjvQb0lXnIW@!oBE^}T=bna}>E018NC0z{g*vkM~NjIbdr0yHb#BJ`u%Q2^s? 
zl@DB`PyhvfbVcw3MdJ+W;#t$lIAf!HFzfa3$7Zw&0HVkTdLa-2V2(3I0M7DS{-p6m z0r;07W%yBHMF9CzRIxv9cte=k;r;QipdyI?iu{C*EcE}%zxgZw=pX#|kDfa_p4=FU zez4ct9K7VFlVp17;l20ny#4T*b025ESCtzNHt(*^pI$%r=vTh+hpXA#^|)m^VSjSJ zJQyE&^7#uFANk#{{lTCbojd#JV849&iBGPr7sZ_V@^AkjHuZwIZ@qr=vp@H#(@(D4 zyXn`CUK&kQ|HPSp^k4k;YHR87N6t*{+&R2+?cD0hr3>o^^>nYEifX?y+JFA!kt;X8 z@$@zmKfH||_*=joH(ql%mMym|HR%cstL`iDPkT1#E& zzjbfF)m=PYEOhtoZUM>0+Z!)j{`7DB?w8L#`ON%+{l*WzIG0Sn^{sE6ICW$?zSWyc z%WD3;cOPJ?EoU3!-v0HS$1nWE#?9^F_+Bfmf}ZA!Pi{`@y}CBaR#i2g`Yge{s}I&q z_xiP)k34bi+UDkXoWA@1t3@B$ogBEcHGJvx@sqdiy4O=U+IO!O+)7duVn4*dv~*iOjNt zAja544l>)Kcv{ zkQJ{xS-+EXQdl9--8OKn`>i!!w>$0mByTc*IG?MsocdRSS^Q`M@g*v&g6z(tQG>FM4w@Qu?jeLg)6ca z3FT?l%5wr%!bN74L~CN6D1*ANEh{=VDp5_Q%T`(>kmM@I0NpfC!4x{vOcJ6JAb@t3 z`o^WoSW@C8rAelysjaLotu8f9jG9_qX11`(P$b1<&{(pXj3yPl(OR1%>oWM71c(Fey~g z(pe%pCzF#=0aH;Om8GH!ou`-;Dlb%;>oh@=u+G5F=O>TCVk;O)l_u=B=IWT3ban0Q z~V)CNhZg)CfrxttpO4}|;WpX{w zzSq>U78IFT-XnZ~n14{-|K$%5DpHD+Qi_Ox5D3`(lKsFx^ zreWvM-@LxD|KRp*ufh}O&%E;H4@o2WfJWSO*gBkhY;CclcdxziCJp9#;rQ*>ubrIl zT(J4}gKO{K+{)NKdG^U&e|JB{U--*^3x}6ZEIqPyXY-TKfBb8|`-SHpJMqjTr_L-s zxX^`u6wD~>AZja<iTjwty`R;3fc(A*Fw0q)2|Je3};oYst z!6*pRc(ng;@5VlFugx_F8{h9!<~GR>9-cZXyZ2uSqpRalSUmFR%QxOXa%S~oPrh*X zl{fe9+)8v|px@6=zW&l%FTC)=?|t=4cMjhYJzC4p-n;$q$!DK`=gwOPw0q^jmeLpI zvUAN~_qDga!e;xRzPUHqP)SlwVl#=i-g|F#F0G4vde^^x!EaMXXD_=!V9}w=CM;}T)o?jYRlo;Tdzu~>(T`r ztsGgse(QdS`Ocn4GKbUq9&QiDZw0b@pI1EkceLc&YQATU6Q9z@f zfkUX3F-8QVF#OA(S@oT5XJ=oP2oM20Km*)>IAw+sha3YD5Y!&Fg1e`Mtk#OiAW4h_ z2rM7WfthFEQbMY2tj>&53KVGTHMU4njitn4Y_!4LsH&Z0;zK)c_qubaOp+x1eh-0D z6%n^d?^&&bZ~F6GH5-}H+IK8w)&i&rsRqR7x|LHGv7(^F=gl-26O+tmxpocqlL}mw zsD4$U3#GEt&>UJIV^oqP&aqaxu*z*7eFN+W5-1V({T6oa3Ph6mlEg#oLV=P))CdVgO_D0f zQ|T49aMhsQ;^6mtodmoXDJ&_Cqy!Y9LC}C0Kru;8RC`nOg%4EtE#w3V^H63eVyk9gE>ksGE zbcIJ37K?Msy`Q}J=&{R>Ep|GKMJlO+cAB!xQI;lM0e9@;da)p0?G1TH=dPWwFL$%n zyv`IQTgqpS*?xHCD@(S2EPvv6 z|KacbSO5OMdA756`#WFa{dD@hH-<02dFP%z`~1a|3)RWi)7MAp&G)V%*EvmFCXA<( zt^KhNM9e0JipyyQd4zJju{ELX0bMvU{=5JE-y02LJ56$>a;6SoKe25^Eo%#{bE9*m z+QznwDM+;dTgtX<+EO+_l_`^;QVbSVf|wFzL@7~%XaQzdEI@{s6XisyqEu0?RH1as z=(g5vty@}kv}$YJ)uyXWf|Q`rf)$;#I4gPq&gRP1)l!u8wt z%W;_(sm=A)=8dx6T0iQRS2}OKe}he&J4SocYYmPdq1Se{Ci{^``#1JpTR-*j@?QGq ze*BjgTcyD8)JbU4Z8(RksdJFyIKYaVO&U*6L zrIYWx|Nc+?nP;xN|MsOzAG&k*{wF^E3zr}LIfdP=2e&@{$WJUS_wDJE-`g0x@#gho z`4hXh_s<+#bo+;Ms&jPy=+=XWZ@l@=vE}|+(c7I2_x6X=F}I4vgVFTWci!AR+_p-F zSO<8@F3aOV_uJ=O%}DuH3otpc<~6K6?G#`?0TE#}@bf zbayxuPc~#U!De%4j~{N@?ymA#7CLl6^}4ZjXXom@V(!^~ay~2D3wGfXKlYRBCr&(m z_E^vDzxdQg$GdgQ&Z88E`|j|dyms|=X0m&O?DoU0c7c9;d;9ixQ6}@N=dbN}r7%XH zrs@9vel@N4H#fD`omN{BDx(0=XhQ@H5<+Oa4EO!!{N}TN#*?EaM*a4s(Hm7KTQnIDef!o#WK~g zqhglmqN(a$(N+TOqUBxbs$r@rO{u9SMV>Wv*=uJ_IfWQ1TuZZ~s?j}AOE%+@i|)w^ z)4{M=*qu`}&Qk1WmUyjAvl zQsf|RqLi~aIuvX#(efHO)2|Ed3my8Z=?$rq^w{e$<&b5kiI?Yl`LVS^;|QZ(Up-eg z50{sXE9L2DFHPNj;q|TETQ%>EcyM$7-J>U-gwfG=zy4~z(FcC@xBu7wx}5CJFPbA~ z^9TFyUwG`|<)=UL638D{4>=|s>nBuJUkIa4NB2<1SzP$85F7EH66ZADVVT&YZ}oG3$_ zw`ot?Hc%#z17?U8K?`I`=h~!7TU3_OGAafPtN;}t5hzqe_6*DJS2!%8nqn{a$?wIoP%zBspbYim1o^vrWl_mkGa12cHg zU0*yu+8GQs>k}(aU%#@CumM^3%{L#k$^hZvele2yxzWx2C3SrN{;L=1;pl}IpV=GS|Ip`t!n797te?31*4qndXMN@9t*u*ATeds>qo-G=?qL6Lw7D6o z2|RlF@#`Dke|P7`!urx;Z_()D#)BQNle(0!+kEXi2d&Ptx9(S7$6|T$&1+jn&Y!I9 zO}H?BfATO?!*aM=`>NNQo0ra~k3D<+O0&4Ow6(u^>;Bf_`ZB5{NK?#j*rmt5_395E zyKt(0Q2x{lpC~6cW4<@go=v&AcY8S5dboS*(&ZC-!~1m@9y__Tdob21`8c@r$k{yW z9`1#9cX3!AqKi|7d7k@moi8ouG;LENCfRu7%IT%fO26wX*cm+B39r`0_IvlYnugMD zyng%6-3ODQKRoxuGqa6s?4UXH02Nxv1s>Q zB$JNQG!4;_8MJz?sit{mwb}<{5=g`vPAwNui&oJjjaC_l(k4}wvZYLX#tvE;7{Tdu zyVYlhb;MRHTk~a|CnZK^AF?bHh(uJ^jnRqLX{e2o#QP@AP_WJt6&--n6cVGDz0+zr 
zwOybCt;S(sTCF7?jB|kq$%sg_*7kaH&ef3(iveJzAy0}(X<|}Q87EYcc8m^D1R=^u zh^J*VG*?yDCT-6ZtI-SWQCMy zW3^4SwWOMIWEE!y*gNo?B{qfC(h5O?XwVi_ zrV|5{D6I%)sHrd;AWf)1tkQ;11I`>=2}KbC$64aK5>$*0>dnvus}rM6Tspsa_Vn`k zi@vvjh~qXL7Gx-=lG1LkYGnh*3J%k**R3jT`zelG9AR@nJbSJ2-O4kITZj@7(?7H?Dr?tp`^gxIb|1fAM=? z{Pk}?pnGuc`Df0{A-%Ve#cuWN>e_VTzWI&s&h>glN+#CJog@+FP&<}PAl3DOb9Xix z7(P5cH~u^S#eY{-4YAc?n#x%+(s&yoiHtrdX3e5mFh(6&16U#pXaXmI8BiveBDKjB zsPaE`q=Knn29%>pg>uxhBLOl3hQTnT5z>g3If-nT6PAS02o#_*rc@{cm7^{|r=U_$ zmN8*W88e|AR0~ZPZ3~qJumH&)AM`=DZA6TShCexGcV+|`VMbIC6+{_PrYKjGYRrtv zjLwWMv@VQkY17gsHccx@k;Q1zq(H!0E6f*Pc>0AWE;W05@7%v}_u&4?-csDE-gy7& zdtbYwCj0w0t*stozco4B(B+ZA#>1ke4-Ove4z^9I-+%9R71E2Ro*WI>jYBBwep_wb zd#l;oJlenH}_m`>X!H2Rn;Et&};w%Knk=VM*;zAG!XSPdSJweWJse4fk7uK=rDo@ z8@AED%XRgwx^=6rKi;2{-t63dxp_DZh9;l#A!FxL=30OA`@a9j*5>5mxfgS}+U|Dz z!iyLG?A_m*M8T`q7tJh&ZsRCAu2p9ya_ZFT-~HgeU7g!LJZUT~^wapgtxZL=;np>mG;)izS)3UMD3>EF9nZzUXi3IIb1uFd!Vl>e|9) zx=StFtrYh9dl5XDrVo$y_l^&6vGn5B-A&hU_KvnQ7@Tycrzgas3q`lo=BG!~(@=@m zU;k|4BTF?Z#*N9r%&48l^$hj57B8+v()#?hXMc43kSRNi+Q{Pj2b-43h~-G_OuA}g zb1+v}x%<)f-sb+{?v{}+2C*4MXuErmBMKVU2&19KmD1&JedA3#XG~CfvawS%42s1x z=yt{jLw=mv-2vKo{N6qFc=AkZ$<3jM$B(=1!z@Yd)&(AW%Fe|uMviKD6b<*z%_zHY zWQvn7ee&jw)w!zUtTboxlr(d!WNXMB4h)Y&lj zY`g;T*~tkAfJ{Lu0pmIaN@;0mO#om7D5L;{6k`IkMgSoV2&M=?M(NoBPB6hxL5%Z= zmQ2g18f!oR)T(Mhn{rLCZ4d*=2uKW-qp@_YyvQQ0xMi6Nn{g&6bF}gRjUmAzEm$O{ z^&umKf|(Ti5GY2J88ELJbKf#83)i=s<`GqMAyTWrC+bVv|~c0u5QG*S27Q5D`hSRLF;6 zI}vqAiNx7B7(=KX#{<*}CJ}X{(lp63Kmtq<$cl<1(n8RVQRrA%f3%^<)CQ7tqCkYX&f zR7wNQ2|$Emo@GF5!6l{yNlc(ZLNJXL0#FK~G{%@HB>|xb0}54^VMHLt24-3+Z6%JS z>uzG{%r*uEokO@{+H87x5zLy{%%w*_1>LwG0EXCeVxE(#Yv_#Rst*YhO#Sh>A2>kMwc^sabDbIKWXTIVQgjN`Z zfslSM*;)Vi`hxMTKmOAgG1pg&QboMv| zpg;&ihyaB8Co5eV0;zQ>&vq9y&{F#7we5*W3Ri{5T zArZrYTX90wB|_|N51P|1SWzEPvgDe@dFUb!@X&B#*5SlCDv; zqvK1}wWC2WP$X{>%teOHcv9_eb*L*n%Z;?HjmRpp<6dmqWuf@?@LhKogDLLpL>00clY~u?<`(iHf$+m6#6}ZbJd#D-#@urIe)S}^w46h zQa?UDT`1XuaY|w1M*imN>e|-MdT-~XJX0KwPs1?8UjB4Ap883xWI;WtmF$g!p;C&N zmeffcr{m*8j1hp!upL5(mV!&60mRfqgl1eCmX!(43}%|Ahr4YkXkB2 zh}K$53Auz)2_b+G6k*FAI1EsfVt8V(HUlYE3C5?ABc4c^W+oy!Nhm}F(u^anCvhkt zG`Jckyk{9+mI%{KDJwE&X6ZQRaz^4p&J~#vM;_F+Wtq(6rU6Ap3(hHYfCn)!n5*za=n;gDL5s}Pm0?0HyX-t4Fd1aF#*RV}g$eFVS zs(Dt^q@H8CS>_U2G;PcFhy=0HS^>*4T4(>8lv2~6oO4R4ZQBT;I2~)8QA0==OBgdl zn-;`Gq&$^CnWo`+9;Gx*Q=X<2W2uL(?NFVM13?V|(KzEFR1V0ff%BBuTww*EVOxyQ zG>N2wS|JDwpb0|Epe7>*r3OXZAlfyENw7%_2AM-W*C^VUK&cpl1|g730-((d`jcxP zrNkhFQED+}GinoTwP3N#%juCZN$1w_tiO75a`XDu8&^(lcqc8{uVhO~}%GER)F>Ke^rslpL3oT}y34?D*kO%lNo6;?jPEId8x zM`INFy4UNwWVUGKGCvL{(>r&MzxBiLM;q;P<=Jbqjp5;K(X)G7A_og`+nKB9bEaL& zmolyx(p8tjxbI-DcmyVXFC1^Q!`@*h9p5R{oqzO?exr)YAe`7xWwFl)iqnXhX3V+N z2>(QH03iTU3eGv_TnR}bLQ0>t8O}b#03r%7gs}!l{p21@AtC@`4G2J3L#zOSh!SED ziZR9zX{oeQS_!3;)Jke0m6BR9h|f0T3`8hEXWefD6O1tlYTh-l~$!#R5n#0JC`Bo3kuqF4i@08xNxNEM(OA_WM54yFdg6e0kC1ezm| zVIToFsVK#+6M?|1eIHeAquNPuEp89*Xv8G`dCz}S2L5yLBt0n97 z^a1D@Ww1~wI(`am^CApA<|1S9iFY=;%hx}#zOmmZgM}Kbc*gAyKfLh7mD0H{-X9;$ zpRZ4+IHOLPwdb?;-}>6$Jsm!Lp4*Z$sa_9KY6R)*6N>^rbh^Jcz z$;$PseBv0BGK%$;)p~d5{>|C*HC<6-O*C{kd+^cY=JHa=v*Z24L8m(j10O@9G`~MO zJs!eAP`|TrG|7tEDw5epE~vPX3Lfv&U3&lM{j7nGqRCOWZ95(y`Hwf>hgQ%iUo_R? 
z{@%$!_xPkw@-?eo%1Vfofcx1uo(sXjgVPUisID$ut-H&GHp33N2q%^7!+(5=+tYvMUR|x9!YmBVR-q{jm@Ktoz8m_ z4$6(IcK%`-j4_h#oF%ie!{ z>suEtzmO%?*3n36nYBM&I!}3UlFM{=e`g@~%h#L^KgEP~!k*!2Li}I+nNPGT=*rcF z+FTQQIY8>8zCPN1z$cH(Hs9U2ySIO+VY4&n$6Ros6wqg&Uq&$kno*My0}%=##?-Ri zyq7CFxt!(LuIpN^i!siU1W1M9nIBA_fo0BKKUsm+Kx?JX)L{UcggVr45H$f_WW{q% zW7et|*Z>e0t=y7XUeyKUX`P2K4#3DjVU7&i(e0wFLA2w~oW1-Y(#*04Y6 z8t$y06cX))D&=Ub&}q@CVQqzpPJzTs#%bTn6Cq>AW|mE-{0P|Nd^yKVMo3Jdl3>DU z>YBnK0o4-I{NC!a!GYCu`>o5~MpKm6yMg%IBMIaT_aw-5~JmuV{kzi?&x*m2Vk`z&GQmd2) zLgGluaj=m0YJyV-=omA%+4IMerXq(e43o4=pwa|#B!bC_=p_3*u;HKEo@j*K2 zib;wB=xU3kGL0r-+?OyO1>?bF43LhKV9?#ye5{!@4N{$`JfKFJp?ESK>>DhoBUvRH zL{pi@Q9Lyqf*4mgLZldSLm4JCG1MOB3wgHSP$-SI)+2*Zm=TwiWFnbTHOofOBuE-C zFd0aB;HN!|IEkQUMi?Y2mA2_<3>|zcQb(j0`T4!y$%eOEacSR59I@ouTo z;~Jc1;;6H`+kP+(F``X3w|v^p4o{9trPXSK_&kCnJi6OES6Sos{nY74s6ShZSMzHi zLH_jAh`48zx2Gqf!jHjZxW9=Hj!t*hKk@o&zxK<&mNL6atx&5V%WNxWLdzw#PBH@l ziX=jS0FBbj*0@ORqG7wtLJAOx0KpsxEV+##Brwww2?eOuDzgb82x^LzLAVe^LuQ~v zWQkG|Vh9Kl04Ku3$k5UOD2I(Av|P;$MG%AtAtV%rGE65zCTF#&VHjEo6B-oIIF2dA z2E-e`qOg)g6sTRmh64x$Dup5g ziXohXMxFytAu3ePv^-2p5a+mZQsI~-1xyYM39L_l>Wf9U^r=@~`TAGBaPs)U_Jbe0 z&}cdh;rk4b-cd9<^^rfYQPSz0PJ*yHlUFbnO7_O5oPqqz+>m(~R6SkbIJ2-vz>S!k zoc2za7M{hR?1%d|uFX$QhBfN^@bUlg=-^vne_}vC%({1We)QU>-q_vxUW?!vr#|Wp zDy1iO_C((w&aAFH`P#Mbz5g#h@$Ad%_up^k*%Pyuw+{!^%QxnmwS!o+malEz+nXyo zSnXS8sIpNxpB)`{i;lka&L5t;eD1-+4==aCFMa+si`gG<-BKbhS84+UyHs_W+H}rx6@ks;lty|sMFll z;+qiMwTT)9{-O-?#1 z#bQ2(5X_hcF~FVCBuklQmF;`S_90xWUo&Us7M@sM-+4#o=;{sg{m_bw#R#)wNr|7L>|Huq@DuQFJvk44W`S z2t)vDptaCK2tklW08oe-#6U|P#n@C(1B+6WMV_4(S;{mLJoIdmCVeMo7)-?(2ha(l zvFo~#52$4&SzsCU!DziuZW2T>lYTT(D$D1~u@ovR*`^&u2}FugELF;RRxTHVNl&VT zGBTNl2F(G9Ol)Tgr{XB)o4*yrfRHI zV7WR=d@WD{6H;L~Hlb?)S#DqnFR^tf*)G_juF?BfTO3Ca(EuYR%Y6^grgr4n1QIzq_DcA@Y2r`gL zl_`J`A~A(NVQJ)}LXlF4pjJprWN-$uh-axb0Dz&2lNcvy1fZEFED02%QQn;Jc}qLJW4phQl^wbtrZs;p->^1WwGMYbKMw4vB&^Yfl7HM z5eFu~fDJgel~&_m>J)O-`VGvehB9BelxxMYP=+ci<)f^S923%F%UQI2_}Y^{`?sI} zmFM@0M^&fQ?>;Wsafr?RA#atIr;~&7WqSJFXl|~05*}7^3)1J(5V>+WE(eo%T*h#v z;OzI_vRKhF=4|040uhaImLsB)NFMOsBHk?xqBXP-VmoGS81I{e`O~CU#h4`hez)1G zO3oG+7bga(lnx5sO9!fZFzwl0zM5YRcj9)iUGv~cZ*x3sBLS0sRM7LHBTqIy-rpNA zpq_Z<$`AkGkH33+=NzJamYA67p9mI#){0;PA%IXTArU+StO2BfRvKx5A<`7)1&suU zu04&SJhZbc)0PQ&W>YIm6GEs~DoqocnMg=YQNd(T;Z#V005wgfCE;Reij_rlB=Iv$DRqxx3q$pDVXV(O&y;LP(3Fbluyf_= zjm}`$9gYf~xwNnx^)_eR#lxL@P#V?dh0*waWOR^OKHk_{n|ZfK-_{T3B1_ zPj&w|f93pqxnfFA9&hZ;pSvJAibvV!Kl9mXdOUlsc--$^ZCzG_-g8&3|1ba3zgbP}7j(C7eP7zh2 zy-qNh6zeQR2)t%x%Sb+ z`>kTD#&V~}N5f#evN|_9JpRNNerj(tC8FYYM}GgL>E)}|OnW>SE4KlxEwXNRV;Y~B#gmu3OXEQH{5XoU zGM%w(a@^Z9Y-ONAG>s@U2u0&@gbgpl%SMCN~Dw` zO|%kO29#2OoK?e61D$Effr5lOl)8i{AhBshNlYx0X>C$M5hPTBpG<2o(f}Z+Z2}2E zDL_a8il7022WhCp+2l@(kg=SM<8rN*O*7A&O(Sh0)1fp4ZOJXHYmgi%ptB4bR-0J` zNSW^sEX#mI6kS&KHWYWed zrc?=wl3bPs7`akmB_ojaAPo_A6|^-2nFUgqCIGlVN55Fc=>+C;955`92;F)q7a~6v ziA@qE5#Z9KRuURo7(avK_XKHLo0~u5UknzNG+$_r!28dA*ON@cgsS+GvNPyHTC0Gmt?Kn;r z#aaRY6hNAq#$-CSGln5bqDV`{t0s?R;jGT9taEKRq;U0RjHi1Qy*s| z!s9aacoJRrGwPt6uFq0s(1}SfET$rat3%bN= z%{PHcg7(J3@k$q)&{pWMXA1(ip`^suRV2xPx;T=4!w9&;Rj1`Uk(VwH_`kHqKw1`PcvU8*XE{efsFa`8mJ6lM`;D z<0oEv=HLJOZ$A0#rw&d!;b1_6(J1Sg<~gB^oLe|09e^`tR2az>n|2b={heI{L#m60 za-zwQ>M~;B;`z&+qvJuZKOT-+jppO64NWj3af#qavoc ztM1%`jrOG#v&7O`6;8&RMCP?q9;Elmg_XGL<sR5g}0`rfZsd8WwUzjqF|^ z2q0zGg;Io3K*$+42meI1je)i>F*M>@bHOzR1?qW*6@*d7IYLNAE~1`md6Gldu!N8X zqiGa-j^<+OSZ-iC+*qi!22!U<6Ng4v3 zIG&wFgCxvM$K-++3dPAdHmF4q(y*U|hULI%(zC5boJ<@S#fj(96w@(~k&YWkAj|ZL zNJLz=43vnJ;D{Midn!%iL@-VBfGI#^s8ZV{8jcDs)I2nJCbQ;bIx#H^Gn&yfOvGS3 zqz0qN3nLQ%hgtI&P{nLFSEFX$m&FB-=WF=@d6_09!b`P61Sz#j++;RH3`<~0RUiam 
zSCR=kwk*SgHUv4qR3iJ3m`GJ!(zi@5)TH3%CovPi$i$>tDC5ix)6g)93ml-ON~K{t zqbM?A6<|RiS6U!#AVq-D$V> z!0;@Iivj+L*GOx9Cf`*GD}aSUP@}VPGR6`Bon;yTEG1C_VZb?O)Tlrr6h~BH3^Sc_ zC1RaH149KU!9uHyOC*4RS`k30iMYULm@5S^!~g<<5CTw3fi-Xq3keOC_GKCfJ(Ao* zB|nIty7}_8>n{cYsMeNyy`hw7=je8&S&l+6jlyD~Hl2n^9F~e!l;9{z3`hIEUoThs zy`E{hqrPvn(s2|oUzo2~8V)N>`$5?g7G%0mJO9MWqpe47t~%RVxwn3AYyUVF=*IIe z?;owlsS(G`zw-4jAD(>N?eBi(Q~#hF+`n-Cse{8)3d2_2*xx>ScjtKJ(t_fFhQp_?B2Ice3jqyoBp`?Xm zlDWV>z5B>6RAA2C;qgjk>F5~v`k>yrK*-{whwB%XK7r}{or7W(P`WF|ft5YO|L95p9A7nf-R5m};{N9gt{g_{EJ-c~l zJ&#B~nHJ9%Gm$?3{3pNj7k_eXp%L}twP&8+UVjXP^m~Klh4}~DTa3`Tm1P|1kA__c zlINDHMmWOda_ZWD{;m6;`SeRW8@^R^>T~&T{NA5_?aQyzbhP*3{Tt6ddvvgo^Bjbs zQl>`s{!uUQnaE?MviJ7e?+m8>nSsyx1f!NLk%GX;UjREI95D3SN;* zm(#uAAS>xhy)d*Z`SBrC*v)h=Bjwt{e95n6DUKZ9Tun&qpz+;j&&_exJngsb)tXwQ zr{ng?_$Zp#5L%^LVKSZqn?2o5n1Ya&gn_dcC9o`xJVt#l~N+D&RAIg zC4&T&RzN8Pfd)#eK=V{f1fauAnPyti`PzIE#VFI^)X#e^u`vMvX@CTTKq{almYQm* z0FVF(sU5?@3IRgmm@B9$Q;Z3!wUDt=07!tTMGTb09im&dBc@T0s&fLyMsAt~c?~0| zv|$phIff_>V#Sr^QVG)0pihwH+J!idRTkFj#nHIO41%$hq)8!HjH4{)IZ+fb!+^Si zm^}`LZr+@ZdO6q5qR@s_gj}WKOhuXMN}>ddBZO5(F%ESGiL_-xnPEoR)N&+-6C+|A zNP>Xyh33X2gHYKrGn`6JONvXa3^>{>AgkQEST0uUBf}yDSvkut8PHJ)NzJlLOg2f( zqL*L?0AaZKQaJ87RhMAWLkadX+cI#$PO=g52PPznu!5LVY`|RxC3{`DD26 zmU5|f#~rBstXP(Y@=*XOZ-y+4m5h06K!~JPQrm`EG%XDWNr z3&A|5H9EtwgjqyP!}hdN99Rlt0+k5?Ah{xzxoBvq{KZl|7g!|~d^`2qR<6KhE|nZ9 zPPGzHLB%u8ZDL7?k%UU56j6X^#xn>hA~a6YlpqylTBI|%V#;}#WPQ!;Vz~gHo4bI8 zGpwI$LZj}Mid;_xjvcZRYoEEMn|FEcC}{&q*Eq=L>gT$>HY3KQ*XykRxXx#R8_t%7 zj}`7m%5)kSd7+LW)#hv|8b#cjqXesPM8+Y@{1jq4LZt|T2xzS}2eKe=f>B5zR=^byDJ&2S4QTT-%rnpsYX}qsXXbGzH3Z5;gdl<- z(Fkd+G(etVrBMjt%qD~ph%g&waVBDvVgQr_TqQA}m>DolQ-Fz-ickYdNukzCBA}I$ zKqv~xnco-w1iS?Sh5*5enTvuDNX5ATmxY+?+Z-x!>t;9v_`z zi~+6%aW9He21_dO(ul**iYG}7q`^#+89d4`Le)YENR{&(+a=5_W@%{IWOrx($`j8$ z{lt}T{>h)3h&8?8sZLvCH}Og74m?s2@W3VC8_LnPwx#8cqP{M!MXG2`s4BGXx|{_Qgao!#WqOR&MkfS_y6Ea7gt@@d~^TKU;8^h z{X4(?A6tv(o_qca-~0COEw)On#-%tCjHbJLk1P423X+9|Dx~qH3k%yD_qWz}JgYwJ zag5A3n|hd3s~2}pALh%f=v*jUuO@s9%#UIosUQRs9GZ+-y{1TrYVJ?E(AjfAj-$|V zEXiY#DbtB^y2InedUY--j|K^X*5ds6i^=;hEPh00FXd~ztmeticfixk8#l|N&&GZ z7V!`zQbbrW1DnJqOep{%5l}0w6_5s^L<7wuKrm1ea4l1Xs38&Mf@3BKg*@cBVlf;> zlsG&k26cmwTUNn9nZQD(2|^s{giy_hZ8I7~KElAY^E^pC&rHK0lOxwHLv1JlJiCkl z%VLy8$aFo$Glq#joDgcH+-FoL@zirJOE@yk98Z;3$W)qGsEm<_(VS8d50qEZ~gE;_tG{rs#r^cfzK04@g#{ffSD9fL5(4V7-0exK@@lv zfe=o{Neww;EeBe1&11za%PLz=y3vdql;<8&(BGGbX_R>4k8p+pMy zgOrN|*sde1R$yj^m54(E53wRt+l=S(WV+JK2TSNr|G2$-I(NG5X_bU*T3ekBH+Fix zP1mks$4s-wr@cwBc!?=kE#kojzxU?uul_e*Fw+1;d}}Xy;VYHM2&+yL82r5-zu!8i zUDkg8(Vg$#dFz>LSMS_C`MWQ^{H4G1-&c_}O#^ILj9DTGCDa-qL?HkQVsw`2N06N1 zW3<*vYb~|bT5AcEiU_m>l2MizSj#vEu~srOA<)XSSQG^|Ap{VeWUgUN({v=cqp8OM zmm(wpGfEUTA_4IkmHtcA)!G~5JLnt09r{QIaUUT zN{TQ8LMcd548Sx=6oC-wum8+fE?r*zXaDm*yMFcB!}}lAn=?{MM6Kgaw^pgT)R;^r zGRv|oDdh{(Uf0bRX6lWN(a5&h!ugfGoqY%q2wI*KGPhKz&FuHOuYKib8E$R%|LD~# z7cyj7#aX+!;uNh*muLJ@WVo?bUdnd`oT_vw3?|Df*6LdG!}r@7l6!Y=U7BAz+S(lV zhflry%plbd){o+oqvxN#x%KdIGU=7w`TgyWDLY+Wao^u~>rwAV&$q7c?cILk+JE!S zKYaW2^!WA9T>G>C^asy2tDT*L{ShlKFK$14e{HpxjUf*X;@7ZH{qKJB7ryq>#1R<8b2Dq@ zI@WG=>^i#Z4~N5*bF1@nGjG26gXQJrVW;Q&e!j3|8L%%Vch>J+TUnKlc3!*jQe*A> ztv`D6fBe<|{?Fg}i}rEyU;q7|`SHDb4-bZ){>m>N9UWmJj@w5Xs%oP+=yoQ4eEr&$ zJMU~A9rv1*`CflmE|muT$TYklG{1ZM!P@!xVinfiq~WBu9vy+4*J@s#_As=R_HQM8 zx*c#!1^Y)2EoXV+O78FG^+r*crdQ544-R}Us6&Vk4~~(Q8;&Ow8sp)J^E8}J{{K*v2>q9L;4hC~ ztpNvWs0=E|@wXh@+I(WcUX z0)nyjoaA=@fDp2fD+azS7OGMyMO8fJ%(ml95kqqkS~gLs z&rC3lyFlb@zz7IfB2Y$Yh8Y!rEv3ZF46m`)EL5Am5VVrVMJFt*K zX<>Ly6pa7^p@lWkkV=7*LQwB{3iX!Gxz$3ysK7Of(?|WJF*TCvL&PC1+%2 ztrg5_q+~aoW=<0-6^B`Bo1shxR0^b#6u!<1m@;f1Ikwa0x1g3~qpr9~NzgyJ7bO$N 
zTMdpGG%#o5K5uK+Ih@M6L+lDY+L)}cQoUG)!7%ct*c3YN(Sn8KlkJmOrkdrbXPKET zQxmdcwPIdrHs;>_=$+NohG*&XS1#>s?($B?@X}x!jr@s*7+bk4JvOXhzIE-}Z@rn7 zz*fAz#ugsk{U%E0^H~%39_Wi#fzfQV`+FxJs$~5efAVc@!#k6cFaGq;{SW`|-%tWf z3|J9(Y;!v8od8ZD(po_cGpR6uLMei%)@Pu{v+1vtS||Z9gj(x_XHX-6kdzVtg&;y0 zYN9n)2qH?*8LR^UgfQa-QYkgUfoMAiRE3md{aV}Xv0FRW&7b{+r(S&K zt?$3P`RHhQ?oyV}U?kmKQ4?r8G*iBxXq9I5O10vZclLIw)w*S}e!oi%Qmo_z>w_!6UZW2_n%O?9?i& zTI!X>g?4{0Uz%Tkbf5rz^Lu}C~8Hk%8fx^%jj_H{-?$yWaOKKmZ|iF7!qJD_NAGO>?0@z7-~au8b^7sER)4wIwyMt3N4Gz4Jf=r( zmgsxa`^Tr-Yb)okox5~DcnA}n3fb1FocB)0LlPgPU~;j!RV9x%XJ;2@srvU_}YWr-E)_h`oqw&?dM*4Aqb}Tx7I7IMieE5LSgC$GoID& z_j^;{t2L2PwhHCTON&4L;MQOH#jheJPxlAE{`ddUci;TaH=ext%!{uUTMIvW_Z!DY zr$G?D`ttLg_CC~NZROJOZkQ$3m%jM*$6M==s`l`RGGiRbz5UMRa@`-LCppz@M2C|H z%qlI^uBDyBR6PdREfroS58mbOzCcK5Fr#!r_3OFhoiNEVV)a zDIgV43P`Cf%2XUuVi?%STb`PZ06+v$i91%lsL}*umLy5ub2Ff1n&#ccVA2s%0VKSf zm!>I!5kQ<|W*SC>5yd4`*kVo;`Z<@4Mx$aemu8`qQ%13oug8%fm=P-CBxDGf#N$R1 z1yg{P0z6-sn#P=m=wLiG=wc8A2*#=Jlm+3!1+d1Mt8xX9ub9Oet5z_^?rbG0P`l_A z9WMuEkfXkh%0>*evamVv2eS+F{a_+B7>a?NgA`*al!=YlkM%?v)U;WCV;o?W8CX;t z7fMK}8MG&Mqd+a_MO?^Hng;`d{`J56&OiM1zx~hu@D{PMhDM&f!?1UK5<3DZpk^MFXHXL`VqB+9bwQCkP^>AyN=Zg*C(oVE{D9piqQBgfIYvYmjoOg(`Ta6iOfJp=3q)piLbHAxc=bk6Wv<(ZdWdU1ig~m(E>2J=#O&7z2@jsuk%LD%*f!j|0G zdEk(sma9sxcaJ|_Tr8aIvI_Q&Pqv!NqTPPT=u~r-`rIE)KDvE>Wx3I6nkGTJ8@(rQ zT)cDdTbG_q{`d#~+aQ(sV&j3on>x&I@9dmxE?-=P+!(gseBApOaDDmm<=_A2_iN48 zk8gcQ85<49^}Y#-xsa=NuzTU+^WXg5ca>XiCqvVyAlW5xd&aJ~PWd3acV%ti_Pg)h zyfE87ZWnC3UcT`0y&paQ+@;>W>s8hIreFlt8mM0J4C)#d=}sP2Q57oT0cbN3L50-CBXeEyB{e0h*fj!ptNc8oNgne(!-u2(L)1+um@x4Hgk ztwCk=+JnarZhd^`{Kd8RZ@qVZdCeb8&YwHiJDzMDw_p6k3vQg~qk+QCNt7%-|GY8R z`rY68jW2%fr9b_>-+tqj=PI@3*2<-S|A+s2(hFT$YR)h1ZSNsCT3l=aHCbMqDL0Id zKK>D8?n!5i4I_@jUT55D<;VCDg9{{nz7)K`)n+l@C#(@@r$(P(sPNwKn;WB(rHz`w zReqsJ(`#fa99c!X6)l!nyW_*0DZ&$Q%%w~xDIrA@;XX~XxGxH|atz|ISIQU6soznG zmAuvc^RpzP62$D#u7^xg1HD-+bi6kkYIu?m#puQ2KSn4PM zFtM4T(+Nm1ppH$=kGm!^x-=PCNDno+|!=$6Eypy!xN%VK`y-d(i6?5*^N}ccd+$%9JFaU zKY#UR9U_hLnVy7`2y0e(<+V$-0*guV37YRl>4}Q#7iKRPDpi+8Hcu(4aCeX;hK3|X zNK1l&hEf5=7)b<&5Mq-?JXNWpmiT1=zZu$+z`5k!cRDJUu?!gPeC=dp6Y`;$0zgwCKvEHKVa zTMM%w?+gGTR62RH6A!iTE|kxQ>4fI7lFba9C$&hTFy%wbDnoEZW+y<66WO&Y^Ta>G zq+yUaC;0wEkGRf86B+$Z9C>!L;k#qJP@~!zo%C9e0Bh0VeN(2AO^s?EPi+HHh69kM zIpZcwu}~9GC=j%g9{NsFD>{2?H!i;x-``jt|MA_nZSF4T%bv)0Q)_8;#w+`$<1Npy zOK#33<(=(A&%@da){hBoS1MKW&5!>mB>vYf{p#_?KsqPqFJCu{YGeIT1#{hvXXb17 zcLNiE(XfpHz~M9?2GxiM1B4Jj3<5+c&{`7+kwS!EOko_yh+s-7PnA#-00a@tObf&* zkcq<>p)_yg<0MOw<6$Bc=2JlpHUOEHF-MAGW=Sb1kWdPcXr%!H5MqGOeY0;C3e z4X@Q5O<>(kMqVYL{-D=f<1=2V#9K2)Z8q#xc4)mt`9w|rN(T(CK4%?5nyBpcaR*s*R z+L58h=Cqy~O8b_+;JNwD%^x54-d$aWU^=TD6-1{`pZ~eNjka5%>l=YfS4#Xi79Y0Y zXWsnqZD{C=t+~O@-WJvM>b2={?aE8_?|k@S9GT6f^7`?8ueQ+Z1oOqk!|~l}cm4W8 z;UoW~i`-RB3`-9?-RAj*?&YgAj{Tlt&+VTEd2=a>4TE$^CI7Jd@$%BEijy>;(aGM- z+6q6>&Bf)tgIz+!&X4bZ@rBpleEjkAm#;qB|NeYKJ%8!qAH8||;)Uhq`IP{gMSW1X zT3WjN%+m6u_uhV+Dc8YR55Mur&wlmK z|KfL^x^%$=$ZgDWR=W4my}$m8U$?aS!{7himp}O&F2!-YeR2ND;~#DRo#+0=fB4Sd z`{ZYyyL7I~rTEd?-z_Gi)dg2)t$u%iH8!k<0K{#Y2S?rQyGQj#$)9$MMQi(@ZDO

7#0;$Dx`=YfdYPvT=E78NlAjVR!AqvxcR^D3ZwU zA0D`kg=7ofT)gq#?K=aVM1u|zwKxft;t**mG5~;93Ib?AFV&HzG6v8PIWNOy4XFdX z2vTLbUTHi&f`~%sF{~lzk`zfru|`-ys3FXR%A`;j0_AEE85W>4o=lI0%<^uz=T8kt zY*hEG(kKZvOdZow5M%8TQ*+@u9!ay9qE;06mcbeHfOav^ZVsm+6e5Aj6+)O!a;zTJ zZO|P~uw{c-RvV;mvdzJyR&YZV4&8O!SUznJ)6=8b{3R@5;w^iPg=)E~TpN{aS~i`c ztt|tz2+^Dvrd`fWDCmfZg|SCGG>Gkz7Yh`nspA@HIAP3<(v)iRo$+qq6F!`lXX1RW z(zNqqvM*^VoJIm-S|BnTy7jd`e{-uFs%M`l9{as}cTStYg(bMKHVYiX*RhJy-KkNsmGdyn)j08c@1|?X`onQh-^(ELIe`7m6k{g&Y<>nY7hznQO*^DspHhE-A4x|3!#b- z77!DO5NB8lgeU`X0PtiQ2(2~2qmZLao2i&`E5W=5lcHmL()D~hEtcH8<%nrin_q;~ z%t(-AQUtcmP?qgGc`uVAN0pJxW8Cu~rt*9i1E1|{=@AA;qYlK~2;b4j^CxK*Neh_0 z^$mBGsbMxcEe+zpodJnxc~z+-#3K_sIfFCPUbIk$Lj`N`3EbK~9-CG+cBcccP2&lO1qlvY4zf@6r3Qifq* z1T&rjrA3}b^(twfrp3PYeZ!RV~Rb;hTom~bf= zgh*;Am4T3k5NHJe#u!5lfCgnnh1No6nUVrX1p!AIAWb1krXe*AiXeoVbCD@YDdw^= z68_TDuPohoS&-O@vP{veWkek3Q>&0(B9hxiVvaF@QC7vcTx}mep4U0oAar4MU|aMa zzq*}nhub+Pr-Z5AIy3FFx4YE-uZ}z&Cg~ z9!=*P=;*;@>c>gYzj}eabL$;AP>DZf&=HwE>70z{q#Y!={AI-xq{EZ$14TWPExG`O zLsTpg=3T$r{}Hy`;nqYXiPvg;>-N2$edDv={@#x+Tx&=rZoj)eKRbs^(K*?jpKCD= zPd6Sv_sSPOdU$v3i3{Vy;hL4(xc@=x%B6{E`GeDPGWpz%@|)kkm78C6@^-GA>-C1k z@&(T=9*6e;F-Ki={Sw^mb;fDYE`}8D4EB<0tz@{(YOc}E=K;m+K_B!cqGkm-b)i}$ zR_35)qf$Q|bgo=&tS+Auv4c2nJ#p0mi?6--m4Ee{|8rq352dS;%-^4W>T5qe?e5lC zJYTAD2&FT*t#*|MuVf z@1OnrPa#82M&bF=tlw@gFU+ow|K(HHUc9pU^eBttz~{j?7gnyVpZrinJ~a4DHD7SO zykl;^f0zcP;!NHx)Y^O5#@@DFW*TOLLC^~{hP3rWab@`;A4jHnq%+gSGo$g;qiJ`T z5*3Br$&H&&_aAS^1ME6gWr6xyt(YtFQ7P#i13oGgnmPB1<>)C21t|&LpPsOkxJa2L zcZ{&`%mSsd-@77GcGl`GVyq7zK}u^gN7f=!`HPYQV#WOfh)wn?BF%LL|3SBuQTn5T(t zV2qUDO2AzM&&DAqR$$n%NLiuKlu{WQM?LL=6@|Dg4?0N>sYc7`@@d(D)}$}e&dloh zFXS)fqk?D8<_g5I%Py^3iIpTQ!H$8k#WP?i>{6OVfgiR>&Zrm=1OZJ0uUc?woeh@i%PO_=*K`p0MFw2Tp@zb zpvr_0h!7W2N=XnU5F-H9PgE)xVMqv3LQ1U|p;AhN5SV+I-}*%HRPbh9A7wB+MO(I-TC-4FaO+l;0>slX>YHtJy}_v+kvAr zX@fEj%y@ojsoZGYyZhnKeC;a_*SBi3i|^jP_x#h(o^(6S*3$4~J&sPgopGyr(;we$ zHFH6yYwIO~*88D)INk#@mE1G|oraxutG3;brz1XMjtDx_-tmN<2V-ORDttMI!s(Gu z9PJTn8W-=rckAze?XUdVcmL4IEd^tF?dr_o#%8H9m|tD{?sxBHn$s|M<4CHW`_d(-~)#?}0+-JgUjjj{owac@M^5M+i$<3W45 zKRHqM;|3|qQPrw2&519D1hnRy;`{IaIK-l(;$Q!}|F{3wKlz_t|LHGoe!MYkZ`7Wi zJ3jJXeCD$p7*kDAMDyV2Ghct>kACp`j}O=X+t2>P!Rh+#kG^&8*~^QSOAj|Ty<)>H zJhA!VM{lgnK78~jj?GV9TWp{BhBx=6pZ@&L-lIG&pz`U}7O!|6NEUill}eCHc2(^S~V<%@>HOf$E&zulNy zn47(L(mCASI_aMbn$<=q$9^0Ms0+2y9A?j5o;ljQ)v7G|{?w`gW$DQ8*K3R29(&js zTv>j;f127rwu-jO3EuW=Xu0S$B}BncF)~xBokM^W%mEJ@^Rx3u4=lX%UL*&+P{S529N#cQ}{Ja1C_QUPbYQ+%Y^rfdS zrH)l}jpZ8b4W{=W9{Ce&19kJM%TGM_#Gk$UJ}ed{LPz7?(U`VKT5J?!=rjzf9jk11 z25s%@oQ9T_2DTG`9LqxD1^rlhu2WJOrcm-gB7!~7NfRC<4kXeN6OzxnDv^j3vmhxpTX$7j)Pz#V0RS=+09Z%@pny&%K#r3nOO;Kf z!+_RAWLyA5j0HL)c})|8U}7EZ?<`$C-vzz{#v;a}y(59AjCkFX#dPF_0AWv#+q<)~ ztvE@jvp)?|TF9OF2RXB%)T#tS&s)&x*e=TmEh(Wpq(^#M$BD0|mTSbQmcP*4>29Qs zEa7GxWYb*1Ei4(*j{R-BMPRyw-7E!TUl%QD7$~y+v3F?cr8r8bg=WEz;@Ran4~S3a zF8!aq+x>yv-anbGfL6<#=2ECtZ!f*F^88`AC)25AYTHs27IFfbS2!)Dcpa{}LBcv79kV1qJAcjc@$wV4wF#rHjLJdQ3F({n-si%GcANv{&Vy}=8 zl#aH}6+BFUoSv8X*s3}y9E;@C)Qf6*)YP?OfltFW%}q+gMH%JwnfNAdB^!fb8$isJ zgb+%lAchdYARW8*oJQcMzWTz_x!Uf@&Uo7W;K#vF|I(Mf_1!m3!&zRsu)V!)SYE=j zxmL|_g*7c5ccxwylxHeMUFZF2nv6)D7|Ux{Ui|qx>tkb9f8zD0_D2sZY>`zOv|K}O zF_kff6~yI2dFj&p+OB_c>B2b~#0$;2AKd>lhTz~KC zQZ-HXtMg08hkifij#oQ(uI1z=>G;k}<>}_RXZDAWkEai{T?(V@(>)c7sBlo*oDF zS(D`o+xs6s)q2$glZk&qK<>g!brPIH%`R2HFcRA+$IQxXpy4=53}@}aOfyU4r@gj1 zSC<00Ua8j)(gcjg5zr8mglatB9}$n$)69qo5-DX>}pk8Bui4G z;=S=J^PifXE~8PdKa>ndByU%;^dw^(r^J*l)KpWPY0aTDyqp@yMyb^GM+%rWshu2e zM$@q16=Nd+xTKKYJLqMHotfe+gi)z5W94Vp_qGQU>E)Z3o+@fPJ~(0Derw~4U;6Ba zAO2SN&g0iF-b}+PV!;f6Ub)ogWDNSN%ifjws#F3J596E^$#^h392>7*vntG>(7xi| zuSJuejr7bNCZg| 
z4?87qE)+)K%Qy&P3GEa*jw6VomWib%5MfLp(o$=QfYu0UfGrZoApp=c45?L?39Xf3 zFdU@}V+fJdT48~L1$lVBPS;|wV1DN-zy0t$do4#89_B^A`*Y%ZbBlsLLz zyE@InEO9K$v2u2!vSilnluZJ`J41c6w_BYt{bYQ4)NRAZX^0KN>b3mrEXM`xPa_DS z?ax^T>hiYbXgLmmhLMO&3b{78%sJ*fCMY)p(xlUn4MbTSBB6zhWR&&Nwo%Bt{=@xZ zG4`XT6=#tNH>V!n_k*(19%$(3HxJd@}Sn{Vpw zY0Fq_)y|)6zuTTFK^md3QCJN#zuqtdbDPAhR;!(~cT#~EgN8xVVbbr6PdYuUT6+g~ ztMl?t|BoNcHf{hijFN$EnnGkkQ`>a{Efg03z(@!i6RkA_8ft|Bf?5$sD8!IJ3?YUX zV2l6~N)P}UV+=v50RliH1c2n6YauZuh%kVG;KC$S329I!ga7~tsD#kT<*^2tH!=1a&z4DTtOfIZl*x7iv_{7CtCotsIx$^la90M8hOh-a_ zrh&rE%|pMj=6PmkzW&L3`)}&vQJ!6nAD)DG)D>eM6Ohp;I9(rXzVzzf>zsV#7Ow3E zZ{_&-LhUpD%JgrAd zZ2irTTKNl{yZ4VPMfdt@W7|Kp$5YrIUisupLy9lu7k~Ekul=V#|AQx>X0Y?SP`=F2!~ zRmz4?VJF_dTAw(Cd!;MP&X8wIhaDfB@Wh|-5Rb=s`$Bi?W5e@~qdus#T80yjN3Eip zn~6@3eXU!j%ML~(h@Aj%K##wC`9w7Y+x6UH&Jc=HBW8h1sE8szo)Uf$V+bI3a7!tsv zR02*-mt!#W#|mf@Gl7T}n3^WVRPrcQvF$C#=`d&FfQKp}o|hj?PjiOOa7J~RATCuD zMnprDsEN`A3Bf4bwOzn77Xr_*tRNTwlol$borAX*8?8WtO{n{^xHwwK- zYSf>BEdincZD+hO$qFivU=o&5VzUHe3K+>0jyQzHI7