From e03a7685a3c610ef3791958da1c4d3d7666e9bd7 Mon Sep 17 00:00:00 2001
From: "arthur.zucker@gmail.com"
Date: Thu, 29 Jun 2023 01:57:39 +0000
Subject: [PATCH 01/12] don't add space before single letter chars that don't have a merge

---
 src/transformers/models/t5/tokenization_t5.py | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/src/transformers/models/t5/tokenization_t5.py b/src/transformers/models/t5/tokenization_t5.py
index 400c956a3d5771..a2dd260aa7a726 100644
--- a/src/transformers/models/t5/tokenization_t5.py
+++ b/src/transformers/models/t5/tokenization_t5.py
@@ -51,6 +51,7 @@
     "t5-11b": 512,
 }
 
+SPIECE_UNDERLINE = "▁"
 
 class T5Tokenizer(PreTrainedTokenizer):
     """
@@ -296,7 +297,10 @@ def __setstate__(self, d):
 
     def _tokenize(self, text: str) -> List[str]:
         """Take as input a string and return a list of strings (tokens) for words/sub-words"""
-        return self.sp_model.encode(text, out_type=str)
+        tokens = self.sp_model.encode(text, out_type=str)
+        if not text.startswith(' ') and tokens[0] == SPIECE_UNDERLINE:
+            tokens = tokens[1:]
+        return tokens
 
     def _convert_token_to_id(self, token):
         """Converts a token (str) in an id using the vocab."""

From 76d6ab3995ceb535e8b6ca88d3715f5028e22a2d Mon Sep 17 00:00:00 2001
From: "arthur.zucker@gmail.com"
Date: Thu, 29 Jun 2023 02:34:00 +0000
Subject: [PATCH 02/12] fix the fix

---
 src/transformers/models/t5/tokenization_t5.py | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/src/transformers/models/t5/tokenization_t5.py b/src/transformers/models/t5/tokenization_t5.py
index a2dd260aa7a726..8065cd536da3a0 100644
--- a/src/transformers/models/t5/tokenization_t5.py
+++ b/src/transformers/models/t5/tokenization_t5.py
@@ -24,6 +24,7 @@
 import sentencepiece as spm
 
 from ...tokenization_utils import PreTrainedTokenizer
+from ...tokenization_utils_base import TextInput
 from ...utils import logging
 
 
@@ -295,6 +296,11 @@ def __setstate__(self, d):
         self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
         self.sp_model.Load(self.vocab_file)
 
+    def tokenize(self, text: TextInput, **kwargs) -> List[str]:
+        if not text.startswith(' '):
+            text = ' ' + text
+        return super().tokenize(text, *kwargs)
+
     def _tokenize(self, text: str) -> List[str]:
         """Take as input a string and return a list of strings (tokens) for words/sub-words"""
         tokens = self.sp_model.encode(text, out_type=str)

From 5a7184bb8253ca4e8c7eea3a78b372a27384a637 Mon Sep 17 00:00:00 2001
From: "arthur.zucker@gmail.com"
Date: Thu, 29 Jun 2023 02:35:35 +0000
Subject: [PATCH 03/12] fixup

---
 src/transformers/models/t5/tokenization_t5.py | 15 ++++++++-------
 1 file changed, 8 insertions(+), 7 deletions(-)

diff --git a/src/transformers/models/t5/tokenization_t5.py b/src/transformers/models/t5/tokenization_t5.py
index 8065cd536da3a0..b052c9d346daac 100644
--- a/src/transformers/models/t5/tokenization_t5.py
+++ b/src/transformers/models/t5/tokenization_t5.py
@@ -52,7 +52,8 @@
     "t5-11b": 512,
 }
 
-SPIECE_UNDERLINE = "▁"
+SPIECE_UNDERLINE = "▁"
+
 
 class T5Tokenizer(PreTrainedTokenizer):
     """
@@ -297,14 +298,14 @@ def __setstate__(self, d):
         self.sp_model.Load(self.vocab_file)
 
     def tokenize(self, text: TextInput, **kwargs) -> List[str]:
-        if not text.startswith(' '):
-            text = ' ' + text
-        return super().tokenize(text, *kwargs)
-
+        if not text.startswith(" "):
+            text = " " + text
+        return super().tokenize(text, **kwargs)
+
     def _tokenize(self, text: str) -> List[str]:
         """Take as input a string and return a list of strings (tokens) for words/sub-words"""
-        tokens = self.sp_model.encode(text, out_type=str)
-        if not text.startswith(' ') and tokens[0] == SPIECE_UNDERLINE:
+        tokens = self.sp_model.encode(text, out_type=str)
+        if not text.startswith(" ") and tokens[0] == SPIECE_UNDERLINE:
             tokens = tokens[1:]
         return tokens
 

From baac7be98a43271008ee04fd9203fe9ce9b3ce87 Mon Sep 17 00:00:00 2001
From: "arthur.zucker@gmail.com"
Date: Thu, 29 Jun 2023 02:55:40 +0000
Subject: [PATCH 04/12] add a test

---
 tests/models/t5/test_tokenization_t5.py | 27 +++++++++++++++++++++++++
 1 file changed, 27 insertions(+)

diff --git a/tests/models/t5/test_tokenization_t5.py b/tests/models/t5/test_tokenization_t5.py
index 16ff9f04de4325..def99595f02955 100644
--- a/tests/models/t5/test_tokenization_t5.py
+++ b/tests/models/t5/test_tokenization_t5.py
@@ -399,3 +399,30 @@ def test_get_sentinel_tokens_for_fasttokenizer(self):
     def test_get_sentinel_token_ids_for_fasttokenizer(self):
         tokenizer = T5TokenizerFast(SAMPLE_VOCAB, extra_ids=10)
         self.assertListEqual(sorted(tokenizer.get_sentinel_token_ids()), sorted(range(1000, 1010)))
+
+    def test_encode_extra_ids(self):
+        tokenizer = T5Tokenizer(SAMPLE_VOCAB, extra_ids=0)
+        tokenizer.add_special_tokens({"additional_special_tokens": ["<extra_id_0>"]})
+        tokenizer._create_trie(tokenizer.all_special_tokens)
+        # TODO ArthurZ the above is necessary as addedTokens / intialization sucks. Trie is not correctly created
+        # So the extra ids are split....
+
+        input_ids = tokenizer.encode(". Hello")
+        self.assertEquals(input_ids, [7, 4, 156, 86, 20, 2])
+        tokens = tokenizer.tokenize(". Hello")
+        self.assertEquals(tokens, ["▁", ".", "▁He", "ll", "o"])
+
+        input_ids = tokenizer.encode(" . Hello")
+        self.assertEquals(input_ids, [7, 4, 156, 86, 20, 2])
+        tokens = tokenizer.tokenize(" . Hello")
+        self.assertEquals(tokens, ["▁", ".", "▁He", "ll", "o"])
+
+        input_ids = tokenizer.encode("Hello, <extra_id_0> I")
+        self.assertEquals(input_ids, [156, 86, 20, 3, 999, 8, 2])
+        tokens = tokenizer.tokenize("Hello, <extra_id_0> I")
+        self.assertEquals(tokens, ["▁He", "ll", "o", ",", "<extra_id_0>", "▁I"])
+
+        input_ids = tokenizer.encode("Hello, <extra_id_0>,")
+        self.assertEquals(input_ids, [156, 86, 20, 3, 999, 3, 2])
+        tokens = tokenizer.encode("Hello, <extra_id_0>,")
+        self.assertEquals(tokens, ["▁He", "ll", "o", ",", "<extra_id_0>", ","])

From 6e37601e1b8dd87d51bf21e027fd0c90281f5bfd Mon Sep 17 00:00:00 2001
From: "arthur.zucker@gmail.com"
Date: Thu, 29 Jun 2023 03:02:37 +0000
Subject: [PATCH 05/12] more testing

---
 tests/models/t5/test_tokenization_t5.py | 7 ++++++-
 1 file changed, 6 insertions(+), 1 deletion(-)

diff --git a/tests/models/t5/test_tokenization_t5.py b/tests/models/t5/test_tokenization_t5.py
index def99595f02955..38ac1d405c1945 100644
--- a/tests/models/t5/test_tokenization_t5.py
+++ b/tests/models/t5/test_tokenization_t5.py
@@ -424,5 +424,10 @@ def test_encode_extra_ids(self):
 
         input_ids = tokenizer.encode("Hello, <extra_id_0>,")
         self.assertEquals(input_ids, [156, 86, 20, 3, 999, 3, 2])
-        tokens = tokenizer.encode("Hello, <extra_id_0>,")
+        tokens = tokenizer.tokenize("Hello, <extra_id_0>,")
         self.assertEquals(tokens, ["▁He", "ll", "o", ",", "<extra_id_0>", ","])
+
+        input_ids = tokenizer.encode(" <extra_id_0> ,")
+        self.assertEquals(input_ids, [999, 3, 2])
+        tokens = tokenizer.tokenize(" <extra_id_0> ,")
+        self.assertEquals(tokens, ['<extra_id_0>', ','])  # spaces are eaten by rstrip / lstrip
\ No newline at end of file

From b9333287bc264419f079b362dd425ca10926b5cd Mon Sep 17 00:00:00 2001
From: "arthur.zucker@gmail.com"
Date: Thu, 29 Jun 2023 03:03:08 +0000
Subject: [PATCH 06/12] fixup

---
 tests/models/t5/test_tokenization_t5.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tests/models/t5/test_tokenization_t5.py b/tests/models/t5/test_tokenization_t5.py
index 38ac1d405c1945..afe0d32948a9ac 100644
--- a/tests/models/t5/test_tokenization_t5.py
+++ b/tests/models/t5/test_tokenization_t5.py
@@ -430,4 +430,4 @@ def test_encode_extra_ids(self):
         input_ids = tokenizer.encode(" <extra_id_0> ,")
         self.assertEquals(input_ids, [999, 3, 2])
         tokens = tokenizer.tokenize(" <extra_id_0> ,")
-        self.assertEquals(tokens, ['<extra_id_0>', ','])  # spaces are eaten by rstrip / lstrip
\ No newline at end of file
+        self.assertEquals(tokens, ["<extra_id_0>", ","])  # spaces are eaten by rstrip / lstrip

From d0cbc495dff7072b622dba45afba95212705ab48 Mon Sep 17 00:00:00 2001
From: "arthur.zucker@gmail.com"
Date: Thu, 29 Jun 2023 03:26:40 +0000
Subject: [PATCH 07/12] hack to make sure fast is also fixed

---
 src/transformers/convert_slow_tokenizer.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/transformers/convert_slow_tokenizer.py b/src/transformers/convert_slow_tokenizer.py
index 082ccb47bdfafc..6c8494ef6bcaac 100644
--- a/src/transformers/convert_slow_tokenizer.py
+++ b/src/transformers/convert_slow_tokenizer.py
@@ -897,7 +897,7 @@ class T5Converter(SpmConverter):
     def vocab(self, proto):
         num_extra_ids = self.original_tokenizer._extra_ids
         vocab = [(piece.piece, piece.score) for piece in proto.pieces]
-        vocab += [(f"<extra_id_{i}>", 0.0) for i in range(num_extra_ids - 1, -1, -1)]
+        vocab += [(f"<extra_id_{i}>_", 0.0) for i in range(num_extra_ids - 1, -1, -1)]
         return vocab
 
     def post_processor(self):

From 50008ed2c47e04d5ae56b9fafbe7fad3dfb0a85b Mon Sep 17 00:00:00 2001
From: "arthur.zucker@gmail.com"
Date: Thu, 29 Jun 2023 04:36:44 +0000
Subject: [PATCH 08/12] update switch transformers test

---
 .../test_modeling_switch_transformers.py | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/tests/models/switch_transformers/test_modeling_switch_transformers.py b/tests/models/switch_transformers/test_modeling_switch_transformers.py
index f8730d899329ff..e633533e1a313c 100644
--- a/tests/models/switch_transformers/test_modeling_switch_transformers.py
+++ b/tests/models/switch_transformers/test_modeling_switch_transformers.py
@@ -1149,7 +1149,7 @@ def test_small_generate(self):
         model = SwitchTransformersForConditionalGeneration.from_pretrained(
             "google/switch-base-8", torch_dtype=torch.bfloat16
         ).eval()
-        tokenizer = AutoTokenizer.from_pretrained("t5-small")
+        tokenizer = AutoTokenizer.from_pretrained("t5-small", use_fast=False)
         model = model.to(torch_device)
 
         input_ids = tokenizer(
@@ -1160,13 +1160,13 @@ def test_small_generate(self):
         self.assertEqual(output_str, "drink.")
 
         input_ids = tokenizer(
-            "A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.",
+            "A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.",
             return_tensors="pt",
         ).input_ids.to(torch_device)
         sequences = model.generate(input_ids)
         output_str = tokenizer.batch_decode(sequences, skip_special_tokens=False)[0]
 
-        EXPECTED_OUTPUT = " man beer a salt."
+        EXPECTED_OUTPUT = " man beer a whiskey."
         self.assertEqual(output_str, EXPECTED_OUTPUT)
 
     def test_small_batch_generate(self):
@@ -1174,10 +1174,10 @@ def test_small_batch_generate(self):
         model = SwitchTransformersForConditionalGeneration.from_pretrained(
             "google/switch-base-8", torch_dtype=torch.bfloat16
         ).eval()
-        tokenizer = AutoTokenizer.from_pretrained("t5-small")
+        tokenizer = AutoTokenizer.from_pretrained("t5-small", use_fast=False)
 
         inputs = [
-            "A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>."
+            "A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>."
         ] * BATCH_SIZE
         encoded_input = tokenizer.batch_encode_plus(inputs, return_tensors="pt")

From 5edf8633ad1de07971dba214eac34fbb837c8033 Mon Sep 17 00:00:00 2001
From: "arthur.zucker@gmail.com"
Date: Thu, 29 Jun 2023 06:07:29 +0000
Subject: [PATCH 09/12] revert convert slow

---
 src/transformers/convert_slow_tokenizer.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/transformers/convert_slow_tokenizer.py b/src/transformers/convert_slow_tokenizer.py
index 6c8494ef6bcaac..082ccb47bdfafc 100644
--- a/src/transformers/convert_slow_tokenizer.py
+++ b/src/transformers/convert_slow_tokenizer.py
@@ -897,7 +897,7 @@ class T5Converter(SpmConverter):
     def vocab(self, proto):
         num_extra_ids = self.original_tokenizer._extra_ids
         vocab = [(piece.piece, piece.score) for piece in proto.pieces]
-        vocab += [(f"<extra_id_{i}>_", 0.0) for i in range(num_extra_ids - 1, -1, -1)]
+        vocab += [(f"<extra_id_{i}>", 0.0) for i in range(num_extra_ids - 1, -1, -1)]
         return vocab
 
     def post_processor(self):

From 17bda2cdf73f53e360fc97b43589aa2cf7140bbc Mon Sep 17 00:00:00 2001
From: Arthur <48595927+ArthurZucker@users.noreply.github.com>
Date: Fri, 30 Jun 2023 03:54:51 +0200
Subject: [PATCH 10/12] Update src/transformers/models/t5/tokenization_t5.py

Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com>
---
 src/transformers/models/t5/tokenization_t5.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/transformers/models/t5/tokenization_t5.py b/src/transformers/models/t5/tokenization_t5.py
index b052c9d346daac..45cef1c8e8eb8e 100644
--- a/src/transformers/models/t5/tokenization_t5.py
+++ b/src/transformers/models/t5/tokenization_t5.py
@@ -297,7 +297,7 @@ def __setstate__(self, d):
         self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
         self.sp_model.Load(self.vocab_file)
 
-    def tokenize(self, text: TextInput, **kwargs) -> List[str]:
+    def tokenize(self, text: "TextInput", **kwargs) -> List[str]:
         if not text.startswith(" "):
             text = " " + text
         return super().tokenize(text, **kwargs)

From 059999e1492a7d47b0a7c17a454af7f6ab39e8da Mon Sep 17 00:00:00 2001
From: Arthur
Date: Fri, 30 Jun 2023 10:56:05 +0900
Subject: [PATCH 11/12] add typechecking

---
 src/transformers/models/t5/tokenization_t5.py | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/src/transformers/models/t5/tokenization_t5.py b/src/transformers/models/t5/tokenization_t5.py
index 45cef1c8e8eb8e..be6cbf245fd7b3 100644
--- a/src/transformers/models/t5/tokenization_t5.py
+++ b/src/transformers/models/t5/tokenization_t5.py
@@ -19,12 +19,13 @@
 import re
 import warnings
 from shutil import copyfile
-from typing import Any, Dict, List, Optional, Tuple
+from typing import Any, Dict, List, Optional, Tuple, TYPE_CHECKING
 
 import sentencepiece as spm
 
 from ...tokenization_utils import PreTrainedTokenizer
-from ...tokenization_utils_base import TextInput
+if TYPE_CHECKING:
+    from ...tokenization_utils_base import TextInput
 from ...utils import logging

From 8d3f2a2f34a5c5dcbab56d15c2be817c6475c0f5 Mon Sep 17 00:00:00 2001
From: "arthur.zucker@gmail.com"
Date: Fri, 30 Jun 2023 04:35:40 +0000
Subject: [PATCH 12/12] quality

---
 src/transformers/models/t5/tokenization_t5.py | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/src/transformers/models/t5/tokenization_t5.py b/src/transformers/models/t5/tokenization_t5.py
index be6cbf245fd7b3..70821333bdefde 100644
--- a/src/transformers/models/t5/tokenization_t5.py
+++ b/src/transformers/models/t5/tokenization_t5.py
@@ -19,11 +19,13 @@
 import re
 import warnings
 from shutil import copyfile
-from typing import Any, Dict, List, Optional, Tuple, TYPE_CHECKING
+from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple
 
 import sentencepiece as spm
 
 from ...tokenization_utils import PreTrainedTokenizer
+
+
 if TYPE_CHECKING:
     from ...tokenization_utils_base import TextInput
 from ...utils import logging
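Note: for reference, the combined effect of PATCH 01 and PATCH 02 can be sketched outside the tokenizer class as follows. This is an illustrative sketch only, not code shipped by the series: `sp_model` stands for any loaded SentencePiece processor, and in the real class the splitting on added special tokens such as <extra_id_0> is done by the base PreTrainedTokenizer.tokenize(), which then feeds each plain-text chunk to _tokenize().

    # Illustrative sketch only -- mirrors the logic added in PATCH 01/02.
    from typing import List

    import sentencepiece as spm

    SPIECE_UNDERLINE = "▁"

    def prepend_space(text: str) -> str:
        # Done once for the full input (the new T5Tokenizer.tokenize override):
        # the first word is then segmented exactly like a word in mid-sentence.
        return text if text.startswith(" ") else " " + text

    def encode_chunk(sp_model: spm.SentencePieceProcessor, chunk: str) -> List[str]:
        # Done per chunk between special tokens (the new T5Tokenizer._tokenize).
        tokens = sp_model.encode(chunk, out_type=str)
        # SentencePiece treats every chunk as starting a new word and prefixes "▁";
        # when the chunk did not start with a space (i.e. it follows a special token)
        # and the "▁" stands alone with no merge into the next character, drop it so
        # no phantom space is encoded.
        if not chunk.startswith(" ") and tokens and tokens[0] == SPIECE_UNDERLINE:
            tokens = tokens[1:]
        return tokens

The token ids and token strings asserted in PATCH 04/05 come from the test fixture vocabulary (SAMPLE_VOCAB), so the concrete values will differ for released checkpoints such as t5-small.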