Fix issue avoid-missing-comma found at https://codereview.doctor (#1…
code-review-doctor authored Apr 14, 2022
1 parent de8b06f commit 1b7de41
Showing 1 changed file with 2 additions and 2 deletions.
4 changes: 2 additions & 2 deletions tests/bert_japanese/test_tokenization_bert_japanese.py
@@ -173,7 +173,7 @@ def test_mecab_tokenizer_no_normalize(self):
         )
 
     def test_wordpiece_tokenizer(self):
-        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "こんにけは", "こん", "にけは" "ばんは", "##こん", "##にけは", "##ばんは"]
+        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "こんにけは", "こん", "にけは", "ばんは", "##こん", "##にけは", "##ばんは"]
 
         vocab = {}
         for (i, token) in enumerate(vocab_tokens):
@@ -246,7 +246,7 @@ def test_full_tokenizer(self):
         )
 
     def test_character_tokenizer(self):
-        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "こ", "γ‚“", "に", "け", "は", "ば", "δΈ–", "η•Œ" "、", "。"]
+        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "こ", "γ‚“", "に", "け", "は", "ば", "δΈ–", "η•Œ", "、", "。"]
 
         vocab = {}
         for (i, token) in enumerate(vocab_tokens):
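Both hunks fix the same pitfall: Python concatenates adjacent string literals at parse time, so a missing comma silently merges two intended vocab tokens into one bogus entry instead of raising an error. A minimal sketch of the effect, assuming the same enumerate-based vocab construction shown in the diff (variable names here are illustrative, not from the repository):

    # Adjacent string literals concatenate at parse time, so the missing
    # comma yields one merged element rather than a SyntaxError.
    buggy = ["にけは" "ばんは"]    # one element: 'にけはばんは'
    fixed = ["にけは", "ばんは"]   # two elements, as intended
    assert buggy == ["にけはばんは"]
    assert fixed == ["にけは", "ばんは"]

    # Downstream effect on the test vocab: the merged literal gets a
    # single id, and the intended token 'ばんは' is never added at all.
    vocab = {}
    for (i, token) in enumerate(buggy):
        vocab[token] = i
    assert "ばんは" not in vocab

Checks such as avoid-missing-comma flag this pattern because the merged literal is valid Python, so the tests would otherwise run against a silently wrong vocabulary.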
