Upload tokenizer_config.json
tokenizer_config.json  +9 -2
@@ -48,13 +48,20 @@
   "extra_special_tokens": {},
   "full_tokenizer_file": null,
   "mask_token": "[MASK]",
+  "max_length": 128,
   "model_max_length": 512,
   "never_split": null,
+  "pad_to_multiple_of": null,
   "pad_token": "[PAD]",
+  "pad_token_type_id": 0,
+  "padding_side": "right",
+  "pipeline_tag": "text-classification",
   "sep_token": "[SEP]",
+  "stride": 0,
   "strip_accents": null,
   "tokenize_chinese_chars": true,
   "tokenizer_class": "BertTokenizer",
-  "
-  "
+  "truncation_side": "right",
+  "truncation_strategy": "longest_first",
+  "unk_token": "[UNK]"
 }
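For reference, a minimal sketch of how these config defaults surface when the tokenizer is loaded with transformers. The repo id "org/model" is a placeholder, not the actual repository, and the input texts are illustrative.

# Load the tokenizer; tokenizer_config.json supplies the defaults below.
# "org/model" is a placeholder repo id, not the actual repository.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("org/model")

# "pad_token": "[PAD]" and "padding_side": "right" govern how the shorter
# sequence is padded; "truncation_strategy": "longest_first" and the newly
# added "max_length": 128 cap overly long inputs.
batch = tokenizer(
    ["a short sentence", "a somewhat longer second sentence to pad against"],
    padding=True,                # pad to the longest sequence in the batch
    truncation="longest_first",  # matches "truncation_strategy" above
    max_length=128,              # matches the added "max_length" default
)
for ids in batch["input_ids"]:
    print(len(ids), tokenizer.convert_ids_to_tokens(ids))

Note that "model_max_length": 512 remains the hard cap from the BERT architecture; the added "max_length": 128 only records a default requested length for calls that do not pass max_length explicitly.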