Upload tokenizer
- README.md +3 -3
- tokenizer_config.json +1 -1
- vocab.txt +0 -0
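
A commit like this is typically produced by pushing the tokenizer files from a training script rather than uploading them by hand. A minimal sketch, assuming the tokenizer was loaded from the base checkpoint and that `<user>/bert-f1-durga-muhammad` is a placeholder repository id:

```python
from transformers import AutoTokenizer

# Load the tokenizer used for fine-tuning; the base model is
# google-bert/bert-base-multilingual-cased, so its vocab.txt and
# tokenizer_config.json are the files that end up in this commit.
tokenizer = AutoTokenizer.from_pretrained("google-bert/bert-base-multilingual-cased")

# Push tokenizer_config.json, vocab.txt, special_tokens_map.json, etc.
# The repo id below is a placeholder, not the actual repository.
tokenizer.push_to_hub("<user>/bert-f1-durga-muhammad", commit_message="Upload tokenizer")
```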
README.md
CHANGED
@@ -1,14 +1,14 @@
 ---
+base_model: google-bert/bert-base-multilingual-cased
 library_name: transformers
 license: apache-2.0
-base_model: google-bert/bert-base-multilingual-cased
-tags:
-- generated_from_trainer
 metrics:
 - accuracy
 - precision
 - recall
 - f1
+tags:
+- generated_from_trainer
 model-index:
 - name: bert-f1-durga-muhammad
   results: []
tokenizer_config.json
CHANGED
@@ -44,7 +44,7 @@
   "clean_up_tokenization_spaces": true,
   "cls_token": "[CLS]",
   "do_basic_tokenize": true,
-  "do_lower_case": true,
+  "do_lower_case": false,
   "mask_token": "[MASK]",
   "model_max_length": 512,
   "never_split": null,
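
Setting `"do_lower_case": false` matches the cased base model: input casing is kept and mapped against the cased multilingual vocabulary instead of being lowercased first. A minimal check of that behaviour, assuming the base cased tokenizer is loaded rather than this exact repository:

```python
from transformers import AutoTokenizer

# With a cased vocab and do_lower_case disabled, casing is preserved,
# so these two calls will in general produce different subword sequences.
tok = AutoTokenizer.from_pretrained("google-bert/bert-base-multilingual-cased")

print(tok.tokenize("Durga Puja"))
print(tok.tokenize("durga puja"))
```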
vocab.txt
CHANGED
The diff for this file is too large to render.
See raw diff