trying push_pretrained_to_hf_hub util (#5)
- trying push_pretrained_to_hf_hub util (ab946f5fafe9204aa90bc5d4b23a88bb29c790e8)
- open_clip_config.json +1 -1
- open_clip_pytorch_model.bin +2 -2
- tokenizer_config.json +1 -1
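The commit message refers to open_clip's push_pretrained_to_hf_hub utility, which loads a pretrained checkpoint and uploads the weights, config, and tokenizer files touched in this diff. A minimal sketch of how such a call might look; the model name, checkpoint path, and repo id below are hypothetical placeholders, and keyword arguments may vary across open_clip versions:

# Sketch (assumed invocation, not the exact command behind this commit).
from open_clip.push_to_hf_hub import push_pretrained_to_hf_hub

push_pretrained_to_hf_hub(
    model_name="my-open-clip-config",     # hypothetical open_clip model config name
    pretrained="/path/to/checkpoint.pt",  # hypothetical local checkpoint
    repo_id="user/model-repo",            # hypothetical destination Hub repository
    commit_message="trying push_pretrained_to_hf_hub util",
)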
open_clip_config.json  CHANGED
@@ -12,7 +12,7 @@
       "hf_model_name": "microsoft/BiomedNLP-PubMedBERT-base-uncased-abstract",
       "hf_tokenizer_name": "microsoft/BiomedNLP-PubMedBERT-base-uncased-abstract",
       "proj": "mlp",
-      "pooler_type": "
+      "pooler_type": "cls_pooler",
       "context_length": 256
     }
   },
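The substantive change above fills in the text tower's pooler: with "cls_pooler", open_clip pools the HF text encoder (PubMedBERT) output by taking the CLS token's hidden state. Once the push succeeds, the repository can be loaded straight from the Hub; a sketch, with "user/model-repo" as a placeholder repo id:

# Sketch: loading the pushed model back via open_clip's hf-hub: prefix.
import open_clip

model, _, preprocess = open_clip.create_model_and_transforms("hf-hub:user/model-repo")
tokenizer = open_clip.get_tokenizer("hf-hub:user/model-repo")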
open_clip_pytorch_model.bin  CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:8792dba76fc3a96544a87bb0f76c82167b4ba509d57c08b98b9c9266f764598b
+size 783734497
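Because the .bin weights are stored through Git LFS, the diff only touches the three-line pointer file: oid is the SHA-256 digest of the actual blob and size is its length in bytes (783,734,497 bytes, about 747 MiB). A downloaded checkpoint can be checked against the pointer, for example:

# Sketch: verify a downloaded open_clip_pytorch_model.bin against the LFS
# pointer above; the local file path is a placeholder.
import hashlib
import os

h = hashlib.sha256()
with open("open_clip_pytorch_model.bin", "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # read in 1 MiB chunks
        h.update(chunk)

assert os.path.getsize("open_clip_pytorch_model.bin") == 783734497
assert h.hexdigest() == "8792dba76fc3a96544a87bb0f76c82167b4ba509d57c08b98b9c9266f764598b"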
tokenizer_config.json  CHANGED
@@ -1,4 +1,5 @@
 {
+  "clean_up_tokenization_spaces": true,
   "cls_token": "[CLS]",
   "do_basic_tokenize": true,
   "do_lower_case": true,
@@ -7,7 +8,6 @@
   "never_split": null,
   "pad_token": "[PAD]",
   "sep_token": "[SEP]",
-  "special_tokens_map_file": null,
   "strip_accents": null,
   "tokenize_chinese_chars": true,
   "tokenizer_class": "BertTokenizer",
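The tokenizer config gains "clean_up_tokenization_spaces" (a flag newer transformers releases serialize; it controls whether decode() strips artifacts such as spaces before punctuation) and drops the legacy "special_tokens_map_file" entry, which current save formats omit. This file is what transformers reads when the tokenizer is loaded; a sketch with a placeholder repo id:

# Sketch: tokenizer_config.json is consumed by transformers' AutoTokenizer;
# "user/model-repo" is a hypothetical placeholder.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("user/model-repo")
ids = tokenizer("pleural effusion on chest x-ray")["input_ids"]
print(tokenizer.decode(ids, clean_up_tokenization_spaces=True))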