Upload folder using huggingface_hub
- .gitattributes +1 -0
- added_tokens.json +11 -0
- config.json +228 -0
- generation_config.json +4 -0
- lora_weights.pth +3 -0
- model-00001-of-00004.safetensors +3 -0
- model-00002-of-00004.safetensors +3 -0
- model-00003-of-00004.safetensors +3 -0
- model-00004-of-00004.safetensors +3 -0
- model.safetensors.index.json +0 -0
- qa_metrics.txt +22 -0
- qa_results.csv +0 -0
- runs/Apr25_07-43-39_amax/events.out.tfevents.1745567038.amax.2662062.0 +3 -0
- runs/Apr25_07-47-50_amax/events.out.tfevents.1745567287.amax.2667492.0 +3 -0
- runs/Apr25_07-49-47_amax/events.out.tfevents.1745567403.amax.2673802.0 +3 -0
- runs/Apr25_07-53-41_amax/events.out.tfevents.1745567639.amax.2680006.0 +3 -0
- runs/Apr25_07-57-17_amax/events.out.tfevents.1745567854.amax.2687397.0 +3 -0
- runs/Apr26_04-08-11_amax/events.out.tfevents.1745640508.amax.784562.0 +3 -0
- special_tokens_map.json +63 -0
- tokenization_internlm3.py +294 -0
- tokenizer.model +3 -0
- tokenizer_config.json +330 -0
- training_args.bin +3 -0
- training_log.txt +3 -0
.gitattributes
CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
*.zip filter=lfs diff=lfs merge=lfs -text
*.zst filter=lfs diff=lfs merge=lfs -text
*tfevents* filter=lfs diff=lfs merge=lfs -text
+training_log.txt filter=lfs diff=lfs merge=lfs -text
added_tokens.json
ADDED
@@ -0,0 +1,11 @@
{
  "</box>": 128141,
  "</img>": 128134,
  "</quad>": 128137,
  "</ref>": 128139,
  "<IMG_CONTEXT>": 128135,
  "<box>": 128140,
  "<img>": 128133,
  "<quad>": 128136,
  "<ref>": 128138
}
config.json
ADDED
@@ -0,0 +1,228 @@
{
  "_commit_hash": null,
  "_name_or_path": "/home/wangjiarui/InternVL/internvl_chat/InternVL3-9B",
  "architectures": ["InternVLChatModel"],
  "auto_map": {
    "AutoConfig": "configuration_internvl_chat.InternVLChatConfig",
    "AutoModel": "modeling_internvl_chat.InternVLChatModel",
    "AutoModelForCausalLM": "modeling_internvl_chat.InternVLChatModel"
  },
  "downsample_ratio": 0.5,
  "dynamic_image_size": true,
  "force_image_size": 448,
  "hidden_size": 4096,
  "image_fold": null,
  "llm_config": {
    "_attn_implementation_autoset": true,
    "_name_or_path": "/mnt/petrelfs/share_data/wangweiyun/share_ckpt/hf_home/internlm2-chat-7b",
    "add_cross_attention": false,
    "architectures": ["InternLM2ForCausalLM"],
    "attn_implementation": "flash_attention_2",
    "auto_map": {
      "AutoConfig": "configuration_internlm2.InternLM2Config",
      "AutoModel": "modeling_internlm2.InternLM2ForCausalLM",
      "AutoModelForCausalLM": "modeling_internlm2.InternLM2ForCausalLM"
    },
    "bad_words_ids": null,
    "begin_suppress_tokens": null,
    "bias": false,
    "bos_token_id": 1,
    "chunk_size_feed_forward": 0,
    "cross_attention_hidden_size": null,
    "decoder_start_token_id": null,
    "diversity_penalty": 0.0,
    "do_sample": false,
    "early_stopping": false,
    "encoder_no_repeat_ngram_size": 0,
    "eos_token_id": 2,
    "exponential_decay_length_penalty": null,
    "finetuning_task": null,
    "forced_bos_token_id": null,
    "forced_eos_token_id": null,
    "hidden_act": "silu",
    "hidden_size": 4096,
    "id2label": {"0": "LABEL_0", "1": "LABEL_1"},
    "initializer_range": 0.02,
    "intermediate_size": 10240,
    "is_decoder": false,
    "is_encoder_decoder": false,
    "label2id": {"LABEL_0": 0, "LABEL_1": 1},
    "length_penalty": 1.0,
    "max_length": 20,
    "max_position_embeddings": 32768,
    "min_length": 0,
    "model_type": "internlm2",
    "moe_config": null,
    "no_repeat_ngram_size": 0,
    "num_attention_heads": 32,
    "num_beam_groups": 1,
    "num_beams": 1,
    "num_hidden_layers": 48,
    "num_key_value_heads": 2,
    "num_return_sequences": 1,
    "output_attentions": false,
    "output_hidden_states": false,
    "output_scores": false,
    "pad_token_id": 2,
    "prefix": null,
    "pretraining_tp": 1,
    "problem_type": null,
    "pruned_heads": {},
    "remove_invalid_values": false,
    "repetition_penalty": 1.0,
    "return_dict": true,
    "return_dict_in_generate": false,
    "rms_norm_eps": 1e-05,
    "rope_scaling": {"factor": 2.0, "type": "dynamic"},
    "rope_theta": 50000000,
    "sep_token_id": null,
    "suppress_tokens": null,
    "task_specific_params": null,
    "temperature": 1.0,
    "tf_legacy_loss": false,
    "tie_encoder_decoder": false,
    "tie_word_embeddings": false,
    "tokenizer_class": null,
    "top_k": 50,
    "top_p": 1.0,
    "torch_dtype": "bfloat16",
    "torchscript": false,
    "transformers_version": "4.48.3",
    "typical_p": 1.0,
    "use_bfloat16": false,
    "use_cache": false,
    "vocab_size": 128142
  },
  "max_dynamic_patch": 6,
  "min_dynamic_patch": 1,
  "model_type": "internvl_chat",
  "pad2square": false,
  "ps_version": "v2",
  "select_layer": -1,
  "system_message": null,
  "template": "internlm2-chat",
  "tie_word_embeddings": false,
  "torch_dtype": "bfloat16",
  "transformers_version": null,
  "use_backbone_lora": 16,
  "use_img_start_end_token": true,
  "use_llm_lora": 16,
  "use_thumbnail": true,
  "vision_config": {
    "_attn_implementation_autoset": true,
    "_name_or_path": "pretrained/intern_vit_6b_448px_v1_2/",
    "add_cross_attention": false,
    "architectures": ["InternVisionModel"],
    "attention_dropout": 0.0,
    "auto_map": {
      "AutoConfig": "configuration_intern_vit.InternVisionConfig",
      "AutoModel": "modeling_intern_vit.InternVisionModel"
    },
    "bad_words_ids": null,
    "begin_suppress_tokens": null,
    "bos_token_id": null,
    "capacity_factor": 1.2,
    "chunk_size_feed_forward": 0,
    "cross_attention_hidden_size": null,
    "decoder_start_token_id": null,
    "diversity_penalty": 0.0,
    "do_sample": false,
    "drop_path_rate": 0.1,
    "dropout": 0.0,
    "early_stopping": false,
    "encoder_no_repeat_ngram_size": 0,
    "eos_token_id": null,
    "eval_capacity_factor": 1.4,
    "exponential_decay_length_penalty": null,
    "finetuning_task": null,
    "forced_bos_token_id": null,
    "forced_eos_token_id": null,
    "hidden_act": "gelu",
    "hidden_size": 1024,
    "id2label": {"0": "LABEL_0", "1": "LABEL_1"},
    "image_size": 448,
    "initializer_factor": 0.1,
    "initializer_range": 1e-10,
    "intermediate_size": 4096,
    "is_decoder": false,
    "is_encoder_decoder": false,
    "label2id": {"LABEL_0": 0, "LABEL_1": 1},
    "laux_allreduce": "all_nodes",
    "layer_norm_eps": 1e-06,
    "length_penalty": 1.0,
    "max_length": 20,
    "min_length": 0,
    "model_type": "intern_vit_6b",
    "moe_coeff_ratio": 0.5,
    "moe_intermediate_size": 768,
    "moe_output_scale": 4.0,
    "no_repeat_ngram_size": 0,
    "noisy_gate_policy": "RSample_before",
    "norm_type": "layer_norm",
    "num_attention_heads": 16,
    "num_beam_groups": 1,
    "num_beams": 1,
    "num_channels": 3,
    "num_experts": 8,
    "num_hidden_layers": 24,
    "num_return_sequences": 1,
    "num_routed_experts": 4,
    "num_shared_experts": 4,
    "output_attentions": false,
    "output_hidden_states": false,
    "output_scores": false,
    "pad_token_id": null,
    "patch_size": 14,
    "prefix": null,
    "problem_type": null,
    "pruned_heads": {},
    "qk_normalization": false,
    "qkv_bias": true,
    "remove_invalid_values": false,
    "repetition_penalty": 1.0,
    "return_dict": true,
    "return_dict_in_generate": false,
    "sep_token_id": null,
    "shared_expert_intermediate_size": 3072,
    "suppress_tokens": null,
    "task_specific_params": null,
    "temperature": 1.0,
    "tf_legacy_loss": false,
    "tie_encoder_decoder": false,
    "tie_word_embeddings": true,
    "tokenizer_class": null,
    "top_k": 50,
    "top_p": 1.0,
    "torch_dtype": "bfloat16",
    "torchscript": false,
    "transformers_version": "4.48.3",
    "typical_p": 1.0,
    "use_bfloat16": false,
    "use_flash_attn": true,
    "use_moe": false,
    "use_residual": true,
    "use_rts": false,
    "use_weighted_residual": false
  }
}
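Note: the `auto_map` above routes `AutoConfig`/`AutoModel` to custom `configuration_internvl_chat` / `modeling_internvl_chat` modules that are not part of this upload, so loading the checkpoint goes through `trust_remote_code`. A minimal loading sketch, not part of the commit, assuming the repository is checked out locally at a hypothetical `model_path` and the custom modeling files are resolvable:

import torch
from transformers import AutoModel, AutoTokenizer

model_path = "./InternVL3-9B-lora"  # hypothetical local checkout of this repository

# trust_remote_code is required because auto_map points at custom classes,
# not built-in transformers architectures.
tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True, use_fast=False)
model = AutoModel.from_pretrained(
    model_path,
    torch_dtype=torch.bfloat16,  # matches "torch_dtype": "bfloat16" in config.json
    trust_remote_code=True,
).eval()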
generation_config.json
ADDED
@@ -0,0 +1,4 @@
{
  "_from_model_config": true,
  "transformers_version": "4.48.3"
}
lora_weights.pth
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:28d09f840f109678f6540436c33a7838835bc0609f23f3a3853b25ca4dfac478
size 104842214
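config.json enables LoRA on both towers (`use_backbone_lora: 16`, `use_llm_lora: 16`), and this file carries the adapter weights separately from the safetensors shards. A cautious way to inspect it, assuming it is a plain PyTorch state dict written with `torch.save` (an assumption, not something the commit states):

import torch

# weights_only=True works only if the file is a flat dict of tensors (assumed here)
lora_state = torch.load("lora_weights.pth", map_location="cpu", weights_only=True)
print(len(lora_state), "entries")   # number of adapter tensors, if it is a flat state dict
print(sorted(lora_state)[:5])       # peek at the first few parameter names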
model-00001-of-00004.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:91681b1654e3b957fde348fa18b3b48f767744a35aa299f630613922d3a5206f
size 4991243040
model-00002-of-00004.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:3ac4fd32b5f80a27f40ee9598ccc8ec90146afccc5f5d6dbb98e26ab312abc3d
size 4957806440
model-00003-of-00004.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:68e9b19b9727927d85c26e208afbe7b1d71c58d5d135234a3bc74deb55ed6acb
size 4957806440
model-00004-of-00004.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:67554b480c2cad40263f0b785b2375776175e92c0b1553cf75af0534de982e84
size 3586334288
model.safetensors.index.json
ADDED
The diff for this file is too large to render.
qa_metrics.txt
ADDED
@@ -0,0 +1,22 @@
Accuracy: 0.0
Accuracy: 0.0
Accuracy: 0.0
Accuracy: 0.0
Accuracy: 0.0
Accuracy: 0.0
Accuracy: 0.3333333333333333
Accuracy: 0.3333333333333333
Accuracy: 0.3333333333333333
Accuracy: 0.3333333333333333
Accuracy: 0.3333333333333333
Accuracy: 0.3333333333333333
Accuracy: 0.3333333333333333
Accuracy: 0.3333333333333333
Accuracy: 0.3333333333333333
Accuracy: 0.3333333333333333
Accuracy: 0.3333333333333333
Accuracy: 0.3333333333333333
Accuracy: 0.7871111111111111
Accuracy: 0.7871111111111111
Accuracy: 0.7871111111111111
Accuracy: 0.787
qa_results.csv
ADDED
The diff for this file is too large to render.
runs/Apr25_07-43-39_amax/events.out.tfevents.1745567038.amax.2662062.0
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:4641219cfae35b8813aa05a158de3ba4672bcb0c3acb88d205e7d19a11dba720
size 15602
runs/Apr25_07-47-50_amax/events.out.tfevents.1745567287.amax.2667492.0
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:49981fc6f5f4d0a61b8824f8748e21c5cd7a411349a303c28347c3ca116957d1
size 11873
runs/Apr25_07-49-47_amax/events.out.tfevents.1745567403.amax.2673802.0
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:20c033dbe595aef7458d5eb7010f1e5f15bf738ccf07741dc3aed43f096a7902
size 11873
runs/Apr25_07-53-41_amax/events.out.tfevents.1745567639.amax.2680006.0
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:af206399a5e1afe2cb0f6e0fdd58d24521b5f671de1af7c5b334e2b9cd0cc2b7
size 12287
runs/Apr25_07-57-17_amax/events.out.tfevents.1745567854.amax.2687397.0
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:680876790ee8c170feb4efd07457fb0fa6ae64af5a81de77ee2a6da0df50f7b1
size 1771968
runs/Apr26_04-08-11_amax/events.out.tfevents.1745640508.amax.784562.0
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:4a91759dba47ee2d943ed9f4182496136b465d154bf0647089290dd7f621f6a5
size 880915
special_tokens_map.json
ADDED
@@ -0,0 +1,63 @@
{
  "additional_special_tokens": [
    "<|im_start|>", "<|im_end|>", "<|action_start|>", "<|action_end|>", "<|interpreter|>", "<|plugin|>",
    "<restate>", "</restate>", "<planning>", "</planning>", "<recollect>", "</recollect>",
    "<execution>", "</execution>", "<review>", "</review>", "<summarize>", "</summarize>",
    "<retry>", "</retry>", "<conclude>", "</conclude>", "<img>", "</img>", "<IMG_CONTEXT>",
    "<quad>", "</quad>", "<ref>", "</ref>", "<box>", "</box>"
  ],
  "bos_token": {"content": "<s>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false},
  "eos_token": {"content": "<|im_end|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false},
  "pad_token": {"content": "</s>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false},
  "unk_token": {"content": "<unk>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false}
}
tokenization_internlm3.py
ADDED
@@ -0,0 +1,294 @@
import os
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple

import sentencepiece as spm
from transformers.tokenization_utils import AddedToken, PreTrainedTokenizer
from transformers.utils import logging

if TYPE_CHECKING:
    from transformers.tokenization_utils_base import TextInput

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "tokenizer.model"}

SPIECE_UNDERLINE = "▁"


class InternLM3Tokenizer(PreTrainedTokenizer):
    """
    Construct a InternLM3 tokenizer. Based on byte-level Byte-Pair-Encoding. The default padding token is unset as there is
    no padding token in the original model.

    Args:
        vocab_file (`str`):
            Path to the vocabulary file.
        unk_token (`str` or `tokenizers.AddedToken`, *optional*, defaults to `"<unk>"`):
            The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
            token instead.
        bos_token (`str` or `tokenizers.AddedToken`, *optional*, defaults to `"<s>"`):
            The beginning of sequence token that was used during pretraining. Can be used a sequence classifier token.
        eos_token (`str` or `tokenizers.AddedToken`, *optional*, defaults to `"</s>"`):
            The end of sequence token.
        pad_token (`str` or `tokenizers.AddedToken`, *optional*):
            A special token used to make arrays of tokens the same size for batching purpose. Will then be ignored by
            attention mechanisms or loss computation.
        sp_model_kwargs (`Dict[str, Any]`, `Optional`, *optional*):
            Will be passed to the `SentencePieceProcessor.__init__()` method. The [Python wrapper for
            SentencePiece](https://github.com/google/sentencepiece/tree/master/python) can be used, among other things,
            to set:

            - `enable_sampling`: Enable subword regularization.
            - `nbest_size`: Sampling parameters for unigram. Invalid for BPE-Dropout.

              - `nbest_size = {0,1}`: No sampling is performed.
              - `nbest_size > 1`: samples from the nbest_size results.
              - `nbest_size < 0`: assuming that nbest_size is infinite and samples from the all hypothesis (lattice)
                using forward-filtering-and-backward-sampling algorithm.

            - `alpha`: Smoothing parameter for unigram sampling, and dropout probability of merge operations for
              BPE-dropout.

        add_bos_token (`bool`, *optional*, defaults to `True`):
            Whether or not to add an `bos_token` at the start of sequences.
        add_eos_token (`bool`, *optional*, defaults to `False`):
            Whether or not to add an `eos_token` at the end of sequences.
        clean_up_tokenization_spaces (`bool`, *optional*, defaults to `False`):
            Whether or not to cleanup spaces after decoding, cleanup consists in removing potential artifacts like
            extra spaces.
        use_default_system_prompt (`bool`, *optional*, defaults to `False`):
            Whether or not the default system prompt for InternLM3 should be used.
        spaces_between_special_tokens (`bool`, *optional*, defaults to `False`):
            Whether or not to add spaces between special tokens.
        spaces_for_interleaved_special_tokens (`bool`, *optional*, defaults to `False`):
            Whether or not to add spaces between special tokens that are interleaved with normal tokens.
        add_prefix_space (`bool`, *optional*, defaults to `True`):
            Whether or not to add an initial space to the input. This allows to treat the leading word just as any
            other word. Again, this should be set with `from_slow=True` to make sure it's taken into account.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        unk_token="<unk>",
        bos_token="<s>",
        eos_token="</s>",
        pad_token=None,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        add_bos_token=True,
        add_eos_token=False,
        clean_up_tokenization_spaces=False,
        use_default_system_prompt=False,
        spaces_between_special_tokens=False,
        spaces_for_interleaved_special_tokens=False,
        add_prefix_space=True,
        **kwargs,
    ):
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        bos_token = AddedToken(bos_token, normalized=False, special=True) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, normalized=False, special=True) if isinstance(eos_token, str) else eos_token
        unk_token = AddedToken(unk_token, normalized=False, special=True) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, normalized=False, special=True) if isinstance(pad_token, str) else pad_token

        self.vocab_file = vocab_file
        self.add_bos_token = add_bos_token
        self.add_eos_token = add_eos_token
        self.use_default_system_prompt = use_default_system_prompt
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)
        self.add_prefix_space = add_prefix_space
        self.spaces_for_interleaved_special_tokens = spaces_for_interleaved_special_tokens

        vocab_size = self.sp_model.get_piece_size()
        self.decoder = {i: self.sp_model.id_to_piece(i) for i in range(vocab_size)}

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            add_bos_token=add_bos_token,
            add_eos_token=add_eos_token,
            sp_model_kwargs=sp_model_kwargs,
            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
            use_default_system_prompt=use_default_system_prompt,
            spaces_between_special_tokens=spaces_between_special_tokens,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__.update(d)
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)

    @property
    def vocab_size(self):
        """Returns vocab size"""
        return self.sp_model.get_piece_size()

    def get_vocab(self):
        """Returns vocab as a dict"""
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def tokenize(self, text: "TextInput", **kwargs) -> List[str]:
        """
        Args:
            text: TextInput
        Simply calls PreTrainedTokenizer's method
        """
        return super().tokenize(text, **kwargs)

    def _tokenize(self, text, **kwargs):
        """
        Args:
            text: TextInput
        Returns a tokenized string. The Gemma tokenizer never adds a prefix space.
        """
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) in an id using the vocab."""
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) in a token (str) using the vocab."""
        return self.decoder.get(index, "")

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) in a single string."""
        # since we manually add the prefix space, we have to remove it when decoding
        if tokens[0].startswith(SPIECE_UNDERLINE) and self.add_prefix_space:
            tokens[0] = tokens[0][1:]

        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for i, token in enumerate(tokens):
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special and i != 0 and self.spaces_for_interleaved_special_tokens:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                if (
                    prev_is_special
                    and i == 1
                    and self.add_prefix_space
                    and not token.startswith(SPIECE_UNDERLINE)
                    and self.spaces_for_interleaved_special_tokens
                ):
                    out_string += " "
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string

    def save_vocabulary(self, save_directory, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """
        Save the vocabulary and special tokens file to a directory.

        Args:
            save_directory (`str`):
                The directory in which to save the vocabulary.

        Returns:
            `Tuple(str)`: Paths to the files saved.
        """
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        bos_token_id = [self.bos_token_id] if self.add_bos_token else []
        eos_token_id = [self.eos_token_id] if self.add_eos_token else []

        output = bos_token_id + token_ids_0 + eos_token_id

        if token_ids_1 is not None:
            output = output + bos_token_id + token_ids_1 + eos_token_id

        return output

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """
        Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
        special tokens using the tokenizer `prepare_for_model` method.

        Args:
            token_ids_0 (`List[int]`):
                List of IDs.
            token_ids_1 (`List[int]`, *optional*):
                Optional second list of IDs for sequence pairs.
            already_has_special_tokens (`bool`, *optional*, defaults to `False`):
                Whether or not the token list is already formatted with special tokens for the model.

        Returns:
            `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
        """
        if already_has_special_tokens:
            return super().get_special_tokens_mask(token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)

        bos_token_id = [1] if self.add_bos_token else []
        eos_token_id = [1] if self.add_eos_token else []

        if token_ids_1 is None:
            return bos_token_id + ([0] * len(token_ids_0)) + eos_token_id
        return bos_token_id + ([0] * len(token_ids_0)) + eos_token_id + bos_token_id + ([0] * len(token_ids_1)) + eos_token_id

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        """
        Creates a mask from the two sequences passed to be used in a sequence-pair classification task. An ALBERT
        sequence pair mask has the following format:

        ```
        0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
        | first sequence | second sequence |
        ```

        if token_ids_1 is None, only returns the first portion of the mask (0s).

        Args:
            token_ids_0 (`List[int]`):
                List of ids.
            token_ids_1 (`List[int]`, *optional*):
                Optional second list of IDs for sequence pairs.

        Returns:
            `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s).
        """
        bos_token_id = [self.bos_token_id] if self.add_bos_token else []
        eos_token_id = [self.eos_token_id] if self.add_eos_token else []

        output = [0] * len(bos_token_id + token_ids_0 + eos_token_id)

        if token_ids_1 is not None:
            output += [1] * len(bos_token_id + token_ids_1 + eos_token_id)

        return output
tokenizer.model
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:bcacff3229854f5103ee7a85473a30ca9a8b3a68f3aae9b7479574b23ac2256b
size 2475075
tokenizer_config.json
ADDED
@@ -0,0 +1,330 @@
{
  "add_bos_token": true,
  "add_eos_token": false,
  "add_prefix_space": true,
  "added_tokens_decoder": {
    "0": {"content": "<unk>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "1": {"content": "<s>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "2": {"content": "</s>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "128111": {"content": "<restate>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "128112": {"content": "</restate>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "128113": {"content": "<planning>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "128114": {"content": "</planning>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "128115": {"content": "<recollect>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "128116": {"content": "</recollect>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "128117": {"content": "<execution>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "128118": {"content": "</execution>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "128119": {"content": "<review>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "128120": {"content": "</review>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "128121": {"content": "<summarize>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "128122": {"content": "</summarize>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "128123": {"content": "<retry>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "128124": {"content": "</retry>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "128125": {"content": "<conclude>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "128126": {"content": "</conclude>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "128127": {"content": "<|plugin|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "128128": {"content": "<|interpreter|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "128129": {"content": "<|action_end|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "128130": {"content": "<|action_start|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "128131": {"content": "<|im_end|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "128132": {"content": "<|im_start|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "128133": {"content": "<img>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "128134": {"content": "</img>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "128135": {"content": "<IMG_CONTEXT>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "128136": {"content": "<quad>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "128137": {"content": "</quad>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "128138": {"content": "<ref>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "128139": {"content": "</ref>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "128140": {"content": "<box>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "128141": {"content": "</box>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true}
  },
  "additional_special_tokens": [
    "<|im_start|>", "<|im_end|>", "<|action_start|>", "<|action_end|>", "<|interpreter|>", "<|plugin|>",
    "<restate>", "</restate>", "<planning>", "</planning>", "<recollect>", "</recollect>",
    "<execution>", "</execution>", "<review>", "</review>", "<summarize>", "</summarize>",
    "<retry>", "</retry>", "<conclude>", "</conclude>", "<img>", "</img>", "<IMG_CONTEXT>",
    "<quad>", "</quad>", "<ref>", "</ref>", "<box>", "</box>"
  ],
  "auto_map": {
    "AutoTokenizer": ["tokenization_internlm3.InternLM3Tokenizer", null]
  },
  "bos_token": "<s>",
  "chat_template": "{{ bos_token }}{% for message in messages %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}",
  "clean_up_tokenization_spaces": false,
  "eos_token": "<|im_end|>",
  "extra_special_tokens": {},
  "model_max_length": 4096,
  "pad_token": "</s>",
  "sp_model_kwargs": {},
  "spaces_between_special_tokens": false,
  "tokenizer_class": "InternLM3Tokenizer",
  "unk_token": "<unk>",
  "use_default_system_prompt": false
}
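The `chat_template` above is an ordinary Jinja template consumed by `tokenizer.apply_chat_template`: it wraps each turn in `<|im_start|>role ... <|im_end|>` blocks after the `<s>` BOS token. A short usage sketch, assuming `tokenizer` was loaded with `trust_remote_code=True` so the custom `InternLM3Tokenizer` referenced in `auto_map` is used:

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Describe the image."},
]
# Render the prompt string:
# "<s><|im_start|>system\n...<|im_end|>\n<|im_start|>user\n...<|im_end|>\n<|im_start|>assistant\n"
prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
# Or tokenize directly to input ids in one call
input_ids = tokenizer.apply_chat_template(messages, add_generation_prompt=True, return_tensors="pt")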
training_args.bin
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:4314a99e2ee6a8351b9bca81e6cac7f22a0614efcd2c1c53556d5f7c576eab64
size 7224
training_log.txt
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:abeb31dc07e9f2e0fbdbfe9927835ea73ca8a296f67d210fb71b83e0e00c1b2c
size 19308152