cpatonn committed (verified)
Commit da965d6 · 1 Parent(s): c7ef248

Upload folder using huggingface_hub
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ tokenizer.json filter=lfs diff=lfs merge=lfs -text
added_tokens.json ADDED
@@ -0,0 +1,32 @@
+ {
+ "</answer>": 151670,
+ "</judge>": 151668,
+ "</think>": 151666,
+ "</tool_call>": 151658,
+ "<answer>": 151669,
+ "<judge>": 151667,
+ "<think>": 151665,
+ "<think_off>": 151672,
+ "<think_on>": 151671,
+ "<tool_call>": 151657,
+ "<|box_end|>": 151649,
+ "<|box_start|>": 151648,
+ "<|endoftext|>": 151643,
+ "<|file_sep|>": 151664,
+ "<|fim_middle|>": 151660,
+ "<|fim_pad|>": 151662,
+ "<|fim_prefix|>": 151659,
+ "<|fim_suffix|>": 151661,
+ "<|im_end|>": 151645,
+ "<|im_start|>": 151644,
+ "<|image_pad|>": 151655,
+ "<|object_ref_end|>": 151647,
+ "<|object_ref_start|>": 151646,
+ "<|quad_end|>": 151651,
+ "<|quad_start|>": 151650,
+ "<|repo_name|>": 151663,
+ "<|video_pad|>": 151656,
+ "<|vision_end|>": 151653,
+ "<|vision_pad|>": 151654,
+ "<|vision_start|>": 151652
+ }
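
added_tokens.json extends the base Qwen vocabulary with eight reasoning/judging control tokens (<think>/</think>, <judge>/</judge>, <answer>/</answer>, <think_on>, <think_off>) at IDs 151665-151672, alongside the usual tool-call and FIM tokens. A minimal sketch to confirm the mapping once the files are downloaded; the repo ID below is a placeholder, not the actual repository name:

from transformers import AutoTokenizer

# Placeholder repo ID -- substitute the real repository path.
tok = AutoTokenizer.from_pretrained("cpatonn/<model-repo>")

# Control tokens added in added_tokens.json; IDs should match the file above.
for token in ["<think>", "</think>", "<judge>", "</judge>",
              "<answer>", "</answer>", "<think_on>", "<think_off>"]:
    print(token, tok.convert_tokens_to_ids(token))  # e.g. "<think>" -> 151665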
chat_template.jinja ADDED
@@ -0,0 +1,88 @@
+ {%- if tools %}
+ {{- '<|im_start|>system\n' }}
+ {%- if messages[0]['role'] == 'system' %}
+ {{- messages[0]['content'] }}
+ {%- else %}
+ {{- '' }}
+ {%- endif %}
+ {{- "\n\n# Tools\n\nYou may call one or more functions to assist with the user query.\n\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>" }}
+ {%- for tool in tools %}
+ {{- "\n" }}
+ {{- tool | tojson }}
+ {%- endfor %}
+ {{- "\n</tools>\n\nFor each function call, return a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call><|im_end|>\n" }}
+ {%- else %}
+ {%- if messages[0]['role'] == 'system' %}
+ {{- '<|im_start|>system\n' + messages[0]['content'] + '<|im_end|>\n' }}
+ {%- else %}
+ {{- '<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n' }}
+ {%- endif %}
+ {%- endif %}
+ {%- for message in messages %}
+ {%- if (message.role == "user") or (message.role == "system" and not loop.first) %}
+ {{- '<|im_start|>' + message.role + '\n' + message.content + '<|im_end|>' + '\n' }}
+ {%- elif message.role == "assistant" and not message.tool_calls %}
+ {%- set content = message.content %}
+ {%- if not loop.last %}
+ {%- set answer_blocks = message.content.split('<answer>\n') %}
+ {%- if answer_blocks|length > 1 %}
+ {%- set last_answer_block = answer_blocks[-1] %}
+ {%- if '\n</answer>' in last_answer_block %}
+ {%- set content = last_answer_block.split('\n</answer>')[0] %}
+ {%- else %}
+ {%- set content = message.content.split('<think_off>')[-1].lstrip('\n') %}
+ {%- set content = content.split('</think>')[-1].lstrip('\n') %}
+ {%- endif %}
+ {%- else %}
+ {%- set content = message.content.split('<think_off>')[-1].lstrip('\n') %}
+ {%- set content = content.split('</think>')[-1].lstrip('\n') %}
+ {%- endif %}
+ {%- endif %}
+ {{- '<|im_start|>' + message.role + '\n' + content + '<|im_end|>' + '\n' }}
+ {%- elif message.role == "assistant" %}
+ {%- set content = message.content %}
+ {%- if not loop.last %}
+ {%- set answer_blocks = message.content.split('<answer>\n') %}
+ {%- if answer_blocks|length > 1 %}
+ {%- set last_answer_block = answer_blocks[-1] %}
+ {%- if '\n</answer>' in last_answer_block %}
+ {%- set content = last_answer_block.split('\n</answer>')[0] %}
+ {%- else %}
+ {%- set content = message.content.split('<think_off>')[-1].lstrip('\n') %}
+ {%- set content = content.split('</think>')[-1].lstrip('\n') %}
+ {%- endif %}
+ {%- else %}
+ {%- set content = message.content.split('<think_off>')[-1].lstrip('\n') %}
+ {%- set content = content.split('</think>')[-1].lstrip('\n') %}
+ {%- endif %}
+ {%- endif %}
+ {{- '<|im_start|>' + message.role }}
+ {%- if message.content %}
+ {{- '\n' + content }}
+ {%- endif %}
+ {%- for tool_call in message.tool_calls %}
+ {%- if tool_call.function is defined %}
+ {%- set tool_call = tool_call.function %}
+ {%- endif %}
+ {{- '\n<tool_call>\n{\"name\": \"' }}
+ {{- tool_call.name }}
+ {{- '\", \"arguments\": ' }}
+ {{- tool_call.arguments | tojson }}
+ {{- '}\n</tool_call>' }}
+ {%- endfor %}
+ {{- '<|im_end|>\n' }}
+ {%- elif message.role == "tool" %}
+ {%- if (loop.index0 == 0) or (messages[loop.index0 - 1].role != "tool") %}
+ {{- '<|im_start|>user' }}
+ {%- endif %}
+ {{- '\n<tool_response>\n' }}
+ {{- message.content }}
+ {{- '\n</tool_response>' }}
+ {%- if loop.last or (messages[loop.index0 + 1].role != "tool") %}
+ {{- '<|im_end|>\n' }}
+ {%- endif %}
+ {%- endif %}
+ {%- endfor %}
+ {%- if add_generation_prompt %}
+ {{- '<|im_start|>assistant\n<judge>\n' }}
+ {%- endif %}
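
The template above follows the ChatML layout: a system block, user/assistant turns wrapped in <|im_start|>...<|im_end|>, tool signatures injected into the system message, tool calls emitted inside <tool_call> tags, and tool results wrapped in <tool_response> inside a user turn. For earlier assistant turns it strips reasoning, keeping only the final <answer>...</answer> block (or the text after </think>/<think_off>), and when add_generation_prompt is set the prompt ends with '<|im_start|>assistant\n<judge>\n'. A minimal rendering sketch (placeholder repo ID):

from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("cpatonn/<model-repo>")  # placeholder repo ID

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "What is 2 + 2?"},
]

# Renders the ChatML prompt defined by chat_template.jinja.
prompt = tok.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
print(prompt)
# Per the template, the prompt should end with: <|im_start|>assistant\n<judge>\n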
config.json ADDED
@@ -0,0 +1,141 @@
+ {
+ "architectures": [
+ "Qwen2ForCausalLM"
+ ],
+ "attention_dropout": 0.0,
+ "bos_token_id": 151643,
+ "eos_token_id": 151643,
+ "hidden_act": "silu",
+ "hidden_size": 5120,
+ "initializer_range": 0.02,
+ "intermediate_size": 27648,
+ "layer_types": [
+ "full_attention", "full_attention", "full_attention", "full_attention", "full_attention", "full_attention", "full_attention", "full_attention", "full_attention", "full_attention",
+ "full_attention", "full_attention", "full_attention", "full_attention", "full_attention", "full_attention", "full_attention", "full_attention", "full_attention", "full_attention",
+ "full_attention", "full_attention", "full_attention", "full_attention", "full_attention", "full_attention", "full_attention", "full_attention", "full_attention", "full_attention",
+ "full_attention", "full_attention", "full_attention", "full_attention", "full_attention", "full_attention", "full_attention", "full_attention", "full_attention", "full_attention",
+ "full_attention", "full_attention", "full_attention", "full_attention", "full_attention", "full_attention", "full_attention", "full_attention", "full_attention", "full_attention",
+ "full_attention", "full_attention", "full_attention", "full_attention", "full_attention", "full_attention", "full_attention", "full_attention", "full_attention", "full_attention",
+ "full_attention", "full_attention", "full_attention", "full_attention", "full_attention", "full_attention", "full_attention", "full_attention", "full_attention", "full_attention",
+ "full_attention", "full_attention", "full_attention", "full_attention", "full_attention", "full_attention", "full_attention", "full_attention", "full_attention", "full_attention"
+ ],
+ "max_position_embeddings": 131072,
+ "max_window_layers": 64,
+ "model_type": "qwen2",
+ "num_attention_heads": 40,
+ "num_hidden_layers": 80,
+ "num_key_value_heads": 8,
+ "quantization_config": {
+ "config_groups": {
+ "group_0": {
+ "input_activations": null,
+ "output_activations": null,
+ "targets": [
+ "Linear"
+ ],
+ "weights": {
+ "actorder": null,
+ "block_structure": null,
+ "dynamic": false,
+ "group_size": null,
+ "num_bits": 8,
+ "observer": "minmax",
+ "observer_kwargs": {},
+ "strategy": "channel",
+ "symmetric": true,
+ "type": "int"
+ }
+ }
+ },
+ "format": "pack-quantized",
+ "global_compression_ratio": null,
+ "ignore": [
+ "lm_head"
+ ],
+ "kv_cache_scheme": null,
+ "quant_method": "compressed-tensors",
+ "quantization_status": "compressed"
+ },
+ "rms_norm_eps": 1e-05,
+ "rope_scaling": null,
+ "rope_theta": 1000000.0,
+ "sliding_window": null,
+ "tie_word_embeddings": false,
+ "torch_dtype": "bfloat16",
+ "transformers_version": "4.53.3",
+ "use_cache": false,
+ "use_sliding_window": false,
+ "vocab_size": 152064
+ }
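
Per quantization_config, this checkpoint is a weight-only INT8 export (W8A16: 8-bit symmetric per-channel weights, 16-bit activations) in the compressed-tensors pack-quantized format, with lm_head left unquantized. Checkpoints in this format are normally loadable by a recent transformers build with the compressed-tensors package installed, or served directly by vLLM; a minimal loading sketch, with a placeholder repo ID and the version requirements as an assumption:

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

repo = "cpatonn/<model-repo>"  # placeholder repo ID

tok = AutoTokenizer.from_pretrained(repo)
# transformers detects quant_method "compressed-tensors" from config.json
# and unpacks the packed INT8 weights when loading the shards.
model = AutoModelForCausalLM.from_pretrained(repo, torch_dtype=torch.bfloat16, device_map="auto")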
generation_config.json ADDED
@@ -0,0 +1,12 @@
+ {
+ "bos_token_id": 151643,
+ "do_sample": true,
+ "eos_token_id": [
+ 151645,
+ 151643
+ ],
+ "pad_token_id": 151643,
+ "temperature": 0.6,
+ "top_p": 0.95,
+ "transformers_version": "4.53.3"
+ }
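
generation_config.json enables sampling with temperature 0.6 and top_p 0.95 and stops on <|im_end|> (151645) or <|endoftext|> (151643). generate() picks these defaults up automatically; to pass them explicitly, a sketch continuing the loading and prompt examples above (tok, model, and prompt as defined there):

# Continues the earlier sketches: tok, model, prompt are assumed defined.
inputs = tok(prompt, return_tensors="pt").to(model.device)
out = model.generate(**inputs, do_sample=True, temperature=0.6, top_p=0.95, max_new_tokens=512)
print(tok.decode(out[0][inputs["input_ids"].shape[-1]:], skip_special_tokens=True))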
merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
model-00001-of-00009.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:624742f988f83dc8a337a83e7c2bcd2908b0b2a748b012b5b0e912ff519c054b
+ size 4997768688
model-00002-of-00009.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5efefa122e8522e57bbaa977ce6511167c027d0b28ad0b8c5d044a4c198baf3a
+ size 4914431192
model-00003-of-00009.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c7dd2234302ff34d5d76bdd9e9d422d64977d9455c7522938fb571d7f35337d7
+ size 4877711368
model-00004-of-00009.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:afb595c6ec9a49af55e5ba60e19296f4e7c87c382f4148bcab689bb5d6b18903
+ size 4877711368
model-00005-of-00009.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1501561dbaf197e59ddab567e659afe181c195bfabcf3ecfdbe768bdb1f74539
+ size 4877711368
model-00006-of-00009.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6ed134609eceae8c9120bc5f1dcc1dfa6844cfe09376d79568611d6915d01737
+ size 4877711368
model-00007-of-00009.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b4dcc51373b8b7d03b86957b9f6c31c97f7a502a8ce4f1f8aae0ac8f19c855a4
+ size 4877711368
model-00008-of-00009.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4eedca0b667ee71f76ca25d5aaacbaea13603a0892111225663fa630b4e1f1bb
+ size 4877711368
model-00009-of-00009.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5f3463c086891ced7556ab3a5264a7562c5ec77cd7f56f1a546a5a89e34332b7
+ size 2957503824
model.safetensors.index.json ADDED
The diff for this file is too large to render. See raw diff
 
recipe.yaml ADDED
@@ -0,0 +1,6 @@
+ default_stage:
+   default_modifiers:
+     GPTQModifier:
+       targets: [Linear]
+       ignore: [lm_head]
+       scheme: W8A16
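
recipe.yaml records the llm-compressor recipe behind this export: a single GPTQModifier applying the W8A16 scheme to every Linear layer except lm_head, matching the quantization_config in config.json. Roughly, the export could be reproduced with something like the sketch below; the base model, calibration dataset, and calibration settings are assumptions (they are not recorded in this commit), and the oneshot import path varies between llm-compressor releases:

from llmcompressor.modifiers.quantization import GPTQModifier
from llmcompressor.transformers import oneshot  # newer releases: from llmcompressor import oneshot

# Mirrors recipe.yaml: GPTQ, W8A16, all Linear layers except lm_head.
recipe = GPTQModifier(targets="Linear", ignore=["lm_head"], scheme="W8A16")

oneshot(
    model="<base-model>",          # assumption: base checkpoint is not named in this commit
    dataset="open_platypus",       # assumption: calibration set is not recorded in the recipe
    recipe=recipe,
    max_seq_length=2048,           # assumption
    num_calibration_samples=512,   # assumption
    output_dir="<output-dir>",
)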
special_tokens_map.json ADDED
@@ -0,0 +1,31 @@
+ {
+ "additional_special_tokens": [
+ "<|im_start|>",
+ "<|im_end|>",
+ "<|object_ref_start|>",
+ "<|object_ref_end|>",
+ "<|box_start|>",
+ "<|box_end|>",
+ "<|quad_start|>",
+ "<|quad_end|>",
+ "<|vision_start|>",
+ "<|vision_end|>",
+ "<|vision_pad|>",
+ "<|image_pad|>",
+ "<|video_pad|>"
+ ],
+ "eos_token": {
+ "content": "<|im_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "<|endoftext|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+ }
tokenizer.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:528d9f2690ed3679eee70ed9b085fb78899b7f6dfc2a220220cfe89fdd3ffef5
+ size 11423388
tokenizer_config.json ADDED
@@ -0,0 +1,272 @@
+ {
+ "add_bos_token": false,
+ "add_prefix_space": false,
+ "added_tokens_decoder": {
+ "151643": {"content": "<|endoftext|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
+ "151644": {"content": "<|im_start|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
+ "151645": {"content": "<|im_end|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
+ "151646": {"content": "<|object_ref_start|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
+ "151647": {"content": "<|object_ref_end|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
+ "151648": {"content": "<|box_start|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
+ "151649": {"content": "<|box_end|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
+ "151650": {"content": "<|quad_start|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
+ "151651": {"content": "<|quad_end|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
+ "151652": {"content": "<|vision_start|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
+ "151653": {"content": "<|vision_end|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
+ "151654": {"content": "<|vision_pad|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
+ "151655": {"content": "<|image_pad|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
+ "151656": {"content": "<|video_pad|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
+ "151657": {"content": "<tool_call>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": false},
+ "151658": {"content": "</tool_call>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": false},
+ "151659": {"content": "<|fim_prefix|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": false},
+ "151660": {"content": "<|fim_middle|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": false},
+ "151661": {"content": "<|fim_suffix|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": false},
+ "151662": {"content": "<|fim_pad|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": false},
+ "151663": {"content": "<|repo_name|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": false},
+ "151664": {"content": "<|file_sep|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": false},
+ "151665": {"content": "<think>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": false},
+ "151666": {"content": "</think>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": false},
+ "151667": {"content": "<judge>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": false},
+ "151668": {"content": "</judge>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": false},
+ "151669": {"content": "<answer>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": false},
+ "151670": {"content": "</answer>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": false},
+ "151671": {"content": "<think_on>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": false},
+ "151672": {"content": "<think_off>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": false}
+ },
+ "additional_special_tokens": [
+ "<|im_start|>",
+ "<|im_end|>",
+ "<|object_ref_start|>",
+ "<|object_ref_end|>",
+ "<|box_start|>",
+ "<|box_end|>",
+ "<|quad_start|>",
+ "<|quad_end|>",
+ "<|vision_start|>",
+ "<|vision_end|>",
+ "<|vision_pad|>",
+ "<|image_pad|>",
+ "<|video_pad|>"
+ ],
+ "bos_token": null,
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "<|im_end|>",
+ "errors": "replace",
+ "extra_special_tokens": {},
+ "model_max_length": 131072,
+ "pad_token": "<|endoftext|>",
+ "padding_side": "right",
+ "split_special_tokens": false,
+ "tokenizer_class": "Qwen2Tokenizer",
+ "unk_token": null
+ }
vocab.json ADDED
The diff for this file is too large to render. See raw diff