danielhanchen committed
Commit df25779 · verified · 1 Parent(s): 5a1d225

Upload LlamaForCausalLM

Files changed (2):
  1. config.json +9 -2
  2. generation_config.json +3 -1
config.json CHANGED
@@ -22,6 +22,7 @@
   "num_attention_heads": 24,
   "num_hidden_layers": 28,
   "num_key_value_heads": 8,
+  "pad_token_id": 128004,
   "pretraining_tp": 1,
   "quantization_config": {
     "_load_in_4bit": true,
@@ -32,7 +33,12 @@
     "bnb_4bit_use_double_quant": true,
     "llm_int8_enable_fp32_cpu_offload": false,
     "llm_int8_has_fp16_weight": false,
-    "llm_int8_skip_modules": null,
+    "llm_int8_skip_modules": [
+      "lm_head",
+      "multi_modal_projector",
+      "merger",
+      "modality_projection"
+    ],
     "llm_int8_threshold": 6.0,
     "load_in_4bit": true,
     "load_in_8bit": false,
@@ -49,7 +55,8 @@
   "rope_theta": 500000.0,
   "tie_word_embeddings": true,
   "torch_dtype": "bfloat16",
-  "transformers_version": "4.45.0",
+  "transformers_version": "4.48.0",
+  "unsloth_fixed": true,
   "use_cache": true,
   "vocab_size": 128256
 }
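For readers who want to reproduce this quantization setup when loading the checkpoint themselves, below is a minimal sketch of an equivalent BitsAndBytesConfig in transformers. Only the fields visible in the hunks above are copied; the bnb_4bit_quant_type, the compute dtype, and the repo id are assumptions and marked as such in the comments.

# Minimal sketch: a BitsAndBytesConfig mirroring the quantization_config
# fields visible in this diff. Values not shown in the hunks above
# (quant type, compute dtype, repo id) are assumptions.
import torch
from transformers import AutoModelForCausalLM, BitsAndBytesConfig

bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,                       # "load_in_4bit": true
    load_in_8bit=False,                      # "load_in_8bit": false
    bnb_4bit_use_double_quant=True,          # "bnb_4bit_use_double_quant": true
    bnb_4bit_compute_dtype=torch.bfloat16,   # assumption, consistent with "torch_dtype": "bfloat16"
    llm_int8_threshold=6.0,                  # "llm_int8_threshold": 6.0
    llm_int8_enable_fp32_cpu_offload=False,  # "llm_int8_enable_fp32_cpu_offload": false
    llm_int8_has_fp16_weight=False,          # "llm_int8_has_fp16_weight": false
    llm_int8_skip_modules=[                  # skip list added in this commit;
        "lm_head",                           # these modules stay unquantized
        "multi_modal_projector",
        "merger",
        "modality_projection",
    ],
)

model = AutoModelForCausalLM.from_pretrained(
    "your-org/your-llama-checkpoint",  # hypothetical repo id for illustration
    quantization_config=bnb_config,
)

Keeping lm_head (and any projector modules) out of quantization is the usual motivation for a skip list like this: quantizing the output head tends to cost more in generation quality than it saves in memory.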
generation_config.json CHANGED
@@ -6,7 +6,9 @@
     128008,
     128009
   ],
+  "max_length": 131072,
+  "pad_token_id": 128004,
   "temperature": 0.6,
   "top_p": 0.9,
-  "transformers_version": "4.45.0"
+  "transformers_version": "4.48.0"
 }
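Likewise, the post-commit generation defaults can be expressed with transformers' GenerationConfig. This is a sketch using only the values visible in the hunk above; the eos_token_id list shows just the entries in the diff context and may have earlier entries not shown, and do_sample is an assumption.

# Sketch of the generation defaults as changed by this commit.
from transformers import GenerationConfig

gen_config = GenerationConfig(
    eos_token_id=[128008, 128009],  # entries visible in the hunk context (list may start earlier)
    max_length=131072,              # added in this commit
    pad_token_id=128004,            # added in this commit; matches the new value in config.json
    do_sample=True,                 # assumption: temperature/top_p only take effect when sampling
    temperature=0.6,
    top_p=0.9,
)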