taronklm committed (verified)
Commit c7f9941 · 1 parent: c1cac3b

taronklm/Qwen2.5-0.5B-Instruct-chatbot
README.md CHANGED
@@ -1,60 +1,60 @@
 ---
-library_name: transformers
-license: apache-2.0
 base_model: Qwen/Qwen2.5-0.5B-Instruct
+datasets:
+- generator
+library_name: peft
+license: apache-2.0
 tags:
 - trl
 - sft
 - generated_from_trainer
-datasets:
-- generator
 model-index:
 - name: trained_model
   results: []
 ---

 <!-- This model card has been generated automatically according to the information the Trainer had access to. You
 should probably proofread and complete it, then remove this comment. -->

 # trained_model

 This model is a fine-tuned version of [Qwen/Qwen2.5-0.5B-Instruct](https://huggingface.co/Qwen/Qwen2.5-0.5B-Instruct) on the generator dataset.

 ## Model description

 More information needed

 ## Intended uses & limitations

 More information needed

 ## Training and evaluation data

 More information needed

 ## Training procedure

 ### Training hyperparameters

 The following hyperparameters were used during training:
-- learning_rate: 0.0002
-- train_batch_size: 4
+- learning_rate: 5e-05
+- train_batch_size: 1
 - eval_batch_size: 8
 - seed: 42
-- gradient_accumulation_steps: 4
+- gradient_accumulation_steps: 16
 - total_train_batch_size: 16
 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
 - lr_scheduler_type: linear
-- num_epochs: 1
-- mixed_precision_training: Native AMP
+- num_epochs: 2

 ### Training results

 ### Framework versions

+- PEFT 0.13.0
 - Transformers 4.45.1
 - Pytorch 2.5.1+cpu
 - Datasets 3.0.1
 - Tokenizers 0.20.0
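For orientation, the updated hyperparameter list corresponds to a `trl` SFT run along the following lines. This is a minimal sketch, not the author's actual script: the generator-backed dataset (which is what `datasets: generator` in the card metadata refers to) and its sample format are placeholder assumptions.

```python
# Sketch of an SFT run matching the card's hyperparameters (assumed, not the author's script).
from datasets import Dataset
from trl import SFTConfig, SFTTrainer

def samples():
    # Placeholder: yields pre-formatted chat text; the real training data is not in this repo.
    yield {"text": "<|im_start|>user\nHi<|im_end|>\n<|im_start|>assistant\nHello!<|im_end|>"}

train_dataset = Dataset.from_generator(samples)  # reported as "generator" in the card

args = SFTConfig(
    output_dir="trained_model",
    dataset_text_field="text",
    learning_rate=5e-5,
    per_device_train_batch_size=1,
    per_device_eval_batch_size=8,
    gradient_accumulation_steps=16,  # effective batch size: 1 * 16 = 16
    num_train_epochs=2,
    lr_scheduler_type="linear",
    seed=42,
    adam_beta1=0.9,
    adam_beta2=0.999,
    adam_epsilon=1e-8,
)

trainer = SFTTrainer(
    model="Qwen/Qwen2.5-0.5B-Instruct",  # trl loads the model and tokenizer from the hub id
    args=args,
    train_dataset=train_dataset,
)
trainer.train()
```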
adapter_config.json CHANGED
@@ -16,14 +16,19 @@
   "megatron_core": "megatron.core",
   "modules_to_save": null,
   "peft_type": "LORA",
-  "r": 16,
+  "r": 4,
   "rank_pattern": {},
   "revision": null,
   "target_modules": [
+    "up_proj",
+    "o_proj",
     "q_proj",
-    "v_proj"
+    "v_proj",
+    "k_proj",
+    "down_proj",
+    "gate_proj"
   ],
   "task_type": "CAUSAL_LM",
   "use_dora": false,
   "use_rslora": false
 }
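The change drops the LoRA rank from 16 to 4 while widening coverage from the two attention projections to all seven linear projections in each Qwen2 block. Expressed as a peft `LoraConfig`, the new adapter looks roughly like this; only the fields visible in the hunk above are grounded, and everything else is left at peft defaults (unchanged lines such as `lora_alpha` sit outside the hunk):

```python
# Sketch of the updated adapter config; fields not shown in the diff are peft defaults.
from peft import LoraConfig, TaskType, get_peft_model
from transformers import AutoModelForCausalLM

lora_config = LoraConfig(
    r=4,                              # rank lowered from 16
    target_modules=[                  # widened from ["q_proj", "v_proj"]
        "up_proj", "o_proj", "q_proj", "v_proj",
        "k_proj", "down_proj", "gate_proj",
    ],
    task_type=TaskType.CAUSAL_LM,
)

model = AutoModelForCausalLM.from_pretrained("Qwen/Qwen2.5-0.5B-Instruct")
model = get_peft_model(model, lora_config)
model.print_trainable_parameters()
```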
adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:b49ff0ea53d18b5246a3e8d9ce437725f5e0bfba2e8dbbca724a4c12316da6f8
-size 4338000
+oid sha256:cfbc2159c7fbe53e83476546a5700cb2ddb1714027cc026901ef6ce4d1ee8ff4
+size 8841592
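The adapter file roughly doubles (4,338,000 → 8,841,592 bytes) even though the rank drops, because the target list grows from two attention projections to all seven linear projections per block. A back-of-the-envelope check, assuming the published Qwen2.5-0.5B shapes (hidden 896, intermediate 4864, GQA KV dim 128, 24 layers; none of this is stated in the repo) and fp32 adapter weights:

```python
# Rough LoRA parameter count for Qwen2.5-0.5B; dimensions are assumed from the
# published base-model config, not from this repo.
hidden, inter, kv, layers = 896, 4864, 128, 24
shapes = {
    "q_proj": (hidden, hidden), "k_proj": (hidden, kv), "v_proj": (hidden, kv),
    "o_proj": (hidden, hidden), "gate_proj": (hidden, inter),
    "up_proj": (hidden, inter), "down_proj": (inter, hidden),
}

def lora_params(r, targets):
    # Each adapted projection adds A (in, r) and B (r, out): r * (in + out) weights.
    return layers * sum(r * (i + o) for m, (i, o) in shapes.items() if m in targets)

old = lora_params(16, {"q_proj", "v_proj"})  # 1,081,344 params
new = lora_params(4, set(shapes))            # 2,199,552 params
print(old * 4, new * 4)  # fp32 bytes: ~4.33 MB vs ~8.80 MB
```

Both estimates land within the safetensors header overhead of the committed LFS sizes.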
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:b93612e5f080373abe2115f82df55e7a6633a575113ed2345880f59464cec355
+oid sha256:3147a44829aaeded52568334e274104f25be6f4800bb0f2cb2455ee55cb3d918
 size 5368
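Since the card's usage sections are still stubs, the committed adapter can be applied on top of the base model via the standard peft loading path. A sketch, not documentation shipped with this repo:

```python
# Standard peft usage: apply this repo's adapter to the base model and chat.
from peft import PeftModel
from transformers import AutoModelForCausalLM, AutoTokenizer

base = AutoModelForCausalLM.from_pretrained("Qwen/Qwen2.5-0.5B-Instruct")
tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen2.5-0.5B-Instruct")
model = PeftModel.from_pretrained(base, "taronklm/Qwen2.5-0.5B-Instruct-chatbot")

messages = [{"role": "user", "content": "Hello!"}]
inputs = tokenizer.apply_chat_template(
    messages, add_generation_prompt=True, return_tensors="pt"
)
output = model.generate(inputs, max_new_tokens=64)
print(tokenizer.decode(output[0], skip_special_tokens=True))
```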