Add files using upload-large-folder tool
(This view is limited to 50 files because the commit contains too many changes; see the raw diff for the full set.)
- .gitattributes +14 -0
- faiss_attn/source/__pycache__/modeling_llama.cpython-313.pyc +0 -0
- faiss_attn/source/__pycache__/modeling_mistral.cpython-313.pyc +0 -0
- faiss_attn/source/__pycache__/modeling_mixtral.cpython-313.pyc +0 -0
- faiss_attn/source/__pycache__/modeling_phi3.cpython-313.pyc +0 -0
- faiss_attn/source/__pycache__/modeling_qwen2.cpython-313.pyc +0 -0
- faiss_attn/source/modeling_llama.py +1601 -0
- faiss_attn/source/modeling_mistral.py +1535 -0
- faiss_attn/source/modeling_mixtral.py +1754 -0
- faiss_attn/source/modeling_phi3.py +1772 -0
- faiss_attn/source/modeling_qwen2.py +1563 -0
- faiss_attn/source/utils.py +17 -0
- head_score/.ipynb_checkpoints/qwen2_5_7b_de-checkpoint.json +0 -0
- head_score/phi_35_mini_inst_de.json +0 -0
- head_score/phi_35_mini_inst_en.json +0 -0
- head_score/phi_35_mini_inst_zh.json +0 -0
- head_score/qwen2_5_7b_de.json +0 -0
- head_score/qwen2_5_7b_en.json +0 -0
- head_score/qwen2_5_7b_zh.json +0 -0
- results/graph/phi_35_mini_inst_de/Phi-3_5-mini-instruct_len_0_depth_0_0_results.json +1 -0
- results/graph/phi_35_mini_inst_de/Phi-3_5-mini-instruct_len_0_depth_0_1_results.json +1 -0
- results/graph/phi_35_mini_inst_de/Phi-3_5-mini-instruct_len_0_depth_0_2_results.json +1 -0
- results/graph/phi_35_mini_inst_de/Phi-3_5-mini-instruct_len_0_depth_10000_1_results.json +1 -0
- results/graph/phi_35_mini_inst_de/Phi-3_5-mini-instruct_len_0_depth_10000_2_results.json +1 -0
- results/graph/phi_35_mini_inst_de/Phi-3_5-mini-instruct_len_0_depth_1100_1_results.json +1 -0
- results/graph/phi_35_mini_inst_de/Phi-3_5-mini-instruct_len_0_depth_1100_2_results.json +1 -0
- results/graph/phi_35_mini_inst_de/Phi-3_5-mini-instruct_len_0_depth_2200_0_results.json +1 -0
- results/graph/phi_35_mini_inst_de/Phi-3_5-mini-instruct_len_0_depth_2200_1_results.json +1 -0
- results/graph/phi_35_mini_inst_de/Phi-3_5-mini-instruct_len_0_depth_2200_2_results.json +1 -0
- results/graph/phi_35_mini_inst_de/Phi-3_5-mini-instruct_len_0_depth_3300_0_results.json +1 -0
- results/graph/phi_35_mini_inst_de/Phi-3_5-mini-instruct_len_0_depth_3300_1_results.json +1 -0
- results/graph/phi_35_mini_inst_de/Phi-3_5-mini-instruct_len_0_depth_4400_1_results.json +1 -0
- results/graph/phi_35_mini_inst_de/Phi-3_5-mini-instruct_len_0_depth_4400_2_results.json +1 -0
- results/graph/phi_35_mini_inst_de/Phi-3_5-mini-instruct_len_0_depth_5600_0_results.json +1 -0
- results/graph/phi_35_mini_inst_de/Phi-3_5-mini-instruct_len_0_depth_6700_0_results.json +1 -0
- results/graph/phi_35_mini_inst_de/Phi-3_5-mini-instruct_len_0_depth_6700_1_results.json +1 -0
- results/graph/phi_35_mini_inst_de/Phi-3_5-mini-instruct_len_0_depth_7800_0_results.json +1 -0
- results/graph/phi_35_mini_inst_de/Phi-3_5-mini-instruct_len_0_depth_7800_1_results.json +1 -0
- results/graph/phi_35_mini_inst_de/Phi-3_5-mini-instruct_len_0_depth_7800_2_results.json +1 -0
- results/graph/phi_35_mini_inst_de/Phi-3_5-mini-instruct_len_0_depth_8900_0_results.json +1 -0
- results/graph/phi_35_mini_inst_de/Phi-3_5-mini-instruct_len_0_depth_8900_1_results.json +1 -0
- results/graph/phi_35_mini_inst_de/Phi-3_5-mini-instruct_len_0_depth_8900_2_results.json +1 -0
- results/graph/phi_35_mini_inst_de/Phi-3_5-mini-instruct_len_10526_depth_0_0_results.json +1 -0
- results/graph/phi_35_mini_inst_de/Phi-3_5-mini-instruct_len_10526_depth_0_2_results.json +1 -0
- results/graph/phi_35_mini_inst_de/Phi-3_5-mini-instruct_len_10526_depth_10000_0_results.json +1 -0
- results/graph/phi_35_mini_inst_de/Phi-3_5-mini-instruct_len_10526_depth_1100_0_results.json +1 -0
- results/graph/phi_35_mini_inst_de/Phi-3_5-mini-instruct_len_10526_depth_1100_1_results.json +1 -0
- results/graph/phi_35_mini_inst_de/Phi-3_5-mini-instruct_len_10526_depth_1100_2_results.json +1 -0
- results/graph/phi_35_mini_inst_de/Phi-3_5-mini-instruct_len_10526_depth_2200_0_results.json +1 -0
- results/graph/phi_35_mini_inst_de/Phi-3_5-mini-instruct_len_10526_depth_3300_0_results.json +1 -0
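The result files listed above follow a needle-in-a-haystack style naming scheme: each name encodes a context length (len_*), an insertion depth (depth_*), and a trial index before _results.json. A minimal sketch for walking them — the parameters are parsed from the file names themselves, while everything about the JSON payload's schema is an assumption this diff does not specify:

import glob
import json
import re

# Hypothetical walker over the per-trial result files listed above; context
# length, depth and trial index come straight from the file names.
pattern = re.compile(r"len_(\d+)_depth_(\d+)_(\d+)_results\.json$")
for path in sorted(glob.glob("results/graph/phi_35_mini_inst_de/*_results.json")):
    match = pattern.search(path)
    if match is None:
        continue
    context_len, depth, trial = map(int, match.groups())
    with open(path) as f:
        payload = json.load(f)  # schema not specified in this diff
    print(context_len, depth, trial, type(payload))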
.gitattributes
CHANGED
@@ -58,3 +58,17 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.mp4 filter=lfs diff=lfs merge=lfs -text
 *.webm filter=lfs diff=lfs merge=lfs -text
 wandb/run-20250630_012253-rmcfmdw0/run-rmcfmdw0.wandb filter=lfs diff=lfs merge=lfs -text
+wandb/run-20250629_052510-8ki2wthd/run-8ki2wthd.wandb filter=lfs diff=lfs merge=lfs -text
+wandb/run-20250630_054143-sm6fwh98/run-sm6fwh98.wandb filter=lfs diff=lfs merge=lfs -text
+wandb/run-20250630_042738-w7zwp6vq/run-w7zwp6vq.wandb filter=lfs diff=lfs merge=lfs -text
+wandb/run-20250630_063031-257lw23l/run-257lw23l.wandb filter=lfs diff=lfs merge=lfs -text
+wandb/run-20250629_023011-6q7xnel3/run-6q7xnel3.wandb filter=lfs diff=lfs merge=lfs -text
+wandb/run-20250630_022607-ggwjeycn/run-ggwjeycn.wandb filter=lfs diff=lfs merge=lfs -text
+wandb/run-20250630_033916-286jxr62/run-286jxr62.wandb filter=lfs diff=lfs merge=lfs -text
+wandb/run-20250629_191844-wuafutdo/run-wuafutdo.wandb filter=lfs diff=lfs merge=lfs -text
+wandb/run-20250630_020151-nyulmltd/run-nyulmltd.wandb filter=lfs diff=lfs merge=lfs -text
+wandb/run-20250629_212740-ktnabxrp/run-ktnabxrp.wandb filter=lfs diff=lfs merge=lfs -text
+wandb/run-20250629_024136-5j5goca4/run-5j5goca4.wandb filter=lfs diff=lfs merge=lfs -text
+wandb/run-20250629_031032-zuhgrlz6/run-zuhgrlz6.wandb filter=lfs diff=lfs merge=lfs -text
+wandb/run-20250629_191838-d28qmf8n/run-d28qmf8n.wandb filter=lfs diff=lfs merge=lfs -text
+wandb/run-20250629_044858-ncmx8mnd/run-ncmx8mnd.wandb filter=lfs diff=lfs merge=lfs -text

faiss_attn/source/__pycache__/modeling_llama.cpython-313.pyc
ADDED
Binary file (79.3 kB).

faiss_attn/source/__pycache__/modeling_mistral.cpython-313.pyc
ADDED
Binary file (68.8 kB).

faiss_attn/source/__pycache__/modeling_mixtral.cpython-313.pyc
ADDED
Binary file (77.4 kB).

faiss_attn/source/__pycache__/modeling_phi3.cpython-313.pyc
ADDED
Binary file (82.4 kB).

faiss_attn/source/__pycache__/modeling_qwen2.cpython-313.pyc
ADDED
Binary file (68.9 kB).

faiss_attn/source/modeling_llama.py
ADDED
@@ -0,0 +1,1601 @@
# coding=utf-8
# Copyright 2022 EleutherAI and the HuggingFace Inc. team. All rights reserved.
#
# This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
# and OPT implementations in this library. It has been modified from its
# original forms to accommodate minor architectural differences compared
# to GPT-NeoX and OPT used by the Meta AI team that trained the model.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" PyTorch LLaMA model."""
import math
import warnings
from typing import List, Optional, Tuple, Union, Any

# from heterogeneous_memory import HeterogeneousMemory

import numpy as np
import torch
import torch.nn.functional as F
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from dataclasses import dataclass

from transformers.activations import ACT2FN
from transformers.cache_utils import Cache, DynamicCache
from transformers.modeling_attn_mask_utils import (
    AttentionMaskConverter,
    _prepare_4d_attention_mask,
    _prepare_4d_causal_attention_mask,
    _prepare_4d_causal_attention_mask_for_sdpa,
)
from transformers.modeling_outputs import ModelOutput, BaseModelOutputWithPast, CausalLMOutputWithPast, SequenceClassifierOutputWithPast

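# NOTE (annotation): the two ModelOutput subclasses below mirror the standard
# model outputs but add an `inspect` field; the modified attention classes later
# in this file use it to surface raw per-head tensors (queries, keys, and the
# pre-o_proj attention output) to the caller for analysis.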
@dataclass
class HeterogeneousMemoryOutput(ModelOutput):
    last_hidden_state: torch.FloatTensor = None
    past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None
    hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None
    attentions: Optional[Tuple[torch.FloatTensor, ...]] = None
    inspect: Optional[Any] = None

@dataclass
class CausalLMOutputWithHeterogeneousMemory(ModelOutput):
    loss: torch.FloatTensor = None
    logits: torch.FloatTensor = None
    past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None
    hidden_states: Optional[Tuple[Tuple[torch.FloatTensor]]] = None
    attentions: Optional[Tuple[Tuple[torch.FloatTensor]]] = None
    inspect: Optional[Any] = None

from transformers.modeling_utils import PreTrainedModel
from transformers.pytorch_utils import ALL_LAYERNORM_LAYERS, is_torch_greater_or_equal_than_1_13
from transformers.utils import (
    add_start_docstrings,
    add_start_docstrings_to_model_forward,
    is_flash_attn_2_available,
    is_flash_attn_greater_or_equal_2_10,
    logging,
    replace_return_docstrings,
)
from transformers.utils.import_utils import is_torch_fx_available
from transformers import LlamaConfig


if is_flash_attn_2_available():
    from flash_attn import flash_attn_func, flash_attn_varlen_func
    from flash_attn.bert_padding import index_first_axis, pad_input, unpad_input  # noqa


# This makes `_prepare_4d_causal_attention_mask` a leaf function in the FX graph.
# It means that the function will not be traced through and simply appear as a node in the graph.
if is_torch_fx_available():
    if not is_torch_greater_or_equal_than_1_13:
        import torch.fx

    _prepare_4d_causal_attention_mask = torch.fx.wrap(_prepare_4d_causal_attention_mask)


logger = logging.get_logger(__name__)

_CONFIG_FOR_DOC = "LlamaConfig"

def _get_unpad_data(attention_mask):
    seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32)
    indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten()
    max_seqlen_in_batch = seqlens_in_batch.max().item()
    cu_seqlens = F.pad(torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.torch.int32), (1, 0))
    return (
        indices,
        cu_seqlens,
        max_seqlen_in_batch,
    )


def _expand_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None):
    warnings.warn(
        "Calling `transformers.models.llama.modeling_llama._prepare_4d_attention_mask` is deprecated and will be removed in v4.37. Use `transformers.modeling_attn_mask_utils._prepare_4d_attention_mask"
    )
    return _prepare_4d_attention_mask(mask=mask, dtype=dtype, tgt_len=tgt_len)


def _make_causal_mask(
    input_ids_shape: torch.Size, dtype: torch.dtype, device: torch.device, past_key_values_length: int = 0
):
    warnings.warn(
        "Calling `transformers.models.llama.modeling_llama._make_causal_mask` is deprecated and will be removed in v4.37. Use `transformers.models.llama.modeling_llama.AttentionMaskConverter._make_causal_mask"
    )
    return AttentionMaskConverter._make_causal_mask(
        input_ids_shape=input_ids_shape, dtype=dtype, device=device, past_key_values_length=past_key_values_length
    )


class LlamaRMSNorm(nn.Module):
    def __init__(self, hidden_size, eps=1e-6):
        """
        LlamaRMSNorm is equivalent to T5LayerNorm
        """
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.variance_epsilon = eps

    def forward(self, hidden_states):
        input_dtype = hidden_states.dtype
        hidden_states = hidden_states.to(torch.float32)
        variance = hidden_states.pow(2).mean(-1, keepdim=True)
        hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
        return self.weight * hidden_states.to(input_dtype)


ALL_LAYERNORM_LAYERS.append(LlamaRMSNorm)


class LlamaRotaryEmbedding(nn.Module):
    def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None):
        super().__init__()

        self.dim = dim
        self.max_position_embeddings = max_position_embeddings
        self.base = base
        inv_freq = 1.0 / (self.base ** (torch.arange(0, self.dim, 2).float().to(device) / self.dim))
        self.register_buffer("inv_freq", inv_freq, persistent=False)

        # Build here to make `torch.jit.trace` work.
        self._set_cos_sin_cache(
            seq_len=max_position_embeddings, device=self.inv_freq.device, dtype=torch.get_default_dtype()
        )

    def _set_cos_sin_cache(self, seq_len, device, dtype):
        self.max_seq_len_cached = seq_len
        t = torch.arange(self.max_seq_len_cached, device=device, dtype=self.inv_freq.dtype)

        freqs = torch.outer(t, self.inv_freq)
        # Different from paper, but it uses a different permutation in order to obtain the same calculation
        emb = torch.cat((freqs, freqs), dim=-1)
        self.register_buffer("cos_cached", emb.cos().to(dtype), persistent=False)
        self.register_buffer("sin_cached", emb.sin().to(dtype), persistent=False)

    def forward(self, x, seq_len=None):
        # x: [bs, num_attention_heads, seq_len, head_size]
        if seq_len > self.max_seq_len_cached:
            self._set_cos_sin_cache(seq_len=seq_len, device=x.device, dtype=x.dtype)

        return (
            self.cos_cached[:seq_len].to(dtype=x.dtype),
            self.sin_cached[:seq_len].to(dtype=x.dtype),
        )


class LlamaLinearScalingRotaryEmbedding(LlamaRotaryEmbedding):
    """LlamaRotaryEmbedding extended with linear scaling. Credits to the Reddit user /u/kaiokendev"""

    def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None, scaling_factor=1.0):
        self.scaling_factor = scaling_factor
        super().__init__(dim, max_position_embeddings, base, device)

    def _set_cos_sin_cache(self, seq_len, device, dtype):
        self.max_seq_len_cached = seq_len
        t = torch.arange(self.max_seq_len_cached, device=device, dtype=self.inv_freq.dtype)
        t = t / self.scaling_factor

        freqs = torch.outer(t, self.inv_freq)
        # Different from paper, but it uses a different permutation in order to obtain the same calculation
        emb = torch.cat((freqs, freqs), dim=-1)
        self.register_buffer("cos_cached", emb.cos().to(dtype), persistent=False)
        self.register_buffer("sin_cached", emb.sin().to(dtype), persistent=False)


class LlamaDynamicNTKScalingRotaryEmbedding(LlamaRotaryEmbedding):
    """LlamaRotaryEmbedding extended with Dynamic NTK scaling. Credits to the Reddit users /u/bloc97 and /u/emozilla"""

    def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None, scaling_factor=1.0):
        self.scaling_factor = scaling_factor
        super().__init__(dim, max_position_embeddings, base, device)

    def _set_cos_sin_cache(self, seq_len, device, dtype):
        self.max_seq_len_cached = seq_len

        if seq_len > self.max_position_embeddings:
            base = self.base * (
                (self.scaling_factor * seq_len / self.max_position_embeddings) - (self.scaling_factor - 1)
            ) ** (self.dim / (self.dim - 2))
            inv_freq = 1.0 / (base ** (torch.arange(0, self.dim, 2).float().to(device) / self.dim))
            self.register_buffer("inv_freq", inv_freq, persistent=False)

        t = torch.arange(self.max_seq_len_cached, device=device, dtype=self.inv_freq.dtype)

        freqs = torch.outer(t, self.inv_freq)
        # Different from paper, but it uses a different permutation in order to obtain the same calculation
        emb = torch.cat((freqs, freqs), dim=-1)
        self.register_buffer("cos_cached", emb.cos().to(dtype), persistent=False)
        self.register_buffer("sin_cached", emb.sin().to(dtype), persistent=False)

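# NOTE (annotation): `LlamaRotaryEmbedding.forward` only rebuilds the cos/sin
# caches when seq_len exceeds the cached length, so the dynamic-NTK variant above
# rescales `inv_freq` as sequences grow but never restores it once shorter
# sequences follow.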

def rotate_half(x):
    """Rotates half the hidden dims of the input."""
    x1 = x[..., : x.shape[-1] // 2]
    x2 = x[..., x.shape[-1] // 2 :]
    return torch.cat((-x2, x1), dim=-1)


def apply_rotary_pos_emb(q, k, cos, sin, position_ids, unsqueeze_dim=1):
    """Applies Rotary Position Embedding to the query and key tensors.

    Args:
        q (`torch.Tensor`): The query tensor.
        k (`torch.Tensor`): The key tensor.
        cos (`torch.Tensor`): The cosine part of the rotary embedding.
        sin (`torch.Tensor`): The sine part of the rotary embedding.
        position_ids (`torch.Tensor`):
            The position indices of the tokens corresponding to the query and key tensors. For example, this can be
            used to pass offsetted position ids when working with a KV-cache.
        unsqueeze_dim (`int`, *optional*, defaults to 1):
            The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
            sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
            that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
            k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
            cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
            the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
    Returns:
        `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
    """

    cos = cos[position_ids].unsqueeze(unsqueeze_dim)
    sin = sin[position_ids].unsqueeze(unsqueeze_dim)
    q_embed = (q * cos) + (rotate_half(q) * sin)
    k_embed = (k * cos) + (rotate_half(k) * sin)
    return q_embed, k_embed

class LlamaMLP(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.config = config
        self.hidden_size = config.hidden_size
        self.intermediate_size = config.intermediate_size
        self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
        self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
        self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=False)
        self.act_fn = ACT2FN[config.hidden_act]

    def forward(self, x):
        if self.config.pretraining_tp > 1:
            slice = self.intermediate_size // self.config.pretraining_tp
            gate_proj_slices = self.gate_proj.weight.split(slice, dim=0)
            up_proj_slices = self.up_proj.weight.split(slice, dim=0)
            down_proj_slices = self.down_proj.weight.split(slice, dim=1)

            gate_proj = torch.cat(
                [F.linear(x, gate_proj_slices[i]) for i in range(self.config.pretraining_tp)], dim=-1
            )
            up_proj = torch.cat([F.linear(x, up_proj_slices[i]) for i in range(self.config.pretraining_tp)], dim=-1)

            intermediate_states = (self.act_fn(gate_proj) * up_proj).split(slice, dim=2)
            down_proj = [
                F.linear(intermediate_states[i], down_proj_slices[i]) for i in range(self.config.pretraining_tp)
            ]
            down_proj = sum(down_proj)
        else:
            down_proj = self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x))

        return down_proj


def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
    """
    This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
    num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
    """
    batch, num_key_value_heads, slen, head_dim = hidden_states.shape
    if n_rep == 1:
        return hidden_states
    hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
    return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)


class LlamaAttention(nn.Module):
    """Multi-headed attention from 'Attention Is All You Need' paper"""

    def __init__(self, config: LlamaConfig, layer_idx: Optional[int] = None):
        super().__init__()
        self.config = config
        self.layer_idx = layer_idx
        if layer_idx is None:
            logger.warning_once(
                f"Instantiating {self.__class__.__name__} without passing `layer_idx` is not recommended and will lead "
                "to errors during the forward call, if caching is used. Please make sure to provide a `layer_idx` "
                "when creating this class."
            )

        self.attention_dropout = config.attention_dropout
        self.hidden_size = config.hidden_size
        self.num_heads = config.num_attention_heads
        self.head_dim = self.hidden_size // self.num_heads
        self.num_key_value_heads = config.num_key_value_heads
        self.num_key_value_groups = self.num_heads // self.num_key_value_heads
        self.max_position_embeddings = config.max_position_embeddings
        self.rope_theta = config.rope_theta
        self.is_causal = True

        if (self.head_dim * self.num_heads) != self.hidden_size:
            raise ValueError(
                f"hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size}"
                f" and `num_heads`: {self.num_heads})."
            )

        self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=config.attention_bias)
        self.k_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=config.attention_bias)
        self.v_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=config.attention_bias)
        self.o_proj = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=config.attention_bias)
        self._init_rope()

    def _init_rope(self):
        if self.config.rope_scaling is None:
            self.rotary_emb = LlamaRotaryEmbedding(
                self.head_dim,
                max_position_embeddings=self.max_position_embeddings,
                base=self.rope_theta,
            )
        else:
            scaling_type = self.config.rope_scaling["type"]
            scaling_factor = self.config.rope_scaling["factor"]
            if scaling_type == "linear":
                self.rotary_emb = LlamaLinearScalingRotaryEmbedding(
                    self.head_dim,
                    max_position_embeddings=self.max_position_embeddings,
                    scaling_factor=scaling_factor,
                    base=self.rope_theta,
                )
            elif scaling_type == "dynamic":
                self.rotary_emb = LlamaDynamicNTKScalingRotaryEmbedding(
                    self.head_dim,
                    max_position_embeddings=self.max_position_embeddings,
                    scaling_factor=scaling_factor,
                    base=self.rope_theta,
                )
            else:
                raise ValueError(f"Unknown RoPE scaling type {scaling_type}")

    def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
        return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_value: Optional[Cache] = None,
        output_attentions: bool = False,
        use_cache: bool = False,
        **kwargs,
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
        if "padding_mask" in kwargs:
            warnings.warn(
                "Passing `padding_mask` is deprecated and will be removed in v4.37. Please make sure use `attention_mask` instead.`"
            )

        bsz, q_len, _ = hidden_states.size()

        if self.config.pretraining_tp > 1:
            key_value_slicing = (self.num_key_value_heads * self.head_dim) // self.config.pretraining_tp
            query_slices = self.q_proj.weight.split(
                (self.num_heads * self.head_dim) // self.config.pretraining_tp, dim=0
            )
            key_slices = self.k_proj.weight.split(key_value_slicing, dim=0)
            value_slices = self.v_proj.weight.split(key_value_slicing, dim=0)

            query_states = [F.linear(hidden_states, query_slices[i]) for i in range(self.config.pretraining_tp)]
            query_states = torch.cat(query_states, dim=-1)

            key_states = [F.linear(hidden_states, key_slices[i]) for i in range(self.config.pretraining_tp)]
            key_states = torch.cat(key_states, dim=-1)

            value_states = [F.linear(hidden_states, value_slices[i]) for i in range(self.config.pretraining_tp)]
            value_states = torch.cat(value_states, dim=-1)

        else:
            query_states = self.q_proj(hidden_states)
            key_states = self.k_proj(hidden_states)
            value_states = self.v_proj(hidden_states)

        query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
        key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
        value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)

        kv_seq_len = key_states.shape[-2]
        if past_key_value is not None:
            if self.layer_idx is None:
                raise ValueError(
                    f"The cache structure has changed since version v4.36. If you are using {self.__class__.__name__} "
                    "for auto-regressive decoding with k/v caching, please make sure to initialize the attention class "
                    "with a layer index."
                )
            kv_seq_len += past_key_value.get_usable_length(kv_seq_len, self.layer_idx)
        cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len)
        query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids)

        if past_key_value is not None:
            cache_kwargs = {"sin": sin, "cos": cos}  # Specific to RoPE models
            key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)

        key_states = repeat_kv(key_states, self.num_key_value_groups)
        value_states = repeat_kv(value_states, self.num_key_value_groups)

        attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim)

        if attn_weights.size() != (bsz, self.num_heads, q_len, kv_seq_len):
            raise ValueError(
                f"Attention weights should be of size {(bsz, self.num_heads, q_len, kv_seq_len)}, but is"
                f" {attn_weights.size()}"
            )

        if attention_mask is not None:
            if attention_mask.size() != (bsz, 1, q_len, kv_seq_len):
                raise ValueError(
                    f"Attention mask should be of size {(bsz, 1, q_len, kv_seq_len)}, but is {attention_mask.size()}"
                )
            attn_weights = attn_weights + attention_mask

        # upcast attention to fp32
        attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype)
        attn_weights = nn.functional.dropout(attn_weights, p=self.attention_dropout, training=self.training)
        attn_output = torch.matmul(attn_weights, value_states)

        if attn_output.size() != (bsz, self.num_heads, q_len, self.head_dim):
            raise ValueError(
                f"`attn_output` should be of size {(bsz, self.num_heads, q_len, self.head_dim)}, but is"
                f" {attn_output.size()}"
            )

        attn_output = attn_output.transpose(1, 2).contiguous()

        attn_output = attn_output.reshape(bsz, q_len, self.hidden_size)

        if self.config.pretraining_tp > 1:
            attn_output = attn_output.split(self.hidden_size // self.config.pretraining_tp, dim=2)
            o_proj_slices = self.o_proj.weight.split(self.hidden_size // self.config.pretraining_tp, dim=1)
            attn_output = sum([F.linear(attn_output[i], o_proj_slices[i]) for i in range(self.config.pretraining_tp)])
        else:
            attn_output = self.o_proj(attn_output)

        if not output_attentions:
            attn_weights = None

        return attn_output, attn_weights, past_key_value


class LlamaFlashAttention2(LlamaAttention):
    """
    Llama flash attention module. This module inherits from `LlamaAttention` as the weights of the module stays
    untouched. The only required change would be on the forward pass where it needs to correctly call the public API of
    flash attention and deal with padding tokens in case the input contains any of them.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        # TODO: Should be removed once Flash Attention for RoCm is bumped to 2.1.
        # flash_attn<2.1 generates top-left aligned causal mask, while what is needed here is bottom-right alignement, that was made default for flash_attn>=2.1. This attribute is used to handle this difference. Reference: https://github.com/Dao-AILab/flash-attention/releases/tag/v2.1.0.
        # Beware that with flash_attn<2.1, using q_seqlen != k_seqlen (except for the case q_seqlen == 1) produces a wrong mask (top-left).
        self._flash_attn_uses_top_left_mask = not is_flash_attn_greater_or_equal_2_10()

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.LongTensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_value: Optional[Any] = None,
        output_attentions: bool = False,
        use_cache: bool = False,
        **kwargs,
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
        # LlamaFlashAttention2 attention does not support output_attentions
        if "padding_mask" in kwargs:
            warnings.warn(
                "Passing `padding_mask` is deprecated and will be removed in v4.37. Please make sure use `attention_mask` instead.`"
            )

            # overwrite attention_mask with padding_mask
            attention_mask = kwargs.pop("padding_mask")
        if output_attentions:
            _, inspect, attn_weights, _ = self.forward_torch(
                hidden_states,
                attention_mask,
                position_ids,
                past_key_value,
                output_attentions,
                use_cache=False,
                **kwargs,
            )
        else:
            attn_weights = None
            inspect = None

        bsz, q_len, _ = hidden_states.size()

        query_states = self.q_proj(hidden_states)
        key_states = self.k_proj(hidden_states)
        value_states = self.v_proj(hidden_states)

        # Flash attention requires the input to have the shape
        # batch_size x seq_length x head_dim x hidden_dim
        # therefore we just need to keep the original shape
        query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
        key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
        value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)

        kv_seq_len = key_states.shape[-2]
        if past_key_value is not None:
            kv_seq_len += past_key_value.get_usable_length(kv_seq_len, self.layer_idx)
        cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len)

        query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids)

        if past_key_value is not None:
            cache_kwargs = {"sin": sin, "cos": cos}  # Specific to RoPE models
            key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)

        # TODO: These transpose are quite inefficient but Flash Attention requires the layout [batch_size, sequence_length, num_heads, head_dim]. We would need to refactor the KV cache
        # to be able to avoid many of these transpose/reshape/view.
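        # NOTE (annotation): flash attention never materializes the attention
        # matrix, so heads listed in `block_list` are suppressed here by zeroing
        # their query vectors; such a head's scores become constant, giving it a
        # near-uniform attention pattern, which approximates ablating the head.
        # The commented-out line below is the eager-attention equivalent.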
        #### mask head in flash attention
        if 'block_list' in kwargs:
            for h in kwargs['block_list']:
                if self.layer_idx == h[0]:
                    query_states[:, h[1], :, :] = 0
                    # attn_weights[:, h[1], :, :] = 0
        query_states = query_states.transpose(1, 2)
        key_states = key_states.transpose(1, 2)
        value_states = value_states.transpose(1, 2)

        dropout_rate = self.attention_dropout if self.training else 0.0

        # In PEFT, usually we cast the layer norms in float32 for training stability reasons
        # therefore the input hidden states gets silently casted in float32. Hence, we need
        # cast them back in the correct dtype just to be sure everything works as expected.
        # This might slowdown training & inference so it is recommended to not cast the LayerNorms
        # in fp32. (LlamaRMSNorm handles it correctly)

        input_dtype = query_states.dtype
        if input_dtype == torch.float32:
            # Handle the case where the model is quantized
            if hasattr(self.config, "_pre_quantization_dtype"):
                target_dtype = self.config._pre_quantization_dtype
            else:
                target_dtype = self.q_proj.weight.dtype

            logger.warning_once(
                f"The input hidden states seems to be silently casted in float32, this might be related to"
                f" the fact you have upcasted embedding or layer norm layers in float32. We will cast back the input in"
                f" {target_dtype}."
            )

            query_states = query_states.to(target_dtype)
            key_states = key_states.to(target_dtype)
            value_states = value_states.to(target_dtype)

        attn_output = self._flash_attention_forward(
            query_states, key_states, value_states, attention_mask, q_len, dropout=dropout_rate
        )

        attn_output = attn_output.reshape(bsz, q_len, self.hidden_size).contiguous()
        attn_output = self.o_proj(attn_output)

        return attn_output, inspect, attn_weights, past_key_value

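    # NOTE (annotation): `forward_torch` re-implements attention in plain PyTorch
    # so that attention weights and intermediate tensors can be captured when
    # `output_attentions=True`, which the flash kernels cannot return. It fills
    # the `inspect` dict (queries, keys, pre-o_proj output), which is why these
    # forward methods return a 4-tuple instead of the upstream 3-tuple.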
    def forward_torch(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_value: Optional[Any] = None,
        output_attentions: bool = False,
        use_cache: bool = False,
        **kwargs,
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
        if "padding_mask" in kwargs:
            warnings.warn(
                "Passing `padding_mask` is deprecated and will be removed in v4.37. Please make sure use `attention_mask` instead.`"
            )

        bsz, q_len, _ = hidden_states.size()
        inspect = {}

        if self.config.pretraining_tp > 1:
            key_value_slicing = (self.num_key_value_heads * self.head_dim) // self.config.pretraining_tp
            query_slices = self.q_proj.weight.split(
                (self.num_heads * self.head_dim) // self.config.pretraining_tp, dim=0
            )
            key_slices = self.k_proj.weight.split(key_value_slicing, dim=0)
            value_slices = self.v_proj.weight.split(key_value_slicing, dim=0)

            query_states = [F.linear(hidden_states, query_slices[i]) for i in range(self.config.pretraining_tp)]
            query_states = torch.cat(query_states, dim=-1)

            key_states = [F.linear(hidden_states, key_slices[i]) for i in range(self.config.pretraining_tp)]
            key_states = torch.cat(key_states, dim=-1)

            value_states = [F.linear(hidden_states, value_slices[i]) for i in range(self.config.pretraining_tp)]
            value_states = torch.cat(value_states, dim=-1)

        else:
            query_states = self.q_proj(hidden_states)
            key_states = self.k_proj(hidden_states)
            value_states = self.v_proj(hidden_states)

        query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
        key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
        value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)

        kv_seq_len = key_states.shape[-2]
        # print(past_key_value)
        if past_key_value is not None:
            if self.layer_idx is None:
                raise ValueError(
                    f"The cache structure has changed since version v4.36. If you are using {self.__class__.__name__} "
                    "for auto-regressive decoding with k/v caching, please make sure to initialize the attention class "
                    "with a layer index."
                )
            kv_seq_len += past_key_value.get_usable_length(kv_seq_len, self.layer_idx)
        cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len)
        query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids)

        if past_key_value is not None:
            if use_cache:
                cache_kwargs = {"sin": sin, "cos": cos}  # Specific to RoPE models
                key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)
            else:
                key_states = torch.cat([past_key_value.key_cache[self.layer_idx], key_states], dim=-2)
                value_states = torch.cat([past_key_value.value_cache[self.layer_idx], value_states], dim=-2)

        key_states = repeat_kv(key_states, self.num_key_value_groups)
        value_states = repeat_kv(value_states, self.num_key_value_groups)

        inspect["query"] = query_states
        inspect["key"] = key_states
        attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim)
        ### write our mask here

        if attn_weights.size() != (bsz, self.num_heads, q_len, kv_seq_len):
            raise ValueError(
                f"Attention weights should be of size {(bsz, self.num_heads, q_len, kv_seq_len)}, but is"
                f" {attn_weights.size()}"
            )

        if attention_mask is not None:
            if attention_mask.size() != (bsz, 1, q_len, kv_seq_len):
                raise ValueError(
                    f"Attention mask should be of size {(bsz, 1, q_len, kv_seq_len)}, but is {attention_mask.size()}"
                )
            attn_weights = attn_weights + attention_mask
        ## masking head in normal attention
        if 'block_list' in kwargs:
            for h in kwargs['block_list']:
                if self.layer_idx == h[0]:
                    attn_weights[:, h[1], :, :] = 0
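        # NOTE (annotation): the zeroing above is applied *after* the additive
        # attention mask, so a blocked head ends up with uniform post-softmax
        # weights over every key position (the causal masking is effectively
        # wiped for that head) rather than being removed outright.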
| 690 |
+
# upcast attention to fp32
|
| 691 |
+
attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype)
|
| 692 |
+
attn_weights = nn.functional.dropout(attn_weights, p=self.attention_dropout, training=self.training)
|
| 693 |
+
attn_output = torch.matmul(attn_weights, value_states)
|
| 694 |
+
inspect["attn_output_before_o_proj"] = attn_output
|
| 695 |
+
|
| 696 |
+
if attn_output.size() != (bsz, self.num_heads, q_len, self.head_dim):
|
| 697 |
+
raise ValueError(
|
| 698 |
+
f"`attn_output` should be of size {(bsz, self.num_heads, q_len, self.head_dim)}, but is"
|
| 699 |
+
f" {attn_output.size()}"
|
| 700 |
+
)
|
| 701 |
+
|
| 702 |
+
attn_output = attn_output.transpose(1, 2).contiguous()
|
| 703 |
+
|
| 704 |
+
attn_output = attn_output.reshape(bsz, q_len, self.hidden_size)
|
| 705 |
+
|
| 706 |
+
if self.config.pretraining_tp > 1:
|
| 707 |
+
attn_output = attn_output.split(self.hidden_size // self.config.pretraining_tp, dim=2)
|
| 708 |
+
o_proj_slices = self.o_proj.weight.split(self.hidden_size // self.config.pretraining_tp, dim=1)
|
| 709 |
+
            attn_output = sum([F.linear(attn_output[i], o_proj_slices[i]) for i in range(self.config.pretraining_tp)])
        else:
            attn_output = self.o_proj(attn_output)

        if not output_attentions:
            attn_weights = None

        return attn_output, inspect, attn_weights, past_key_value

    def _flash_attention_forward(
        self, query_states, key_states, value_states, attention_mask, query_length, dropout=0.0, softmax_scale=None
    ):
        """
        Calls the forward method of Flash Attention - if the input hidden states contain at least one padding token
        first unpad the input, then computes the attention scores and pad the final attention scores.

        Args:
            query_states (`torch.Tensor`):
                Input query states to be passed to Flash Attention API
            key_states (`torch.Tensor`):
                Input key states to be passed to Flash Attention API
            value_states (`torch.Tensor`):
                Input value states to be passed to Flash Attention API
            attention_mask (`torch.Tensor`):
                The padding mask - corresponds to a tensor of size `(batch_size, seq_len)` where 0 stands for the
                position of padding tokens and 1 for the position of non-padding tokens.
            dropout (`float`, *optional*):
                Attention dropout
            softmax_scale (`float`, *optional*):
                The scaling of QK^T before applying softmax. Default to 1 / sqrt(head_dim)
        """
        if not self._flash_attn_uses_top_left_mask:
            causal = self.is_causal
        else:
            # TODO: Remove the `query_length != 1` check once Flash Attention for RoCm is bumped to 2.1. For details, please see the comment in LlamaFlashAttention2 __init__.
            causal = self.is_causal and query_length != 1

        # Contains at least one padding token in the sequence
        if attention_mask is not None:
            batch_size = query_states.shape[0]
            query_states, key_states, value_states, indices_q, cu_seq_lens, max_seq_lens = self._upad_input(
                query_states, key_states, value_states, attention_mask, query_length
            )

            cu_seqlens_q, cu_seqlens_k = cu_seq_lens
            max_seqlen_in_batch_q, max_seqlen_in_batch_k = max_seq_lens

            attn_output_unpad = flash_attn_varlen_func(
                query_states,
                key_states,
                value_states,
                cu_seqlens_q=cu_seqlens_q,
                cu_seqlens_k=cu_seqlens_k,
                max_seqlen_q=max_seqlen_in_batch_q,
                max_seqlen_k=max_seqlen_in_batch_k,
                dropout_p=dropout,
                softmax_scale=softmax_scale,
                causal=causal,
            )

            attn_output = pad_input(attn_output_unpad, indices_q, batch_size, query_length)
        else:
            attn_output = flash_attn_func(
                query_states, key_states, value_states, dropout, softmax_scale=softmax_scale, causal=causal
            )

        return attn_output

    def _upad_input(self, query_layer, key_layer, value_layer, attention_mask, query_length):
        indices_k, cu_seqlens_k, max_seqlen_in_batch_k = _get_unpad_data(attention_mask)
        batch_size, kv_seq_len, num_key_value_heads, head_dim = key_layer.shape

        key_layer = index_first_axis(
            key_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim), indices_k
        )
        value_layer = index_first_axis(
            value_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim), indices_k
        )
        if query_length == kv_seq_len:
            query_layer = index_first_axis(
                query_layer.reshape(batch_size * kv_seq_len, self.num_heads, head_dim), indices_k
            )
            cu_seqlens_q = cu_seqlens_k
            max_seqlen_in_batch_q = max_seqlen_in_batch_k
            indices_q = indices_k
        elif query_length == 1:
            max_seqlen_in_batch_q = 1
            cu_seqlens_q = torch.arange(
                batch_size + 1, dtype=torch.int32, device=query_layer.device
            )  # There is a memcpy here, that is very bad.
            indices_q = cu_seqlens_q[:-1]
            query_layer = query_layer.squeeze(1)
        else:
            # The -q_len: slice assumes left padding.
            attention_mask = attention_mask[:, -query_length:]
            query_layer, indices_q, cu_seqlens_q, max_seqlen_in_batch_q = unpad_input(query_layer, attention_mask)

        return (
            query_layer,
            key_layer,
            value_layer,
            indices_q,
            (cu_seqlens_q, cu_seqlens_k),
            (max_seqlen_in_batch_q, max_seqlen_in_batch_k),
        )

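# Illustrative sketch (not part of the original file): how the unpadding path above turns a
# padded batch into the flat "varlen" layout that `flash_attn_varlen_func` consumes.
# `_demo_unpad_layout` is a hypothetical helper added here only for exposition.
def _demo_unpad_layout():
    mask = torch.tensor([[1, 1, 1, 0], [1, 1, 1, 1]])  # (batch=2, seq=4), one padding token
    seqlens = mask.sum(dim=-1, dtype=torch.int32)  # tensor([3, 4], dtype=torch.int32)
    cu_seqlens = F.pad(torch.cumsum(seqlens, dim=0, dtype=torch.int32), (1, 0))  # tensor([0, 3, 7])
    indices = torch.nonzero(mask.flatten(), as_tuple=False).flatten()  # the 7 kept token positions
    # Flash attention then sees a single flat stream of 7 tokens, with cu_seqlens marking the
    # sequence boundaries, which is exactly what _upad_input assembles above.
    return indices, cu_seqlens, seqlens.max().item()
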
class LlamaSdpaAttention(LlamaAttention):
    """
    Llama attention module using torch.nn.functional.scaled_dot_product_attention. This module inherits from
    `LlamaAttention` as the weights of the module stay untouched. The only changes are on the forward pass to adapt to
    the SDPA API.
    """

    # Adapted from LlamaAttention.forward
    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_value: Optional[Cache] = None,
        output_attentions: bool = False,
        use_cache: bool = False,
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
        if output_attentions:
            # TODO: Improve this warning with e.g. `model.config.attn_implementation = "manual"` once this is implemented.
            logger.warning_once(
                "LlamaModel is using LlamaSdpaAttention, but `torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`. Falling back to the manual attention implementation, "
                'but specifying the manual implementation will be required from Transformers version v5.0.0 onwards. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.'
            )
            return super().forward(
                hidden_states=hidden_states,
                attention_mask=attention_mask,
                position_ids=position_ids,
                past_key_value=past_key_value,
                output_attentions=output_attentions,
                use_cache=use_cache,
            )

        bsz, q_len, _ = hidden_states.size()

        query_states = self.q_proj(hidden_states)
        key_states = self.k_proj(hidden_states)
        value_states = self.v_proj(hidden_states)

        query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
        key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
        value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)

        kv_seq_len = key_states.shape[-2]
        if past_key_value is not None:
            kv_seq_len += past_key_value.get_usable_length(kv_seq_len, self.layer_idx)
        cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len)
        query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids)

        if past_key_value is not None:
            cache_kwargs = {"sin": sin, "cos": cos}  # Specific to RoPE models
            key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)

        key_states = repeat_kv(key_states, self.num_key_value_groups)
        value_states = repeat_kv(value_states, self.num_key_value_groups)

        if attention_mask is not None:
            if attention_mask.size() != (bsz, 1, q_len, kv_seq_len):
                raise ValueError(
                    f"Attention mask should be of size {(bsz, 1, q_len, kv_seq_len)}, but is {attention_mask.size()}"
                )

        attn_output = torch.nn.functional.scaled_dot_product_attention(
            query_states,
            key_states,
            value_states,
            attn_mask=attention_mask,
            dropout_p=self.attention_dropout if self.training else 0.0,
            # The q_len > 1 is necessary to match with AttentionMaskConverter.to_causal_4d that does not create a causal mask in case q_len == 1.
            is_causal=self.is_causal and attention_mask is None and q_len > 1,
        )

        attn_output = attn_output.transpose(1, 2).contiguous()
        attn_output = attn_output.reshape(bsz, q_len, self.hidden_size)

        attn_output = self.o_proj(attn_output)

        return attn_output, None, past_key_value

LLAMA_ATTENTION_CLASSES = {
    "eager": LlamaAttention,
    "flash_attention_2": LlamaFlashAttention2,
    "sdpa": LlamaSdpaAttention,
}

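# Illustrative sketch (not part of the original file): `config._attn_implementation` ("eager",
# "flash_attention_2", or "sdpa") indexes the mapping above; it is set via the
# `attn_implementation=...` argument of `from_pretrained`. `_demo_select_attention_class` is a
# hypothetical helper added only for exposition.
def _demo_select_attention_class(config: LlamaConfig) -> LlamaAttention:
    attn_cls = LLAMA_ATTENTION_CLASSES[config._attn_implementation]
    return attn_cls(config=config, layer_idx=0)  # all three classes share this constructor signature
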
class LlamaDecoderLayer(nn.Module):
    def __init__(self, config: LlamaConfig, layer_idx: int):
        super().__init__()
        self.hidden_size = config.hidden_size

        self.self_attn = LLAMA_ATTENTION_CLASSES[config._attn_implementation](config=config, layer_idx=layer_idx)

        self.mlp = LlamaMLP(config)
        self.input_layernorm = LlamaRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.post_attention_layernorm = LlamaRMSNorm(config.hidden_size, eps=config.rms_norm_eps)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_value: Optional[Any] = None,
        output_attentions: Optional[bool] = False,
        use_cache: Optional[bool] = False,
        attn_mode: str = "flash",
        **kwargs,
    ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
        """
        Args:
            hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
            attention_mask (`torch.FloatTensor`, *optional*):
                attention mask of size `(batch_size, sequence_length)` if flash attention is used or `(batch_size, 1,
                query_sequence_length, key_sequence_length)` if default attention is used.
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
            use_cache (`bool`, *optional*):
                If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
                (see `past_key_values`).
            past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states
        """
        if "padding_mask" in kwargs:
            warnings.warn(
                "Passing `padding_mask` is deprecated and will be removed in v4.37. Please make sure to use `attention_mask` instead."
            )

        residual = hidden_states

        hidden_states = self.input_layernorm(hidden_states)

        # Self Attention
        if attn_mode == "flash":
            hidden_states, inspect, self_attn_weights, present_key_value = self.self_attn(
                hidden_states=hidden_states,
                attention_mask=attention_mask,
                position_ids=position_ids,
                past_key_value=past_key_value,
                output_attentions=output_attentions,
                use_cache=use_cache,
                **kwargs,
            )
        elif attn_mode == "torch":
            hidden_states, inspect, self_attn_weights, present_key_value = self.self_attn.forward_torch(
                hidden_states=hidden_states,
                attention_mask=attention_mask,
                position_ids=position_ids,
                past_key_value=past_key_value,
                output_attentions=output_attentions,
                use_cache=use_cache,
                **kwargs,
            )
        else:
            raise ValueError(f"attention mode {attn_mode!r} is invalid")
        hidden_states = residual + hidden_states

        # Fully Connected
        residual = hidden_states
        hidden_states = self.post_attention_layernorm(hidden_states)
        hidden_states = self.mlp(hidden_states)
        hidden_states = residual + hidden_states

        outputs = (hidden_states,)

        if output_attentions:
            outputs += (self_attn_weights,)
            outputs += (inspect,)

        if use_cache:
            outputs += (present_key_value,)

        return outputs

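# Illustrative sketch (not part of the original file): the tuple returned by
# LlamaDecoderLayer.forward grows with the flags, so downstream code indexes it positionally.
# `_demo_unpack_layer_outputs` is a hypothetical helper added only for exposition; it mirrors
# the indexing LlamaModel.forward uses below.
def _demo_unpack_layer_outputs(layer_outputs, output_attentions: bool, use_cache: bool):
    hidden_states = layer_outputs[0]
    attn_weights = layer_outputs[1] if output_attentions else None
    inspect_info = layer_outputs[2] if output_attentions else None
    # the cache sits at index 3 when attentions (and inspect) are present, else at index 1
    present_key_value = layer_outputs[3 if output_attentions else 1] if use_cache else None
    return hidden_states, attn_weights, inspect_info, present_key_value
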
LLAMA_START_DOCSTRING = r"""
|
| 993 |
+
This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
|
| 994 |
+
library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
|
| 995 |
+
etc.)
|
| 996 |
+
|
| 997 |
+
This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
|
| 998 |
+
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
|
| 999 |
+
and behavior.
|
| 1000 |
+
|
| 1001 |
+
Parameters:
|
| 1002 |
+
config ([`LlamaConfig`]):
|
| 1003 |
+
Model configuration class with all the parameters of the model. Initializing with a config file does not
|
| 1004 |
+
load the weights associated with the model, only the configuration. Check out the
|
| 1005 |
+
[`~PreTrainedModel.from_pretrained`] method to load the model weights.
|
| 1006 |
+
"""
|
| 1007 |
+
|
| 1008 |
+
|
| 1009 |
+
@add_start_docstrings(
|
| 1010 |
+
"The bare LLaMA Model outputting raw hidden-states without any specific head on top.",
|
| 1011 |
+
LLAMA_START_DOCSTRING,
|
| 1012 |
+
)
|
| 1013 |
+
class LlamaPreTrainedModel(PreTrainedModel):
|
| 1014 |
+
config_class = LlamaConfig
|
| 1015 |
+
base_model_prefix = "model"
|
| 1016 |
+
supports_gradient_checkpointing = True
|
| 1017 |
+
_no_split_modules = ["LlamaDecoderLayer"]
|
| 1018 |
+
_skip_keys_device_placement = "past_key_values"
|
| 1019 |
+
_supports_flash_attn_2 = True
|
| 1020 |
+
_supports_sdpa = True
|
| 1021 |
+
_supports_cache_class = True
|
| 1022 |
+
|
| 1023 |
+
def _init_weights(self, module):
|
| 1024 |
+
std = self.config.initializer_range
|
| 1025 |
+
if isinstance(module, nn.Linear):
|
| 1026 |
+
module.weight.data.normal_(mean=0.0, std=std)
|
| 1027 |
+
if module.bias is not None:
|
| 1028 |
+
module.bias.data.zero_()
|
| 1029 |
+
elif isinstance(module, nn.Embedding):
|
| 1030 |
+
module.weight.data.normal_(mean=0.0, std=std)
|
| 1031 |
+
if module.padding_idx is not None:
|
| 1032 |
+
module.weight.data[module.padding_idx].zero_()
|
| 1033 |
+
|
| 1034 |
+
|
| 1035 |
+
LLAMA_INPUTS_DOCSTRING = r"""
|
| 1036 |
+
Args:
|
| 1037 |
+
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
|
| 1038 |
+
Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
|
| 1039 |
+
it.
|
| 1040 |
+
|
| 1041 |
+
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
|
| 1042 |
+
[`PreTrainedTokenizer.__call__`] for details.
|
| 1043 |
+
|
| 1044 |
+
[What are input IDs?](../glossary#input-ids)
|
| 1045 |
+
attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
|
| 1046 |
+
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
|
| 1047 |
+
|
| 1048 |
+
- 1 for tokens that are **not masked**,
|
| 1049 |
+
- 0 for tokens that are **masked**.
|
| 1050 |
+
|
| 1051 |
+
[What are attention masks?](../glossary#attention-mask)
|
| 1052 |
+
|
| 1053 |
+
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
|
| 1054 |
+
[`PreTrainedTokenizer.__call__`] for details.
|
| 1055 |
+
|
| 1056 |
+
If `past_key_values` is used, optionally only the last `input_ids` have to be input (see
|
| 1057 |
+
`past_key_values`).
|
| 1058 |
+
|
| 1059 |
+
If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`]
|
| 1060 |
+
and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more
|
| 1061 |
+
information on the default strategy.
|
| 1062 |
+
|
| 1063 |
+
- 1 indicates the head is **not masked**,
|
| 1064 |
+
- 0 indicates the head is **masked**.
|
| 1065 |
+
position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
|
| 1066 |
+
Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
|
| 1067 |
+
config.n_positions - 1]`.
|
| 1068 |
+
|
| 1069 |
+
[What are position IDs?](../glossary#position-ids)
|
| 1070 |
+
past_key_values (`Cache` or `tuple(tuple(torch.FloatTensor))`, *optional*):
|
| 1071 |
+
Pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
|
| 1072 |
+
blocks) that can be used to speed up sequential decoding. This typically consists in the `past_key_values`
|
| 1073 |
+
returned by the model at a previous stage of decoding, when `use_cache=True` or `config.use_cache=True`.
|
| 1074 |
+
|
| 1075 |
+
Two formats are allowed:
|
| 1076 |
+
- a [`~cache_utils.Cache`] instance;
|
| 1077 |
+
- Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of
|
| 1078 |
+
shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`). This is also known as the legacy
|
| 1079 |
+
cache format.
|
| 1080 |
+
|
| 1081 |
+
The model will output the same cache format that is fed as input. If no `past_key_values` are passed, the
|
| 1082 |
+
legacy cache format will be returned.
|
| 1083 |
+
|
| 1084 |
+
If `past_key_values` are used, the user can optionally input only the last `input_ids` (those that don't
|
| 1085 |
+
have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `input_ids`
|
| 1086 |
+
of shape `(batch_size, sequence_length)`.
|
| 1087 |
+
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
|
| 1088 |
+
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
|
| 1089 |
+
is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
|
| 1090 |
+
model's internal embedding lookup matrix.
|
| 1091 |
+
use_cache (`bool`, *optional*):
|
| 1092 |
+
If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
|
| 1093 |
+
`past_key_values`).
|
| 1094 |
+
output_attentions (`bool`, *optional*):
|
| 1095 |
+
Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
|
| 1096 |
+
tensors for more detail.
|
| 1097 |
+
output_hidden_states (`bool`, *optional*):
|
| 1098 |
+
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
|
| 1099 |
+
more detail.
|
| 1100 |
+
return_dict (`bool`, *optional*):
|
| 1101 |
+
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
|
| 1102 |
+
"""
|
| 1103 |
+
|
| 1104 |
+
|
| 1105 |
+
@add_start_docstrings(
    "The bare LLaMA Model outputting raw hidden-states without any specific head on top.",
    LLAMA_START_DOCSTRING,
)
class LlamaModel(LlamaPreTrainedModel):
    """
    Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`LlamaDecoderLayer`]

    Args:
        config: LlamaConfig
    """

    def __init__(self, config: LlamaConfig):
        super().__init__(config)
        self.padding_idx = config.pad_token_id
        self.vocab_size = config.vocab_size

        self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
        self.layers = nn.ModuleList(
            [LlamaDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
        )
        self._use_sdpa = config._attn_implementation == "sdpa"
        self._use_flash_attention_2 = config._attn_implementation == "flash_attention_2"
        self.norm = LlamaRMSNorm(config.hidden_size, eps=config.rms_norm_eps)

        self.gradient_checkpointing = False
        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.embed_tokens

    def set_input_embeddings(self, value):
        self.embed_tokens = value

    @add_start_docstrings_to_model_forward(LLAMA_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids: torch.LongTensor = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Any] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        use_cache: Optional[bool] = None,
        attn_mode: str = "flash",
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        block_list: Optional[list] = None,
    ) -> Union[Tuple, HeterogeneousMemoryOutput]:
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        use_cache = use_cache if use_cache is not None else self.config.use_cache

        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        # retrieve input_ids and inputs_embeds
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            batch_size, seq_length = input_ids.shape[:2]
        elif inputs_embeds is not None:
            batch_size, seq_length = inputs_embeds.shape[:2]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        past_key_values_length = 0
        if use_cache:
            if attn_mode == "heterogeneous":
                past_key_values_length = past_key_values.full_memory_length
            else:
                use_legacy_cache = not isinstance(past_key_values, Cache)
                if use_legacy_cache:
                    past_key_values = DynamicCache.from_legacy_cache(past_key_values)
                past_key_values_length = past_key_values.get_usable_length(seq_length)

        if position_ids is None:
            device = input_ids.device if input_ids is not None else inputs_embeds.device
            position_ids = torch.arange(
                past_key_values_length, seq_length + past_key_values_length, dtype=torch.long, device=device
            )
            position_ids = position_ids.unsqueeze(0)

        if inputs_embeds is None:
            inputs_embeds = self.embed_tokens(input_ids)

        if self._use_flash_attention_2:
            # 2d mask is passed through the layers
            attention_mask = attention_mask if (attention_mask is not None and 0 in attention_mask) else None
        elif self._use_sdpa and not output_attentions:
            # output_attentions=True can not be supported when using SDPA, and we fall back on
            # the manual implementation that requires a 4D causal mask in all cases.
            attention_mask = _prepare_4d_causal_attention_mask_for_sdpa(
                attention_mask,
                (batch_size, seq_length),
                inputs_embeds,
                past_key_values_length,
            )
        else:
            # 4d mask is passed through the layers
            attention_mask = _prepare_4d_causal_attention_mask(
                attention_mask, (batch_size, seq_length), inputs_embeds, past_key_values_length
            )

        # embed positions
        hidden_states = inputs_embeds

        if self.gradient_checkpointing and self.training:
            if use_cache:
                logger.warning_once(
                    "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
                )
                use_cache = False

        # decoder layers
        all_hidden_states = () if output_hidden_states else None
        all_self_attns = () if output_attentions else None
        all_inspect = () if output_attentions else None
        next_decoder_cache = None
        if block_list:
            kwargs = {"block_list": block_list}
        else:
            kwargs = {}

        for decoder_layer in self.layers:
            if output_hidden_states:
                all_hidden_states += (hidden_states,)

            if self.gradient_checkpointing and self.training:
                layer_outputs = self._gradient_checkpointing_func(
                    decoder_layer.__call__,
                    hidden_states,
                    attention_mask,
                    position_ids,
                    past_key_values,
                    output_attentions,
                    use_cache,
                )
            else:
                layer_outputs = decoder_layer(
                    hidden_states,
                    attention_mask=attention_mask,
                    position_ids=position_ids,
                    past_key_value=past_key_values,
                    output_attentions=output_attentions,
                    use_cache=use_cache,
                    attn_mode=attn_mode,
                    **kwargs,
                )

            hidden_states = layer_outputs[0]

            if use_cache:
                next_decoder_cache = layer_outputs[3 if output_attentions else 1]

            if output_attentions:
                all_self_attns += (layer_outputs[1],)
                all_inspect += (layer_outputs[2],)

        hidden_states = self.norm(hidden_states)

        # add hidden states from the last decoder layer
        if output_hidden_states:
            all_hidden_states += (hidden_states,)

        next_cache = None
        if use_cache:
            # next_cache = next_decoder_cache.to_legacy_cache() if use_legacy_cache else next_decoder_cache
            next_cache = next_decoder_cache
        if not return_dict:
            return tuple(v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns] if v is not None)
        return HeterogeneousMemoryOutput(
            last_hidden_state=hidden_states,
            past_key_values=next_cache,
            hidden_states=all_hidden_states,
            attentions=all_self_attns,
            inspect=all_inspect,
        )

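# Illustrative sketch (not part of the original file): when a cache is used, LlamaModel.forward
# above builds position_ids starting at the cached length, so new tokens continue the RoPE
# positions of the prefix. A hypothetical decode step with 5 cached tokens and 2 new ones:
def _demo_position_ids_for_decode():
    past_key_values_length, seq_length = 5, 2
    position_ids = torch.arange(past_key_values_length, seq_length + past_key_values_length, dtype=torch.long)
    return position_ids.unsqueeze(0)  # tensor([[5, 6]]), shape (1, seq_length)
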
class LlamaForCausalLM(LlamaPreTrainedModel):
    _tied_weights_keys = ["lm_head.weight"]

    def __init__(self, config):
        super().__init__(config)
        self.model = LlamaModel(config)
        self.vocab_size = config.vocab_size
        self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.model.embed_tokens

    def set_input_embeddings(self, value):
        self.model.embed_tokens = value

    def get_output_embeddings(self):
        return self.lm_head

    def set_output_embeddings(self, new_embeddings):
        self.lm_head = new_embeddings

    def set_decoder(self, decoder):
        self.model = decoder

    def get_decoder(self):
        return self.model

    @add_start_docstrings_to_model_forward(LLAMA_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        input_ids: torch.LongTensor = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Any] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        attn_mode: str = "flash",
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        block_list: Optional[list] = None,
    ) -> Union[Tuple, CausalLMOutputWithPast]:
        r"""
        Args:
            labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
                Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
                config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
                (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.

        Returns:

        Example:

        ```python
        >>> from transformers import AutoTokenizer, LlamaForCausalLM

        >>> model = LlamaForCausalLM.from_pretrained(PATH_TO_CONVERTED_WEIGHTS)
        >>> tokenizer = AutoTokenizer.from_pretrained(PATH_TO_CONVERTED_TOKENIZER)

        >>> prompt = "Hey, are you conscious? Can you talk to me?"
        >>> inputs = tokenizer(prompt, return_tensors="pt")

        >>> # Generate
        >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
        >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
        "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
        ```"""

        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
        outputs = self.model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            attn_mode=attn_mode,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            block_list=block_list,
        )

        hidden_states = outputs[0]
        if self.config.pretraining_tp > 1:
            lm_head_slices = self.lm_head.weight.split(self.vocab_size // self.config.pretraining_tp, dim=0)
            logits = [F.linear(hidden_states, lm_head_slices[i]) for i in range(self.config.pretraining_tp)]
            logits = torch.cat(logits, dim=-1)
        else:
            logits = self.lm_head(hidden_states)
        logits = logits.float()

        loss = None
        if labels is not None:
            # Shift so that tokens < n predict n
            shift_logits = logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            # Flatten the tokens
            loss_fct = CrossEntropyLoss()
            shift_logits = shift_logits.view(-1, self.config.vocab_size)
            shift_labels = shift_labels.view(-1)
            # Enable model parallelism
            shift_labels = shift_labels.to(shift_logits.device)
            loss = loss_fct(shift_logits, shift_labels)

        if not return_dict:
            output = (logits,) + outputs[1:]
            return (loss,) + output if loss is not None else output

        return CausalLMOutputWithHeterogeneousMemory(
            loss=loss,
            logits=logits,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
            inspect=outputs.inspect,
        )

    def prepare_inputs_for_generation(
        self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, **kwargs
    ):
        if past_key_values is not None:
            if isinstance(past_key_values, Cache):
                cache_length = past_key_values.get_seq_length()
                past_length = past_key_values.seen_tokens
                max_cache_length = past_key_values.get_max_length()
            else:
                cache_length = past_length = past_key_values[0][0].shape[2]
                max_cache_length = None

            # Keep only the unprocessed tokens:
            # 1 - If the length of the attention_mask exceeds the length of input_ids, then we are in a setting where
            # some of the inputs are exclusively passed as part of the cache (e.g. when passing input_embeds as
            # input)
            if attention_mask is not None and attention_mask.shape[1] > input_ids.shape[1]:
                input_ids = input_ids[:, -(attention_mask.shape[1] - past_length) :]
            # 2 - If the past_length is smaller than input_ids', then input_ids holds all input tokens. We can discard
            # input_ids based on the past_length.
            elif past_length < input_ids.shape[1]:
                input_ids = input_ids[:, past_length:]
            # 3 - Otherwise (past_length >= input_ids.shape[1]), let's assume input_ids only has unprocessed tokens.

            # If we are about to go beyond the maximum cache length, we need to crop the input attention mask.
            if (
                max_cache_length is not None
                and attention_mask is not None
                and cache_length + input_ids.shape[1] > max_cache_length
            ):
                attention_mask = attention_mask[:, -max_cache_length:]

        position_ids = kwargs.get("position_ids", None)
        if attention_mask is not None and position_ids is None:
            # create position_ids on the fly for batch generation
            position_ids = attention_mask.long().cumsum(-1) - 1
            position_ids.masked_fill_(attention_mask == 0, 1)
            if past_key_values:
                position_ids = position_ids[:, -input_ids.shape[1] :]

        # if `inputs_embeds` are passed, we only want to use them in the 1st generation step
        if inputs_embeds is not None and past_key_values is None:
            model_inputs = {"inputs_embeds": inputs_embeds}
        else:
            model_inputs = {"input_ids": input_ids}

        model_inputs.update(
            {
                "position_ids": position_ids,
                "past_key_values": past_key_values,
                "use_cache": kwargs.get("use_cache"),
                "attention_mask": attention_mask,
            }
        )
        return model_inputs

    @staticmethod
    def _reorder_cache(past_key_values, beam_idx):
        reordered_past = ()
        for layer_past in past_key_values:
            reordered_past += (
                tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past),
            )
        return reordered_past

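# Illustrative sketch (not part of the original file): the label shifting in the causal LM loss
# above aligns the logits at position t with the token at position t + 1. A hypothetical tiny
# example with vocab_size=4 and a single sequence of length 3:
def _demo_shifted_lm_loss():
    logits = torch.randn(1, 3, 4)  # (batch, seq_len, vocab_size)
    labels = torch.tensor([[2, 0, 3]])
    shift_logits = logits[..., :-1, :].contiguous()  # predictions at positions 0..1 ...
    shift_labels = labels[..., 1:].contiguous()  # ... are scored against tokens 1..2
    return CrossEntropyLoss()(shift_logits.view(-1, 4), shift_labels.view(-1))
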
@add_start_docstrings(
    """
    The LLaMa Model transformer with a sequence classification head on top (linear layer).

    [`LlamaForSequenceClassification`] uses the last token in order to do the classification, as other causal models
    (e.g. GPT-2) do.

    Since it does classification on the last token, it requires to know the position of the last token. If a
    `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If
    no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the
    padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in
    each row of the batch).
    """,
    LLAMA_START_DOCSTRING,
)
class LlamaForSequenceClassification(LlamaPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.model = LlamaModel(config)
        self.score = nn.Linear(config.hidden_size, self.num_labels, bias=False)

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.model.embed_tokens

    def set_input_embeddings(self, value):
        self.model.embed_tokens = value

    @add_start_docstrings_to_model_forward(LLAMA_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids: torch.LongTensor = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[List[torch.FloatTensor]] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, SequenceClassifierOutputWithPast]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        transformer_outputs = self.model(
            input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        hidden_states = transformer_outputs[0]
        logits = self.score(hidden_states)

        if input_ids is not None:
            batch_size = input_ids.shape[0]
        else:
            batch_size = inputs_embeds.shape[0]

        if self.config.pad_token_id is None and batch_size != 1:
            raise ValueError("Cannot handle batch sizes > 1 if no padding token is defined.")
        if self.config.pad_token_id is None:
            sequence_lengths = -1
        else:
            if input_ids is not None:
                sequence_lengths = (torch.eq(input_ids, self.config.pad_token_id).int().argmax(-1) - 1).to(
                    logits.device
                )
            else:
                sequence_lengths = -1

        pooled_logits = logits[torch.arange(batch_size, device=logits.device), sequence_lengths]

        loss = None
        if labels is not None:
            labels = labels.to(logits.device)
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(pooled_logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(pooled_logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(pooled_logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(pooled_logits, labels)
        if not return_dict:
            output = (pooled_logits,) + transformer_outputs[1:]
            return ((loss,) + output) if loss is not None else output

        return SequenceClassifierOutputWithPast(
            loss=loss,
            logits=pooled_logits,
            past_key_values=transformer_outputs.past_key_values,
            hidden_states=transformer_outputs.hidden_states,
            attentions=transformer_outputs.attentions,
        )
faiss_attn/source/modeling_mistral.py
ADDED
@@ -0,0 +1,1535 @@
# coding=utf-8
# Copyright 2023 Mistral AI and the HuggingFace Inc. team. All rights reserved.
#
# This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
# and OPT implementations in this library. It has been modified from its
# original forms to accommodate minor architectural differences compared
# to GPT-NeoX and OPT used by the Meta AI team that trained the model.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch Mistral model."""
import inspect
import math
import warnings
from typing import List, Optional, Tuple, Union, Any

import torch
import torch.nn.functional as F
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss

from transformers.activations import ACT2FN
from transformers.cache_utils import Cache, DynamicCache
from transformers.modeling_attn_mask_utils import (
    AttentionMaskConverter,
    _prepare_4d_attention_mask,
    _prepare_4d_causal_attention_mask,
    _prepare_4d_causal_attention_mask_for_sdpa,
)
from transformers.modeling_outputs import ModelOutput, BaseModelOutputWithPast, CausalLMOutputWithPast, SequenceClassifierOutputWithPast

from transformers import MistralConfig
from transformers.modeling_utils import PreTrainedModel
from transformers.pytorch_utils import ALL_LAYERNORM_LAYERS, is_torch_greater_or_equal_than_1_13
from transformers.utils import (
    add_start_docstrings,
    add_start_docstrings_to_model_forward,
    is_flash_attn_2_available,
    is_flash_attn_greater_or_equal_2_10,
    logging,
    replace_return_docstrings,
)
from transformers.utils.import_utils import is_torch_fx_available


if is_flash_attn_2_available():
    from flash_attn import flash_attn_func, flash_attn_varlen_func
    from flash_attn.bert_padding import index_first_axis, pad_input, unpad_input  # noqa

    _flash_supports_window_size = "window_size" in list(inspect.signature(flash_attn_func).parameters)


logger = logging.get_logger(__name__)

_CONFIG_FOR_DOC = "MistralConfig"


# Copied from transformers.models.llama.modeling_llama._get_unpad_data
def _get_unpad_data(attention_mask):
    seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32)
    indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten()
    max_seqlen_in_batch = seqlens_in_batch.max().item()
    cu_seqlens = F.pad(torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.int32), (1, 0))
    return (
        indices,
        cu_seqlens,
        max_seqlen_in_batch,
    )


# Copied from transformers.models.llama.modeling_llama.LlamaRMSNorm with Llama->Mistral
class MistralRMSNorm(nn.Module):
    def __init__(self, hidden_size, eps=1e-6):
        """
        MistralRMSNorm is equivalent to T5LayerNorm
        """
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.variance_epsilon = eps

    def forward(self, hidden_states):
        input_dtype = hidden_states.dtype
        hidden_states = hidden_states.to(torch.float32)
        variance = hidden_states.pow(2).mean(-1, keepdim=True)
        hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
        return self.weight * hidden_states.to(input_dtype)

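# Illustrative sketch (not part of the original file): MistralRMSNorm rescales by the
# root-mean-square of the features (no mean subtraction, unlike LayerNorm). A hypothetical
# check that the module matches the formula weight * x / sqrt(mean(x^2) + eps):
def _demo_rmsnorm_matches_formula():
    x = torch.randn(2, 5, 8)
    norm = MistralRMSNorm(8, eps=1e-6)
    manual = norm.weight * (x / torch.sqrt(x.pow(2).mean(-1, keepdim=True) + 1e-6))
    return torch.allclose(norm(x), manual, atol=1e-5)  # True
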
# Copied from transformers.models.llama.modeling_llama.LlamaRotaryEmbedding with Llama->Mistral
class MistralRotaryEmbedding(nn.Module):
    def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None):
        super().__init__()

        self.dim = dim
        self.max_position_embeddings = max_position_embeddings
        self.base = base
        inv_freq = 1.0 / (self.base ** (torch.arange(0, self.dim, 2).float().to(device) / self.dim))
        self.register_buffer("inv_freq", inv_freq, persistent=False)

        # Build here to make `torch.jit.trace` work.
        self._set_cos_sin_cache(
            seq_len=max_position_embeddings, device=self.inv_freq.device, dtype=torch.get_default_dtype()
        )

    def _set_cos_sin_cache(self, seq_len, device, dtype):
        self.max_seq_len_cached = seq_len
        t = torch.arange(self.max_seq_len_cached, device=device, dtype=self.inv_freq.dtype)

        freqs = torch.outer(t, self.inv_freq)
        # Different from paper, but it uses a different permutation in order to obtain the same calculation
        emb = torch.cat((freqs, freqs), dim=-1)
        self.register_buffer("cos_cached", emb.cos().to(dtype), persistent=False)
        self.register_buffer("sin_cached", emb.sin().to(dtype), persistent=False)

    def forward(self, x, seq_len=None):
        # x: [bs, num_attention_heads, seq_len, head_size]
        if seq_len > self.max_seq_len_cached:
            self._set_cos_sin_cache(seq_len=seq_len, device=x.device, dtype=x.dtype)

        return (
            self.cos_cached[:seq_len].to(dtype=x.dtype),
            self.sin_cached[:seq_len].to(dtype=x.dtype),
        )


# Copied from transformers.models.llama.modeling_llama.rotate_half
def rotate_half(x):
    """Rotates half the hidden dims of the input."""
    x1 = x[..., : x.shape[-1] // 2]
    x2 = x[..., x.shape[-1] // 2 :]
    return torch.cat((-x2, x1), dim=-1)


# Copied from transformers.models.llama.modeling_llama.apply_rotary_pos_emb
def apply_rotary_pos_emb(q, k, cos, sin, position_ids, unsqueeze_dim=1):
    """Applies Rotary Position Embedding to the query and key tensors.

    Args:
        q (`torch.Tensor`): The query tensor.
        k (`torch.Tensor`): The key tensor.
        cos (`torch.Tensor`): The cosine part of the rotary embedding.
        sin (`torch.Tensor`): The sine part of the rotary embedding.
        position_ids (`torch.Tensor`):
            The position indices of the tokens corresponding to the query and key tensors. For example, this can be
            used to pass offsetted position ids when working with a KV-cache.
        unsqueeze_dim (`int`, *optional*, defaults to 1):
            The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
            sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
            that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
            k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
            cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
            the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
    Returns:
        `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
    """
    cos = cos[position_ids].unsqueeze(unsqueeze_dim)
    sin = sin[position_ids].unsqueeze(unsqueeze_dim)
    q_embed = (q * cos) + (rotate_half(q) * sin)
    k_embed = (k * cos) + (rotate_half(k) * sin)
    return q_embed, k_embed



class MistralMLP(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.config = config
        self.hidden_size = config.hidden_size
        self.intermediate_size = config.intermediate_size
        self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
        self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
        self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=False)
        self.act_fn = ACT2FN[config.hidden_act]

    def forward(self, x):
        return self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x))


# Copied from transformers.models.llama.modeling_llama.repeat_kv
def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
    """
    This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
    num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
    """
    batch, num_key_value_heads, slen, head_dim = hidden_states.shape
    if n_rep == 1:
        return hidden_states
    hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
    return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)
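
# Shape sketch for repeat_kv (illustrative sizes, not taken from any config): with 8 KV heads
# and 32 query heads, a (batch, 8, seq_len, head_dim) key tensor expanded with n_rep=4
# becomes (batch, 32, seq_len, head_dim), so each KV head is shared by 4 query heads.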


class MistralAttention(nn.Module):
    """
    Multi-headed attention from the 'Attention Is All You Need' paper. Modified to use sliding window attention, as in
    Longformer and "Generating Long Sequences with Sparse Transformers".
    """

    def __init__(self, config: MistralConfig, layer_idx: Optional[int] = None):
        super().__init__()
        self.config = config
        self.layer_idx = layer_idx
        if layer_idx is None:
            logger.warning_once(
                f"Instantiating {self.__class__.__name__} without passing a `layer_idx` is not recommended and will "
                "lead to errors during the forward call if caching is used. Please make sure to provide a `layer_idx` "
                "when creating this class."
            )

        self.hidden_size = config.hidden_size
        self.num_heads = config.num_attention_heads
        self.head_dim = self.hidden_size // self.num_heads
        self.num_key_value_heads = config.num_key_value_heads
        self.num_key_value_groups = self.num_heads // self.num_key_value_heads
        self.max_position_embeddings = config.max_position_embeddings
        self.rope_theta = config.rope_theta
        self.is_causal = True
        self.attention_dropout = config.attention_dropout

        if (self.head_dim * self.num_heads) != self.hidden_size:
            raise ValueError(
                f"hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size}"
                f" and `num_heads`: {self.num_heads})."
            )
        self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=False)
        self.k_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False)
        self.v_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False)
        self.o_proj = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=False)

        self.rotary_emb = MistralRotaryEmbedding(
            self.head_dim,
            max_position_embeddings=self.max_position_embeddings,
            base=self.rope_theta,
        )

    def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
        return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()
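
    # Note (inferred from the code below): `forward_torch` is a hand-rolled eager-attention path
    # added alongside the stock `forward`. It materializes the full score matrix so the caller can
    # (a) read back queries, keys, and pre-projection outputs through the returned `inspect` dict,
    # and (b) ablate specific heads via a `block_list` of (layer_idx, head_idx) pairs.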
    def forward_torch(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_value: Optional[Any] = None,
        output_attentions: bool = False,
        use_cache: bool = False,
        **kwargs,
    ) -> Tuple[torch.Tensor, dict, Optional[torch.Tensor], Optional[Any]]:
        if "padding_mask" in kwargs:
            warnings.warn(
                "Passing `padding_mask` is deprecated and will be removed in v4.37. Please make sure to use `attention_mask` instead."
            )

        bsz, q_len, _ = hidden_states.size()
        inspect = {}

        query_states = self.q_proj(hidden_states)
        key_states = self.k_proj(hidden_states)
        value_states = self.v_proj(hidden_states)

        query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
        key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
        value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)

        kv_seq_len = key_states.shape[-2]
        if past_key_value is not None:
            if self.layer_idx is None:
                raise ValueError(
                    f"The cache structure has changed since version v4.36. If you are using {self.__class__.__name__} "
                    "for auto-regressive decoding with k/v caching, please make sure to initialize the attention class "
                    "with a layer index."
                )
            kv_seq_len += past_key_value.get_usable_length(kv_seq_len, self.layer_idx)
        cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len)
        query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids)

        if past_key_value is not None:
            if use_cache:
                cache_kwargs = {"sin": sin, "cos": cos}  # Specific to RoPE models
                key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)
            else:
                key_states = torch.cat([past_key_value.key_cache[self.layer_idx], key_states], dim=-2)
                value_states = torch.cat([past_key_value.value_cache[self.layer_idx], value_states], dim=-2)

        key_states = repeat_kv(key_states, self.num_key_value_groups)
        value_states = repeat_kv(value_states, self.num_key_value_groups)

        inspect["query"] = query_states
        inspect["key"] = key_states
        attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim)
        # attn_weights has shape [batch_size, num_heads, q_len, kv_seq_len]

        if attn_weights.size() != (bsz, self.num_heads, q_len, kv_seq_len):
            raise ValueError(
                f"Attention weights should be of size {(bsz, self.num_heads, q_len, kv_seq_len)}, but is"
                f" {attn_weights.size()}"
            )

        if attention_mask is not None:
            if attention_mask.size() != (bsz, 1, q_len, kv_seq_len):
                raise ValueError(
                    f"Attention mask should be of size {(bsz, 1, q_len, kv_seq_len)}, but is {attention_mask.size()}"
                )
            attn_weights = attn_weights + attention_mask
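
        # Head ablation (custom addition in this repo): for every (layer_idx, head_idx) pair in
        # `block_list` that targets this layer, overwrite that head's pre-softmax scores with 0.
        # After the causal mask and softmax this gives the head a uniform distribution over the
        # visible positions rather than removing it outright; the commented-out block below
        # preserves an earlier experiment that copied a neighboring head's scores instead.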
        if "block_list" in kwargs:
            for h in kwargs["block_list"]:
                if self.layer_idx == h[0]:
                    # if h[1] == 0:
                    #     target_head = 1
                    # elif h[1] == 31:
                    #     target_head = 30
                    # else:
                    #     target_head = h[1] - 1
                    # attn_weights[:, h[1], :, :] = attn_weights[:, target_head, :, :]
                    attn_weights[:, h[1], :, :] = 0

        # upcast attention to fp32
        attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype)
        attn_weights = nn.functional.dropout(attn_weights, p=self.attention_dropout, training=self.training)
        attn_output = torch.matmul(attn_weights, value_states)
        inspect["attn_output_before_o_proj"] = attn_output

        if attn_output.size() != (bsz, self.num_heads, q_len, self.head_dim):
            raise ValueError(
                f"`attn_output` should be of size {(bsz, self.num_heads, q_len, self.head_dim)}, but is"
                f" {attn_output.size()}"
            )

        attn_output = attn_output.transpose(1, 2).contiguous()
        attn_output = attn_output.reshape(bsz, q_len, self.hidden_size)

        attn_output = self.o_proj(attn_output)

        if not output_attentions:
            attn_weights = None

        return attn_output, inspect, attn_weights, past_key_value

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_value: Optional[Cache] = None,
        output_attentions: bool = False,
        use_cache: bool = False,
        **kwargs,
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
        if "padding_mask" in kwargs:
            warnings.warn(
                "Passing `padding_mask` is deprecated and will be removed in v4.37. Please make sure to use `attention_mask` instead."
            )
        bsz, q_len, _ = hidden_states.size()

        query_states = self.q_proj(hidden_states)
        key_states = self.k_proj(hidden_states)
        value_states = self.v_proj(hidden_states)

        query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
        key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
        value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)

        kv_seq_len = key_states.shape[-2]
        if past_key_value is not None:
            if self.layer_idx is None:
                raise ValueError(
                    f"The cache structure has changed since version v4.36. If you are using {self.__class__.__name__} "
                    "for auto-regressive decoding with k/v caching, please make sure to initialize the attention class "
                    "with a layer index."
                )
            kv_seq_len += past_key_value.get_usable_length(kv_seq_len, self.layer_idx)
        cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len)
        query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids)

        if past_key_value is not None:
            cache_kwargs = {"sin": sin, "cos": cos}  # Specific to RoPE models
            key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)

        # repeat k/v heads if n_kv_heads < n_heads
        key_states = repeat_kv(key_states, self.num_key_value_groups)
        value_states = repeat_kv(value_states, self.num_key_value_groups)

        attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim)

        if attn_weights.size() != (bsz, self.num_heads, q_len, kv_seq_len):
            raise ValueError(
                f"Attention weights should be of size {(bsz, self.num_heads, q_len, kv_seq_len)}, but is"
                f" {attn_weights.size()}"
            )

        if attention_mask is not None:
            if attention_mask.size() != (bsz, 1, q_len, kv_seq_len):
                raise ValueError(
                    f"Attention mask should be of size {(bsz, 1, q_len, kv_seq_len)}, but is {attention_mask.size()}"
                )

            attn_weights = attn_weights + attention_mask

        # upcast attention to fp32
        attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype)
        attn_weights = nn.functional.dropout(attn_weights, p=self.attention_dropout, training=self.training)
        attn_output = torch.matmul(attn_weights, value_states)

        if attn_output.size() != (bsz, self.num_heads, q_len, self.head_dim):
            raise ValueError(
                f"`attn_output` should be of size {(bsz, self.num_heads, q_len, self.head_dim)}, but is"
                f" {attn_output.size()}"
            )

        attn_output = attn_output.transpose(1, 2).contiguous()
        attn_output = attn_output.reshape(bsz, q_len, self.hidden_size)

        attn_output = self.o_proj(attn_output)

        if not output_attentions:
            attn_weights = None

        return attn_output, attn_weights, past_key_value


class MistralFlashAttention2(MistralAttention):
    """
    Mistral flash attention module. This module inherits from `MistralAttention`, as the weights of the module stay
    untouched. The only required change is in the forward pass, where it needs to correctly call the public API of
    flash attention and deal with padding tokens in case the input contains any of them.
    """

    # Copied from transformers.models.llama.modeling_llama.LlamaFlashAttention2.__init__
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        # TODO: Should be removed once Flash Attention for RoCm is bumped to 2.1.
        # flash_attn<2.1 generates a top-left aligned causal mask, while what is needed here is bottom-right
        # alignment, which became the default for flash_attn>=2.1. This attribute is used to handle this difference.
        # Reference: https://github.com/Dao-AILab/flash-attention/releases/tag/v2.1.0.
        # Beware that with flash_attn<2.1, using q_seqlen != k_seqlen (except for the case q_seqlen == 1) produces a
        # wrong mask (top-left).
        self._flash_attn_uses_top_left_mask = not is_flash_attn_greater_or_equal_2_10()

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_value: Optional[Cache] = None,
        output_attentions: bool = False,
        use_cache: bool = False,
        **kwargs,
    ):
        if "padding_mask" in kwargs:
            warnings.warn(
                "Passing `padding_mask` is deprecated and will be removed in v4.37. Please make sure to use `attention_mask` instead."
            )

            # overwrite attention_mask with padding_mask
            attention_mask = kwargs.pop("padding_mask")
        bsz, q_len, _ = hidden_states.size()

        query_states = self.q_proj(hidden_states)
        key_states = self.k_proj(hidden_states)
        value_states = self.v_proj(hidden_states)

        query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
        key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
        value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)

        kv_seq_len = key_states.shape[-2]
        if past_key_value is not None:
            if self.layer_idx is None:
                raise ValueError(
                    f"The cache structure has changed since version v4.36. If you are using {self.__class__.__name__} "
                    "for auto-regressive decoding with k/v caching, please make sure to initialize the attention class "
                    "with a layer index."
                )
            kv_seq_len += past_key_value.get_usable_length(kv_seq_len, self.layer_idx)

        # Because the input can be padded, the absolute sequence length depends on the max position id.
        rotary_seq_len = max(kv_seq_len, position_ids[:, -1].max().item()) + 1
        cos, sin = self.rotary_emb(value_states, seq_len=rotary_seq_len)

        query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids)

        use_sliding_windows = (
            _flash_supports_window_size
            and getattr(self.config, "sliding_window", None) is not None
            and kv_seq_len > self.config.sliding_window
        )

        if not _flash_supports_window_size:
            logger.warning_once(
                "The current flash attention version does not support sliding window attention. For a more memory"
                " efficient implementation, make sure to upgrade the flash-attn library."
            )

        if past_key_value is not None:
            # Activate slicing cache only if the config has a `sliding_window` attribute
            cache_has_contents = past_key_value.get_seq_length(self.layer_idx) > 0
            if (
                getattr(self.config, "sliding_window", None) is not None
                and kv_seq_len > self.config.sliding_window
                and cache_has_contents
            ):
                slicing_tokens = 1 - self.config.sliding_window

                past_key = past_key_value[self.layer_idx][0]
                past_value = past_key_value[self.layer_idx][1]

                past_key = past_key[:, :, slicing_tokens:, :].contiguous()
                past_value = past_value[:, :, slicing_tokens:, :].contiguous()

                if past_key.shape[-2] != self.config.sliding_window - 1:
                    raise ValueError(
                        f"past key must have a shape of (`batch_size, num_heads, self.config.sliding_window-1, head_dim`), got"
                        f" {past_key.shape}"
                    )

                if attention_mask is not None:
                    attention_mask = attention_mask[:, slicing_tokens:]
                    attention_mask = torch.cat([attention_mask, torch.ones_like(attention_mask[:, -1:])], dim=-1)

            cache_kwargs = {"sin": sin, "cos": cos}  # Specific to RoPE models
            key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)

        # repeat k/v heads if n_kv_heads < n_heads
        key_states = repeat_kv(key_states, self.num_key_value_groups)
        value_states = repeat_kv(value_states, self.num_key_value_groups)
        dropout_rate = 0.0 if not self.training else self.attention_dropout

        # In PEFT, we usually cast the layer norms to float32 for training stability reasons;
        # the input hidden states may therefore be silently cast to float32. Hence, we cast
        # them back to float16 just to be sure everything works as expected.
        input_dtype = query_states.dtype
        if input_dtype == torch.float32:
            if torch.is_autocast_enabled():
                target_dtype = torch.get_autocast_gpu_dtype()
            # Handle the case where the model is quantized
            elif hasattr(self.config, "_pre_quantization_dtype"):
                target_dtype = self.config._pre_quantization_dtype
            else:
                target_dtype = self.q_proj.weight.dtype

            logger.warning_once(
                f"The input hidden states seem to have been silently cast to float32. This might be related to the"
                f" fact that you have upcast embedding or layer norm layers in float32. We will cast back the input"
                f" to {target_dtype}."
            )

            query_states = query_states.to(target_dtype)
            key_states = key_states.to(target_dtype)
            value_states = value_states.to(target_dtype)
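
        # Head ablation under flash attention (custom addition in this repo): the score matrix is
        # never materialized here, so blocked heads are instead approximated by zeroing their query
        # vectors, which makes those heads attend uniformly over their visible window. `block_list`
        # is either a flat list of (layer_idx, head_idx) pairs applied to the whole batch, or one
        # such list per batch element (detected heuristically by the length of the first element).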
        if "block_list" in kwargs:
            if len(kwargs["block_list"][0]) > 2:
                for batch_idx, bh in enumerate(kwargs["block_list"]):
                    for h in bh:
                        if self.layer_idx == h[0]:
                            query_states[batch_idx, h[1], :, :] = 0
            else:
                for h in kwargs["block_list"]:
                    if self.layer_idx == h[0]:
                        query_states[:, h[1], :, :] = 0
                        # key_states[:, h[1], :, :] = 0

        # Reshape to the expected shape for Flash Attention
        query_states = query_states.transpose(1, 2)
        key_states = key_states.transpose(1, 2)
        value_states = value_states.transpose(1, 2)

        attn_output = self._flash_attention_forward(
            query_states,
            key_states,
            value_states,
            attention_mask,
            q_len,
            dropout=dropout_rate,
            use_sliding_windows=use_sliding_windows,
        )

        attn_output = attn_output.reshape(bsz, q_len, self.hidden_size).contiguous()
        attn_output = self.o_proj(attn_output)

        if not output_attentions:
            attn_weights = None

        return attn_output, attn_weights, past_key_value

    def _flash_attention_forward(
        self,
        query_states,
        key_states,
        value_states,
        attention_mask,
        query_length,
        dropout=0.0,
        softmax_scale=None,
        use_sliding_windows=False,
    ):
        """
        Calls the forward method of Flash Attention - if the input hidden states contain at least one padding token,
        first unpad the input, then compute the attention scores and pad the final attention scores.

        Args:
            query_states (`torch.Tensor`):
                Input query states to be passed to the Flash Attention API
            key_states (`torch.Tensor`):
                Input key states to be passed to the Flash Attention API
            value_states (`torch.Tensor`):
                Input value states to be passed to the Flash Attention API
            attention_mask (`torch.Tensor`):
                The padding mask - corresponds to a tensor of size `(batch_size, seq_len)` where 0 stands for the
                position of padding tokens and 1 for the position of non-padding tokens.
            dropout (`float`, *optional*):
                Attention dropout
            softmax_scale (`float`, *optional*):
                The scaling of QK^T before applying softmax. Defaults to 1 / sqrt(head_dim)
            use_sliding_windows (`bool`, *optional*):
                Whether to activate sliding window attention.
        """
        if not self._flash_attn_uses_top_left_mask:
            causal = self.is_causal
        else:
            # TODO: Remove the `query_length != 1` check once Flash Attention for RoCm is bumped to 2.1. For details,
            # please see the comment in LlamaFlashAttention2 __init__.
            causal = self.is_causal and query_length != 1

        # Contains at least one padding token in the sequence
        if attention_mask is not None:
            batch_size = query_states.shape[0]
            query_states, key_states, value_states, indices_q, cu_seq_lens, max_seq_lens = self._upad_input(
                query_states, key_states, value_states, attention_mask, query_length
            )

            cu_seqlens_q, cu_seqlens_k = cu_seq_lens
            max_seqlen_in_batch_q, max_seqlen_in_batch_k = max_seq_lens

            if not use_sliding_windows:
                attn_output_unpad = flash_attn_varlen_func(
                    query_states,
                    key_states,
                    value_states,
                    cu_seqlens_q=cu_seqlens_q,
                    cu_seqlens_k=cu_seqlens_k,
                    max_seqlen_q=max_seqlen_in_batch_q,
                    max_seqlen_k=max_seqlen_in_batch_k,
                    dropout_p=dropout,
                    softmax_scale=softmax_scale,
                    causal=causal,
                )
            else:
                attn_output_unpad = flash_attn_varlen_func(
                    query_states,
                    key_states,
                    value_states,
                    cu_seqlens_q=cu_seqlens_q,
                    cu_seqlens_k=cu_seqlens_k,
                    max_seqlen_q=max_seqlen_in_batch_q,
                    max_seqlen_k=max_seqlen_in_batch_k,
                    dropout_p=dropout,
                    softmax_scale=softmax_scale,
                    causal=causal,
                    window_size=(self.config.sliding_window, self.config.sliding_window),
                )

            attn_output = pad_input(attn_output_unpad, indices_q, batch_size, query_length)
        else:
            if not use_sliding_windows:
                attn_output = flash_attn_func(
                    query_states,
                    key_states,
                    value_states,
                    dropout,
                    softmax_scale=softmax_scale,
                    causal=causal,
                )
            else:
                attn_output = flash_attn_func(
                    query_states,
                    key_states,
                    value_states,
                    dropout,
                    softmax_scale=softmax_scale,
                    causal=causal,
                    window_size=(self.config.sliding_window, self.config.sliding_window),
                )

        return attn_output

    def _upad_input(self, query_layer, key_layer, value_layer, attention_mask, query_length):
        batch_size, kv_seq_len, num_heads, head_dim = key_layer.shape

        # On the first iteration we need to properly re-create the padding mask
        # by slicing it in the proper place
        if kv_seq_len != attention_mask.shape[-1]:
            attention_mask_num_tokens = attention_mask.shape[-1]
            attention_mask = attention_mask[:, attention_mask_num_tokens - kv_seq_len :]

        indices_k, cu_seqlens_k, max_seqlen_in_batch_k = _get_unpad_data(attention_mask)

        key_layer = index_first_axis(key_layer.reshape(batch_size * kv_seq_len, num_heads, head_dim), indices_k)
        value_layer = index_first_axis(value_layer.reshape(batch_size * kv_seq_len, num_heads, head_dim), indices_k)

        if query_length == kv_seq_len:
            query_layer = index_first_axis(
                query_layer.reshape(batch_size * kv_seq_len, num_heads, head_dim), indices_k
            )
            cu_seqlens_q = cu_seqlens_k
            max_seqlen_in_batch_q = max_seqlen_in_batch_k
            indices_q = indices_k
        elif query_length == 1:
            max_seqlen_in_batch_q = 1
            cu_seqlens_q = torch.arange(
                batch_size + 1, dtype=torch.int32, device=query_layer.device
            )  # There is a memcpy here, which is very bad.
            indices_q = cu_seqlens_q[:-1]
            query_layer = query_layer.squeeze(1)
        else:
            # The -q_len: slice assumes left padding.
            attention_mask = attention_mask[:, -query_length:]
            query_layer, indices_q, cu_seqlens_q, max_seqlen_in_batch_q = unpad_input(query_layer, attention_mask)

        return (
            query_layer,
            key_layer,
            value_layer,
            indices_q,
            (cu_seqlens_q, cu_seqlens_k),
            (max_seqlen_in_batch_q, max_seqlen_in_batch_k),
        )


# Copied from transformers.models.llama.modeling_llama.LlamaSdpaAttention with Llama->Mistral
class MistralSdpaAttention(MistralAttention):
    """
    Mistral attention module using torch.nn.functional.scaled_dot_product_attention. This module inherits from
    `MistralAttention`, as the weights of the module stay untouched. The only changes are on the forward pass, to
    adapt to the SDPA API.
    """

    # Adapted from MistralAttention.forward
    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_value: Optional[Cache] = None,
        output_attentions: bool = False,
        use_cache: bool = False,
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
        if output_attentions:
            # TODO: Improve this warning with e.g. `model.config.attn_implementation = "manual"` once this is implemented.
            logger.warning_once(
                "MistralModel is using MistralSdpaAttention, but `torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`. Falling back to the manual attention implementation, "
                'but specifying the manual implementation will be required from Transformers version v5.0.0 onwards. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.'
            )
            return super().forward(
                hidden_states=hidden_states,
                attention_mask=attention_mask,
                position_ids=position_ids,
                past_key_value=past_key_value,
                output_attentions=output_attentions,
                use_cache=use_cache,
            )

        bsz, q_len, _ = hidden_states.size()

        query_states = self.q_proj(hidden_states)
        key_states = self.k_proj(hidden_states)
        value_states = self.v_proj(hidden_states)

        query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
        key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
        value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)

        kv_seq_len = key_states.shape[-2]
        if past_key_value is not None:
            kv_seq_len += past_key_value.get_usable_length(kv_seq_len, self.layer_idx)
        cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len)

        query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids)

        if past_key_value is not None:
            cache_kwargs = {"sin": sin, "cos": cos}  # Specific to RoPE models
            key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)

        key_states = repeat_kv(key_states, self.num_key_value_groups)
        value_states = repeat_kv(value_states, self.num_key_value_groups)

        if attention_mask is not None:
            if attention_mask.size() != (bsz, 1, q_len, kv_seq_len):
                raise ValueError(
                    f"Attention mask should be of size {(bsz, 1, q_len, kv_seq_len)}, but is {attention_mask.size()}"
                )

        # SDPA with memory-efficient backend is currently (torch==2.1.2) bugged with non-contiguous inputs with custom attn_mask.
        # Reference: https://github.com/pytorch/pytorch/issues/112577.
        if query_states.device.type == "cuda" and attention_mask is not None:
            query_states = query_states.contiguous()
            key_states = key_states.contiguous()
            value_states = value_states.contiguous()

        attn_output = torch.nn.functional.scaled_dot_product_attention(
            query_states,
            key_states,
            value_states,
            attn_mask=attention_mask,
            dropout_p=self.attention_dropout if self.training else 0.0,
            # The q_len > 1 is necessary to match with AttentionMaskConverter.to_causal_4d that does not create a causal mask in case q_len == 1.
            is_causal=self.is_causal and attention_mask is None and q_len > 1,
        )

        attn_output = attn_output.transpose(1, 2).contiguous()
        attn_output = attn_output.reshape(bsz, q_len, self.hidden_size)

        attn_output = self.o_proj(attn_output)

        return attn_output, None, past_key_value


MISTRAL_ATTENTION_CLASSES = {
    "eager": MistralAttention,
    "flash_attention_2": MistralFlashAttention2,
    "sdpa": MistralSdpaAttention,
}


class MistralDecoderLayer(nn.Module):
    def __init__(self, config: MistralConfig, layer_idx: int):
        super().__init__()
        self.hidden_size = config.hidden_size

        self.self_attn = MISTRAL_ATTENTION_CLASSES[config._attn_implementation](config, layer_idx)

        self.mlp = MistralMLP(config)
        self.input_layernorm = MistralRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.post_attention_layernorm = MistralRMSNorm(config.hidden_size, eps=config.rms_norm_eps)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_value: Optional[Tuple[torch.Tensor]] = None,
        output_attentions: Optional[bool] = False,
        use_cache: Optional[bool] = False,
        attn_mode: str = "flash",
        **kwargs,
    ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
        """
        Args:
            hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
            attention_mask (`torch.FloatTensor`, *optional*): attention mask of size
                `(batch, sequence_length)` where padding elements are indicated by 0.
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
            use_cache (`bool`, *optional*):
                If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
                (see `past_key_values`).
            past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states
        """
        if "padding_mask" in kwargs:
            warnings.warn(
                "Passing `padding_mask` is deprecated and will be removed in v4.37. Please make sure to use `attention_mask` instead."
            )

        residual = hidden_states

        hidden_states = self.input_layernorm(hidden_states)
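
        # Self Attention. `attn_mode` (custom addition in this repo) selects between the configured
        # attention class ("flash") and the instrumented eager path `forward_torch` ("torch"), which
        # additionally returns the `inspect` dict of intermediate tensors.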
        if attn_mode == "flash":
            hidden_states, self_attn_weights, present_key_value = self.self_attn(
                hidden_states=hidden_states,
                attention_mask=attention_mask,
                position_ids=position_ids,
                past_key_value=past_key_value,
                output_attentions=output_attentions,
                use_cache=use_cache,
                **kwargs,
            )
        elif attn_mode == "torch":
            hidden_states, inspect, self_attn_weights, present_key_value = self.self_attn.forward_torch(
                hidden_states=hidden_states,
                attention_mask=attention_mask,
                position_ids=position_ids,
                past_key_value=past_key_value,
                output_attentions=output_attentions,
                use_cache=use_cache,
                **kwargs,
            )
        hidden_states = residual + hidden_states

        # Fully Connected
        residual = hidden_states
        hidden_states = self.post_attention_layernorm(hidden_states)
        hidden_states = self.mlp(hidden_states)
        hidden_states = residual + hidden_states

        outputs = (hidden_states,)

        if output_attentions:
            outputs += (self_attn_weights,)

        if use_cache:
            outputs += (present_key_value,)

        return outputs


MISTRAL_START_DOCSTRING = r"""
    This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
    library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
    etc.)

    This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
    Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage
    and behavior.

    Parameters:
        config ([`MistralConfig`]):
            Model configuration class with all the parameters of the model. Initializing with a config file does not
            load the weights associated with the model, only the configuration. Check out the
            [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""


@add_start_docstrings(
    "The bare Mistral Model outputting raw hidden-states without any specific head on top.",
    MISTRAL_START_DOCSTRING,
)
class MistralPreTrainedModel(PreTrainedModel):
    config_class = MistralConfig
    base_model_prefix = "model"
    supports_gradient_checkpointing = True
    _no_split_modules = ["MistralDecoderLayer"]
    _skip_keys_device_placement = "past_key_values"
    _supports_flash_attn_2 = True
    _supports_sdpa = True
    _supports_cache_class = True

    def _init_weights(self, module):
        std = self.config.initializer_range
        if isinstance(module, nn.Linear):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.padding_idx is not None:
                module.weight.data[module.padding_idx].zero_()


MISTRAL_INPUTS_DOCSTRING = r"""
    Args:
        input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
            Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
            it.

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            [What are input IDs?](../glossary#input-ids)
        attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
            Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.

            [What are attention masks?](../glossary#attention-mask)

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
            `past_key_values`).

            If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`]
            and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more
            information on the default strategy.

            - 1 indicates the head is **not masked**,
            - 0 indicates the head is **masked**.
        position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Indices of positions of each input sequence token in the position embeddings. Selected in the range `[0,
            config.n_positions - 1]`.

            [What are position IDs?](../glossary#position-ids)
        past_key_values (`Cache` or `tuple(tuple(torch.FloatTensor))`, *optional*):
            Pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
            blocks) that can be used to speed up sequential decoding. This typically consists of the `past_key_values`
            returned by the model at a previous stage of decoding, when `use_cache=True` or `config.use_cache=True`.

            Two formats are allowed:
            - a [`~cache_utils.Cache`] instance;
            - Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of
              shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`. This is also known as the legacy
              cache format.

            The model will output the same cache format that is fed as input. If no `past_key_values` are passed, the
            legacy cache format will be returned.

            If `past_key_values` are used, the user can optionally input only the last `input_ids` (those that don't
            have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `input_ids`
            of shape `(batch_size, sequence_length)`.
        inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
            Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
            is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
            model's internal embedding lookup matrix.
        use_cache (`bool`, *optional*):
            If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
            `past_key_values`).
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
            tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""


@add_start_docstrings(
    "The bare Mistral Model outputting raw hidden-states without any specific head on top.",
    MISTRAL_START_DOCSTRING,
)
class MistralModel(MistralPreTrainedModel):
    """
    Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`MistralDecoderLayer`]

    Args:
        config: MistralConfig
    """

    def __init__(self, config: MistralConfig):
        super().__init__(config)
        self.padding_idx = config.pad_token_id
        self.vocab_size = config.vocab_size

        self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
        self.layers = nn.ModuleList(
            [MistralDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
        )
        self._attn_implementation = config._attn_implementation
        self.norm = MistralRMSNorm(config.hidden_size, eps=config.rms_norm_eps)

        self.gradient_checkpointing = False
        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.embed_tokens

    def set_input_embeddings(self, value):
        self.embed_tokens = value

    @add_start_docstrings_to_model_forward(MISTRAL_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids: torch.LongTensor = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[List[torch.FloatTensor]] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        attn_mode: str = "flash",
        block_list: Optional[list] = None,
    ) -> Union[Tuple, BaseModelOutputWithPast]:
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        use_cache = use_cache if use_cache is not None else self.config.use_cache

        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        # retrieve input_ids and inputs_embeds
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time")
        elif input_ids is not None:
            batch_size, seq_length = input_ids.shape
        elif inputs_embeds is not None:
            batch_size, seq_length, _ = inputs_embeds.shape
        else:
            raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds")

        if self.gradient_checkpointing and self.training:
            if use_cache:
                logger.warning_once(
                    "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
                )
                use_cache = False

        past_key_values_length = 0

        if use_cache:
            use_legacy_cache = not isinstance(past_key_values, Cache)
            if use_legacy_cache:
                past_key_values = DynamicCache.from_legacy_cache(past_key_values)
            past_key_values_length = past_key_values.get_usable_length(seq_length)

        if position_ids is None:
            device = input_ids.device if input_ids is not None else inputs_embeds.device
            position_ids = torch.arange(
                past_key_values_length, seq_length + past_key_values_length, dtype=torch.long, device=device
            )
            position_ids = position_ids.unsqueeze(0).view(-1, seq_length)
        else:
            position_ids = position_ids.view(-1, seq_length).long()

        if inputs_embeds is None:
            inputs_embeds = self.embed_tokens(input_ids)

        if attention_mask is not None and self._attn_implementation == "flash_attention_2" and use_cache:
            is_padding_right = attention_mask[:, -1].sum().item() != batch_size
            if is_padding_right:
                raise ValueError(
                    "You are attempting to perform batched generation with padding_side='right';"
                    " this may lead to unexpected behaviour for the Flash Attention version of Mistral. Make sure to"
                    " call `tokenizer.padding_side = 'left'` before tokenizing the input."
                )

        if self._attn_implementation == "flash_attention_2":
            # 2d mask is passed through the layers
            attention_mask = attention_mask if (attention_mask is not None and 0 in attention_mask) else None
        elif self._attn_implementation == "sdpa" and not output_attentions:
            # output_attentions=True can not be supported when using SDPA, and we fall back on
            # the manual implementation that requires a 4D causal mask in all cases.
            attention_mask = _prepare_4d_causal_attention_mask_for_sdpa(
                attention_mask,
                (batch_size, seq_length),
                inputs_embeds,
                past_key_values_length,
            )
        else:
            # 4d mask is passed through the layers
            attention_mask = _prepare_4d_causal_attention_mask(
                attention_mask,
                (batch_size, seq_length),
                inputs_embeds,
                past_key_values_length,
                sliding_window=self.config.sliding_window,
            )

        hidden_states = inputs_embeds

        # decoder layers
        all_hidden_states = () if output_hidden_states else None
        all_self_attns = () if output_attentions else None
        next_decoder_cache = None
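        # Thread the head-ablation list (custom addition in this repo) through to every decoder layer.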
        if block_list:
            kwargs = {"block_list": block_list}
        else:
            kwargs = {}
        for decoder_layer in self.layers:
            if output_hidden_states:
                all_hidden_states += (hidden_states,)

            if self.gradient_checkpointing and self.training:
                layer_outputs = self._gradient_checkpointing_func(
                    decoder_layer.__call__,
                    hidden_states,
                    attention_mask,
                    position_ids,
                    past_key_values,
                    output_attentions,
                    use_cache,
                )
            else:
                layer_outputs = decoder_layer(
                    hidden_states,
                    attention_mask=attention_mask,
                    position_ids=position_ids,
                    past_key_value=past_key_values,
                    output_attentions=output_attentions,
                    use_cache=use_cache,
                    attn_mode=attn_mode,
                    **kwargs,
                )

            hidden_states = layer_outputs[0]

            if use_cache:
                next_decoder_cache = layer_outputs[2 if output_attentions else 1]

            if output_attentions:
                all_self_attns += (layer_outputs[1],)

        hidden_states = self.norm(hidden_states)

        # add hidden states from the last decoder layer
        if output_hidden_states:
            all_hidden_states += (hidden_states,)

        next_cache = None
        if use_cache:
            next_cache = next_decoder_cache.to_legacy_cache() if use_legacy_cache else next_decoder_cache

        if not return_dict:
            return tuple(v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns] if v is not None)
        return BaseModelOutputWithPast(
            last_hidden_state=hidden_states,
            past_key_values=next_cache,
            hidden_states=all_hidden_states,
            attentions=all_self_attns,
        )


class MistralForCausalLM(MistralPreTrainedModel):
    _tied_weights_keys = ["lm_head.weight"]

    def __init__(self, config):
        super().__init__(config)
        self.model = MistralModel(config)
        self.vocab_size = config.vocab_size
        self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.model.embed_tokens

    def set_input_embeddings(self, value):
        self.model.embed_tokens = value

    def get_output_embeddings(self):
        return self.lm_head

    def set_output_embeddings(self, new_embeddings):
        self.lm_head = new_embeddings

    def set_decoder(self, decoder):
        self.model = decoder

    def get_decoder(self):
        return self.model

    @add_start_docstrings_to_model_forward(MISTRAL_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        input_ids: torch.LongTensor = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[List[torch.FloatTensor]] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        block_list: Optional[list] = None,
        attn_mode: str = "flash",
    ) -> Union[Tuple, CausalLMOutputWithPast]:
        r"""
        Args:
            labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
                Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
                config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
                (masked); the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.

        Returns:

        Example:

        ```python
        >>> from transformers import AutoTokenizer, MistralForCausalLM

        >>> model = MistralForCausalLM.from_pretrained("mistralai/Mistral-7B-v0.1")
        >>> tokenizer = AutoTokenizer.from_pretrained("mistralai/Mistral-7B-v0.1")

        >>> prompt = "Hey, are you conscious? Can you talk to me?"
        >>> inputs = tokenizer(prompt, return_tensors="pt")

        >>> # Generate
        >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
        >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
        "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
        ```"""
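
        # Head-masking usage sketch (inferred from the `block_list` plumbing; not an upstream API):
        #   out = model(input_ids, attn_mode="torch", block_list=[[layer_idx, head_idx], ...])
        # zeroes the listed heads' attention scores on the eager path; with attn_mode="flash" the
        # same list zeroes those heads' query vectors instead.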
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
|
| 1298 |
+
output_hidden_states = (
|
| 1299 |
+
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
|
| 1300 |
+
)
|
| 1301 |
+
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
|
| 1302 |
+
|
| 1303 |
+
# decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
|
| 1304 |
+
outputs = self.model(
|
| 1305 |
+
input_ids=input_ids,
|
| 1306 |
+
attention_mask=attention_mask,
|
| 1307 |
+
position_ids=position_ids,
|
| 1308 |
+
past_key_values=past_key_values,
|
| 1309 |
+
inputs_embeds=inputs_embeds,
|
| 1310 |
+
use_cache=use_cache,
|
| 1311 |
+
output_attentions=output_attentions,
|
| 1312 |
+
output_hidden_states=output_hidden_states,
|
| 1313 |
+
return_dict=return_dict,
|
| 1314 |
+
attn_mode=attn_mode,
|
| 1315 |
+
block_list=block_list
|
| 1316 |
+
)
|
| 1317 |
+
|
| 1318 |
+
hidden_states = outputs[0]
|
| 1319 |
+
logits = self.lm_head(hidden_states)
|
| 1320 |
+
logits = logits.float()
|
| 1321 |
+
|
| 1322 |
+
loss = None
|
| 1323 |
+
if labels is not None:
|
| 1324 |
+
# Shift so that tokens < n predict n
|
| 1325 |
+
shift_logits = logits[..., :-1, :].contiguous()
|
| 1326 |
+
shift_labels = labels[..., 1:].contiguous()
|
| 1327 |
+
# Flatten the tokens
|
| 1328 |
+
loss_fct = CrossEntropyLoss()
|
| 1329 |
+
shift_logits = shift_logits.view(-1, self.config.vocab_size)
|
| 1330 |
+
shift_labels = shift_labels.view(-1)
|
| 1331 |
+
# Enable model parallelism
|
| 1332 |
+
shift_labels = shift_labels.to(shift_logits.device)
|
| 1333 |
+
loss = loss_fct(shift_logits, shift_labels)
|
| 1334 |
+
|
| 1335 |
+
if not return_dict:
|
| 1336 |
+
output = (logits,) + outputs[1:]
|
| 1337 |
+
return (loss,) + output if loss is not None else output
|
| 1338 |
+
|
| 1339 |
+
return CausalLMOutputWithPast(
|
| 1340 |
+
loss=loss,
|
| 1341 |
+
logits=logits,
|
| 1342 |
+
past_key_values=outputs.past_key_values,
|
| 1343 |
+
hidden_states=outputs.hidden_states,
|
| 1344 |
+
attentions=outputs.attentions,
|
| 1345 |
+
)
|
| 1346 |
+
|
| 1347 |
+
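    # Illustrative note (not in the upstream file): with labels [t0, t1, t2, t3],
    # `shift_logits` keeps the predictions at positions 0..2 and `shift_labels`
    # keeps tokens 1..3, so the logit at position i is scored against token i + 1.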
    def prepare_inputs_for_generation(
        self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, **kwargs
    ):
        # Omit tokens covered by past_key_values
        if past_key_values is not None:
            if isinstance(past_key_values, Cache):
                cache_length = past_key_values.get_seq_length()
                past_length = past_key_values.seen_tokens
                max_cache_length = past_key_values.get_max_length()
            else:
                cache_length = past_length = past_key_values[0][0].shape[2]
                max_cache_length = None

            # Keep only the unprocessed tokens:
            # 1 - If the length of the attention_mask exceeds the length of input_ids, then we are in a setting where
            # some of the inputs are exclusively passed as part of the cache (e.g. when passing input_embeds as
            # input)
            if attention_mask is not None and attention_mask.shape[1] > input_ids.shape[1]:
                input_ids = input_ids[:, -(attention_mask.shape[1] - past_length) :]
            # 2 - If the past_length is smaller than input_ids', then input_ids holds all input tokens. We can discard
            # input_ids based on the past_length.
            elif past_length < input_ids.shape[1]:
                input_ids = input_ids[:, past_length:]
            # 3 - Otherwise (past_length >= input_ids.shape[1]), let's assume input_ids only has unprocessed tokens.

            # If we are about to go beyond the maximum cache length, we need to crop the input attention mask.
            if (
                max_cache_length is not None
                and attention_mask is not None
                and cache_length + input_ids.shape[1] > max_cache_length
            ):
                attention_mask = attention_mask[:, -max_cache_length:]

        position_ids = kwargs.get("position_ids", None)
        if attention_mask is not None and position_ids is None:
            # create position_ids on the fly for batch generation
            position_ids = attention_mask.long().cumsum(-1) - 1
            position_ids.masked_fill_(attention_mask == 0, 1)
            if past_key_values:
                position_ids = position_ids[:, -input_ids.shape[1] :]

        # if `inputs_embeds` are passed, we only want to use them in the 1st generation step
        if inputs_embeds is not None and past_key_values is None:
            model_inputs = {"inputs_embeds": inputs_embeds}
        else:
            model_inputs = {"input_ids": input_ids}

        model_inputs.update(
            {
                "position_ids": position_ids,
                "past_key_values": past_key_values,
                "use_cache": kwargs.get("use_cache"),
                "attention_mask": attention_mask,
            }
        )
        return model_inputs
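    # Illustrative note (not in the upstream file): the three numbered branches above
    # cover (1) an attention_mask longer than input_ids (some inputs live only in the
    # cache, e.g. when inputs_embeds were used), (2) a cache shorter than input_ids
    # (trim off the already-processed prefix), and (3) a cache at least as long as
    # input_ids (keep input_ids as-is, treating it as unprocessed tokens).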
    @staticmethod
    def _reorder_cache(past_key_values, beam_idx):
        reordered_past = ()
        for layer_past in past_key_values:
            reordered_past += (
                tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past),
            )
        return reordered_past


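# Illustrative note (not in the upstream file): during beam search, `beam_idx` maps
# each surviving beam to the old beam it continues, so every layer's cached keys and
# values are gathered along the batch dimension; e.g. beam_idx = [0, 0, 2] duplicates
# beam 0's cache and drops beam 1's.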
@add_start_docstrings(
    """
    The Mistral Model transformer with a sequence classification head on top (linear layer).

    [`MistralForSequenceClassification`] uses the last token in order to do the classification, as other causal models
    (e.g. GPT-2) do.

    Since it does classification on the last token, it needs to know the position of the last token. If a
    `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If
    no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the
    padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (takes the last value in
    each row of the batch).
    """,
    MISTRAL_START_DOCSTRING,
)
# Copied from transformers.models.llama.modeling_llama.LlamaForSequenceClassification with Llama->Mistral, LLAMA->MISTRAL
class MistralForSequenceClassification(MistralPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.model = MistralModel(config)
        self.score = nn.Linear(config.hidden_size, self.num_labels, bias=False)

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.model.embed_tokens

    def set_input_embeddings(self, value):
        self.model.embed_tokens = value

    @add_start_docstrings_to_model_forward(MISTRAL_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids: torch.LongTensor = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[List[torch.FloatTensor]] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, SequenceClassifierOutputWithPast]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss). If
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        transformer_outputs = self.model(
            input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        hidden_states = transformer_outputs[0]
        logits = self.score(hidden_states)

        if input_ids is not None:
            batch_size = input_ids.shape[0]
        else:
            batch_size = inputs_embeds.shape[0]

        if self.config.pad_token_id is None and batch_size != 1:
            raise ValueError("Cannot handle batch sizes > 1 if no padding token is defined.")
        if self.config.pad_token_id is None:
            sequence_lengths = -1
        else:
            if input_ids is not None:
                # if no pad token found, use modulo instead of reverse indexing for ONNX compatibility
                sequence_lengths = torch.eq(input_ids, self.config.pad_token_id).int().argmax(-1) - 1
                sequence_lengths = sequence_lengths % input_ids.shape[-1]
                sequence_lengths = sequence_lengths.to(logits.device)
            else:
                sequence_lengths = -1

        pooled_logits = logits[torch.arange(batch_size, device=logits.device), sequence_lengths]

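        # Illustrative note (not in the upstream file): torch.eq(...).argmax(-1) finds
        # the first pad position in each row; subtracting 1 gives the last real token.
        # For a row with no padding, argmax returns 0, so -1 % seq_len wraps around to
        # the final position, which is exactly the last token of an unpadded row.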
        loss = None
        if labels is not None:
            labels = labels.to(logits.device)
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(pooled_logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(pooled_logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(pooled_logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(pooled_logits, labels)
        if not return_dict:
            output = (pooled_logits,) + transformer_outputs[1:]
            return ((loss,) + output) if loss is not None else output

        return SequenceClassifierOutputWithPast(
            loss=loss,
            logits=pooled_logits,
            past_key_values=transformer_outputs.past_key_values,
            hidden_states=transformer_outputs.hidden_states,
            attentions=transformer_outputs.attentions,
        )
faiss_attn/source/modeling_mixtral.py
ADDED
@@ -0,0 +1,1754 @@
# coding=utf-8
# Copyright 2023 Mistral AI and the HuggingFace Inc. team. All rights reserved.
#
# This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
# and OPT implementations in this library. It has been modified from its
# original forms to accommodate minor architectural differences compared
# to GPT-NeoX and OPT used by the Meta AI team that trained the model.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" PyTorch Mixtral model."""
import inspect
import math
import warnings
from typing import List, Optional, Tuple, Union, Any
import transformers
import torch
import torch.nn.functional as F
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss

from transformers.activations import ACT2FN
from transformers.cache_utils import Cache, DynamicCache
from transformers.modeling_attn_mask_utils import (
    AttentionMaskConverter,
    _prepare_4d_attention_mask,
    _prepare_4d_causal_attention_mask,
    _prepare_4d_causal_attention_mask_for_sdpa,
)
from transformers.modeling_outputs import (
    MoeCausalLMOutputWithPast,
    MoeModelOutputWithPast,
    SequenceClassifierOutputWithPast,
)
from transformers.modeling_utils import PreTrainedModel
from transformers.pytorch_utils import ALL_LAYERNORM_LAYERS, is_torch_greater_or_equal_than_1_13
from transformers.utils import (
    add_start_docstrings,
    add_start_docstrings_to_model_forward,
    is_flash_attn_2_available,
    is_flash_attn_greater_or_equal_2_10,
    logging,
    replace_return_docstrings,
)
from transformers.utils.import_utils import is_torch_fx_available
from transformers import MixtralConfig


if is_flash_attn_2_available():
    from flash_attn import flash_attn_func, flash_attn_varlen_func
    from flash_attn.bert_padding import index_first_axis, pad_input, unpad_input  # noqa

    _flash_supports_window_size = "window_size" in list(inspect.signature(flash_attn_func).parameters)

# This makes `_prepare_4d_causal_attention_mask` a leaf function in the FX graph.
# It means that the function will not be traced through and simply appear as a node in the graph.
if is_torch_fx_available():
    if not is_torch_greater_or_equal_than_1_13:
        import torch.fx

    _prepare_4d_causal_attention_mask = torch.fx.wrap(_prepare_4d_causal_attention_mask)


logger = logging.get_logger(__name__)

_CONFIG_FOR_DOC = "MixtralConfig"


def load_balancing_loss_func(
    gate_logits: torch.Tensor, num_experts: Optional[int] = None, top_k=2, attention_mask: Optional[torch.Tensor] = None
) -> float:
    r"""
    Computes the auxiliary load balancing loss as in Switch Transformer - implemented in Pytorch.

    See Switch Transformer (https://arxiv.org/abs/2101.03961) for more details. This function implements the loss
    function presented in equations (4) - (6) of the paper. It aims at penalizing cases where the routing between
    experts is too unbalanced.

    Args:
        gate_logits (Union[`torch.Tensor`, Tuple[`torch.Tensor`]]):
            Logits from the `gate`, should be a tuple of model.config.num_hidden_layers tensors of
            shape [batch_size X sequence_length, num_experts].
        attention_mask (`torch.Tensor`, *optional*):
            The attention_mask used in the forward function, of
            shape [batch_size X sequence_length] if not None.
        num_experts (`int`, *optional*):
            Number of experts.

    Returns:
        The auxiliary loss.
    """
    if gate_logits is None or not isinstance(gate_logits, tuple):
        return 0

    if isinstance(gate_logits, tuple):
        compute_device = gate_logits[0].device
        concatenated_gate_logits = torch.cat([layer_gate.to(compute_device) for layer_gate in gate_logits], dim=0)

    routing_weights = torch.nn.functional.softmax(concatenated_gate_logits, dim=-1)

    _, selected_experts = torch.topk(routing_weights, top_k, dim=-1)

    expert_mask = torch.nn.functional.one_hot(selected_experts, num_experts)

    if attention_mask is None:
        # Compute the percentage of tokens routed to each expert
        tokens_per_expert = torch.mean(expert_mask.float(), dim=0)

        # Compute the average probability of routing to these experts
        router_prob_per_expert = torch.mean(routing_weights, dim=0)
    else:
        batch_size, sequence_length = attention_mask.shape
        num_hidden_layers = concatenated_gate_logits.shape[0] // (batch_size * sequence_length)

        # Compute the mask that masks all padding tokens as 0 with the same shape as expert_mask
        expert_attention_mask = (
            attention_mask[None, :, :, None, None]
            .expand((num_hidden_layers, batch_size, sequence_length, 2, num_experts))
            .reshape(-1, 2, num_experts)
            .to(compute_device)
        )

        # Compute the percentage of tokens routed to each expert
        tokens_per_expert = torch.sum(expert_mask.float() * expert_attention_mask, dim=0) / torch.sum(
            expert_attention_mask, dim=0
        )

        # Compute the mask that masks all padding tokens as 0 with the same shape as tokens_per_expert
        router_per_expert_attention_mask = (
            attention_mask[None, :, :, None]
            .expand((num_hidden_layers, batch_size, sequence_length, num_experts))
            .reshape(-1, num_experts)
            .to(compute_device)
        )

        # Compute the average probability of routing to these experts
        router_prob_per_expert = torch.sum(routing_weights * router_per_expert_attention_mask, dim=0) / torch.sum(
            router_per_expert_attention_mask, dim=0
        )

    overall_loss = torch.sum(tokens_per_expert * router_prob_per_expert.unsqueeze(0))
    return overall_loss * num_experts

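# Illustrative note (not in the upstream file): during training this term is usually
# added on top of the LM loss using the per-layer router logits, e.g.
#   aux = load_balancing_loss_func(outputs.router_logits, num_experts=8, top_k=2)
#   loss = lm_loss + router_aux_loss_coef * aux
# where `router_aux_loss_coef` comes from the MixtralConfig.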
# Copied from transformers.models.llama.modeling_llama._get_unpad_data
def _get_unpad_data(attention_mask):
    seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32)
    indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten()
    max_seqlen_in_batch = seqlens_in_batch.max().item()
    cu_seqlens = F.pad(torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.int32), (1, 0))
    return (
        indices,
        cu_seqlens,
        max_seqlen_in_batch,
    )

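# Illustrative note (not in the upstream file): for attention_mask
# [[1, 1, 0], [1, 1, 1]] the per-row lengths are [2, 3], the flat indices of real
# tokens are [0, 1, 3, 4, 5], cu_seqlens is [0, 2, 5], and max_seqlen_in_batch is 3;
# these are exactly the arguments flash_attn_varlen_func expects for packed inputs.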
# Copied from transformers.models.llama.modeling_llama.LlamaRMSNorm with Llama->Mixtral
class MixtralRMSNorm(nn.Module):
    def __init__(self, hidden_size, eps=1e-6):
        """
        MixtralRMSNorm is equivalent to T5LayerNorm
        """
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.variance_epsilon = eps

    def forward(self, hidden_states):
        input_dtype = hidden_states.dtype
        hidden_states = hidden_states.to(torch.float32)
        variance = hidden_states.pow(2).mean(-1, keepdim=True)
        hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
        return self.weight * hidden_states.to(input_dtype)

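# Illustrative note (not in the upstream file): RMSNorm rescales by the root mean
# square instead of subtracting a mean, i.e. y = w * x / sqrt(mean(x^2) + eps),
# computed in float32 for stability and cast back to the input dtype at the end.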
# Copied from transformers.models.mistral.modeling_mistral.MistralRotaryEmbedding with Mistral->Mixtral
class MixtralRotaryEmbedding(nn.Module):
    def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None):
        super().__init__()

        self.dim = dim
        self.max_position_embeddings = max_position_embeddings
        self.base = base
        inv_freq = 1.0 / (self.base ** (torch.arange(0, self.dim, 2, dtype=torch.int64).float().to(device) / self.dim))
        self.register_buffer("inv_freq", inv_freq, persistent=False)

        # Build here to make `torch.jit.trace` work.
        self._set_cos_sin_cache(
            seq_len=max_position_embeddings, device=self.inv_freq.device, dtype=torch.get_default_dtype()
        )

    def _set_cos_sin_cache(self, seq_len, device, dtype):
        self.max_seq_len_cached = seq_len
        t = torch.arange(self.max_seq_len_cached, device=device, dtype=torch.int64).type_as(self.inv_freq)

        freqs = torch.outer(t, self.inv_freq)
        # Different from paper, but it uses a different permutation in order to obtain the same calculation
        emb = torch.cat((freqs, freqs), dim=-1)
        self.register_buffer("cos_cached", emb.cos().to(dtype), persistent=False)
        self.register_buffer("sin_cached", emb.sin().to(dtype), persistent=False)

    def forward(self, x, seq_len=None):
        # x: [bs, num_attention_heads, seq_len, head_size]
        if seq_len > self.max_seq_len_cached:
            self._set_cos_sin_cache(seq_len=seq_len, device=x.device, dtype=x.dtype)

        return (
            self.cos_cached[:seq_len].to(dtype=x.dtype),
            self.sin_cached[:seq_len].to(dtype=x.dtype),
        )

# Copied from transformers.models.llama.modeling_llama.rotate_half
def rotate_half(x):
    """Rotates half the hidden dims of the input."""
    x1 = x[..., : x.shape[-1] // 2]
    x2 = x[..., x.shape[-1] // 2 :]
    return torch.cat((-x2, x1), dim=-1)


# Copied from transformers.models.mistral.modeling_mistral.apply_rotary_pos_emb
def apply_rotary_pos_emb(q, k, cos, sin, position_ids, unsqueeze_dim=1):
    """Applies Rotary Position Embedding to the query and key tensors.

    Args:
        q (`torch.Tensor`): The query tensor.
        k (`torch.Tensor`): The key tensor.
        cos (`torch.Tensor`): The cosine part of the rotary embedding.
        sin (`torch.Tensor`): The sine part of the rotary embedding.
        position_ids (`torch.Tensor`):
            The position indices of the tokens corresponding to the query and key tensors. For example, this can be
            used to pass offsetted position ids when working with a KV-cache.
        unsqueeze_dim (`int`, *optional*, defaults to 1):
            The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
            sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
            that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
            k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
            cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
            the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
    Returns:
        `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
    """
    cos = cos[position_ids].unsqueeze(unsqueeze_dim)
    sin = sin[position_ids].unsqueeze(unsqueeze_dim)
    q_embed = (q * cos) + (rotate_half(q) * sin)
    k_embed = (k * cos) + (rotate_half(k) * sin)
    return q_embed, k_embed

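# Illustrative note (not in the upstream file): for a token at position m, each pair
# of channels taken from the two halves, (x1, x2), is rotated by the angle
# m * theta_i, i.e.
#   q_rot = [x1 * cos - x2 * sin, x2 * cos + x1 * sin]
# which is exactly (q * cos) + (rotate_half(q) * sin), since rotate_half maps
# [x1, x2] -> [-x2, x1].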
# Copied from transformers.models.llama.modeling_llama.repeat_kv
def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
    """
    This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
    num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
    """
    batch, num_key_value_heads, slen, head_dim = hidden_states.shape
    if n_rep == 1:
        return hidden_states
    hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
    return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)

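# Illustrative note (not in the upstream file): with 32 query heads and 8 KV heads
# (grouped-query attention), n_rep = 4, so a [B, 8, T, D] key/value tensor is
# expanded to [B, 32, T, D] by repeating each KV head for its group of query heads.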
# Copied from transformers.models.mistral.modeling_mistral.MistralAttention with Mistral->Mixtral
class MixtralAttention(nn.Module):
    """
    Multi-headed attention from 'Attention Is All You Need' paper. Modified to use sliding window attention: Longformer
    and "Generating Long Sequences with Sparse Transformers".
    """

    def __init__(self, config: MixtralConfig, layer_idx: Optional[int] = None):
        super().__init__()
        self.config = config
        self.layer_idx = layer_idx
        if layer_idx is None:
            logger.warning_once(
                f"Instantiating {self.__class__.__name__} without passing a `layer_idx` is not recommended and will "
                "lead to errors during the forward call if caching is used. Please make sure to provide a `layer_idx` "
                "when creating this class."
            )

        self.hidden_size = config.hidden_size
        self.num_heads = config.num_attention_heads
        self.head_dim = self.hidden_size // self.num_heads
        self.num_key_value_heads = config.num_key_value_heads
        self.num_key_value_groups = self.num_heads // self.num_key_value_heads
        self.max_position_embeddings = config.max_position_embeddings
        self.rope_theta = config.rope_theta
        self.is_causal = True
        self.attention_dropout = config.attention_dropout

        if (self.head_dim * self.num_heads) != self.hidden_size:
            raise ValueError(
                f"hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size}"
                f" and `num_heads`: {self.num_heads})."
            )
        self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=False)
        self.k_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False)
        self.v_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False)
        self.o_proj = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=False)

        self.rotary_emb = MixtralRotaryEmbedding(
            self.head_dim,
            max_position_embeddings=self.max_position_embeddings,
            base=self.rope_theta,
        )

    def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
        return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_value: Optional[Cache] = None,
        output_attentions: bool = False,
        use_cache: bool = False,
        **kwargs,
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
        if "padding_mask" in kwargs:
            warnings.warn(
                "Passing `padding_mask` is deprecated and will be removed in v4.37. Please make sure to use `attention_mask` instead."
            )
        bsz, q_len, _ = hidden_states.size()

        query_states = self.q_proj(hidden_states)
        key_states = self.k_proj(hidden_states)
        value_states = self.v_proj(hidden_states)

        query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
        key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
        value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)

        kv_seq_len = key_states.shape[-2]
        if past_key_value is not None:
            if self.layer_idx is None:
                raise ValueError(
                    f"The cache structure has changed since version v4.36. If you are using {self.__class__.__name__} "
                    "for auto-regressive decoding with k/v caching, please make sure to initialize the attention class "
                    "with a layer index."
                )
            kv_seq_len += past_key_value.get_usable_length(kv_seq_len, self.layer_idx)
        cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len)
        query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids)

        if past_key_value is not None:
            cache_kwargs = {"sin": sin, "cos": cos}  # Specific to RoPE models
            key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)

        # repeat k/v heads if n_kv_heads < n_heads
        key_states = repeat_kv(key_states, self.num_key_value_groups)
        value_states = repeat_kv(value_states, self.num_key_value_groups)

        attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim)

        if attn_weights.size() != (bsz, self.num_heads, q_len, kv_seq_len):
            raise ValueError(
                f"Attention weights should be of size {(bsz, self.num_heads, q_len, kv_seq_len)}, but is"
                f" {attn_weights.size()}"
            )

        if attention_mask is not None:
            if attention_mask.size() != (bsz, 1, q_len, kv_seq_len):
                raise ValueError(
                    f"Attention mask should be of size {(bsz, 1, q_len, kv_seq_len)}, but is {attention_mask.size()}"
                )

            attn_weights = attn_weights + attention_mask

        # upcast attention to fp32
        attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype)
        attn_weights = nn.functional.dropout(attn_weights, p=self.attention_dropout, training=self.training)
        attn_output = torch.matmul(attn_weights, value_states)

        if attn_output.size() != (bsz, self.num_heads, q_len, self.head_dim):
            raise ValueError(
                f"`attn_output` should be of size {(bsz, self.num_heads, q_len, self.head_dim)}, but is"
                f" {attn_output.size()}"
            )

        attn_output = attn_output.transpose(1, 2).contiguous()
        attn_output = attn_output.reshape(bsz, q_len, self.hidden_size)

        attn_output = self.o_proj(attn_output)

        if not output_attentions:
            attn_weights = None

        return attn_output, attn_weights, past_key_value

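# Illustrative note (not in the upstream file): this eager path materializes the full
# [bsz, num_heads, q_len, kv_seq_len] score matrix and runs the softmax in float32,
# which is what lets the flash-attention subclass below fall back to an equivalent
# computation whenever the per-head attention maps themselves are needed.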
# Copied from transformers.models.mistral.modeling_mistral.MistralFlashAttention2 with Mistral->Mixtral
class MixtralFlashAttention2(MixtralAttention):
    """
    Mixtral flash attention module. This module inherits from `MixtralAttention`, as the weights of the module stay
    untouched. The only required change is in the forward pass, where it needs to correctly call the public API of
    flash attention and deal with padding tokens in case the input contains any of them.
    """

    # Copied from transformers.models.llama.modeling_llama.LlamaFlashAttention2.__init__
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        # TODO: Should be removed once Flash Attention for RoCm is bumped to 2.1.
        # flash_attn<2.1 generates a top-left aligned causal mask, while what is needed here is bottom-right alignment, which was made the default for flash_attn>=2.1. This attribute is used to handle this difference. Reference: https://github.com/Dao-AILab/flash-attention/releases/tag/v2.1.0.
        # Beware that with flash_attn<2.1, using q_seqlen != k_seqlen (except for the case q_seqlen == 1) produces a wrong mask (top-left).
        self._flash_attn_uses_top_left_mask = not is_flash_attn_greater_or_equal_2_10()

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_value: Optional[Cache] = None,
        output_attentions: bool = False,
        use_cache: bool = False,
        **kwargs,
    ):
        if "padding_mask" in kwargs:
            warnings.warn(
                "Passing `padding_mask` is deprecated and will be removed in v4.37. Please make sure to use `attention_mask` instead."
            )

            # overwrite attention_mask with padding_mask
            attention_mask = kwargs.pop("padding_mask")
        if output_attentions:
            _, inspect, attn_weights, _ = self.forward_torch(
                hidden_states,
                attention_mask,
                position_ids,
                past_key_value,
                output_attentions,
                use_cache=False,
                **kwargs,
            )
        else:
            attn_weights = None
            inspect = None

        bsz, q_len, _ = hidden_states.size()

        query_states = self.q_proj(hidden_states)
        key_states = self.k_proj(hidden_states)
        value_states = self.v_proj(hidden_states)

        query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
        key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
        value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)

        kv_seq_len = key_states.shape[-2]
        if past_key_value is not None:
            if self.layer_idx is None:
                raise ValueError(
                    f"The cache structure has changed since version v4.36. If you are using {self.__class__.__name__} "
                    "for auto-regressive decoding with k/v caching, please make sure to initialize the attention class "
                    "with a layer index."
                )
            kv_seq_len += past_key_value.get_usable_length(kv_seq_len, self.layer_idx)

        # Because the input can be padded, the absolute sequence length depends on the max position id.
        rotary_seq_len = max(kv_seq_len, position_ids[:, -1].max().item()) + 1
        cos, sin = self.rotary_emb(value_states, seq_len=rotary_seq_len)

        query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids)

        use_sliding_windows = (
            _flash_supports_window_size
            and getattr(self.config, "sliding_window", None) is not None
            and kv_seq_len > self.config.sliding_window
        )

        if not _flash_supports_window_size:
            logger.warning_once(
                "The current flash attention version does not support sliding window attention, for a more memory efficient implementation"
                " make sure to upgrade flash-attn library."
            )

        if past_key_value is not None:
            # Activate slicing cache only if the config has a `sliding_window` attribute
            cache_has_contents = past_key_value.get_seq_length(self.layer_idx) > 0
            if (
                getattr(self.config, "sliding_window", None) is not None
                and kv_seq_len > self.config.sliding_window
                and cache_has_contents
            ):
                slicing_tokens = 1 - self.config.sliding_window

                past_key = past_key_value[self.layer_idx][0]
                past_value = past_key_value[self.layer_idx][1]

                past_key = past_key[:, :, slicing_tokens:, :].contiguous()
                past_value = past_value[:, :, slicing_tokens:, :].contiguous()

                if past_key.shape[-2] != self.config.sliding_window - 1:
                    raise ValueError(
                        f"past key must have a shape of (`batch_size, num_heads, self.config.sliding_window-1, head_dim`), got"
                        f" {past_key.shape}"
                    )

                if attention_mask is not None:
                    attention_mask = attention_mask[:, slicing_tokens:]
                    attention_mask = torch.cat([attention_mask, torch.ones_like(attention_mask[:, -1:])], dim=-1)

            cache_kwargs = {"sin": sin, "cos": cos}  # Specific to RoPE models
            key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)

        # repeat k/v heads if n_kv_heads < n_heads
        key_states = repeat_kv(key_states, self.num_key_value_groups)
        value_states = repeat_kv(value_states, self.num_key_value_groups)
        dropout_rate = 0.0 if not self.training else self.attention_dropout

        # In PEFT, usually we cast the layer norms in float32 for training stability reasons,
        # therefore the input hidden states get silently cast to float32. Hence, we need to
        # cast them back to float16 just to be sure everything works as expected.
        input_dtype = query_states.dtype
        if input_dtype == torch.float32:
            if torch.is_autocast_enabled():
                target_dtype = torch.get_autocast_gpu_dtype()
            # Handle the case where the model is quantized
            elif hasattr(self.config, "_pre_quantization_dtype"):
                target_dtype = self.config._pre_quantization_dtype
            else:
                target_dtype = self.q_proj.weight.dtype

            logger.warning_once(
                f"The input hidden states seem to be silently cast to float32; this might be related to"
                f" the fact that you have upcasted embedding or layer norm layers in float32. We will cast back the input to"
                f" {target_dtype}."
            )

            query_states = query_states.to(target_dtype)
            key_states = key_states.to(target_dtype)
            value_states = value_states.to(target_dtype)

        # Reshape to the expected shape for Flash Attention
        query_states = query_states.transpose(1, 2)
        key_states = key_states.transpose(1, 2)
        value_states = value_states.transpose(1, 2)

        attn_output = self._flash_attention_forward(
            query_states,
            key_states,
            value_states,
            attention_mask,
            q_len,
            dropout=dropout_rate,
            use_sliding_windows=use_sliding_windows,
        )

        attn_output = attn_output.reshape(bsz, q_len, self.hidden_size).contiguous()
        attn_output = self.o_proj(attn_output)

        if not output_attentions:
            attn_weights = None

        return attn_output, attn_weights, past_key_value

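    # Illustrative note (not in the upstream file): flash-attn never materializes the
    # attention matrix, so it cannot return per-head weights; `forward_torch` below
    # re-runs the same attention in plain PyTorch to expose the weights and the
    # intermediate `inspect` tensors (queries, keys, pre-projection outputs).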
    def forward_torch(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_value: Optional[Any] = None,
        output_attentions: bool = False,
        use_cache: bool = False,
        **kwargs,
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
        if "padding_mask" in kwargs:
            warnings.warn(
                "Passing `padding_mask` is deprecated and will be removed in v4.37. Please make sure to use `attention_mask` instead."
            )

        bsz, q_len, _ = hidden_states.size()
        inspect = {}

        query_states = self.q_proj(hidden_states)
        key_states = self.k_proj(hidden_states)
        value_states = self.v_proj(hidden_states)

        query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
        key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
        value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)

        kv_seq_len = key_states.shape[-2]
        # print(past_key_value)
        if past_key_value is not None:
            if self.layer_idx is None:
                raise ValueError(
                    f"The cache structure has changed since version v4.36. If you are using {self.__class__.__name__} "
                    "for auto-regressive decoding with k/v caching, please make sure to initialize the attention class "
                    "with a layer index."
                )
            kv_seq_len += past_key_value.get_usable_length(kv_seq_len, self.layer_idx)
        cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len)
        query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids)

        if past_key_value is not None:
            if use_cache:
                cache_kwargs = {"sin": sin, "cos": cos}  # Specific to RoPE models
                key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)
            else:
                key_states = torch.cat([past_key_value.key_cache[self.layer_idx], key_states], dim=-2)
                value_states = torch.cat([past_key_value.value_cache[self.layer_idx], value_states], dim=-2)

        key_states = repeat_kv(key_states, self.num_key_value_groups)
        value_states = repeat_kv(value_states, self.num_key_value_groups)

        # print(query_states.size())
        # print(key_states.size())
        inspect["query"] = query_states
        inspect["key"] = key_states
        attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim)
        # write our (head-blocking) mask here
        # print(attn_weights.size())  # [batch_size, head, q, c]

        if attn_weights.size() != (bsz, self.num_heads, q_len, kv_seq_len):
            raise ValueError(
                f"Attention weights should be of size {(bsz, self.num_heads, q_len, kv_seq_len)}, but is"
                f" {attn_weights.size()}"
            )

        if attention_mask is not None:
            if attention_mask.size() != (bsz, 1, q_len, kv_seq_len):
                raise ValueError(
                    f"Attention mask should be of size {(bsz, 1, q_len, kv_seq_len)}, but is {attention_mask.size()}"
                )
            attn_weights = attn_weights + attention_mask
        if "block_list" in kwargs:
            for h in kwargs["block_list"]:
                if self.layer_idx == h[0]:
                    '''
                    if h[1] == 0:
                        target_head = 1
                    elif h[1] == 31:
                        target_head = 30
                    else:
                        target_head = h[1] - 1

                    attn_weights[:, h[1], :, :] = attn_weights[:, target_head, :, :]
                    '''
                    attn_weights[:, h[1], :, :] = 0
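        # Illustrative note (not in the upstream file): `block_list` holds (layer, head)
        # pairs. Zeroing a head's scores after the mask has been added makes its softmax
        # uniform over all key positions, effectively ablating that head's learned
        # pattern; this is how individual (e.g. retrieval) heads are knocked out.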
        # upcast attention to fp32
        attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype)
        attn_weights = nn.functional.dropout(attn_weights, p=self.attention_dropout, training=self.training)
        attn_output = torch.matmul(attn_weights, value_states)
        inspect["attn_output_before_o_proj"] = attn_output

        if attn_output.size() != (bsz, self.num_heads, q_len, self.head_dim):
            raise ValueError(
                f"`attn_output` should be of size {(bsz, self.num_heads, q_len, self.head_dim)}, but is"
                f" {attn_output.size()}"
            )

        attn_output = attn_output.transpose(1, 2).contiguous()

        attn_output = attn_output.reshape(bsz, q_len, self.hidden_size)

        attn_output = self.o_proj(attn_output)

        if not output_attentions:
            attn_weights = None

        return attn_output, inspect, attn_weights, past_key_value

    def _flash_attention_forward(
        self,
        query_states,
        key_states,
        value_states,
        attention_mask,
        query_length,
        dropout=0.0,
        softmax_scale=None,
        use_sliding_windows=False,
    ):
        """
        Calls the forward method of Flash Attention - if the input hidden states contain at least one padding token,
        first unpad the input, then compute the attention scores and pad the final attention scores back.

        Args:
            query_states (`torch.Tensor`):
                Input query states to be passed to Flash Attention API
            key_states (`torch.Tensor`):
                Input key states to be passed to Flash Attention API
            value_states (`torch.Tensor`):
                Input value states to be passed to Flash Attention API
            attention_mask (`torch.Tensor`):
                The padding mask - corresponds to a tensor of size `(batch_size, seq_len)` where 0 stands for the
                position of padding tokens and 1 for the position of non-padding tokens.
            dropout (`float`, *optional*):
                Attention dropout
            softmax_scale (`float`, *optional*):
                The scaling of QK^T before applying softmax. Defaults to 1 / sqrt(head_dim)
            use_sliding_windows (`bool`, *optional*):
                Whether to activate sliding window attention.
        """
        if not self._flash_attn_uses_top_left_mask:
            causal = self.is_causal
        else:
            # TODO: Remove the `query_length != 1` check once Flash Attention for RoCm is bumped to 2.1. For details, please see the comment in LlamaFlashAttention2 __init__.
            causal = self.is_causal and query_length != 1

        # Contains at least one padding token in the sequence
        if attention_mask is not None:
            batch_size = query_states.shape[0]
            query_states, key_states, value_states, indices_q, cu_seq_lens, max_seq_lens = self._upad_input(
                query_states, key_states, value_states, attention_mask, query_length
            )

            cu_seqlens_q, cu_seqlens_k = cu_seq_lens
            max_seqlen_in_batch_q, max_seqlen_in_batch_k = max_seq_lens

            if not use_sliding_windows:
                attn_output_unpad = flash_attn_varlen_func(
                    query_states,
                    key_states,
                    value_states,
                    cu_seqlens_q=cu_seqlens_q,
                    cu_seqlens_k=cu_seqlens_k,
                    max_seqlen_q=max_seqlen_in_batch_q,
                    max_seqlen_k=max_seqlen_in_batch_k,
                    dropout_p=dropout,
                    softmax_scale=softmax_scale,
                    causal=causal,
                )
            else:
                attn_output_unpad = flash_attn_varlen_func(
                    query_states,
                    key_states,
                    value_states,
                    cu_seqlens_q=cu_seqlens_q,
                    cu_seqlens_k=cu_seqlens_k,
                    max_seqlen_q=max_seqlen_in_batch_q,
                    max_seqlen_k=max_seqlen_in_batch_k,
                    dropout_p=dropout,
                    softmax_scale=softmax_scale,
                    causal=causal,
                    window_size=(self.config.sliding_window, self.config.sliding_window),
                )

            attn_output = pad_input(attn_output_unpad, indices_q, batch_size, query_length)
        else:
            if not use_sliding_windows:
                attn_output = flash_attn_func(
                    query_states,
                    key_states,
                    value_states,
                    dropout,
                    softmax_scale=softmax_scale,
                    causal=causal,
                )
            else:
                attn_output = flash_attn_func(
                    query_states,
                    key_states,
                    value_states,
                    dropout,
                    softmax_scale=softmax_scale,
                    causal=causal,
                    window_size=(self.config.sliding_window, self.config.sliding_window),
                )

        return attn_output

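    # --- Shape sketch (hypothetical sizes, for orientation only) ---
    # For a left-padded batch of 2 sequences with true lengths [5, 3] and
    # query_length == 8, _upad_input below packs q/k/v to (5 + 3, num_heads, head_dim)
    # and hands flash_attn_varlen_func the boundaries cu_seqlens = [0, 5, 8];
    # pad_input then scatters the packed output back to (2, 8, num_heads, head_dim).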
    def _upad_input(self, query_layer, key_layer, value_layer, attention_mask, query_length):
        batch_size, kv_seq_len, num_heads, head_dim = key_layer.shape

        # On the first iteration we need to properly re-create the padding mask
        # by slicing it at the proper place
        if kv_seq_len != attention_mask.shape[-1]:
            attention_mask_num_tokens = attention_mask.shape[-1]
            attention_mask = attention_mask[:, attention_mask_num_tokens - kv_seq_len :]

        indices_k, cu_seqlens_k, max_seqlen_in_batch_k = _get_unpad_data(attention_mask)

        key_layer = index_first_axis(key_layer.reshape(batch_size * kv_seq_len, num_heads, head_dim), indices_k)
        value_layer = index_first_axis(value_layer.reshape(batch_size * kv_seq_len, num_heads, head_dim), indices_k)

        if query_length == kv_seq_len:
            query_layer = index_first_axis(
                query_layer.reshape(batch_size * kv_seq_len, num_heads, head_dim), indices_k
            )
            cu_seqlens_q = cu_seqlens_k
            max_seqlen_in_batch_q = max_seqlen_in_batch_k
            indices_q = indices_k
        elif query_length == 1:
            max_seqlen_in_batch_q = 1
            cu_seqlens_q = torch.arange(
                batch_size + 1, dtype=torch.int32, device=query_layer.device
            )  # There is a memcpy here, that is very bad.
            indices_q = cu_seqlens_q[:-1]
            query_layer = query_layer.squeeze(1)
        else:
            # The -q_len: slice assumes left padding.
            attention_mask = attention_mask[:, -query_length:]
            query_layer, indices_q, cu_seqlens_q, max_seqlen_in_batch_q = unpad_input(query_layer, attention_mask)

        return (
            query_layer,
            key_layer,
            value_layer,
            indices_q,
            (cu_seqlens_q, cu_seqlens_k),
            (max_seqlen_in_batch_q, max_seqlen_in_batch_k),
        )


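# --- Worked example (hypothetical values, for orientation only) ---
# For attention_mask = [[0, 1, 1], [1, 1, 1]] (left padding), _get_unpad_data
# returns the flat indices of the 5 real tokens, cu_seqlens = [0, 2, 5]
# (prefix sums of the per-sequence lengths [2, 3]) and max_seqlen_in_batch = 3,
# which is exactly the triplet consumed by _upad_input above.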
# Copied from transformers.models.mistral.modeling_mistral.MistralSdpaAttention with Mistral->Mixtral
class MixtralSdpaAttention(MixtralAttention):
    """
    Mixtral attention module using torch.nn.functional.scaled_dot_product_attention. This module inherits from
    `MixtralAttention` as the weights of the module stay untouched. The only changes are on the forward pass, to
    adapt to the SDPA API.
    """

    # Adapted from MixtralAttention.forward
    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_value: Optional[Cache] = None,
        output_attentions: bool = False,
        use_cache: bool = False,
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
        if output_attentions:
            # TODO: Improve this warning with e.g. `model.config.attn_implementation = "manual"` once this is implemented.
            logger.warning_once(
                "MixtralModel is using MixtralSdpaAttention, but `torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`. Falling back to the manual attention implementation, "
                'but specifying the manual implementation will be required from Transformers version v5.0.0 onwards. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.'
            )
            return super().forward(
                hidden_states=hidden_states,
                attention_mask=attention_mask,
                position_ids=position_ids,
                past_key_value=past_key_value,
                output_attentions=output_attentions,
                use_cache=use_cache,
            )

        bsz, q_len, _ = hidden_states.size()

        query_states = self.q_proj(hidden_states)
        key_states = self.k_proj(hidden_states)
        value_states = self.v_proj(hidden_states)

        query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
        key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
        value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)

        kv_seq_len = key_states.shape[-2]
        if past_key_value is not None:
            kv_seq_len += past_key_value.get_usable_length(kv_seq_len, self.layer_idx)
        cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len)

        query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids)

        if past_key_value is not None:
            cache_kwargs = {"sin": sin, "cos": cos}  # Specific to RoPE models
            key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)

        key_states = repeat_kv(key_states, self.num_key_value_groups)
        value_states = repeat_kv(value_states, self.num_key_value_groups)

        if attention_mask is not None:
            if attention_mask.size() != (bsz, 1, q_len, kv_seq_len):
                raise ValueError(
                    f"Attention mask should be of size {(bsz, 1, q_len, kv_seq_len)}, but is {attention_mask.size()}"
                )

        # SDPA with memory-efficient backend is currently (torch==2.1.2) bugged with non-contiguous inputs with custom attn_mask,
        # Reference: https://github.com/pytorch/pytorch/issues/112577.
        if query_states.device.type == "cuda" and attention_mask is not None:
            query_states = query_states.contiguous()
            key_states = key_states.contiguous()
            value_states = value_states.contiguous()

        attn_output = torch.nn.functional.scaled_dot_product_attention(
            query_states,
            key_states,
            value_states,
            attn_mask=attention_mask,
            dropout_p=self.attention_dropout if self.training else 0.0,
            # The q_len > 1 is necessary to match with AttentionMaskConverter.to_causal_4d that does not create a causal mask in case q_len == 1.
            is_causal=self.is_causal and attention_mask is None and q_len > 1,
        )

        attn_output = attn_output.transpose(1, 2).contiguous()
        attn_output = attn_output.view(bsz, q_len, self.hidden_size)

        attn_output = self.o_proj(attn_output)

        return attn_output, None, past_key_value


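# --- GQA sketch (hypothetical sizes matching Mixtral-8x7B, for orientation only) ---
# With num_heads = 32 and num_key_value_heads = 8, num_key_value_groups = 4, so the
# repeat_kv calls in the SDPA forward above expand k/v from (bsz, 8, seq, head_dim)
# to (bsz, 32, seq, head_dim) before scaled_dot_product_attention runs.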
MIXTRAL_ATTENTION_CLASSES = {
    "eager": MixtralAttention,
    "flash_attention_2": MixtralFlashAttention2,
    "sdpa": MixtralSdpaAttention,
}


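# --- Selection sketch (hypothetical snippet, not part of the original file) ---
# The decoder layer below picks its attention class from this mapping, e.g.:
#
#     config._attn_implementation = "eager"  # or "sdpa" / "flash_attention_2"
#     attn = MIXTRAL_ATTENTION_CLASSES[config._attn_implementation](config, layer_idx=0)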
class MixtralBlockSparseTop2MLP(nn.Module):
    def __init__(self, config: MixtralConfig):
        super().__init__()
        self.ffn_dim = config.intermediate_size
        self.hidden_dim = config.hidden_size

        self.w1 = nn.Linear(self.hidden_dim, self.ffn_dim, bias=False)
        self.w2 = nn.Linear(self.ffn_dim, self.hidden_dim, bias=False)
        self.w3 = nn.Linear(self.hidden_dim, self.ffn_dim, bias=False)

        self.act_fn = ACT2FN[config.hidden_act]

    def forward(self, hidden_states):
        current_hidden_states = self.act_fn(self.w1(hidden_states)) * self.w3(hidden_states)
        current_hidden_states = self.w2(current_hidden_states)
        return current_hidden_states


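# --- Expert MLP sketch (for orientation only) ---
# Each expert above is a gated (SwiGLU-style) feed-forward:
#     y = w2(act_fn(w1(x)) * w3(x))
# w1 produces the gate, w3 the value, and w2 projects back to hidden_dim.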
class MixtralBLockSparseTop2MLP(MixtralBlockSparseTop2MLP):
    def __init__(self, *args, **kwargs):
        logger.warning_once(
            "MixtralBLockSparseTop2MLP is deprecated in favor of MixtralBlockSparseTop2MLP and will be removed in v4.40."
        )
        super().__init__(*args, **kwargs)


class MixtralSparseMoeBlock(nn.Module):
    """
    This implementation is strictly equivalent to standard MoE with full capacity (no dropped tokens). It is faster
    since it formulates MoE operations in terms of block-sparse operations to accommodate imbalanced assignments of
    tokens to experts, whereas standard MoE either (1) drops tokens at the cost of reduced performance or (2) sets
    the capacity factor to the number of experts and thus wastes computation and memory on padding.
    """

    def __init__(self, config):
        super().__init__()
        self.hidden_dim = config.hidden_size
        self.ffn_dim = config.intermediate_size
        self.num_experts = config.num_local_experts
        self.top_k = config.num_experts_per_tok

        # gating
        self.gate = nn.Linear(self.hidden_dim, self.num_experts, bias=False)

        self.experts = nn.ModuleList([MixtralBlockSparseTop2MLP(config) for _ in range(self.num_experts)])

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        batch_size, sequence_length, hidden_dim = hidden_states.shape
        hidden_states = hidden_states.view(-1, hidden_dim)
        # router_logits: (batch * sequence_length, n_experts)
        router_logits = self.gate(hidden_states)

        routing_weights = F.softmax(router_logits, dim=1, dtype=torch.float)
        routing_weights, selected_experts = torch.topk(routing_weights, self.top_k, dim=-1)
        routing_weights /= routing_weights.sum(dim=-1, keepdim=True)
        # we cast back to the input dtype
        routing_weights = routing_weights.to(hidden_states.dtype)

        final_hidden_states = torch.zeros(
            (batch_size * sequence_length, hidden_dim), dtype=hidden_states.dtype, device=hidden_states.device
        )

        # One-hot encode the selected experts to create an expert mask;
        # this will be used to easily index which expert is going to be solicited.
        expert_mask = torch.nn.functional.one_hot(selected_experts, num_classes=self.num_experts).permute(2, 1, 0)

        # Loop over all available experts in the model and perform the computation on each expert
        for expert_idx in range(self.num_experts):
            expert_layer = self.experts[expert_idx]
            idx, top_x = torch.where(expert_mask[expert_idx])

            if top_x.shape[0] == 0:
                continue

            # in torch it is faster to index using lists than torch tensors
            top_x_list = top_x.tolist()
            idx_list = idx.tolist()

            # Index the correct hidden states and compute the expert hidden state for
            # the current expert. We need to make sure to multiply the output hidden
            # states by `routing_weights` on the corresponding tokens (top-1 and top-2)
            current_state = hidden_states[None, top_x_list].reshape(-1, hidden_dim)
            current_hidden_states = expert_layer(current_state) * routing_weights[top_x_list, idx_list, None]

            # However `index_add_` only supports torch tensors for indexing, so we use
            # the `top_x` tensor here.
            final_hidden_states.index_add_(0, top_x, current_hidden_states.to(hidden_states.dtype))
        final_hidden_states = final_hidden_states.reshape(batch_size, sequence_length, hidden_dim)
        return final_hidden_states, router_logits


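# --- Routing worked example (hypothetical numbers, for orientation only) ---
# With num_experts = 8 and top_k = 2, a token whose softmaxed router scores peak at
# experts 3 and 5 with weights 0.4 and 0.2 is renormalized to (2/3, 1/3); its output
# is 2/3 * expert_3(x) + 1/3 * expert_5(x), accumulated through index_add_ above.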
class MixtralDecoderLayer(nn.Module):
    def __init__(self, config: MixtralConfig, layer_idx: int):
        super().__init__()
        self.hidden_size = config.hidden_size

        self.self_attn = MIXTRAL_ATTENTION_CLASSES[config._attn_implementation](config, layer_idx)

        self.block_sparse_moe = MixtralSparseMoeBlock(config)
        self.input_layernorm = MixtralRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.post_attention_layernorm = MixtralRMSNorm(config.hidden_size, eps=config.rms_norm_eps)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_value: Optional[Tuple[torch.Tensor]] = None,
        output_attentions: Optional[bool] = False,
        output_router_logits: Optional[bool] = False,
        use_cache: Optional[bool] = False,
        attn_mode: str = "flash",
        **kwargs,
    ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
        if "padding_mask" in kwargs:
            warnings.warn(
                "Passing `padding_mask` is deprecated and will be removed in v4.37. Please make sure to use `attention_mask` instead."
            )
        """
        Args:
            hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
            attention_mask (`torch.FloatTensor`, *optional*): attention mask of size
                `(batch, sequence_length)` where padding elements are indicated by 0.
            past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
            output_router_logits (`bool`, *optional*):
                Whether or not to return the logits of all the routers. They are useful for computing the router loss,
                and should not be returned during inference.
            use_cache (`bool`, *optional*):
                If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
                (see `past_key_values`).
        """

        residual = hidden_states

        hidden_states = self.input_layernorm(hidden_states)

        # Self Attention
        if attn_mode == "flash":
            hidden_states, self_attn_weights, present_key_value = self.self_attn(
                hidden_states=hidden_states,
                attention_mask=attention_mask,
                position_ids=position_ids,
                past_key_value=past_key_value,
                output_attentions=output_attentions,
                use_cache=use_cache,
            )
        else:
            hidden_states, inspect, self_attn_weights, present_key_value = self.self_attn.forward_torch(
                hidden_states=hidden_states,
                attention_mask=attention_mask,
                position_ids=position_ids,
                past_key_value=past_key_value,
                output_attentions=output_attentions,
                use_cache=use_cache,
                **kwargs,
            )
        hidden_states = residual + hidden_states

        # Fully Connected
        residual = hidden_states
        hidden_states = self.post_attention_layernorm(hidden_states)
        hidden_states, router_logits = self.block_sparse_moe(hidden_states)
        hidden_states = residual + hidden_states

        outputs = (hidden_states,)

        if output_attentions:
            outputs += (self_attn_weights,)

        if use_cache:
            outputs += (present_key_value,)

        if output_router_logits:
            outputs += (router_logits,)

        return outputs


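# --- Dispatch sketch (hypothetical call, not part of the original file) ---
# attn_mode="flash" keeps the stock self_attn path; any other value routes through
# forward_torch, which materializes the full score matrix so block_list can zero
# individual heads, e.g.:
#
#     layer_outputs = decoder_layer(hidden_states, attn_mode="torch", block_list=[(0, 7)])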
MIXTRAL_START_DOCSTRING = r"""
    This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
    library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
    etc.)

    This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
    Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage
    and behavior.

    Parameters:
        config ([`MixtralConfig`]):
            Model configuration class with all the parameters of the model. Initializing with a config file does not
            load the weights associated with the model, only the configuration. Check out the
            [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""


@add_start_docstrings(
    "The bare Mixtral Model outputting raw hidden-states without any specific head on top.",
    MIXTRAL_START_DOCSTRING,
)
# Copied from transformers.models.mistral.modeling_mistral.MistralPreTrainedModel with Mistral->Mixtral
class MixtralPreTrainedModel(PreTrainedModel):
    config_class = MixtralConfig
    base_model_prefix = "model"
    supports_gradient_checkpointing = True
    _no_split_modules = ["MixtralDecoderLayer"]
    _skip_keys_device_placement = "past_key_values"
    _supports_flash_attn_2 = True
    _supports_sdpa = True
    _supports_cache_class = True

    def _init_weights(self, module):
        std = self.config.initializer_range
        if isinstance(module, nn.Linear):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.padding_idx is not None:
                module.weight.data[module.padding_idx].zero_()


MIXTRAL_INPUTS_DOCSTRING = r"""
    Args:
        input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
            Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
            it.

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            [What are input IDs?](../glossary#input-ids)
        attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
            Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.

            [What are attention masks?](../glossary#attention-mask)

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
            `past_key_values`).

            If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`]
            and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more
            information on the default strategy.

            - 1 indicates the head is **not masked**,
            - 0 indicates the head is **masked**.
        position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
            config.n_positions - 1]`.

            [What are position IDs?](../glossary#position-ids)
        past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
            Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
            `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape
            `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.

            Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
            blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.

            If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
            don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
            `decoder_input_ids` of shape `(batch_size, sequence_length)`.
        inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
            Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
            is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
            model's internal embedding lookup matrix.
        use_cache (`bool`, *optional*):
            If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
            `past_key_values`).
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
            tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        output_router_logits (`bool`, *optional*):
            Whether or not to return the logits of all the routers. They are useful for computing the router loss, and
            should not be returned during inference.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""


@add_start_docstrings(
    "The bare Mixtral Model outputting raw hidden-states without any specific head on top.",
    MIXTRAL_START_DOCSTRING,
)
# Copied from transformers.models.mistral.modeling_mistral.MistralModel with MISTRAL->MIXTRAL,Mistral->Mixtral
class MixtralModel(MixtralPreTrainedModel):
    """
    Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`MixtralDecoderLayer`]

    Args:
        config: MixtralConfig
    """

    def __init__(self, config: MixtralConfig):
        super().__init__(config)
        self.padding_idx = config.pad_token_id
        self.vocab_size = config.vocab_size

        self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
        self.layers = nn.ModuleList(
            [MixtralDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
        )
        self._attn_implementation = config._attn_implementation
        self.norm = MixtralRMSNorm(config.hidden_size, eps=config.rms_norm_eps)

        self.gradient_checkpointing = False
        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.embed_tokens

    def set_input_embeddings(self, value):
        self.embed_tokens = value

    # Ignore copy
    @add_start_docstrings_to_model_forward(MIXTRAL_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids: torch.LongTensor = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[List[torch.FloatTensor]] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        output_router_logits: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        attn_mode: str = "flash",
        block_list: list = None,
    ) -> Union[Tuple, MoeModelOutputWithPast]:
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_router_logits = (
            output_router_logits if output_router_logits is not None else self.config.output_router_logits
        )
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        use_cache = use_cache if use_cache is not None else self.config.use_cache

        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        # retrieve input_ids and inputs_embeds
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time")
        elif input_ids is not None:
            batch_size, seq_length = input_ids.shape
        elif inputs_embeds is not None:
            batch_size, seq_length, _ = inputs_embeds.shape
        else:
            raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds")

        past_key_values_length = 0

        if self.gradient_checkpointing and self.training:
            if use_cache:
                logger.warning_once(
                    "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
                )
                use_cache = False

        if use_cache:
            use_legacy_cache = not isinstance(past_key_values, Cache)
            if use_legacy_cache:
                past_key_values = DynamicCache.from_legacy_cache(past_key_values)
            past_key_values_length = past_key_values.get_usable_length(seq_length)

        if position_ids is None:
            device = input_ids.device if input_ids is not None else inputs_embeds.device
            position_ids = torch.arange(
                past_key_values_length, seq_length + past_key_values_length, dtype=torch.long, device=device
            )
            position_ids = position_ids.unsqueeze(0).view(-1, seq_length)
        else:
            position_ids = position_ids.view(-1, seq_length).long()

        if inputs_embeds is None:
            inputs_embeds = self.embed_tokens(input_ids)

        if attention_mask is not None and self._attn_implementation == "flash_attention_2" and use_cache:
            is_padding_right = attention_mask[:, -1].sum().item() != batch_size
            if is_padding_right:
                raise ValueError(
                    "You are attempting to perform batched generation with padding_side='right';"
                    " this may lead to unexpected behaviour for the Flash Attention version of Mixtral. Make sure to"
                    " call `tokenizer.padding_side = 'left'` before tokenizing the input."
                )

        if self._attn_implementation == "flash_attention_2":
            # 2d mask is passed through the layers
            attention_mask = attention_mask if (attention_mask is not None and 0 in attention_mask) else None
        elif self._attn_implementation == "sdpa" and not output_attentions:
            # output_attentions=True can not be supported when using SDPA, and we fall back on
            # the manual implementation that requires a 4D causal mask in all cases.
            attention_mask = _prepare_4d_causal_attention_mask_for_sdpa(
                attention_mask,
                (batch_size, seq_length),
                inputs_embeds,
                past_key_values_length,
            )
        else:
            # 4d mask is passed through the layers
            attention_mask = _prepare_4d_causal_attention_mask(
                attention_mask,
                (batch_size, seq_length),
                inputs_embeds,
                past_key_values_length,
                sliding_window=self.config.sliding_window,
            )

        hidden_states = inputs_embeds

        # decoder layers
        all_hidden_states = () if output_hidden_states else None
        all_self_attns = () if output_attentions else None
        all_router_logits = () if output_router_logits else None
        next_decoder_cache = None

        if block_list:
            kwargs = {"block_list": block_list}
        else:
            kwargs = {}

        for decoder_layer in self.layers:
            if output_hidden_states:
                all_hidden_states += (hidden_states,)

            if self.gradient_checkpointing and self.training:
                layer_outputs = self._gradient_checkpointing_func(
                    decoder_layer.__call__,
                    hidden_states,
                    attention_mask,
                    position_ids,
                    past_key_values,
                    output_attentions,
                    output_router_logits,
                    use_cache,
                )
            else:
                layer_outputs = decoder_layer(
                    hidden_states,
                    attention_mask=attention_mask,
                    position_ids=position_ids,
                    past_key_value=past_key_values,
                    output_attentions=output_attentions,
                    output_router_logits=output_router_logits,
                    use_cache=use_cache,
                    attn_mode=attn_mode,
                    **kwargs,
                )

            hidden_states = layer_outputs[0]

            if use_cache:
                next_decoder_cache = layer_outputs[2 if output_attentions else 1]

            if output_attentions:
                all_self_attns += (layer_outputs[1],)

            if output_router_logits:
                all_router_logits += (layer_outputs[-1],)

        hidden_states = self.norm(hidden_states)

        # add hidden states from the last decoder layer
        if output_hidden_states:
            all_hidden_states += (hidden_states,)

        next_cache = None
        if use_cache:
            next_cache = next_decoder_cache.to_legacy_cache() if use_legacy_cache else next_decoder_cache

        if not return_dict:
            return tuple(
                v
                for v in [hidden_states, next_cache, all_hidden_states, all_self_attns, all_router_logits]
                if v is not None
            )
        return MoeModelOutputWithPast(
            last_hidden_state=hidden_states,
            past_key_values=next_cache,
            hidden_states=all_hidden_states,
            attentions=all_self_attns,
            router_logits=all_router_logits,
        )


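# --- Threading sketch (hypothetical call, not part of the original file) ---
# block_list enters the model here and is forwarded to every decoder layer as a
# kwarg, so one call masks the listed heads in whichever layers match layer_idx:
#
#     out = model(input_ids, attn_mode="torch", block_list=[(3, 12), (7, 0)])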
class MixtralForCausalLM(MixtralPreTrainedModel):
    _tied_weights_keys = ["lm_head.weight"]

    def __init__(self, config):
        super().__init__(config)
        self.model = MixtralModel(config)
        self.vocab_size = config.vocab_size
        self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
        self.router_aux_loss_coef = config.router_aux_loss_coef
        self.num_experts = config.num_local_experts
        self.num_experts_per_tok = config.num_experts_per_tok
        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.model.embed_tokens

    def set_input_embeddings(self, value):
        self.model.embed_tokens = value

    def get_output_embeddings(self):
        return self.lm_head

    def set_output_embeddings(self, new_embeddings):
        self.lm_head = new_embeddings

    def set_decoder(self, decoder):
        self.model = decoder

    def get_decoder(self):
        return self.model

    @add_start_docstrings_to_model_forward(MIXTRAL_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=MoeCausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC)
    # Ignore copy
    def forward(
        self,
        input_ids: torch.LongTensor = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[List[torch.FloatTensor]] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        output_router_logits: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        attn_mode: str = "flash",
        block_list: list = None,
    ) -> Union[Tuple, MoeCausalLMOutputWithPast]:
        r"""
        Args:
            labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
                Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
                config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
                (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.

        Returns:

        Example:

        ```python
        >>> from transformers import AutoTokenizer, MixtralForCausalLM

        >>> model = MixtralForCausalLM.from_pretrained("mistralai/Mixtral-8x7B-v0.1")
        >>> tokenizer = AutoTokenizer.from_pretrained("mistralai/Mixtral-8x7B-v0.1")

        >>> prompt = "Hey, are you conscious? Can you talk to me?"
        >>> inputs = tokenizer(prompt, return_tensors="pt")

        >>> # Generate
        >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
        >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
        "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
        ```"""

        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_router_logits = (
            output_router_logits if output_router_logits is not None else self.config.output_router_logits
        )

        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        # decoder outputs consist of (dec_features, layer_state, dec_hidden, dec_attn)

        outputs = self.model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            output_router_logits=output_router_logits,
            return_dict=return_dict,
            attn_mode=attn_mode,
            block_list=block_list,
        )

        hidden_states = outputs[0]
        logits = self.lm_head(hidden_states)
        logits = logits.float()

        loss = None
        if labels is not None:
            # Shift so that tokens < n predict n
            shift_logits = logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            # Flatten the tokens
            loss_fct = CrossEntropyLoss()
            shift_logits = shift_logits.view(-1, self.config.vocab_size)
            shift_labels = shift_labels.view(-1)
            # Enable model parallelism
            shift_labels = shift_labels.to(shift_logits.device)
            loss = loss_fct(shift_logits, shift_labels)

        aux_loss = None
        if output_router_logits:
            aux_loss = load_balancing_loss_func(
                outputs.router_logits if return_dict else outputs[-1],
                self.num_experts,
                self.num_experts_per_tok,
                attention_mask,
            )
            if labels is not None:
                loss += self.router_aux_loss_coef * aux_loss.to(loss.device)  # make sure it resides on the same device

        if not return_dict:
            output = (logits,) + outputs[1:]
            if output_router_logits:
                output = (aux_loss,) + output
            return (loss,) + output if loss is not None else output

        return MoeCausalLMOutputWithPast(
            loss=loss,
            aux_loss=aux_loss,
            logits=logits,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
            router_logits=outputs.router_logits,
        )

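    # --- Loss sketch (for orientation only) ---
    # When labels and output_router_logits are both set, the objective above is
    #     loss = cross_entropy(shift_logits, shift_labels)
    #            + router_aux_loss_coef * load_balancing_loss(router_logits)
    # where the auxiliary term pushes the top-2 router toward a balanced expert load.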
    def prepare_inputs_for_generation(
        self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, **kwargs
    ):
        # Omit tokens covered by past_key_values
        if past_key_values is not None:
            if isinstance(past_key_values, Cache):
                cache_length = past_key_values.get_seq_length()
                past_length = past_key_values.seen_tokens
                max_cache_length = past_key_values.get_max_length()
            else:
                cache_length = past_length = past_key_values[0][0].shape[2]
                max_cache_length = None

            # Keep only the unprocessed tokens:
            # 1 - If the length of the attention_mask exceeds the length of input_ids, then we are in a setting where
            #     some of the inputs are exclusively passed as part of the cache (e.g. when passing input_embeds as
            #     input)
            if attention_mask is not None and attention_mask.shape[1] > input_ids.shape[1]:
                input_ids = input_ids[:, -(attention_mask.shape[1] - past_length) :]
            # 2 - If the past_length is smaller than input_ids', then input_ids holds all input tokens. We can discard
            #     input_ids based on the past_length.
            elif past_length < input_ids.shape[1]:
                input_ids = input_ids[:, past_length:]
            # 3 - Otherwise (past_length >= input_ids.shape[1]), let's assume input_ids only has unprocessed tokens.

            # If we are about to go beyond the maximum cache length, we need to crop the input attention mask.
            if (
                max_cache_length is not None
                and attention_mask is not None
                and cache_length + input_ids.shape[1] > max_cache_length
            ):
                attention_mask = attention_mask[:, -max_cache_length:]

        position_ids = kwargs.get("position_ids", None)
        if attention_mask is not None and position_ids is None:
            # create position_ids on the fly for batch generation
            position_ids = attention_mask.long().cumsum(-1) - 1
            position_ids.masked_fill_(attention_mask == 0, 1)
            if past_key_values:
                position_ids = position_ids[:, -input_ids.shape[1] :]

        # if `inputs_embeds` are passed, we only want to use them in the 1st generation step
        if inputs_embeds is not None and past_key_values is None:
            model_inputs = {"inputs_embeds": inputs_embeds}
        else:
            model_inputs = {"input_ids": input_ids}

        model_inputs.update(
            {
                "position_ids": position_ids,
                "past_key_values": past_key_values,
                "use_cache": kwargs.get("use_cache"),
                "attention_mask": attention_mask,
            }
        )
        return model_inputs

    @staticmethod
    def _reorder_cache(past_key_values, beam_idx):
        reordered_past = ()
        for layer_past in past_key_values:
            reordered_past += (
                tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past),
            )
        return reordered_past


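# --- position_ids worked example (hypothetical mask, for orientation only) ---
# In prepare_inputs_for_generation above, a left-padded mask [0, 0, 1, 1, 1] gives
# cumsum(-1) - 1 = [-1, -1, 0, 1, 2]; masked_fill_ turns the padded slots into
# [1, 1, 0, 1, 2], so the real tokens get positions 0, 1, 2 regardless of padding.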
@add_start_docstrings(
    """
    The Mixtral Model transformer with a sequence classification head on top (linear layer).

    [`MixtralForSequenceClassification`] uses the last token in order to do the classification, as other causal models
    (e.g. GPT-2) do.

    Since it does classification on the last token, it requires knowing the position of the last token. If a
    `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If
    no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the
    padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in
    each row of the batch).
    """,
    MIXTRAL_START_DOCSTRING,
)
# Copied from transformers.models.llama.modeling_llama.LlamaForSequenceClassification with Llama->Mixtral, LLAMA->MIXTRAL
class MixtralForSequenceClassification(MixtralPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.model = MixtralModel(config)
        self.score = nn.Linear(config.hidden_size, self.num_labels, bias=False)

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.model.embed_tokens

    def set_input_embeddings(self, value):
        self.model.embed_tokens = value

    @add_start_docstrings_to_model_forward(MIXTRAL_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids: torch.LongTensor = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[List[torch.FloatTensor]] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, SequenceClassifierOutputWithPast]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        transformer_outputs = self.model(
            input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        hidden_states = transformer_outputs[0]
        logits = self.score(hidden_states)

        if input_ids is not None:
            batch_size = input_ids.shape[0]
        else:
            batch_size = inputs_embeds.shape[0]

        if self.config.pad_token_id is None and batch_size != 1:
            raise ValueError("Cannot handle batch sizes > 1 if no padding token is defined.")
        if self.config.pad_token_id is None:
            sequence_lengths = -1
        else:
            if input_ids is not None:
                # if no pad token found, use modulo instead of reverse indexing for ONNX compatibility
                sequence_lengths = torch.eq(input_ids, self.config.pad_token_id).int().argmax(-1) - 1
                sequence_lengths = sequence_lengths % input_ids.shape[-1]
                sequence_lengths = sequence_lengths.to(logits.device)
            else:
                sequence_lengths = -1

        pooled_logits = logits[torch.arange(batch_size, device=logits.device), sequence_lengths]

        loss = None
        if labels is not None:
            labels = labels.to(logits.device)
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(pooled_logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(pooled_logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(pooled_logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(pooled_logits, labels)
        if not return_dict:
            output = (pooled_logits,) + transformer_outputs[1:]
            return ((loss,) + output) if loss is not None else output

        return SequenceClassifierOutputWithPast(
            loss=loss,
            logits=pooled_logits,
            past_key_values=transformer_outputs.past_key_values,
            hidden_states=transformer_outputs.hidden_states,
            attentions=transformer_outputs.attentions,
        )
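# --- Pooling worked example (hypothetical ids, for orientation only) ---
# For right-padded input_ids = [7, 9, PAD, PAD], torch.eq(..., pad_token_id) gives
# [0, 0, 1, 1]; argmax(-1) finds the first PAD at index 2, minus 1 yields 1, and the
# modulo keeps it in range, so pooled_logits reads the score at the last real token.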
faiss_attn/source/modeling_phi3.py
ADDED
@@ -0,0 +1,1772 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# coding=utf-8
# Copyright 2024 Microsoft and the HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

""" PyTorch Phi-3 model."""

import inspect
import math
import warnings
from typing import Any, List, Optional, Tuple, Union

import torch
import torch.nn.functional as F
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss

from transformers.activations import ACT2FN
from transformers.cache_utils import Cache, DynamicCache
from transformers.modeling_attn_mask_utils import _prepare_4d_causal_attention_mask
from transformers.modeling_outputs import (
    BaseModelOutputWithPast,
    CausalLMOutputWithPast,
    SequenceClassifierOutputWithPast,
    TokenClassifierOutput,
)
from transformers.modeling_utils import PreTrainedModel
from transformers.utils import (
    add_code_sample_docstrings,
    add_start_docstrings,
    add_start_docstrings_to_model_forward,
    is_flash_attn_2_available,
    is_flash_attn_greater_or_equal_2_10,
    logging,
    replace_return_docstrings,
)

from transformers.utils.import_utils import is_torch_fx_available
from transformers.models.phi3.configuration_phi3 import Phi3Config


if is_flash_attn_2_available():
    from flash_attn import flash_attn_func, flash_attn_varlen_func
    from flash_attn.bert_padding import index_first_axis, pad_input, unpad_input  # noqa

    _flash_supports_window_size = "window_size" in list(inspect.signature(flash_attn_func).parameters)

logger = logging.get_logger(__name__)

_CHECKPOINT_FOR_DOC = "microsoft/Phi-3-mini-4k-instruct"
_CONFIG_FOR_DOC = "Phi3Config"


# Copied from transformers.models.llama.modeling_llama.LlamaRMSNorm with Llama->Phi3
class Phi3RMSNorm(nn.Module):
    def __init__(self, hidden_size, eps=1e-6):
        """
        Phi3RMSNorm is equivalent to T5LayerNorm
        """
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.variance_epsilon = eps

    def forward(self, hidden_states):
        input_dtype = hidden_states.dtype
        hidden_states = hidden_states.to(torch.float32)
        variance = hidden_states.pow(2).mean(-1, keepdim=True)
        hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
        return self.weight * hidden_states.to(input_dtype)
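
# Usage sketch (added for exposition; the hidden size is an assumption for the
# example). RMSNorm rescales each hidden vector by the reciprocal of its
# root-mean-square, computed in float32 for stability, then casts back:
#
#     norm = Phi3RMSNorm(hidden_size=3072)
#     x = torch.randn(2, 8, 3072, dtype=torch.float16)
#     y = norm(x)   # same shape; per-vector RMS normalized, then weighted elementwise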


# Copied from transformers.models.llama.modeling_llama._get_unpad_data
def _get_unpad_data(attention_mask):
    seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32)
    indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten()
    max_seqlen_in_batch = seqlens_in_batch.max().item()
    cu_seqlens = F.pad(torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.int32), (1, 0))
    return (
        indices,
        cu_seqlens,
        max_seqlen_in_batch,
    )
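
# Illustrative sketch (added for exposition): for a batch with per-row valid
# lengths [2, 3], `_get_unpad_data` flattens away the padding.
#
#     mask = torch.tensor([[1, 1, 0], [1, 1, 1]])
#     indices, cu_seqlens, max_len = _get_unpad_data(mask)
#     # indices    -> tensor([0, 1, 3, 4, 5]): positions of real tokens in the flat batch
#     # cu_seqlens -> tensor([0, 2, 5], dtype=torch.int32): row offsets for flash-attn
#     # max_len    -> 3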


# Copied from transformers.models.gemma.modeling_gemma.GemmaRotaryEmbedding with gemma->phi3, Gemma->Phi3
class Phi3RotaryEmbedding(nn.Module):
    def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None):
        super().__init__()

        self.dim = dim
        self.max_position_embeddings = max_position_embeddings
        self.base = base

        inv_freq = 1.0 / (self.base ** (torch.arange(0, self.dim, 2, dtype=torch.int64).float() / self.dim))
        self.register_buffer("inv_freq", tensor=inv_freq, persistent=False)

    @torch.no_grad()
    def forward(self, x, position_ids, seq_len=None):
        # x: [bs, num_attention_heads, seq_len, head_size]
        self.inv_freq.to(x.device)
        inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1)
        position_ids_expanded = position_ids[:, None, :].float()
        # Force float32 since bfloat16 loses precision on long contexts
        # See https://github.com/huggingface/transformers/pull/29285
        device_type = x.device.type
        device_type = device_type if isinstance(device_type, str) and device_type != "mps" else "cpu"
        with torch.autocast(device_type=device_type, enabled=False):
            freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
            emb = torch.cat((freqs, freqs), dim=-1)
            cos = emb.cos()
            sin = emb.sin()
        return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)
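
# Shape sketch (added for exposition; sizes are assumptions for illustration):
#
#     rope = Phi3RotaryEmbedding(dim=96, max_position_embeddings=4096)
#     x = torch.randn(1, 32, 10, 96)              # [bs, heads, seq, head_dim]
#     position_ids = torch.arange(10)[None, :]    # [bs, seq]
#     cos, sin = rope(x, position_ids)            # each [bs, seq, head_dim] == [1, 10, 96]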

class Phi3LongRoPEScaledRotaryEmbedding(Phi3RotaryEmbedding):
    def __init__(self, dim, config, device=None):
        super().__init__(dim, config.max_position_embeddings, config.rope_theta, device)

        self.short_factor = config.rope_scaling["short_factor"]
        self.long_factor = config.rope_scaling["long_factor"]
        self.original_max_position_embeddings = config.original_max_position_embeddings

    @torch.no_grad()
    def forward(self, x, position_ids, seq_len=None):
        seq_len = seq_len or torch.max(position_ids) + 1
        if seq_len > self.original_max_position_embeddings:
            ext_factors = torch.tensor(self.long_factor, dtype=torch.float32, device=x.device)
        else:
            ext_factors = torch.tensor(self.short_factor, dtype=torch.float32, device=x.device)

        inv_freq_shape = torch.arange(0, self.dim, 2, dtype=torch.int64, device=x.device).float() / self.dim
        self.inv_freq = 1.0 / (ext_factors * self.base**inv_freq_shape)

        inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1)
        position_ids_expanded = position_ids[:, None, :].float()

        # Force float32 since bfloat16 loses precision on long contexts
        # See https://github.com/huggingface/transformers/pull/29285
        device_type = x.device.type
        device_type = device_type if isinstance(device_type, str) and device_type != "mps" else "cpu"
        with torch.autocast(device_type=device_type, enabled=False):
            freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
            emb = torch.cat((freqs, freqs), dim=-1)

        scale = self.max_position_embeddings / self.original_max_position_embeddings
        if scale <= 1.0:
            scaling_factor = 1.0
        else:
            scaling_factor = math.sqrt(1 + math.log(scale) / math.log(self.original_max_position_embeddings))

        cos = emb.cos() * scaling_factor
        sin = emb.sin() * scaling_factor
        return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)


class Phi3SuScaledRotaryEmbedding(Phi3RotaryEmbedding):
    def __init__(self, dim, config, device=None):
        super().__init__(dim, config.max_position_embeddings, config.rope_theta, device)

        self.short_factor = config.rope_scaling["short_factor"]
        self.long_factor = config.rope_scaling["long_factor"]
        self.original_max_position_embeddings = config.original_max_position_embeddings

    @torch.no_grad()
    def forward(self, x, position_ids, seq_len=None):
        seq_len = torch.max(position_ids) + 1
        if seq_len > self.original_max_position_embeddings:
            ext_factors = torch.tensor(self.long_factor, dtype=torch.float32, device=x.device)
        else:
            ext_factors = torch.tensor(self.short_factor, dtype=torch.float32, device=x.device)

        inv_freq_shape = torch.arange(0, self.dim, 2, dtype=torch.int64, device=x.device).float() / self.dim
        self.inv_freq = 1.0 / (ext_factors * self.base**inv_freq_shape)

        inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1)
        position_ids_expanded = position_ids[:, None, :].float()

        # Force float32 since bfloat16 loses precision on long contexts
        # See https://github.com/huggingface/transformers/pull/29285
        device_type = x.device.type
        device_type = device_type if isinstance(device_type, str) and device_type != "mps" else "cpu"
        with torch.autocast(device_type=device_type, enabled=False):
            freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
            emb = torch.cat((freqs, freqs), dim=-1)

        scale = self.max_position_embeddings / self.original_max_position_embeddings
        if scale <= 1.0:
            scaling_factor = 1.0
        else:
            scaling_factor = math.sqrt(1 + math.log(scale) / math.log(self.original_max_position_embeddings))

        cos = emb.cos() * scaling_factor
        sin = emb.sin() * scaling_factor
        return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)


class Phi3YarnScaledRotaryEmbedding(Phi3RotaryEmbedding):
    def __init__(self, dim, config, device=None):
        super().__init__(dim, config.max_position_embeddings, config.rope_theta, device)

        self.short_factor = config.rope_scaling["short_factor"]
        self.long_factor = config.rope_scaling["long_factor"]
        self.original_max_position_embeddings = config.original_max_position_embeddings

    @torch.no_grad()
    def forward(self, x, position_ids, seq_len=None):
        seq_len = torch.max(position_ids) + 1
        if seq_len > self.original_max_position_embeddings:
            ext_factors = torch.tensor(self.long_factor, dtype=torch.float32, device=x.device)
        else:
            ext_factors = torch.tensor(self.short_factor, dtype=torch.float32, device=x.device)

        inv_freq_shape = torch.arange(0, self.dim, 2, dtype=torch.int64, device=x.device).float() / self.dim
        self.inv_freq = 1.0 / (ext_factors * self.base**inv_freq_shape)

        inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1)
        position_ids_expanded = position_ids[:, None, :].float()

        # Force float32 since bfloat16 loses precision on long contexts
        # See https://github.com/huggingface/transformers/pull/29285
        device_type = x.device.type
        device_type = device_type if isinstance(device_type, str) and device_type != "mps" else "cpu"
        with torch.autocast(device_type=device_type, enabled=False):
            freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
            emb = torch.cat((freqs, freqs), dim=-1)

        scale = self.max_position_embeddings / self.original_max_position_embeddings
        if scale <= 1.0:
            scaling_factor = 1.0
        else:
            scaling_factor = 0.1 * math.log(scale) + 1.0

        cos = emb.cos() * scaling_factor
        sin = emb.sin() * scaling_factor
        return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)
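
# Note added for exposition: the three scaled variants differ only in how the
# attention "temperature" is recovered after stretching the frequencies. With
# scale = max_position_embeddings / original_max_position_embeddings, the
# "su"/"longrope" classes use sqrt(1 + log(scale) / log(original_max)), while
# "yarn" uses 0.1 * log(scale) + 1.0. A quick numeric check (values illustrative):
#
#     scale, orig = 131072 / 4096, 4096                 # 32x context extension
#     math.sqrt(1 + math.log(scale) / math.log(orig))   # ~= 1.19
#     0.1 * math.log(scale) + 1.0                       # ~= 1.35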


# Copied from transformers.models.llama.modeling_llama.rotate_half
def rotate_half(x):
    """Rotates half the hidden dims of the input."""
    x1 = x[..., : x.shape[-1] // 2]
    x2 = x[..., x.shape[-1] // 2 :]
    return torch.cat((-x2, x1), dim=-1)
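
# Tiny worked example (added for exposition):
#
#     rotate_half(torch.tensor([1., 2., 3., 4.]))  # -> tensor([-3., -4., 1., 2.])
#
# i.e. the second half is negated and moved in front of the first half, exactly
# the (-x2, x1) pairing the rotary formula q*cos + rotate_half(q)*sin needs.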


# Copied from transformers.models.llama.modeling_llama.apply_rotary_pos_emb
def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
    """Applies Rotary Position Embedding to the query and key tensors.

    Args:
        q (`torch.Tensor`): The query tensor.
        k (`torch.Tensor`): The key tensor.
        cos (`torch.Tensor`): The cosine part of the rotary embedding.
        sin (`torch.Tensor`): The sine part of the rotary embedding.
        position_ids (`torch.Tensor`, *optional*):
            Deprecated and unused.
        unsqueeze_dim (`int`, *optional*, defaults to 1):
            The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
            sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
            that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
            k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
            cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
            the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
    Returns:
        `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
    """
    cos = cos.unsqueeze(unsqueeze_dim)
    sin = sin.unsqueeze(unsqueeze_dim)
    q_embed = (q * cos) + (rotate_half(q) * sin)
    k_embed = (k * cos) + (rotate_half(k) * sin)
    return q_embed, k_embed
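
# Usage sketch (added for exposition, continuing the `rope` sketch above; shapes
# are assumptions for illustration):
#
#     q = torch.randn(1, 32, 10, 96)   # [bs, heads, seq, head_dim]
#     k = torch.randn(1, 32, 10, 96)
#     cos, sin = rope(q, torch.arange(10)[None, :])        # [1, 10, 96] each
#     q_rot, k_rot = apply_rotary_pos_emb(q, k, cos, sin)  # shapes unchanged
#
# With unsqueeze_dim=1 the [1, 10, 96] cos/sin broadcast against the head axis.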


class Phi3MLP(nn.Module):
    def __init__(self, config):
        super().__init__()

        self.config = config
        self.gate_up_proj = nn.Linear(config.hidden_size, 2 * config.intermediate_size, bias=False)
        self.down_proj = nn.Linear(config.intermediate_size, config.hidden_size, bias=False)

        self.activation_fn = ACT2FN[config.hidden_act]

    def forward(self, hidden_states: torch.FloatTensor) -> torch.FloatTensor:
        up_states = self.gate_up_proj(hidden_states)

        gate, up_states = up_states.chunk(2, dim=-1)
        up_states = up_states * self.activation_fn(gate)

        return self.down_proj(up_states)
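
# Design note + sketch (added for exposition): the gate and up projections are
# fused into one matmul and split with `chunk`, giving the usual gated MLP
# down(act(gate(x)) * up(x)). The sizes implied below are assumptions.
#
#     mlp = Phi3MLP(config)                             # hidden -> 2*intermediate -> hidden
#     y = mlp(torch.randn(2, 10, config.hidden_size))   # same shape as the input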


# Copied from transformers.models.llama.modeling_llama.repeat_kv with llama->phi
def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
    """
    This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
    num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
    """
    batch, num_key_value_heads, slen, head_dim = hidden_states.shape
    if n_rep == 1:
        return hidden_states
    hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
    return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)
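
# Shape sketch (added for exposition): with 32 query heads and 8 KV heads
# (illustrative numbers), each KV head is repeated n_rep = 4 times so the
# grouped-query K/V line up with the query heads:
#
#     kv = torch.randn(1, 8, 10, 96)   # [batch, num_key_value_heads, seq, head_dim]
#     repeat_kv(kv, 4).shape           # -> torch.Size([1, 32, 10, 96])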


class Phi3Attention(nn.Module):
    """Multi-headed attention from 'Attention Is All You Need' paper"""

    def __init__(self, config: Phi3Config, layer_idx: Optional[int] = None):
        super().__init__()
        self.config = config
        self.layer_idx = layer_idx
        if layer_idx is None:
            logger.warning_once(
                f"Instantiating {self.__class__.__name__} without passing a `layer_idx` is not recommended and will "
                "lead to errors during the forward call if caching is used. Please make sure to provide a `layer_idx` "
                "when creating this class."
            )

        self.attention_dropout = config.attention_dropout
        self.hidden_size = config.hidden_size
        self.num_heads = config.num_attention_heads
        self.head_dim = self.hidden_size // self.num_heads
        self.num_key_value_heads = config.num_key_value_heads
        self.num_key_value_groups = self.num_heads // self.num_key_value_heads
        self.max_position_embeddings = config.max_position_embeddings
        self.original_max_position_embeddings = config.original_max_position_embeddings
        self.rope_theta = config.rope_theta
        self.rope_scaling = config.rope_scaling
        self.is_causal = True

        if (self.head_dim * self.num_heads) != self.hidden_size:
            raise ValueError(
                f"hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size}"
                f" and `num_heads`: {self.num_heads})."
            )

        op_size = self.num_heads * self.head_dim + 2 * (self.num_key_value_heads * self.head_dim)
        self.o_proj = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=False)
        self.qkv_proj = nn.Linear(self.hidden_size, op_size, bias=False)
        self._init_rope()

    def _init_rope(self):
        if self.rope_scaling is None:
            self.rotary_emb = Phi3RotaryEmbedding(
                self.head_dim,
                max_position_embeddings=self.max_position_embeddings,
                base=self.rope_theta,
            )
        else:
            scaling_type = self.config.rope_scaling["type"]
            if scaling_type == "su":
                self.rotary_emb = Phi3SuScaledRotaryEmbedding(self.head_dim, self.config)
            elif scaling_type == "yarn":
                self.rotary_emb = Phi3YarnScaledRotaryEmbedding(self.head_dim, self.config)
            elif scaling_type == "longrope":
                self.rotary_emb = Phi3LongRoPEScaledRotaryEmbedding(self.head_dim, self.config)
            else:
                raise ValueError(f"Unknown RoPE scaling type {scaling_type}")

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_value: Optional[Cache] = None,
        output_attentions: bool = False,
        use_cache: bool = False,
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
        logger.warning_once("You are not running the flash-attention implementation, expect numerical differences.")

        bsz, q_len, _ = hidden_states.size()

        qkv = self.qkv_proj(hidden_states)
        query_pos = self.num_heads * self.head_dim
        query_states = qkv[..., :query_pos]
        key_states = qkv[..., query_pos : query_pos + self.num_key_value_heads * self.head_dim]
        value_states = qkv[..., query_pos + self.num_key_value_heads * self.head_dim :]

        query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
        key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
        value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)

        kv_seq_len = key_states.shape[-2]
        if past_key_value is not None:
            if self.layer_idx is None:
                raise ValueError(
                    f"The cache structure has changed since version v4.36. If you are using {self.__class__.__name__} "
                    "for auto-regressive decoding with k/v caching, please make sure to initialize the attention class "
                    "with a layer index."
                )
            kv_seq_len += past_key_value.get_usable_length(kv_seq_len, self.layer_idx)
        cos, sin = self.rotary_emb(value_states, position_ids, seq_len=kv_seq_len)

        query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids)

        if past_key_value is not None:
            cache_kwargs = {"sin": sin, "cos": cos}  # Specific to RoPE models
            key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)

        # repeat k/v heads if n_kv_heads < n_heads
        key_states = repeat_kv(key_states, self.num_key_value_groups)
        value_states = repeat_kv(value_states, self.num_key_value_groups)

        attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim)

        if attn_weights.size() != (bsz, self.num_heads, q_len, kv_seq_len):
            raise ValueError(
                f"Attention weights should be of size {(bsz, self.num_heads, q_len, kv_seq_len)}, but is"
                f" {attn_weights.size()}"
            )

        if attention_mask is not None:
            if attention_mask.size() != (bsz, 1, q_len, kv_seq_len):
                raise ValueError(
                    f"Attention mask should be of size {(bsz, 1, q_len, kv_seq_len)}, but is {attention_mask.size()}"
                )
            attn_weights = attn_weights + attention_mask

        # upcast attention to fp32
        attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(value_states.dtype)
        attn_weights = nn.functional.dropout(attn_weights, p=self.attention_dropout, training=self.training)

        attn_output = torch.matmul(attn_weights, value_states)

        if attn_output.size() != (bsz, self.num_heads, q_len, self.head_dim):
            raise ValueError(
                f"`attn_output` should be of size {(bsz, self.num_heads, q_len, self.head_dim)}, but is"
                f" {attn_output.size()}"
            )

        attn_output = attn_output.transpose(1, 2).contiguous()
        attn_output = attn_output.reshape(bsz, q_len, self.hidden_size)

        attn_output = self.o_proj(attn_output)

        if not output_attentions:
            attn_weights = None

        return attn_output, attn_weights, past_key_value
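
# Layout note (added for exposition): every attention variant in this file slices
# one fused `qkv_proj` output as [ Q | K | V ] along the last axis. With the
# illustrative sizes num_heads=32, num_key_value_heads=32, head_dim=96:
#
#     qkv = qkv_proj(hidden_states)            # [..., 3 * 32 * 96]
#     q = qkv[..., : 32 * 96]
#     k = qkv[..., 32 * 96 : 2 * 32 * 96]
#     v = qkv[..., 2 * 32 * 96 :]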


class Phi3FlashAttention2(Phi3Attention):
    """
    Phi-3 flash attention module. This module inherits from `Phi3Attention` as the weights of the module stay
    untouched. The only required change is on the forward pass, where it needs to correctly call the public API of
    flash attention and deal with padding tokens in case the input contains any of them.
    """

    # Copied from transformers.models.llama.modeling_llama.LlamaFlashAttention2.__init__
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        # TODO: Should be removed once Flash Attention for RoCm is bumped to 2.1.
        # flash_attn<2.1 generates a top-left aligned causal mask, while what is needed here is bottom-right alignment, which was made the default for flash_attn>=2.1. This attribute is used to handle this difference. Reference: https://github.com/Dao-AILab/flash-attention/releases/tag/v2.1.0.
        # Beware that with flash_attn<2.1, using q_seqlen != k_seqlen (except for the case q_seqlen == 1) produces a wrong mask (top-left).
        self._flash_attn_uses_top_left_mask = not is_flash_attn_greater_or_equal_2_10()

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.LongTensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_value: Optional[Cache] = None,
        output_attentions: bool = False,
        use_cache: bool = False,
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
        # Phi3FlashAttention2 attention does not support output_attentions

        if not _flash_supports_window_size:
            logger.warning_once(
                "The current flash attention version does not support sliding window attention. Please use `attn_implementation='eager'` or upgrade the flash-attn library."
            )
            raise ValueError("The current flash attention version does not support sliding window attention.")

        output_attentions = False

        bsz, q_len, _ = hidden_states.size()

        qkv = self.qkv_proj(hidden_states)
        query_pos = self.num_heads * self.head_dim
        query_states = qkv[..., :query_pos]
        key_states = qkv[..., query_pos : query_pos + self.num_key_value_heads * self.head_dim]
        value_states = qkv[..., query_pos + self.num_key_value_heads * self.head_dim :]

        # Flash attention requires the input to have the shape
        # batch_size x seq_length x head_dim x hidden_dim
        # therefore we just need to keep the original shape
        query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
        key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
        value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)

        kv_seq_len = key_states.shape[-2]
        if past_key_value is not None:
            if self.layer_idx is None:
                raise ValueError(
                    f"The cache structure has changed since version v4.36. If you are using {self.__class__.__name__} "
                    "for auto-regressive decoding with k/v caching, please make sure to initialize the attention class "
                    "with a layer index."
                )
            kv_seq_len += past_key_value.get_usable_length(kv_seq_len, self.layer_idx)

        # Because the input can be padded, the absolute sequence length depends on the max position id.
        rotary_seq_len = max(kv_seq_len, position_ids[:, -1].max().item()) + 1
        cos, sin = self.rotary_emb(value_states, position_ids, seq_len=rotary_seq_len)

        query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids)

        use_sliding_windows = (
            _flash_supports_window_size
            and getattr(self.config, "sliding_window", None) is not None
            and kv_seq_len > self.config.sliding_window
        )

        if past_key_value is not None:
            # Activate the slicing cache only if the config has a `sliding_window` attribute
            cache_has_contents = past_key_value.get_seq_length(self.layer_idx) > 0
            if (
                getattr(self.config, "sliding_window", None) is not None
                and kv_seq_len > self.config.sliding_window
                and cache_has_contents
            ):
                slicing_tokens = 1 - self.config.sliding_window

                past_key = past_key_value[self.layer_idx][0]
                past_value = past_key_value[self.layer_idx][1]

                past_key = past_key[:, :, slicing_tokens:, :].contiguous()
                past_value = past_value[:, :, slicing_tokens:, :].contiguous()

                if past_key.shape[-2] != self.config.sliding_window - 1:
                    raise ValueError(
                        f"past key must have a shape of (`batch_size, num_heads, self.config.sliding_window-1, head_dim`), got"
                        f" {past_key.shape}"
                    )

                if attention_mask is not None:
                    attention_mask = attention_mask[:, slicing_tokens:]
                    attention_mask = torch.cat([attention_mask, torch.ones_like(attention_mask[:, -1:])], dim=-1)

            cache_kwargs = {"sin": sin, "cos": cos}  # Specific to RoPE models
            key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)

        # repeat k/v heads if n_kv_heads < n_heads
        key_states = repeat_kv(key_states, self.num_key_value_groups)
        value_states = repeat_kv(value_states, self.num_key_value_groups)

        attn_dropout = self.attention_dropout if self.training else 0.0

        # In PEFT, we usually cast the layer norms to float32 for training stability,
        # so the input hidden states get silently cast to float32. Hence, we need to
        # cast them back to the correct dtype just to be sure everything works as
        # expected. This might slow down training & inference, so it is recommended
        # not to cast the LayerNorms to fp32.

        if query_states.dtype == torch.float32:
            if torch.is_autocast_enabled():
                target_dtype = torch.get_autocast_gpu_dtype()
            # Handle the case where the model is quantized
            elif hasattr(self.config, "_pre_quantization_dtype"):
                target_dtype = self.config._pre_quantization_dtype
            else:
                target_dtype = self.qkv_proj.weight.dtype

            logger.warning_once(
                f"The input hidden states seem to be silently cast to float32; this might be related to"
                f" the fact you have upcast embedding or layer norm layers to float32. We will cast back the input to"
                f" {target_dtype}."
            )

            query_states = query_states.to(target_dtype)
            key_states = key_states.to(target_dtype)
            value_states = value_states.to(target_dtype)

        # Reshape to the expected shape for Flash Attention
        query_states = query_states.transpose(1, 2)
        key_states = key_states.transpose(1, 2)
        value_states = value_states.transpose(1, 2)

        attn_output = self._flash_attention_forward(
            query_states,
            key_states,
            value_states,
            attention_mask,
            q_len,
            dropout=attn_dropout,
            use_sliding_windows=use_sliding_windows,
        )

        attn_output = attn_output.reshape(bsz, q_len, self.hidden_size).contiguous()
        attn_output = self.o_proj(attn_output)

        if not output_attentions:
            attn_weights = None

        return attn_output, attn_weights, past_key_value
+
|
| 607 |
+
def forward_torch(
|
| 608 |
+
self,
|
| 609 |
+
hidden_states: torch.Tensor,
|
| 610 |
+
attention_mask: Optional[torch.Tensor] = None,
|
| 611 |
+
position_ids: Optional[torch.LongTensor] = None,
|
| 612 |
+
past_key_value: Optional[Any] = None,
|
| 613 |
+
output_attentions: bool = False,
|
| 614 |
+
use_cache: bool = False,
|
| 615 |
+
**kwargs,
|
| 616 |
+
) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
|
| 617 |
+
if "padding_mask" in kwargs:
|
| 618 |
+
warnings.warn(
|
| 619 |
+
"Passing `padding_mask` is deprecated and will be removed in v4.37. Please make sure use `attention_mask` instead.`"
|
| 620 |
+
)
|
| 621 |
+
|
| 622 |
+
bsz, q_len, _ = hidden_states.size()
|
| 623 |
+
inspect = {}
|
| 624 |
+
|
| 625 |
+
# phi3 has different way of computing qkv
|
| 626 |
+
# query_states = self.q_proj(hidden_states)
|
| 627 |
+
# key_states = self.k_proj(hidden_states)
|
| 628 |
+
# value_states = self.v_proj(hidden_states)
|
| 629 |
+
|
| 630 |
+
qkv = self.qkv_proj(hidden_states)
|
| 631 |
+
query_pos = self.num_heads * self.head_dim
|
| 632 |
+
query_states = qkv[..., : query_pos]
|
| 633 |
+
key_states = qkv[..., query_pos : query_pos + self.num_key_value_heads * self.head_dim]
|
| 634 |
+
value_states = qkv[..., query_pos + self.num_key_value_heads * self.head_dim :]
|
| 635 |
+
|
| 636 |
+
query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
|
| 637 |
+
key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
|
| 638 |
+
value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
|
| 639 |
+
|
| 640 |
+
kv_seq_len = key_states.shape[-2]
|
| 641 |
+
# print(past_key_value)
|
| 642 |
+
if past_key_value is not None:
|
| 643 |
+
if self.layer_idx is None:
|
| 644 |
+
raise ValueError(
|
| 645 |
+
f"The cache structure has changed since version v4.36. If you are using {self.__class__.__name__} "
|
| 646 |
+
"for auto-regressive decoding with k/v caching, please make sure to initialize the attention class "
|
| 647 |
+
"with a layer index."
|
| 648 |
+
)
|
| 649 |
+
kv_seq_len += past_key_value.get_usable_length(kv_seq_len, self.layer_idx)
|
| 650 |
+
cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len, position_ids=position_ids)
|
| 651 |
+
query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids)
|
| 652 |
+
|
| 653 |
+
if past_key_value is not None:
|
| 654 |
+
if(use_cache):
|
| 655 |
+
cache_kwargs = {"sin": sin, "cos": cos} # Specific to RoPE models
|
| 656 |
+
key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)
|
| 657 |
+
else:
|
| 658 |
+
key_states = torch.cat([past_key_value.key_cache[self.layer_idx], key_states], dim=-2)
|
| 659 |
+
value_states = torch.cat([past_key_value.value_cache[self.layer_idx], value_states], dim=-2)
|
| 660 |
+
|
| 661 |
+
key_states = repeat_kv(key_states, self.num_key_value_groups)
|
| 662 |
+
value_states = repeat_kv(value_states, self.num_key_value_groups)
|
| 663 |
+
|
| 664 |
+
# print(query_states.size())
|
| 665 |
+
# print(key_states.size())
|
| 666 |
+
inspect["query"] = query_states
|
| 667 |
+
inspect["key"] = key_states
|
| 668 |
+
attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim)
|
| 669 |
+
###write our mask here
|
| 670 |
+
#print(attn_weights.size())#[batch_size, head, q, c]
|
| 671 |
+
|
| 672 |
+
if attn_weights.size() != (bsz, self.num_heads, q_len, kv_seq_len):
|
| 673 |
+
raise ValueError(
|
| 674 |
+
f"Attention weights should be of size {(bsz, self.num_heads, q_len, kv_seq_len)}, but is"
|
| 675 |
+
f" {attn_weights.size()}"
|
| 676 |
+
)
|
| 677 |
+
|
| 678 |
+
if attention_mask is not None:
|
| 679 |
+
if attention_mask.size() != (bsz, 1, q_len, kv_seq_len):
|
| 680 |
+
raise ValueError(
|
| 681 |
+
f"Attention mask should be of size {(bsz, 1, q_len, kv_seq_len)}, but is {attention_mask.size()}"
|
| 682 |
+
)
|
| 683 |
+
attn_weights = attn_weights + attention_mask
|
| 684 |
+
if 'block_list' in kwargs:
|
| 685 |
+
for h in kwargs['block_list']:
|
| 686 |
+
if self.layer_idx==h[0]:
|
| 687 |
+
'''
|
| 688 |
+
if h[1]==0:
|
| 689 |
+
target_head = 1
|
| 690 |
+
elif h[1]==31:
|
| 691 |
+
target_head = 30
|
| 692 |
+
else:
|
| 693 |
+
target_head = h[1] - 1
|
| 694 |
+
|
| 695 |
+
attn_weights[:, h[1], :, :] = attn_weights[:, target_head, :, :]
|
| 696 |
+
'''
|
| 697 |
+
attn_weights[:, h[1], :, :] = 0
|
| 698 |
+
# upcast attention to fp32
|
| 699 |
+
attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype)
|
| 700 |
+
attn_weights = nn.functional.dropout(attn_weights, p=self.attention_dropout, training=self.training)
|
| 701 |
+
attn_output = torch.matmul(attn_weights, value_states)
|
| 702 |
+
inspect["attn_output_before_o_proj"] = attn_output
|
| 703 |
+
|
| 704 |
+
if attn_output.size() != (bsz, self.num_heads, q_len, self.head_dim):
|
| 705 |
+
raise ValueError(
|
| 706 |
+
f"`attn_output` should be of size {(bsz, self.num_heads, q_len, self.head_dim)}, but is"
|
| 707 |
+
f" {attn_output.size()}"
|
| 708 |
+
)
|
| 709 |
+
|
| 710 |
+
attn_output = attn_output.transpose(1, 2).contiguous()
|
| 711 |
+
|
| 712 |
+
attn_output = attn_output.reshape(bsz, q_len, self.hidden_size)
|
| 713 |
+
|
| 714 |
+
attn_output = self.o_proj(attn_output)
|
| 715 |
+
|
| 716 |
+
if not output_attentions:
|
| 717 |
+
attn_weights = None
|
| 718 |
+
|
| 719 |
+
return attn_output, inspect, attn_weights, past_key_value
|
| 720 |
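
    # Usage sketch (added for exposition; the argument values are hypothetical).
    # The eager path above supports ablating individual attention heads by zeroing
    # their pre-softmax logits; since the zeroing happens after the additive causal
    # mask, the blocked head then spreads attention (almost) uniformly over all key
    # positions rather than being removed outright.
    #
    #     block_list = [(5, 12), (17, 3)]   # (layer_idx, head_idx) pairs to block
    #     out, inspect_dict, _, _ = attn.forward_torch(
    #         hidden_states, attention_mask, position_ids,
    #         past_key_value=cache, use_cache=True, block_list=block_list,
    #     )
    #     inspect_dict["query"]   # post-RoPE queries, [bsz, num_heads, q_len, head_dim]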
+
|
| 721 |
+
# Copied from transformers.models.mistral.modeling_mistral.MistralFlashAttention2._flash_attention_forward
|
| 722 |
+
def _flash_attention_forward(
|
| 723 |
+
self,
|
| 724 |
+
query_states,
|
| 725 |
+
key_states,
|
| 726 |
+
value_states,
|
| 727 |
+
attention_mask,
|
| 728 |
+
query_length,
|
| 729 |
+
dropout=0.0,
|
| 730 |
+
softmax_scale=None,
|
| 731 |
+
use_sliding_windows=False,
|
| 732 |
+
):
|
| 733 |
+
"""
|
| 734 |
+
Calls the forward method of Flash Attention - if the input hidden states contain at least one padding token
|
| 735 |
+
first unpad the input, then computes the attention scores and pad the final attention scores.
|
| 736 |
+
|
| 737 |
+
Args:
|
| 738 |
+
query_states (`torch.Tensor`):
|
| 739 |
+
Input query states to be passed to Flash Attention API
|
| 740 |
+
key_states (`torch.Tensor`):
|
| 741 |
+
Input key states to be passed to Flash Attention API
|
| 742 |
+
value_states (`torch.Tensor`):
|
| 743 |
+
Input value states to be passed to Flash Attention API
|
| 744 |
+
attention_mask (`torch.Tensor`):
|
| 745 |
+
The padding mask - corresponds to a tensor of size `(batch_size, seq_len)` where 0 stands for the
|
| 746 |
+
position of padding tokens and 1 for the position of non-padding tokens.
|
| 747 |
+
dropout (`float`):
|
| 748 |
+
Attention dropout
|
| 749 |
+
softmax_scale (`float`, *optional*):
|
| 750 |
+
The scaling of QK^T before applying softmax. Default to 1 / sqrt(head_dim)
|
| 751 |
+
use_sliding_windows (`bool`, *optional*):
|
| 752 |
+
Whether to activate sliding window attention.
|
| 753 |
+
"""
|
| 754 |
+
if not self._flash_attn_uses_top_left_mask:
|
| 755 |
+
causal = self.is_causal
|
| 756 |
+
else:
|
| 757 |
+
# TODO: Remove the `query_length != 1` check once Flash Attention for RoCm is bumped to 2.1. For details, please see the comment in LlamaFlashAttention2 __init__.
|
| 758 |
+
causal = self.is_causal and query_length != 1
|
| 759 |
+
|
| 760 |
+
# Contains at least one padding token in the sequence
|
| 761 |
+
if attention_mask is not None:
|
| 762 |
+
batch_size = query_states.shape[0]
|
| 763 |
+
query_states, key_states, value_states, indices_q, cu_seq_lens, max_seq_lens = self._upad_input(
|
| 764 |
+
query_states, key_states, value_states, attention_mask, query_length
|
| 765 |
+
)
|
| 766 |
+
|
| 767 |
+
cu_seqlens_q, cu_seqlens_k = cu_seq_lens
|
| 768 |
+
max_seqlen_in_batch_q, max_seqlen_in_batch_k = max_seq_lens
|
| 769 |
+
|
| 770 |
+
if not use_sliding_windows:
|
| 771 |
+
attn_output_unpad = flash_attn_varlen_func(
|
| 772 |
+
query_states,
|
| 773 |
+
key_states,
|
| 774 |
+
value_states,
|
| 775 |
+
cu_seqlens_q=cu_seqlens_q,
|
| 776 |
+
cu_seqlens_k=cu_seqlens_k,
|
| 777 |
+
max_seqlen_q=max_seqlen_in_batch_q,
|
| 778 |
+
max_seqlen_k=max_seqlen_in_batch_k,
|
| 779 |
+
dropout_p=dropout,
|
| 780 |
+
softmax_scale=softmax_scale,
|
| 781 |
+
causal=causal,
|
| 782 |
+
)
|
| 783 |
+
else:
|
| 784 |
+
attn_output_unpad = flash_attn_varlen_func(
|
| 785 |
+
query_states,
|
| 786 |
+
key_states,
|
| 787 |
+
value_states,
|
| 788 |
+
cu_seqlens_q=cu_seqlens_q,
|
| 789 |
+
cu_seqlens_k=cu_seqlens_k,
|
| 790 |
+
max_seqlen_q=max_seqlen_in_batch_q,
|
| 791 |
+
max_seqlen_k=max_seqlen_in_batch_k,
|
| 792 |
+
dropout_p=dropout,
|
| 793 |
+
softmax_scale=softmax_scale,
|
| 794 |
+
causal=causal,
|
| 795 |
+
window_size=(self.config.sliding_window, self.config.sliding_window),
|
| 796 |
+
)
|
| 797 |
+
|
| 798 |
+
attn_output = pad_input(attn_output_unpad, indices_q, batch_size, query_length)
|
| 799 |
+
else:
|
| 800 |
+
if not use_sliding_windows:
|
| 801 |
+
attn_output = flash_attn_func(
|
| 802 |
+
query_states,
|
| 803 |
+
key_states,
|
| 804 |
+
value_states,
|
| 805 |
+
dropout,
|
| 806 |
+
softmax_scale=softmax_scale,
|
| 807 |
+
causal=causal,
|
| 808 |
+
)
|
| 809 |
+
else:
|
| 810 |
+
attn_output = flash_attn_func(
|
| 811 |
+
query_states,
|
| 812 |
+
key_states,
|
| 813 |
+
value_states,
|
| 814 |
+
dropout,
|
| 815 |
+
softmax_scale=softmax_scale,
|
| 816 |
+
causal=causal,
|
| 817 |
+
window_size=(self.config.sliding_window, self.config.sliding_window),
|
| 818 |
+
)
|
| 819 |
+
|
| 820 |
+
return attn_output
|
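
    # Note added for exposition: when a padding mask is present, the "varlen" path
    # packs every real token of the batch into one flat sequence and describes row
    # boundaries with `cu_seqlens` (as produced by `_get_unpad_data` above); the
    # sliding-window variant only adds `window_size=(w, w)`, so with causal=True
    # each query attends to at most the previous `w` keys.
    #
    #     # e.g. valid lengths [2, 3] -> cu_seqlens_q == cu_seqlens_k == [0, 2, 5]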

    # Copied from transformers.models.mistral.modeling_mistral.MistralFlashAttention2._upad_input
    def _upad_input(self, query_layer, key_layer, value_layer, attention_mask, query_length):
        batch_size, kv_seq_len, num_heads, head_dim = key_layer.shape

        # On the first iteration we need to properly re-create the padding mask
        # by slicing it on the proper place
        if kv_seq_len != attention_mask.shape[-1]:
            attention_mask_num_tokens = attention_mask.shape[-1]
            attention_mask = attention_mask[:, attention_mask_num_tokens - kv_seq_len :]

        indices_k, cu_seqlens_k, max_seqlen_in_batch_k = _get_unpad_data(attention_mask)

        key_layer = index_first_axis(key_layer.reshape(batch_size * kv_seq_len, num_heads, head_dim), indices_k)
        value_layer = index_first_axis(value_layer.reshape(batch_size * kv_seq_len, num_heads, head_dim), indices_k)

        if query_length == kv_seq_len:
            query_layer = index_first_axis(
                query_layer.reshape(batch_size * kv_seq_len, num_heads, head_dim), indices_k
            )
            cu_seqlens_q = cu_seqlens_k
            max_seqlen_in_batch_q = max_seqlen_in_batch_k
            indices_q = indices_k
        elif query_length == 1:
            max_seqlen_in_batch_q = 1
            cu_seqlens_q = torch.arange(
                batch_size + 1, dtype=torch.int32, device=query_layer.device
            )  # There is a memcpy here, that is very bad.
            indices_q = cu_seqlens_q[:-1]
            query_layer = query_layer.squeeze(1)
        else:
            # The -q_len: slice assumes left padding.
            attention_mask = attention_mask[:, -query_length:]
            query_layer, indices_q, cu_seqlens_q, max_seqlen_in_batch_q = unpad_input(query_layer, attention_mask)

        return (
            query_layer,
            key_layer,
            value_layer,
            indices_q,
            (cu_seqlens_q, cu_seqlens_k),
            (max_seqlen_in_batch_q, max_seqlen_in_batch_k),
        )


# copied from transformers.models.llama.modeling_llama.LlamaSdpaAttention with Llama->Phi3
# TODO @Arthur no longer copied from LLama after static cache
class Phi3SdpaAttention(Phi3Attention):
    """
    Phi3 attention module using torch.nn.functional.scaled_dot_product_attention. This module inherits from
    `Phi3Attention` as the weights of the module stay untouched. The only changes are on the forward pass to adapt to
    the SDPA API.
    """

    # Adapted from Phi3Attention.forward
    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_value: Optional[Cache] = None,
        output_attentions: bool = False,
        use_cache: bool = False,
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
        if output_attentions:
            # TODO: Improve this warning with e.g. `model.config.attn_implementation = "manual"` once this is implemented.
            logger.warning_once(
                "Phi3Model is using Phi3SdpaAttention, but `torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`. Falling back to the manual attention implementation, "
                'but specifying the manual implementation will be required from Transformers version v5.0.0 onwards. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.'
            )
            return super().forward(
                hidden_states=hidden_states,
                attention_mask=attention_mask,
                position_ids=position_ids,
                past_key_value=past_key_value,
                output_attentions=output_attentions,
                use_cache=use_cache,
            )

        bsz, q_len, _ = hidden_states.size()

        qkv = self.qkv_proj(hidden_states)
        query_pos = self.num_heads * self.head_dim
        query_states = qkv[..., :query_pos]
        key_states = qkv[..., query_pos : query_pos + self.num_key_value_heads * self.head_dim]
        value_states = qkv[..., query_pos + self.num_key_value_heads * self.head_dim :]

        query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
        key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
        value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)

        kv_seq_len = key_states.shape[-2]
        if past_key_value is not None:
            kv_seq_len += past_key_value.get_usable_length(kv_seq_len, self.layer_idx)
        cos, sin = self.rotary_emb(value_states, position_ids, seq_len=kv_seq_len)

        query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids)

        if past_key_value is not None:
            cache_kwargs = {"sin": sin, "cos": cos}  # Specific to RoPE models
            key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)

        key_states = repeat_kv(key_states, self.num_key_value_groups)
        value_states = repeat_kv(value_states, self.num_key_value_groups)

        if attention_mask is not None:
            if attention_mask.size() != (bsz, 1, q_len, kv_seq_len):
                raise ValueError(
                    f"Attention mask should be of size {(bsz, 1, q_len, kv_seq_len)}, but is {attention_mask.size()}"
                )

        # SDPA with memory-efficient backend is currently (torch==2.1.2) bugged with non-contiguous inputs with custom attn_mask,
        # Reference: https://github.com/pytorch/pytorch/issues/112577.
        if query_states.device.type == "cuda" and attention_mask is not None:
            query_states = query_states.contiguous()
            key_states = key_states.contiguous()
            value_states = value_states.contiguous()

        attn_output = torch.nn.functional.scaled_dot_product_attention(
            query_states,
            key_states,
            value_states,
            attn_mask=attention_mask,
            dropout_p=self.attention_dropout if self.training else 0.0,
            # The q_len > 1 is necessary to match with AttentionMaskConverter.to_causal_4d that does not create a causal mask in case q_len == 1.
            is_causal=self.is_causal and attention_mask is None and q_len > 1,
        )

        attn_output = attn_output.transpose(1, 2).contiguous()
        attn_output = attn_output.view(bsz, q_len, self.hidden_size)

        attn_output = self.o_proj(attn_output)

        return attn_output, None, past_key_value


PHI3_ATTENTION_CLASSES = {
    "eager": Phi3Attention,
    "flash_attention_2": Phi3FlashAttention2,
    "sdpa": Phi3SdpaAttention,
}
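
# Selection sketch (added for exposition): the decoder layer below picks its
# attention class from this table via `config._attn_implementation`, e.g.
#
#     config._attn_implementation = "flash_attention_2"   # or "eager" / "sdpa"
#     attn_cls = PHI3_ATTENTION_CLASSES[config._attn_implementation]
#     self_attn = attn_cls(config, layer_idx=0)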
| 962 |
+
|
| 963 |
+
|
| 964 |
+
class Phi3DecoderLayer(nn.Module):
|
| 965 |
+
def __init__(self, config: Phi3Config, layer_idx: int):
|
| 966 |
+
super().__init__()
|
| 967 |
+
|
| 968 |
+
self.config = config
|
| 969 |
+
self.self_attn = PHI3_ATTENTION_CLASSES[config._attn_implementation](config, layer_idx=layer_idx)
|
| 970 |
+
|
| 971 |
+
self.mlp = Phi3MLP(config)
|
| 972 |
+
self.input_layernorm = Phi3RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
|
| 973 |
+
|
| 974 |
+
self.resid_attn_dropout = nn.Dropout(config.resid_pdrop)
|
| 975 |
+
self.resid_mlp_dropout = nn.Dropout(config.resid_pdrop)
|
| 976 |
+
self.post_attention_layernorm = Phi3RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
|
| 977 |
+
|
| 978 |
+
def forward(
|
| 979 |
+
self,
|
| 980 |
+
hidden_states: torch.Tensor,
|
| 981 |
+
attention_mask: Optional[torch.Tensor] = None,
|
| 982 |
+
position_ids: Optional[torch.LongTensor] = None,
|
| 983 |
+
past_key_value: Optional[Tuple[torch.Tensor]] = None,
|
| 984 |
+
output_attentions: Optional[bool] = False,
|
| 985 |
+
use_cache: Optional[bool] = False,
|
| 986 |
+
|
| 987 |
+
attn_mode: str = "flash",
|
| 988 |
+
**kwargs,
|
| 989 |
+
) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
|
| 990 |
+
"""
|
| 991 |
+
Args:
|
| 992 |
+
hidden_states (`torch.FloatTensor`):
|
| 993 |
+
input to the layer of shape `(batch, seq_len, embed_dim)`
|
| 994 |
+
attention_mask (`torch.FloatTensor`, *optional*): attention mask of size
|
| 995 |
+
`(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
|
| 996 |
+
position_ids (`torch.LongTensor` of shape `({0})`, *optional*):
|
| 997 |
+
Indices of positions of each input sequence tokens in the position embeddings. Selected in the range
|
| 998 |
+
`[0, config.n_positions - 1]`. [What are position IDs?](../glossary#position-ids)
|
| 999 |
+
output_attentions (`bool`, *optional*):
|
| 1000 |
+
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
|
| 1001 |
+
returned tensors for more detail.
|
| 1002 |
+
use_cache (`bool`, *optional*):
|
| 1003 |
+
If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
|
| 1004 |
+
(see `past_key_values`).
|
| 1005 |
+
past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states
|
| 1006 |
+
"""
|
| 1007 |
+

        residual = hidden_states

        hidden_states = self.input_layernorm(hidden_states)

        # Self Attention
        if attn_mode == "flash":
            attn_outputs, self_attn_weights, present_key_value = self.self_attn(
                hidden_states=hidden_states,
                attention_mask=attention_mask,
                position_ids=position_ids,
                past_key_value=past_key_value,
                output_attentions=output_attentions,
                use_cache=use_cache,
            )
        else:
            attn_outputs, inspect, self_attn_weights, present_key_value = self.self_attn.forward_torch(
                hidden_states=hidden_states,
                attention_mask=attention_mask,
                position_ids=position_ids,
                past_key_value=past_key_value,
                output_attentions=output_attentions,
                use_cache=use_cache,
            )
        hidden_states = residual + self.resid_attn_dropout(attn_outputs)

        residual = hidden_states
        hidden_states = self.post_attention_layernorm(hidden_states)
        hidden_states = self.mlp(hidden_states)
        hidden_states = residual + self.resid_mlp_dropout(hidden_states)

        outputs = (hidden_states,)

        if output_attentions:
            outputs += (self_attn_weights,)

        if use_cache:
            outputs += (present_key_value,)

        return outputs
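
# Editor's sketch (hypothetical usage; the exact contract of `forward_torch` and its
# extra `inspect` return value is specific to this repository, not upstream
# transformers): any `attn_mode` other than "flash" routes through the eager
# `forward_torch` path, which is the variant that can honor head-blocking kwargs:
#
#     >>> # layer_out = layer(hidden_states, attn_mode="torch", block_list=[...])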


PHI3_START_DOCSTRING = r"""
    This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
    library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
    etc.)

    This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
    Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
    and behavior.

    Parameters:
        config ([`Phi3Config`]):
            Model configuration class with all the parameters of the model. Initializing with a config file does not
            load the weights associated with the model, only the configuration. Check out the
            [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""


@add_start_docstrings(
    "The bare Phi-3 model outputting raw hidden-states without any specific head on top.",
    PHI3_START_DOCSTRING,
)
class Phi3PreTrainedModel(PreTrainedModel):
    config_class = Phi3Config
    base_model_prefix = "model"
    supports_gradient_checkpointing = True
    _no_split_modules = ["Phi3DecoderLayer"]
    _skip_keys_device_placement = "past_key_values"
    _supports_flash_attn_2 = True
    _supports_sdpa = False
    _supports_cache_class = True

    _version = "0.0.5"

    def _init_weights(self, module):
        std = self.config.initializer_range
        if isinstance(module, nn.Linear):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.padding_idx is not None:
                module.weight.data[module.padding_idx].zero_()


PHI3_INPUTS_DOCSTRING = r"""
    Args:
        input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
            Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
            it.

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            [What are input IDs?](../glossary#input-ids)
        attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
            Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.

            [What are attention masks?](../glossary#attention-mask)

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            If `past_key_values` is used, optionally only the last `input_ids` have to be input (see
            `past_key_values`).

            If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`]
            and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more
            information on the default strategy.

            - 1 indicates the head is **not masked**,
            - 0 indicates the head is **masked**.
        position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
            config.n_positions - 1]`.

            [What are position IDs?](../glossary#position-ids)
        past_key_values (`Cache` or `tuple(tuple(torch.FloatTensor))`, *optional*):
            Pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
            blocks) that can be used to speed up sequential decoding. This typically consists in the `past_key_values`
            returned by the model at a previous stage of decoding, when `use_cache=True` or `config.use_cache=True`.

            Two formats are allowed:
            - a [`~cache_utils.Cache`] instance;
            - Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of
              shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`). This is also known as the legacy
              cache format.

            The model will output the same cache format that is fed as input. If no `past_key_values` are passed, the
            legacy cache format will be returned.

            If `past_key_values` are used, the user can optionally input only the last `input_ids` (those that don't
            have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `input_ids`
            of shape `(batch_size, sequence_length)`.
        inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
            Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
            is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
            model's internal embedding lookup matrix.
        use_cache (`bool`, *optional*):
            If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
            `past_key_values`).
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
            tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""


@add_start_docstrings(
    "The bare Phi-3 model outputting raw hidden-states without any specific head on top.",
    PHI3_START_DOCSTRING,
)
class Phi3Model(Phi3PreTrainedModel):
    """
    Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`Phi3DecoderLayer`]

    Args:
        config: Phi3Config
    """

    def __init__(self, config: Phi3Config):
        super().__init__(config)
        self.padding_idx = config.pad_token_id
        self.vocab_size = config.vocab_size

        self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
        self.embed_dropout = nn.Dropout(config.embd_pdrop)
        self.layers = nn.ModuleList(
            [Phi3DecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
        )
        self._attn_implementation = config._attn_implementation
        self.norm = Phi3RMSNorm(config.hidden_size, eps=config.rms_norm_eps)

        self.gradient_checkpointing = False
        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.embed_tokens

    def set_input_embeddings(self, value):
        self.embed_tokens = value

    @add_start_docstrings_to_model_forward(PHI3_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids: torch.LongTensor = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[List[torch.FloatTensor]] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        use_cache: Optional[bool] = None,
        attn_mode: str = "flash",
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        block_list: Optional[list] = None,
    ) -> Union[Tuple, BaseModelOutputWithPast]:
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        use_cache = use_cache if use_cache is not None else self.config.use_cache

        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        # retrieve input_ids and inputs_embeds
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            batch_size, seq_length = input_ids.shape[:2]
        elif inputs_embeds is not None:
            batch_size, seq_length = inputs_embeds.shape[:2]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        past_key_values_length = 0

        if self.gradient_checkpointing and self.training:
            if use_cache:
                logger.warning_once(
                    "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
                )
                use_cache = False

        if use_cache:
            use_legacy_cache = not isinstance(past_key_values, Cache)
            if use_legacy_cache:
                past_key_values = DynamicCache.from_legacy_cache(past_key_values)
            past_key_values_length = past_key_values.get_usable_length(seq_length)

        if position_ids is None:
            device = input_ids.device if input_ids is not None else inputs_embeds.device
            position_ids = torch.arange(
                past_key_values_length, seq_length + past_key_values_length, dtype=torch.long, device=device
            )
            position_ids = position_ids.unsqueeze(0).view(-1, seq_length)
        else:
            position_ids = position_ids.view(-1, seq_length).long()

        if inputs_embeds is None:
            inputs_embeds = self.embed_tokens(input_ids)

        if attention_mask is not None and self._attn_implementation == "flash_attention_2" and use_cache:
            is_padding_right = attention_mask[:, -1].sum().item() != batch_size
            if is_padding_right:
                raise ValueError(
                    "You are attempting to perform batched generation with padding_side='right'"
                    " this may lead to unexpected behaviour for Flash Attention version of Phi3. Make sure to "
                    " call `tokenizer.padding_side = 'left'` before tokenizing the input. "
                )

        if self._attn_implementation == "flash_attention_2":
            # 2d mask is passed through the layers
            attention_mask = attention_mask if (attention_mask is not None and 0 in attention_mask) else None
        else:
            # 4d mask is passed through the layers
            attention_mask = _prepare_4d_causal_attention_mask(
                attention_mask,
                (batch_size, seq_length),
                inputs_embeds,
                past_key_values_length,
                sliding_window=self.config.sliding_window,
            )

        hidden_states = inputs_embeds

        # decoder layers
        all_hidden_states = () if output_hidden_states else None
        all_self_attns = () if output_attentions else None
        next_decoder_cache = None

        if block_list:
            kwargs = {"block_list": block_list}
        else:
            kwargs = {}

        for decoder_layer in self.layers:
            if output_hidden_states:
                all_hidden_states += (hidden_states,)

            if self.gradient_checkpointing and self.training:
                layer_outputs = self._gradient_checkpointing_func(
                    decoder_layer.__call__,
                    hidden_states,
                    attention_mask,
                    position_ids,
                    past_key_values,
                    output_attentions,
                    use_cache,
                )
            else:
                layer_outputs = decoder_layer(
                    hidden_states,
                    attention_mask=attention_mask,
                    position_ids=position_ids,
                    past_key_value=past_key_values,
                    output_attentions=output_attentions,
                    use_cache=use_cache,
                    attn_mode=attn_mode,
                    **kwargs,
                )

            hidden_states = layer_outputs[0]

            if use_cache:
                next_decoder_cache = layer_outputs[2 if output_attentions else 1]

            if output_attentions:
                all_self_attns += (layer_outputs[1],)

        hidden_states = self.norm(hidden_states)

        # add hidden states from the last decoder layer
        if output_hidden_states:
            all_hidden_states += (hidden_states,)

        next_cache = None
        if use_cache:
            next_cache = next_decoder_cache.to_legacy_cache() if use_legacy_cache else next_decoder_cache
        if not return_dict:
            return tuple(v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns] if v is not None)
        return BaseModelOutputWithPast(
            last_hidden_state=hidden_states,
            past_key_values=next_cache,
            hidden_states=all_hidden_states,
            attentions=all_self_attns,
        )
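
# Editor's sketch (hypothetical call; the structure of `block_list` entries is an
# assumption based on this repository's head-masking experiments and is not
# documented here): supplying `block_list` forwards it to every decoder layer, which
# only takes effect on the non-flash attention path selected by `attn_mode`:
#
#     >>> # outputs = model(input_ids, attn_mode="torch", block_list=block_list)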

class Phi3ForCausalLM(Phi3PreTrainedModel):
    _tied_weights_keys = ["lm_head.weight"]

    # Copied from transformers.models.llama.modeling_llama.LlamaForCausalLM.__init__ with Llama->Phi3
    def __init__(self, config):
        super().__init__(config)
        self.model = Phi3Model(config)
        self.vocab_size = config.vocab_size
        self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)

        # Initialize weights and apply final processing
        self.post_init()

    # Copied from transformers.models.llama.modeling_llama.LlamaForCausalLM.get_input_embeddings
    def get_input_embeddings(self):
        return self.model.embed_tokens

    # Copied from transformers.models.llama.modeling_llama.LlamaForCausalLM.set_input_embeddings
    def set_input_embeddings(self, value):
        self.model.embed_tokens = value

    # Copied from transformers.models.llama.modeling_llama.LlamaForCausalLM.get_output_embeddings
    def get_output_embeddings(self):
        return self.lm_head

    # Copied from transformers.models.llama.modeling_llama.LlamaForCausalLM.set_output_embeddings
    def set_output_embeddings(self, new_embeddings):
        self.lm_head = new_embeddings

    # Copied from transformers.models.llama.modeling_llama.LlamaForCausalLM.set_decoder
    def set_decoder(self, decoder):
        self.model = decoder

    # Copied from transformers.models.llama.modeling_llama.LlamaForCausalLM.get_decoder
    def get_decoder(self):
        return self.model

    # Ignore copy
    @add_start_docstrings_to_model_forward(PHI3_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        input_ids: torch.LongTensor = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[List[torch.FloatTensor]] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        attn_mode: str = "flash",
        block_list: Optional[list] = None,
    ) -> Union[Tuple, CausalLMOutputWithPast]:
r"""
|
| 1417 |
+
Args:
|
| 1418 |
+
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
|
| 1419 |
+
Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
|
| 1420 |
+
config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
|
| 1421 |
+
(masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
|
| 1422 |
+
|
| 1423 |
+
Returns:
|
| 1424 |
+
|
| 1425 |
+
Example:
|
| 1426 |
+
|
| 1427 |
+
```python
|
| 1428 |
+
>>> from transformers import AutoTokenizer, Phi3ForCausalLM
|
| 1429 |
+
|
| 1430 |
+
>>> model = Phi3ForCausalLM.from_pretrained("microsoft/phi-3-mini-4k-instruct")
|
| 1431 |
+
>>> tokenizer = AutoTokenizer.from_pretrained("microsoft/phi-3-mini-4k-instruct")
|
| 1432 |
+
|
| 1433 |
+
>>> prompt = "This is an example script ."
|
| 1434 |
+
>>> inputs = tokenizer(prompt, return_tensors="pt")
|
| 1435 |
+
|
| 1436 |
+
>>> # Generate
|
| 1437 |
+
>>> generate_ids = model.generate(inputs.input_ids, max_length=30)
|
| 1438 |
+
>>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
|
| 1439 |
+
'This is an example script .\n Certainly! Below is a sample script that demonstrates a simple task, such as calculating the sum'
|
| 1440 |
+
```"""
|
| 1441 |
+
|
| 1442 |
+
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
        outputs = self.model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            attn_mode=attn_mode,
            block_list=block_list,
        )

        hidden_states = outputs[0]
        logits = self.lm_head(hidden_states)
        logits = logits.float()

        loss = None
        if labels is not None:
            # Shift so that tokens < n predict n
            shift_logits = logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            # Flatten the tokens
            loss_fct = CrossEntropyLoss()
            shift_logits = shift_logits.view(-1, self.config.vocab_size)
            shift_labels = shift_labels.view(-1)
            # Enable model parallelism
            shift_labels = shift_labels.to(shift_logits.device)
            loss = loss_fct(shift_logits, shift_labels)

        if not return_dict:
            output = (logits,) + outputs[1:]
            return (loss,) + output if loss is not None else output

        return CausalLMOutputWithPast(
            loss=loss,
            logits=logits,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
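
    # Editor's note (worked shape example; the tensor values are assumptions for
    # illustration): the shift in the loss above aligns the logits at position t
    # with the label token at position t + 1:
    #
    #     >>> import torch
    #     >>> logits = torch.randn(2, 10, 32064)            # (batch, seq, vocab)
    #     >>> labels = torch.randint(0, 32064, (2, 10))
    #     >>> logits[..., :-1, :].shape, labels[..., 1:].shape
    #     (torch.Size([2, 9, 32064]), torch.Size([2, 9]))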

    # Copied from transformers.models.persimmon.modeling_persimmon.PersimmonForCausalLM.prepare_inputs_for_generation
    def prepare_inputs_for_generation(
        self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, **kwargs
    ):
        if past_key_values is not None:
            if isinstance(past_key_values, Cache):
                cache_length = past_key_values.get_seq_length()
                past_length = past_key_values.seen_tokens
                max_cache_length = past_key_values.get_max_length()
            else:
                cache_length = past_length = past_key_values[0][0].shape[2]
                max_cache_length = None

            # Keep only the unprocessed tokens:
            # 1 - If the length of the attention_mask exceeds the length of input_ids, then we are in a setting where
            # some of the inputs are exclusively passed as part of the cache (e.g. when passing input_embeds as
            # input)
            if attention_mask is not None and attention_mask.shape[1] > input_ids.shape[1]:
                input_ids = input_ids[:, -(attention_mask.shape[1] - past_length) :]
            # 2 - If the past_length is smaller than input_ids', then input_ids holds all input tokens. We can discard
            # input_ids based on the past_length.
            elif past_length < input_ids.shape[1]:
                input_ids = input_ids[:, past_length:]
            # 3 - Otherwise (past_length >= input_ids.shape[1]), let's assume input_ids only has unprocessed tokens.

            # If we are about to go beyond the maximum cache length, we need to crop the input attention mask.
            if (
                max_cache_length is not None
                and attention_mask is not None
                and cache_length + input_ids.shape[1] > max_cache_length
            ):
                attention_mask = attention_mask[:, -max_cache_length:]

        position_ids = kwargs.get("position_ids", None)
        if attention_mask is not None and position_ids is None:
            # create position_ids on the fly for batch generation
            position_ids = attention_mask.long().cumsum(-1) - 1
            position_ids.masked_fill_(attention_mask == 0, 1)
            if past_key_values:
                position_ids = position_ids[:, -input_ids.shape[1] :]

        # if `inputs_embeds` are passed, we only want to use them in the 1st generation step
        if inputs_embeds is not None and past_key_values is None:
            model_inputs = {"inputs_embeds": inputs_embeds}
        else:
            model_inputs = {"input_ids": input_ids}

        model_inputs.update(
            {
                "position_ids": position_ids,
                "past_key_values": past_key_values,
                "use_cache": kwargs.get("use_cache"),
                "attention_mask": attention_mask,
            }
        )
        return model_inputs
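
    # Editor's note (toy example, shapes assumed): during incremental decoding only
    # tokens that are not yet in the cache are re-fed to the model:
    #
    #     >>> import torch
    #     >>> input_ids = torch.arange(8).unsqueeze(0)      # 8 tokens seen so far
    #     >>> past_length = 7                               # 7 already cached
    #     >>> input_ids[:, past_length:]
    #     tensor([[7]])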

    @staticmethod
    # Copied from transformers.models.llama.modeling_llama.LlamaForCausalLM._reorder_cache
    def _reorder_cache(past_key_values, beam_idx):
        reordered_past = ()
        for layer_past in past_key_values:
            reordered_past += (
                tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past),
            )
        return reordered_past


@add_start_docstrings(
    """
    The [`Phi3Model`] with a sequence classification head on top (linear layer).

    [`Phi3ForSequenceClassification`] uses the last token in order to do the classification, as other causal models
    (e.g. GPT-2) do.

    Since it does classification on the last token, it requires to know the position of the last token. If a
    `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If
    no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the
    padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in
    each row of the batch).
    """,
    PHI3_START_DOCSTRING,
)
# Copied from transformers.models.llama.modeling_llama.LlamaForSequenceClassification with Llama->Phi3, LLAMA->PHI3, self.transformer->self.model, transformer_outputs->model_outputs
class Phi3ForSequenceClassification(Phi3PreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.model = Phi3Model(config)
        self.score = nn.Linear(config.hidden_size, self.num_labels, bias=False)

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.model.embed_tokens

    def set_input_embeddings(self, value):
        self.model.embed_tokens = value

    @add_start_docstrings_to_model_forward(PHI3_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids: torch.LongTensor = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, SequenceClassifierOutputWithPast]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        model_outputs = self.model(
            input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        hidden_states = model_outputs[0]
        logits = self.score(hidden_states)

        if input_ids is not None:
            batch_size = input_ids.shape[0]
        else:
            batch_size = inputs_embeds.shape[0]

        if self.config.pad_token_id is None and batch_size != 1:
            raise ValueError("Cannot handle batch sizes > 1 if no padding token is defined.")
        if self.config.pad_token_id is None:
            sequence_lengths = -1
        else:
            if input_ids is not None:
                # if no pad token found, use modulo instead of reverse indexing for ONNX compatibility
                sequence_lengths = torch.eq(input_ids, self.config.pad_token_id).int().argmax(-1) - 1
                sequence_lengths = sequence_lengths % input_ids.shape[-1]
                sequence_lengths = sequence_lengths.to(logits.device)
            else:
                sequence_lengths = -1

        pooled_logits = logits[torch.arange(batch_size, device=logits.device), sequence_lengths]

        loss = None
        if labels is not None:
            labels = labels.to(logits.device)
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(pooled_logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(pooled_logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(pooled_logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(pooled_logits, labels)
        if not return_dict:
            output = (pooled_logits,) + model_outputs[1:]
            return ((loss,) + output) if loss is not None else output

        return SequenceClassifierOutputWithPast(
            loss=loss,
            logits=pooled_logits,
            past_key_values=model_outputs.past_key_values,
            hidden_states=model_outputs.hidden_states,
            attentions=model_outputs.attentions,
        )
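
# Editor's note (toy example; `pad_token_id=0` is an assumption for illustration):
# the pooling in `Phi3ForSequenceClassification.forward` selects the last
# non-padding position of each row before scoring:
#
#     >>> import torch
#     >>> input_ids = torch.tensor([[5, 6, 7, 0, 0]])
#     >>> torch.eq(input_ids, 0).int().argmax(-1) - 1
#     tensor([2])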

@add_start_docstrings(
    """
    [`Phi3Model`] with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for
    Named-Entity-Recognition (NER) tasks.
    """,
    PHI3_START_DOCSTRING,
)
# Copied from transformers.models.mpt.modeling_mpt.MptForTokenClassification with Mpt->Phi3,MPT->PHI3,self.transformer->self.model,transformer_outputs->model_outputs
class Phi3ForTokenClassification(Phi3PreTrainedModel):
    def __init__(self, config: Phi3Config):
        super().__init__(config)
        self.num_labels = config.num_labels

        self.model = Phi3Model(config)
        if hasattr(config, "classifier_dropout") and config.classifier_dropout is not None:
            classifier_dropout = config.classifier_dropout
        elif hasattr(config, "hidden_dropout") and config.hidden_dropout is not None:
            classifier_dropout = config.hidden_dropout
        else:
            classifier_dropout = 0.1
        self.dropout = nn.Dropout(classifier_dropout)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(PHI3_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=TokenClassifierOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Tuple[Tuple[torch.Tensor, torch.Tensor], ...]] = None,
        attention_mask: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        labels: Optional[torch.Tensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        **deprecated_arguments,
    ) -> Union[Tuple[torch.Tensor], TokenClassifierOutput]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        model_outputs = self.model(
            input_ids,
            past_key_values=past_key_values,
            attention_mask=attention_mask,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        hidden_states = model_outputs[0]
        hidden_states = self.dropout(hidden_states)
        logits = self.classifier(hidden_states)

        loss = None
        if labels is not None:
            # move labels to correct device to enable model parallelism
            labels = labels.to(logits.device)
            batch_size, seq_length = labels.shape
            loss_fct = CrossEntropyLoss()
            loss = loss_fct(
                logits.view(batch_size * seq_length, self.num_labels), labels.view(batch_size * seq_length)
            )

        if not return_dict:
            output = (logits,) + model_outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return TokenClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=model_outputs.hidden_states,
            attentions=model_outputs.attentions,
        )

faiss_attn/source/modeling_qwen2.py
ADDED
@@ -0,0 +1,1563 @@
# coding=utf-8
# Copyright 2024 The Qwen team, Alibaba Group and the HuggingFace Inc. team. All rights reserved.
#
# This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
# and OPT implementations in this library. It has been modified from its
# original forms to accommodate minor architectural differences compared
# to GPT-NeoX and OPT used by the Meta AI team that trained the model.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch Qwen2 model."""
import inspect
import math
import warnings
from typing import List, Optional, Tuple, Union, Any

import torch
import torch.nn.functional as F
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss

from transformers.activations import ACT2FN
from transformers.cache_utils import Cache, DynamicCache
from transformers.modeling_attn_mask_utils import (
    AttentionMaskConverter,
    _prepare_4d_attention_mask,
    _prepare_4d_causal_attention_mask,
    _prepare_4d_causal_attention_mask_for_sdpa,
)

from transformers.modeling_outputs import ModelOutput, BaseModelOutputWithPast, CausalLMOutputWithPast, SequenceClassifierOutputWithPast
#from .configuration_qwen2 import Qwen2Config
from transformers.models.qwen2.configuration_qwen2 import Qwen2Config
from transformers.modeling_utils import PreTrainedModel
from transformers.pytorch_utils import ALL_LAYERNORM_LAYERS, is_torch_greater_or_equal_than_1_13
from transformers.utils import (
    add_start_docstrings,
    add_start_docstrings_to_model_forward,
    is_flash_attn_2_available,
    is_flash_attn_greater_or_equal_2_10,
    logging,
    replace_return_docstrings,
)
from transformers.utils.import_utils import is_torch_fx_available
from transformers import LlamaConfig


if is_flash_attn_2_available():
    from flash_attn import flash_attn_func, flash_attn_varlen_func
    from flash_attn.bert_padding import index_first_axis, pad_input, unpad_input  # noqa

    _flash_supports_window_size = "window_size" in list(inspect.signature(flash_attn_func).parameters)

logger = logging.get_logger(__name__)


_CHECKPOINT_FOR_DOC = "Qwen/Qwen2-7B-beta"
_CONFIG_FOR_DOC = "Qwen2Config"

QWEN2_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "Qwen/Qwen2-7B-beta",
    # See all Qwen2 models at https://huggingface.co/models?filter=qwen2
]


# Copied from transformers.models.llama.modeling_llama._get_unpad_data
def _get_unpad_data(attention_mask):
    seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32)
    indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten()
    max_seqlen_in_batch = seqlens_in_batch.max().item()
    cu_seqlens = F.pad(torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.int32), (1, 0))
    return (
        indices,
        cu_seqlens,
        max_seqlen_in_batch,
    )
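
# Editor's sketch (toy mask chosen for illustration): the helper reports the flat
# indices of real tokens, cumulative sequence lengths, and the longest unpadded length:
#
#     >>> import torch
#     >>> mask = torch.tensor([[1, 1, 0], [1, 1, 1]])
#     >>> indices, cu_seqlens, max_len = _get_unpad_data(mask)
#     >>> cu_seqlens.tolist(), max_len
#     ([0, 2, 5], 3)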

# Copied from transformers.models.llama.modeling_llama.LlamaRMSNorm with Llama->Qwen2
class Qwen2RMSNorm(nn.Module):
    def __init__(self, hidden_size, eps=1e-6):
        """
        Qwen2RMSNorm is equivalent to T5LayerNorm
        """
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.variance_epsilon = eps

    def forward(self, hidden_states):
        input_dtype = hidden_states.dtype
        hidden_states = hidden_states.to(torch.float32)
        variance = hidden_states.pow(2).mean(-1, keepdim=True)
        hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
        return self.weight * hidden_states.to(input_dtype)
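
# Editor's note (added commentary, not in the upstream file): the forward above
# computes, in float32,
#     y = weight * x / sqrt(mean(x**2, dim=-1) + eps)
# i.e. RMS normalization without mean-centering, cast back to the input dtype at the end.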

# Copied from transformers.models.mistral.modeling_mistral.MistralRotaryEmbedding with Mistral->Qwen2
class Qwen2RotaryEmbedding(nn.Module):
    def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None):
        super().__init__()

        self.dim = dim
        self.max_position_embeddings = max_position_embeddings
        self.base = base
        inv_freq = 1.0 / (self.base ** (torch.arange(0, self.dim, 2, dtype=torch.int64).float().to(device) / self.dim))
        self.register_buffer("inv_freq", inv_freq, persistent=False)

        # Build here to make `torch.jit.trace` work.
        self._set_cos_sin_cache(
            seq_len=max_position_embeddings, device=self.inv_freq.device, dtype=torch.get_default_dtype()
        )

    def _set_cos_sin_cache(self, seq_len, device, dtype):
        self.max_seq_len_cached = seq_len
        t = torch.arange(self.max_seq_len_cached, device=device, dtype=torch.int64).type_as(self.inv_freq)

        freqs = torch.outer(t, self.inv_freq)
        # Different from paper, but it uses a different permutation in order to obtain the same calculation
        emb = torch.cat((freqs, freqs), dim=-1)
        self.register_buffer("cos_cached", emb.cos().to(dtype), persistent=False)
        self.register_buffer("sin_cached", emb.sin().to(dtype), persistent=False)

    def forward(self, x, seq_len=None):
        # x: [bs, num_attention_heads, seq_len, head_size]
        if seq_len > self.max_seq_len_cached:
            self._set_cos_sin_cache(seq_len=seq_len, device=x.device, dtype=x.dtype)

        return (
            self.cos_cached[:seq_len].to(dtype=x.dtype),
            self.sin_cached[:seq_len].to(dtype=x.dtype),
        )

# Copied from transformers.models.llama.modeling_llama.rotate_half
def rotate_half(x):
    """Rotates half the hidden dims of the input."""
    x1 = x[..., : x.shape[-1] // 2]
    x2 = x[..., x.shape[-1] // 2 :]
    return torch.cat((-x2, x1), dim=-1)

# Copied from transformers.models.mistral.modeling_mistral.apply_rotary_pos_emb
def apply_rotary_pos_emb(q, k, cos, sin, position_ids, unsqueeze_dim=1):
    """Applies Rotary Position Embedding to the query and key tensors.

    Args:
        q (`torch.Tensor`): The query tensor.
        k (`torch.Tensor`): The key tensor.
        cos (`torch.Tensor`): The cosine part of the rotary embedding.
        sin (`torch.Tensor`): The sine part of the rotary embedding.
        position_ids (`torch.Tensor`):
            The position indices of the tokens corresponding to the query and key tensors. For example, this can be
            used to pass offsetted position ids when working with a KV-cache.
        unsqueeze_dim (`int`, *optional*, defaults to 1):
            The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
            sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
            that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
            k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
            cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
            the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
    Returns:
        `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
    """
    cos = cos[position_ids].unsqueeze(unsqueeze_dim)
    sin = sin[position_ids].unsqueeze(unsqueeze_dim)
    q_embed = (q * cos) + (rotate_half(q) * sin)
    k_embed = (k * cos) + (rotate_half(k) * sin)
    return q_embed, k_embed
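
# Editor's sketch (shapes are assumptions for the example): applying the rotary
# embedding is shape-preserving for both query and key:
#
#     >>> import torch
#     >>> q = k = torch.randn(1, 2, 4, 8)               # (batch, heads, seq, head_dim)
#     >>> rope = Qwen2RotaryEmbedding(dim=8)
#     >>> cos, sin = rope(q, seq_len=4)
#     >>> pos = torch.arange(4).unsqueeze(0)
#     >>> apply_rotary_pos_emb(q, k, cos, sin, pos)[0].shape
#     torch.Size([1, 2, 4, 8])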
# Copied from transformers.models.mistral.modeling_mistral.MistralMLP with Mistral->Qwen2
|
| 190 |
+
class Qwen2MLP(nn.Module):
|
| 191 |
+
def __init__(self, config):
|
| 192 |
+
super().__init__()
|
| 193 |
+
self.config = config
|
| 194 |
+
self.hidden_size = config.hidden_size
|
| 195 |
+
self.intermediate_size = config.intermediate_size
|
| 196 |
+
self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
|
| 197 |
+
self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
|
| 198 |
+
self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=False)
|
| 199 |
+
self.act_fn = ACT2FN[config.hidden_act]
|
| 200 |
+
|
| 201 |
+
def forward(self, x):
|
| 202 |
+
return self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x))
|
| 203 |
+
|
| 204 |
+
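# Note: this is the gated (SwiGLU-style) MLP, down_proj(act(gate_proj(x)) * up_proj(x));
# for Qwen2 configs hidden_act is typically "silu", so act_fn is SiLU.
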
# Copied from transformers.models.llama.modeling_llama.repeat_kv
def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
    """
    This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
    num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
    """
    batch, num_key_value_heads, slen, head_dim = hidden_states.shape
    if n_rep == 1:
        return hidden_states
    hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
    return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)

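# Shape sketch for grouped-query attention (illustrative numbers): with 4 KV
# heads and n_rep = 7, a [bs, 4, seq, 128] KV tensor becomes [bs, 28, seq, 128],
# so each KV head is shared by 7 query heads.
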
class Qwen2Attention(nn.Module):
    """
    Multi-headed attention from 'Attention Is All You Need' paper. Modified to use sliding window attention: Longformer
    and "Generating Long Sequences with Sparse Transformers".
    """

    def __init__(self, config: Qwen2Config, layer_idx: Optional[int] = None):
        super().__init__()
        self.config = config
        self.layer_idx = layer_idx
        if layer_idx is None:
            logger.warning_once(
                f"Instantiating {self.__class__.__name__} without passing `layer_idx` is not recommended and will "
                "lead to errors during the forward call, if caching is used. Please make sure to provide a `layer_idx` "
                "when creating this class."
            )

        self.hidden_size = config.hidden_size
        self.num_heads = config.num_attention_heads
        self.head_dim = self.hidden_size // self.num_heads
        self.num_key_value_heads = config.num_key_value_heads
        self.num_key_value_groups = self.num_heads // self.num_key_value_heads
        self.max_position_embeddings = config.max_position_embeddings
        self.rope_theta = config.rope_theta
        self.is_causal = True
        self.attention_dropout = config.attention_dropout

        if (self.head_dim * self.num_heads) != self.hidden_size:
            raise ValueError(
                f"hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size}"
                f" and `num_heads`: {self.num_heads})."
            )
        self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=True)
        self.k_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=True)
        self.v_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=True)
        self.o_proj = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=False)

        self.rotary_emb = Qwen2RotaryEmbedding(
            self.head_dim,
            max_position_embeddings=self.max_position_embeddings,
            base=self.rope_theta,
        )

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_value: Optional[Cache] = None,
        output_attentions: bool = False,
        use_cache: bool = False,
        **kwargs,
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
        if "padding_mask" in kwargs:
            warnings.warn(
                "Passing `padding_mask` is deprecated and will be removed in v4.37. Please make sure to use `attention_mask` instead."
            )
        bsz, q_len, _ = hidden_states.size()

        query_states = self.q_proj(hidden_states)
        key_states = self.k_proj(hidden_states)
        value_states = self.v_proj(hidden_states)

        query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
        key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
        value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)

        kv_seq_len = key_states.shape[-2]
        if past_key_value is not None:
            if self.layer_idx is None:
                raise ValueError(
                    f"The cache structure has changed since version v4.36. If you are using {self.__class__.__name__} "
                    "for auto-regressive decoding with k/v caching, please make sure to initialize the attention class "
                    "with a layer index."
                )
            kv_seq_len += past_key_value.get_usable_length(kv_seq_len, self.layer_idx)
        cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len)
        query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids)

        if past_key_value is not None:
            cache_kwargs = {"sin": sin, "cos": cos}  # Specific to RoPE models
            key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)

        # repeat k/v heads if n_kv_heads < n_heads
        key_states = repeat_kv(key_states, self.num_key_value_groups)
        value_states = repeat_kv(value_states, self.num_key_value_groups)

        attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim)

        if attn_weights.size() != (bsz, self.num_heads, q_len, kv_seq_len):
            raise ValueError(
                f"Attention weights should be of size {(bsz, self.num_heads, q_len, kv_seq_len)}, but is"
                f" {attn_weights.size()}"
            )

        if attention_mask is not None:
            if attention_mask.size() != (bsz, 1, q_len, kv_seq_len):
                raise ValueError(
                    f"Attention mask should be of size {(bsz, 1, q_len, kv_seq_len)}, but is {attention_mask.size()}"
                )

            attn_weights = attn_weights + attention_mask

        # upcast attention to fp32
        attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype)
        attn_weights = nn.functional.dropout(attn_weights, p=self.attention_dropout, training=self.training)
        attn_output = torch.matmul(attn_weights, value_states)

        if attn_output.size() != (bsz, self.num_heads, q_len, self.head_dim):
            raise ValueError(
                f"`attn_output` should be of size {(bsz, self.num_heads, q_len, self.head_dim)}, but is"
                f" {attn_output.size()}"
            )

        attn_output = attn_output.transpose(1, 2).contiguous()
        attn_output = attn_output.reshape(bsz, q_len, self.hidden_size)

        attn_output = self.o_proj(attn_output)

        if not output_attentions:
            attn_weights = None

        return attn_output, attn_weights, past_key_value


class Qwen2FlashAttention2(Qwen2Attention):
    """
    Qwen2 flash attention module, following Qwen2 attention module. This module inherits from `Qwen2Attention`
    as the weights of the module stay untouched. The only required change would be on the forward pass
    where it needs to correctly call the public API of flash attention and deal with padding tokens
    in case the input contains any of them. Additionally, for sliding window attention, we apply SWA only to the bottom
    config.max_window_layers layers.
    """

    # Copied from transformers.models.llama.modeling_llama.LlamaFlashAttention2.__init__
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        # TODO: Should be removed once Flash Attention for RoCm is bumped to 2.1.
        # flash_attn<2.1 generates top-left aligned causal mask, while what is needed here is bottom-right alignment, that was made default for flash_attn>=2.1. This attribute is used to handle this difference. Reference: https://github.com/Dao-AILab/flash-attention/releases/tag/v2.1.0.
        # Beware that with flash_attn<2.1, using q_seqlen != k_seqlen (except for the case q_seqlen == 1) produces a wrong mask (top-left).
        self._flash_attn_uses_top_left_mask = not is_flash_attn_greater_or_equal_2_10()

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_value: Optional[Cache] = None,
        output_attentions: bool = False,
        use_cache: bool = False,
        **kwargs,
    ):
        if "padding_mask" in kwargs:
            warnings.warn(
                "Passing `padding_mask` is deprecated and will be removed in v4.37. Please make sure to use `attention_mask` instead."
            )

            # overwrite attention_mask with padding_mask
            attention_mask = kwargs.pop("padding_mask")
        if output_attentions:
            _, inspect, attn_weights, _ = self.forward_torch(
                hidden_states,
                attention_mask,
                position_ids,
                past_key_value,
                output_attentions,
                use_cache=False,
                **kwargs,
            )
        else:
            attn_weights = None
            inspect = None
        bsz, q_len, _ = hidden_states.size()

        query_states = self.q_proj(hidden_states)
        key_states = self.k_proj(hidden_states)
        value_states = self.v_proj(hidden_states)

        query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
        key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
        value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)

        kv_seq_len = key_states.shape[-2]
        if past_key_value is not None:
            if self.layer_idx is None:
                raise ValueError(
                    f"The cache structure has changed since version v4.36. If you are using {self.__class__.__name__} "
                    "for auto-regressive decoding with k/v caching, please make sure to initialize the attention class "
                    "with a layer index."
                )
            kv_seq_len += past_key_value.get_usable_length(kv_seq_len, self.layer_idx)

        # Because the input can be padded, the absolute sequence length depends on the max position id.
        rotary_seq_len = max(kv_seq_len, position_ids[:, -1].max().item()) + 1
        cos, sin = self.rotary_emb(value_states, seq_len=rotary_seq_len)

        query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids)

        use_sliding_windows = (
            _flash_supports_window_size
            and getattr(self.config, "sliding_window", None) is not None
            and kv_seq_len > self.config.sliding_window
            and self.config.use_sliding_window
        )

        if not _flash_supports_window_size:
            logger.warning_once(
                "The current flash attention version does not support sliding window attention, for a more memory efficient implementation"
                " make sure to upgrade flash-attn library."
            )

        if past_key_value is not None:
            # Activate slicing cache only if the config has a value for the `sliding_window` attribute
            cache_has_contents = past_key_value.get_seq_length(self.layer_idx) > 0
            if (
                getattr(self.config, "sliding_window", None) is not None
                and kv_seq_len > self.config.sliding_window
                and cache_has_contents
            ):
                slicing_tokens = 1 - self.config.sliding_window

                past_key = past_key_value[self.layer_idx][0]
                past_value = past_key_value[self.layer_idx][1]

                past_key = past_key[:, :, slicing_tokens:, :].contiguous()
                past_value = past_value[:, :, slicing_tokens:, :].contiguous()

                if past_key.shape[-2] != self.config.sliding_window - 1:
                    raise ValueError(
                        f"past key must have a shape of (`batch_size, num_heads, self.config.sliding_window-1, head_dim`), got"
                        f" {past_key.shape}"
                    )

                if attention_mask is not None:
                    attention_mask = attention_mask[:, slicing_tokens:]
                    attention_mask = torch.cat([attention_mask, torch.ones_like(attention_mask[:, -1:])], dim=-1)

            cache_kwargs = {"sin": sin, "cos": cos}  # Specific to RoPE models
            key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)

        # repeat k/v heads if n_kv_heads < n_heads
        key_states = repeat_kv(key_states, self.num_key_value_groups)
        value_states = repeat_kv(value_states, self.num_key_value_groups)
        dropout_rate = 0.0 if not self.training else self.attention_dropout

        # In PEFT, usually we cast the layer norms in float32 for training stability reasons,
        # therefore the input hidden states get silently cast in float32. Hence, we need to
        # cast them back in float16 just to be sure everything works as expected.
        input_dtype = query_states.dtype
        if input_dtype == torch.float32:
            if torch.is_autocast_enabled():
                target_dtype = torch.get_autocast_gpu_dtype()
            # Handle the case where the model is quantized
            elif hasattr(self.config, "_pre_quantization_dtype"):
                target_dtype = self.config._pre_quantization_dtype
            else:
                target_dtype = self.q_proj.weight.dtype

            logger.warning_once(
                f"The input hidden states seems to be silently casted in float32, this might be related to"
                f" the fact you have upcasted embedding or layer norm layers in float32. We will cast back the input in"
                f" {target_dtype}."
            )

            query_states = query_states.to(target_dtype)
            key_states = key_states.to(target_dtype)
            value_states = value_states.to(target_dtype)

        # Reshape to the expected shape for Flash Attention
        query_states = query_states.transpose(1, 2)
        key_states = key_states.transpose(1, 2)
        value_states = value_states.transpose(1, 2)

        attn_output = self._flash_attention_forward(
            query_states,
            key_states,
            value_states,
            attention_mask,
            q_len,
            dropout=dropout_rate,
            use_sliding_windows=use_sliding_windows,
        )

        attn_output = attn_output.reshape(bsz, q_len, self.hidden_size).contiguous()
        attn_output = self.o_proj(attn_output)

        if not output_attentions:
            attn_weights = None

        return attn_output, attn_weights, past_key_value

    def forward_torch(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_value: Optional[Any] = None,
        output_attentions: bool = False,
        use_cache: bool = False,
        **kwargs,
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
        if "padding_mask" in kwargs:
            warnings.warn(
                "Passing `padding_mask` is deprecated and will be removed in v4.37. Please make sure to use `attention_mask` instead."
            )

        bsz, q_len, _ = hidden_states.size()
        inspect = {}

        query_states = self.q_proj(hidden_states)
        key_states = self.k_proj(hidden_states)
        value_states = self.v_proj(hidden_states)

        query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
        key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
        value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)

        kv_seq_len = key_states.shape[-2]
        # print(past_key_value)
        if past_key_value is not None:
            if self.layer_idx is None:
                raise ValueError(
                    f"The cache structure has changed since version v4.36. If you are using {self.__class__.__name__} "
                    "for auto-regressive decoding with k/v caching, please make sure to initialize the attention class "
                    "with a layer index."
                )
            kv_seq_len += past_key_value.get_usable_length(kv_seq_len, self.layer_idx)
        cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len)
        query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids)

        if past_key_value is not None:
            if use_cache:
                cache_kwargs = {"sin": sin, "cos": cos}  # Specific to RoPE models
                key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)
            else:
                key_states = torch.cat([past_key_value.key_cache[self.layer_idx], key_states], dim=-2)
                value_states = torch.cat([past_key_value.value_cache[self.layer_idx], value_states], dim=-2)

        key_states = repeat_kv(key_states, self.num_key_value_groups)
        value_states = repeat_kv(value_states, self.num_key_value_groups)

        # print(query_states.size())
        # print(key_states.size())
        inspect["query"] = query_states
        inspect["key"] = key_states
        attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim)
        # write our mask here
        # print(attn_weights.size())  # [batch_size, head, q, c]

        if attn_weights.size() != (bsz, self.num_heads, q_len, kv_seq_len):
            raise ValueError(
                f"Attention weights should be of size {(bsz, self.num_heads, q_len, kv_seq_len)}, but is"
                f" {attn_weights.size()}"
            )

        if attention_mask is not None:
            if attention_mask.size() != (bsz, 1, q_len, kv_seq_len):
                raise ValueError(
                    f"Attention mask should be of size {(bsz, 1, q_len, kv_seq_len)}, but is {attention_mask.size()}"
                )
            attn_weights = attn_weights + attention_mask
        if "block_list" in kwargs:
            for h in kwargs["block_list"]:
                if self.layer_idx == h[0]:
                    '''
                    if h[1] == 0:
                        target_head = 1
                    elif h[1] == 31:
                        target_head = 30
                    else:
                        target_head = h[1] - 1

                    attn_weights[:, h[1], :, :] = attn_weights[:, target_head, :, :]
                    '''
                    attn_weights[:, h[1], :, :] = 0
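                    # Head-ablation hook: zeroing this head's logits makes its
                    # post-softmax attention uniform over all key positions
                    # (note the additive causal mask applied above is also
                    # overwritten for this head), which knocks the head out
                    # when measuring its contribution.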
        # upcast attention to fp32
        attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype)
        attn_weights = nn.functional.dropout(attn_weights, p=self.attention_dropout, training=self.training)
        attn_output = torch.matmul(attn_weights, value_states)
        inspect["attn_output_before_o_proj"] = attn_output

        if attn_output.size() != (bsz, self.num_heads, q_len, self.head_dim):
            raise ValueError(
                f"`attn_output` should be of size {(bsz, self.num_heads, q_len, self.head_dim)}, but is"
                f" {attn_output.size()}"
            )

        attn_output = attn_output.transpose(1, 2).contiguous()

        attn_output = attn_output.reshape(bsz, q_len, self.hidden_size)

        attn_output = self.o_proj(attn_output)

        if not output_attentions:
            attn_weights = None

        return attn_output, inspect, attn_weights, past_key_value

    def _flash_attention_forward(
        self,
        query_states,
        key_states,
        value_states,
        attention_mask,
        query_length,
        dropout=0.0,
        softmax_scale=None,
        use_sliding_windows=False,
    ):
        """
        Calls the forward method of Flash Attention - if the input hidden states contain at least one padding token,
        first unpads the input, then computes the attention scores and pads the final attention scores.

        Args:
            query_states (`torch.Tensor`):
                Input query states to be passed to Flash Attention API
            key_states (`torch.Tensor`):
                Input key states to be passed to Flash Attention API
            value_states (`torch.Tensor`):
                Input value states to be passed to Flash Attention API
            attention_mask (`torch.Tensor`):
                The padding mask - corresponds to a tensor of size `(batch_size, seq_len)` where 0 stands for the
                position of padding tokens and 1 for the position of non-padding tokens.
            dropout (`float`, *optional*):
                Attention dropout
            softmax_scale (`float`, *optional*):
                The scaling of QK^T before applying softmax. Defaults to 1 / sqrt(head_dim)
            use_sliding_windows (`bool`, *optional*):
                Whether to activate sliding window attention.
        """
        if not self._flash_attn_uses_top_left_mask:
            causal = self.is_causal
        else:
            # TODO: Remove the `query_length != 1` check once Flash Attention for RoCm is bumped to 2.1. For details, please see the comment in LlamaFlashAttention2 __init__.
            causal = self.is_causal and query_length != 1

        # Decide whether to use SWA or not by layer index.
        if use_sliding_windows and self.layer_idx >= self.config.max_window_layers:
            use_sliding_windows = False

        # Contains at least one padding token in the sequence
        if attention_mask is not None:
            batch_size = query_states.shape[0]
            query_states, key_states, value_states, indices_q, cu_seq_lens, max_seq_lens = self._upad_input(
                query_states, key_states, value_states, attention_mask, query_length
            )

            cu_seqlens_q, cu_seqlens_k = cu_seq_lens
            max_seqlen_in_batch_q, max_seqlen_in_batch_k = max_seq_lens

            if not use_sliding_windows:
                attn_output_unpad = flash_attn_varlen_func(
                    query_states,
                    key_states,
                    value_states,
                    cu_seqlens_q=cu_seqlens_q,
                    cu_seqlens_k=cu_seqlens_k,
                    max_seqlen_q=max_seqlen_in_batch_q,
                    max_seqlen_k=max_seqlen_in_batch_k,
                    dropout_p=dropout,
                    softmax_scale=softmax_scale,
                    causal=causal,
                )
            else:
                attn_output_unpad = flash_attn_varlen_func(
                    query_states,
                    key_states,
                    value_states,
                    cu_seqlens_q=cu_seqlens_q,
                    cu_seqlens_k=cu_seqlens_k,
                    max_seqlen_q=max_seqlen_in_batch_q,
                    max_seqlen_k=max_seqlen_in_batch_k,
                    dropout_p=dropout,
                    softmax_scale=softmax_scale,
                    causal=causal,
                    window_size=(self.config.sliding_window, self.config.sliding_window),
                )

            attn_output = pad_input(attn_output_unpad, indices_q, batch_size, query_length)
        else:
            if not use_sliding_windows:
                attn_output = flash_attn_func(
                    query_states,
                    key_states,
                    value_states,
                    dropout,
                    softmax_scale=softmax_scale,
                    causal=causal,
                )
            else:
                attn_output = flash_attn_func(
                    query_states,
                    key_states,
                    value_states,
                    dropout,
                    softmax_scale=softmax_scale,
                    causal=causal,
                    window_size=(self.config.sliding_window, self.config.sliding_window),
                )

        return attn_output

    # Copied from transformers.models.mistral.modeling_mistral.MistralFlashAttention2._upad_input
    def _upad_input(self, query_layer, key_layer, value_layer, attention_mask, query_length):
        batch_size, kv_seq_len, num_heads, head_dim = key_layer.shape

        # On the first iteration we need to properly re-create the padding mask
        # by slicing it on the proper place
        if kv_seq_len != attention_mask.shape[-1]:
            attention_mask_num_tokens = attention_mask.shape[-1]
            attention_mask = attention_mask[:, attention_mask_num_tokens - kv_seq_len :]

        indices_k, cu_seqlens_k, max_seqlen_in_batch_k = _get_unpad_data(attention_mask)

        key_layer = index_first_axis(key_layer.reshape(batch_size * kv_seq_len, num_heads, head_dim), indices_k)
        value_layer = index_first_axis(value_layer.reshape(batch_size * kv_seq_len, num_heads, head_dim), indices_k)

        if query_length == kv_seq_len:
            query_layer = index_first_axis(
                query_layer.reshape(batch_size * kv_seq_len, num_heads, head_dim), indices_k
            )
            cu_seqlens_q = cu_seqlens_k
            max_seqlen_in_batch_q = max_seqlen_in_batch_k
            indices_q = indices_k
        elif query_length == 1:
            max_seqlen_in_batch_q = 1
            cu_seqlens_q = torch.arange(
                batch_size + 1, dtype=torch.int32, device=query_layer.device
            )  # There is a memcpy here, that is very bad.
            indices_q = cu_seqlens_q[:-1]
            query_layer = query_layer.squeeze(1)
        else:
            # The -q_len: slice assumes left padding.
            attention_mask = attention_mask[:, -query_length:]
            query_layer, indices_q, cu_seqlens_q, max_seqlen_in_batch_q = unpad_input(query_layer, attention_mask)

        return (
            query_layer,
            key_layer,
            value_layer,
            indices_q,
            (cu_seqlens_q, cu_seqlens_k),
            (max_seqlen_in_batch_q, max_seqlen_in_batch_k),
        )


# Copied from transformers.models.mistral.modeling_mistral.MistralSdpaAttention with Mistral->Qwen2
class Qwen2SdpaAttention(Qwen2Attention):
    """
    Qwen2 attention module using torch.nn.functional.scaled_dot_product_attention. This module inherits from
    `Qwen2Attention` as the weights of the module stay untouched. The only changes are on the forward pass to adapt to
    the SDPA API.
    """

    # Adapted from Qwen2Attention.forward
    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_value: Optional[Cache] = None,
        output_attentions: bool = False,
        use_cache: bool = False,
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
        if output_attentions:
            # TODO: Improve this warning with e.g. `model.config.attn_implementation = "manual"` once this is implemented.
            logger.warning_once(
                "Qwen2Model is using Qwen2SdpaAttention, but `torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`. Falling back to the manual attention implementation, "
                'but specifying the manual implementation will be required from Transformers version v5.0.0 onwards. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.'
            )
            return super().forward(
                hidden_states=hidden_states,
                attention_mask=attention_mask,
                position_ids=position_ids,
                past_key_value=past_key_value,
                output_attentions=output_attentions,
                use_cache=use_cache,
            )

        bsz, q_len, _ = hidden_states.size()

        query_states = self.q_proj(hidden_states)
        key_states = self.k_proj(hidden_states)
        value_states = self.v_proj(hidden_states)

        query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
        key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
        value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)

        kv_seq_len = key_states.shape[-2]
        if past_key_value is not None:
            kv_seq_len += past_key_value.get_usable_length(kv_seq_len, self.layer_idx)
        cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len)

        query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids)

        if past_key_value is not None:
            cache_kwargs = {"sin": sin, "cos": cos}  # Specific to RoPE models
            key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)

        key_states = repeat_kv(key_states, self.num_key_value_groups)
        value_states = repeat_kv(value_states, self.num_key_value_groups)

        if attention_mask is not None:
            if attention_mask.size() != (bsz, 1, q_len, kv_seq_len):
                raise ValueError(
                    f"Attention mask should be of size {(bsz, 1, q_len, kv_seq_len)}, but is {attention_mask.size()}"
                )

        # SDPA with memory-efficient backend is currently (torch==2.1.2) bugged with non-contiguous inputs with custom attn_mask,
        # Reference: https://github.com/pytorch/pytorch/issues/112577.
        if query_states.device.type == "cuda" and attention_mask is not None:
            query_states = query_states.contiguous()
            key_states = key_states.contiguous()
            value_states = value_states.contiguous()

        attn_output = torch.nn.functional.scaled_dot_product_attention(
            query_states,
            key_states,
            value_states,
            attn_mask=attention_mask,
            dropout_p=self.attention_dropout if self.training else 0.0,
            # The q_len > 1 is necessary to match with AttentionMaskConverter.to_causal_4d that does not create a causal mask in case q_len == 1.
            is_causal=self.is_causal and attention_mask is None and q_len > 1,
        )

        attn_output = attn_output.transpose(1, 2).contiguous()
        attn_output = attn_output.view(bsz, q_len, self.hidden_size)

        attn_output = self.o_proj(attn_output)

        return attn_output, None, past_key_value


QWEN2_ATTENTION_CLASSES = {
    "eager": Qwen2Attention,
    "flash_attention_2": Qwen2FlashAttention2,
    "sdpa": Qwen2SdpaAttention,
}

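# The concrete attention class is picked per layer from this mapping via
# config._attn_implementation (e.g. attn_implementation="flash_attention_2"
# at from_pretrained time); see Qwen2DecoderLayer.__init__ below.
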

class Qwen2DecoderLayer(nn.Module):
    def __init__(self, config: Qwen2Config, layer_idx: int):
        super().__init__()
        self.hidden_size = config.hidden_size

        if config.use_sliding_window and config._attn_implementation != "flash_attention_2":
            logger.warning_once(
                f"Sliding Window Attention is enabled but not implemented for `{config._attn_implementation}`; "
                "unexpected results may be encountered."
            )
        self.self_attn = QWEN2_ATTENTION_CLASSES[config._attn_implementation](config, layer_idx)

        self.mlp = Qwen2MLP(config)
        self.input_layernorm = Qwen2RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.post_attention_layernorm = Qwen2RMSNorm(config.hidden_size, eps=config.rms_norm_eps)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_value: Optional[Tuple[torch.Tensor]] = None,
        output_attentions: Optional[bool] = False,
        use_cache: Optional[bool] = False,
        attn_mode: str = "flash",
        **kwargs,
    ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
        if "padding_mask" in kwargs:
            warnings.warn(
                "Passing `padding_mask` is deprecated and will be removed in v4.37. "
                "Please make sure to use `attention_mask` instead."
            )
        """
        Args:
            hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
            attention_mask (`torch.FloatTensor`, *optional*): attention mask of size
                `(batch, sequence_length)` where padding elements are indicated by 0.
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
            use_cache (`bool`, *optional*):
                If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
                (see `past_key_values`).
            past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states
        """

        residual = hidden_states

        hidden_states = self.input_layernorm(hidden_states)

        # Self Attention
        if attn_mode == "flash":
            hidden_states, self_attn_weights, present_key_value = self.self_attn(
                hidden_states=hidden_states,
                attention_mask=attention_mask,
                position_ids=position_ids,
                past_key_value=past_key_value,
                output_attentions=output_attentions,
                use_cache=use_cache,
            )
        else:
            hidden_states, inspect, self_attn_weights, present_key_value = self.self_attn.forward_torch(
                hidden_states=hidden_states,
                attention_mask=attention_mask,
                position_ids=position_ids,
                past_key_value=past_key_value,
                output_attentions=output_attentions,
                use_cache=use_cache,
                **kwargs,  # forwards block_list (head ablation) to forward_torch
            )

        hidden_states = residual + hidden_states

        # Fully Connected
        residual = hidden_states
        hidden_states = self.post_attention_layernorm(hidden_states)
        hidden_states = self.mlp(hidden_states)
        hidden_states = residual + hidden_states

        outputs = (hidden_states,)

        if output_attentions:
            outputs += (self_attn_weights,)

        if use_cache:
            outputs += (present_key_value,)

        return outputs


QWEN2_START_DOCSTRING = r"""
    This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
    library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
    etc.)

    This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
    Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
    and behavior.

    Parameters:
        config ([`Qwen2Config`]):
            Model configuration class with all the parameters of the model. Initializing with a config file does not
            load the weights associated with the model, only the configuration. Check out the
            [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""


@add_start_docstrings(
    "The bare Qwen2 Model outputting raw hidden-states without any specific head on top.",
    QWEN2_START_DOCSTRING,
)
class Qwen2PreTrainedModel(PreTrainedModel):
    config_class = Qwen2Config
    base_model_prefix = "model"
    supports_gradient_checkpointing = True
    _no_split_modules = ["Qwen2DecoderLayer"]
    _skip_keys_device_placement = "past_key_values"
    _supports_flash_attn_2 = True
    _supports_sdpa = True
    _supports_cache_class = True

    def _init_weights(self, module):
        std = self.config.initializer_range
        if isinstance(module, nn.Linear):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.padding_idx is not None:
                module.weight.data[module.padding_idx].zero_()


QWEN2_INPUTS_DOCSTRING = r"""
    Args:
        input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
            Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
            it.

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            [What are input IDs?](../glossary#input-ids)
        attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
            Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.

            [What are attention masks?](../glossary#attention-mask)

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
            `past_key_values`).

            If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`]
            and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more
            information on the default strategy.

            - 1 indicates the head is **not masked**,
            - 0 indicates the head is **masked**.
        position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
            config.n_positions - 1]`.

            [What are position IDs?](../glossary#position-ids)
        past_key_values (`Cache` or `tuple(tuple(torch.FloatTensor))`, *optional*):
            Pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
            blocks) that can be used to speed up sequential decoding. This typically consists in the `past_key_values`
            returned by the model at a previous stage of decoding, when `use_cache=True` or `config.use_cache=True`.

            Two formats are allowed:
            - a [`~cache_utils.Cache`] instance;
            - Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of
              shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`). This is also known as the legacy
              cache format.

            The model will output the same cache format that is fed as input. If no `past_key_values` are passed, the
            legacy cache format will be returned.

            If `past_key_values` are used, the user can optionally input only the last `input_ids` (those that don't
            have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `input_ids`
            of shape `(batch_size, sequence_length)`.
        inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
            Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
            is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
            model's internal embedding lookup matrix.
        use_cache (`bool`, *optional*):
            If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
            `past_key_values`).
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
            tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""


@add_start_docstrings(
    "The bare Qwen2 Model outputting raw hidden-states without any specific head on top.",
    QWEN2_START_DOCSTRING,
)
class Qwen2Model(Qwen2PreTrainedModel):
    """
    Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`Qwen2DecoderLayer`]

    Args:
        config: Qwen2Config
    """

    def __init__(self, config: Qwen2Config):
        super().__init__(config)
        self.padding_idx = config.pad_token_id
        self.vocab_size = config.vocab_size

        self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
        self.layers = nn.ModuleList(
            [Qwen2DecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
        )
        self._attn_implementation = config._attn_implementation
        self.norm = Qwen2RMSNorm(config.hidden_size, eps=config.rms_norm_eps)

        self.gradient_checkpointing = False
        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.embed_tokens

    def set_input_embeddings(self, value):
        self.embed_tokens = value

    @add_start_docstrings_to_model_forward(QWEN2_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids: torch.LongTensor = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[List[torch.FloatTensor]] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        use_cache: Optional[bool] = None,
        attn_mode: str = "flash",
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        block_list: list = None,
    ) -> Union[Tuple, BaseModelOutputWithPast]:
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        use_cache = use_cache if use_cache is not None else self.config.use_cache

        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        # retrieve input_ids and inputs_embeds
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time")
        elif input_ids is not None:
            batch_size, seq_length = input_ids.shape
        elif inputs_embeds is not None:
            batch_size, seq_length, _ = inputs_embeds.shape
        else:
            raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds")

        if self.gradient_checkpointing and self.training:
            if use_cache:
                logger.warning_once(
                    "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
                )
                use_cache = False

        past_key_values_length = 0

        if use_cache:
            use_legacy_cache = not isinstance(past_key_values, Cache)
            if use_legacy_cache:
                past_key_values = DynamicCache.from_legacy_cache(past_key_values)
            past_key_values_length = past_key_values.get_usable_length(seq_length)

        if position_ids is None:
            device = input_ids.device if input_ids is not None else inputs_embeds.device
            position_ids = torch.arange(
                past_key_values_length, seq_length + past_key_values_length, dtype=torch.long, device=device
            )
            position_ids = position_ids.unsqueeze(0).view(-1, seq_length)
        else:
            position_ids = position_ids.view(-1, seq_length).long()

        if inputs_embeds is None:
            inputs_embeds = self.embed_tokens(input_ids)

        if attention_mask is not None and self._attn_implementation == "flash_attention_2" and use_cache:
            is_padding_right = attention_mask[:, -1].sum().item() != batch_size
            if is_padding_right:
                raise ValueError(
                    "You are attempting to perform batched generation with padding_side='right'"
                    " this may lead to unexpected behaviour for Flash Attention version of Qwen2. Make sure to "
                    " call `tokenizer.padding_side = 'left'` before tokenizing the input. "
                )

        if self._attn_implementation == "flash_attention_2":
            # 2d mask is passed through the layers
            attention_mask = attention_mask if (attention_mask is not None and 0 in attention_mask) else None
        elif self._attn_implementation == "sdpa" and not output_attentions:
            # output_attentions=True can not be supported when using SDPA, and we fall back on
            # the manual implementation that requires a 4D causal mask in all cases.
            attention_mask = _prepare_4d_causal_attention_mask_for_sdpa(
                attention_mask,
                (batch_size, seq_length),
                inputs_embeds,
                past_key_values_length,
            )
        else:
            # 4d mask is passed through the layers
            attention_mask = _prepare_4d_causal_attention_mask(
                attention_mask,
                (batch_size, seq_length),
                inputs_embeds,
                past_key_values_length,
                sliding_window=self.config.sliding_window,
            )

        hidden_states = inputs_embeds

        # decoder layers
        all_hidden_states = () if output_hidden_states else None
        all_self_attns = () if output_attentions else None
        next_decoder_cache = None
        if block_list:
            kwargs = {"block_list": block_list}
        else:
            kwargs = {}
        # print(block_list)
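        # `block_list` is a list of (layer_idx, head_idx) pairs; it is threaded
        # through each decoder layer as a kwarg so forward_torch can zero out the
        # corresponding heads' attention logits (see Qwen2FlashAttention2.forward_torch).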
        for decoder_layer in self.layers:
            if output_hidden_states:
                all_hidden_states += (hidden_states,)

            if self.gradient_checkpointing and self.training:
                layer_outputs = self._gradient_checkpointing_func(
                    decoder_layer.__call__,
                    hidden_states,
                    attention_mask,
                    position_ids,
                    past_key_values,
                    output_attentions,
                    use_cache,
                )
            else:
                layer_outputs = decoder_layer(
                    hidden_states,
                    attention_mask=attention_mask,
                    position_ids=position_ids,
                    past_key_value=past_key_values,
                    output_attentions=output_attentions,
                    use_cache=use_cache,
                    attn_mode=attn_mode,
                    **kwargs,
                )

            hidden_states = layer_outputs[0]

            if use_cache:
                next_decoder_cache = layer_outputs[2 if output_attentions else 1]

            if output_attentions:
                all_self_attns += (layer_outputs[1],)

        hidden_states = self.norm(hidden_states)

        # add hidden states from the last decoder layer
        if output_hidden_states:
            all_hidden_states += (hidden_states,)

        next_cache = None
        if use_cache:
            next_cache = next_decoder_cache.to_legacy_cache() if use_legacy_cache else next_decoder_cache

        if not return_dict:
            return tuple(v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns] if v is not None)
        return BaseModelOutputWithPast(
            last_hidden_state=hidden_states,
            past_key_values=next_cache,
            hidden_states=all_hidden_states,
            attentions=all_self_attns,
        )


class Qwen2ForCausalLM(Qwen2PreTrainedModel):
    _tied_weights_keys = ["lm_head.weight"]

    def __init__(self, config):
        super().__init__(config)
        self.model = Qwen2Model(config)
        self.vocab_size = config.vocab_size
        self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.model.embed_tokens

    def set_input_embeddings(self, value):
        self.model.embed_tokens = value

    def get_output_embeddings(self):
        return self.lm_head

    def set_output_embeddings(self, new_embeddings):
        self.lm_head = new_embeddings

    def set_decoder(self, decoder):
        self.model = decoder

    def get_decoder(self):
        return self.model

    @add_start_docstrings_to_model_forward(QWEN2_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        input_ids: torch.LongTensor = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[List[torch.FloatTensor]] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        attn_mode: str = "flash",
        block_list: list = None,
    ) -> Union[Tuple, CausalLMOutputWithPast]:
        r"""
        Args:
            labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
                Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
                config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
                (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.

        Returns:

        Example:

        ```python
        >>> from transformers import AutoTokenizer, Qwen2ForCausalLM

        >>> model = Qwen2ForCausalLM.from_pretrained(PATH_TO_CONVERTED_WEIGHTS)
        >>> tokenizer = AutoTokenizer.from_pretrained(PATH_TO_CONVERTED_TOKENIZER)

        >>> prompt = "Hey, are you conscious? Can you talk to me?"
        >>> inputs = tokenizer(prompt, return_tensors="pt")

        >>> # Generate
        >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
        >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
        "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
        ```"""

        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
        outputs = self.model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            attn_mode=attn_mode,
            block_list=block_list,
        )

        hidden_states = outputs[0]
        logits = self.lm_head(hidden_states)
        logits = logits.float()

        loss = None
        if labels is not None:
            # Shift so that tokens < n predict n
            shift_logits = logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            # Flatten the tokens
            loss_fct = CrossEntropyLoss()
            shift_logits = shift_logits.view(-1, self.config.vocab_size)
            shift_labels = shift_labels.view(-1)
            # Enable model parallelism
            shift_labels = shift_labels.to(shift_logits.device)
            loss = loss_fct(shift_logits, shift_labels)

        if not return_dict:
            output = (logits,) + outputs[1:]
            return (loss,) + output if loss is not None else output

        return CausalLMOutputWithPast(
            loss=loss,
            logits=logits,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )

|
| 1376 |
+
def prepare_inputs_for_generation(
|
| 1377 |
+
self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, **kwargs
|
| 1378 |
+
):
|
| 1379 |
+
# Omit tokens covered by past_key_values
|
| 1380 |
+
if past_key_values is not None:
|
| 1381 |
+
if isinstance(past_key_values, Cache):
|
| 1382 |
+
cache_length = past_key_values.get_seq_length()
|
| 1383 |
+
past_length = past_key_values.seen_tokens
|
| 1384 |
+
max_cache_length = past_key_values.get_max_length()
|
| 1385 |
+
else:
|
| 1386 |
+
cache_length = past_length = past_key_values[0][0].shape[2]
|
| 1387 |
+
max_cache_length = None
|
| 1388 |
+
|
| 1389 |
+
# Keep only the unprocessed tokens:
|
| 1390 |
+
# 1 - If the length of the attention_mask exceeds the length of input_ids, then we are in a setting where
|
| 1391 |
+
# some of the inputs are exclusively passed as part of the cache (e.g. when passing input_embeds as
|
| 1392 |
+
# input)
|
| 1393 |
+
if attention_mask is not None and attention_mask.shape[1] > input_ids.shape[1]:
|
| 1394 |
+
input_ids = input_ids[:, -(attention_mask.shape[1] - past_length) :]
|
| 1395 |
+
# 2 - If the past_length is smaller than input_ids', then input_ids holds all input tokens. We can discard
|
| 1396 |
+
# input_ids based on the past_length.
|
| 1397 |
+
elif past_length < input_ids.shape[1]:
|
| 1398 |
+
input_ids = input_ids[:, past_length:]
|
| 1399 |
+
# 3 - Otherwise (past_length >= input_ids.shape[1]), let's assume input_ids only has unprocessed tokens.
|
| 1400 |
+
|
| 1401 |
+
# If we are about to go beyond the maximum cache length, we need to crop the input attention mask.
|
| 1402 |
+
if (
|
| 1403 |
+
max_cache_length is not None
|
| 1404 |
+
and attention_mask is not None
|
| 1405 |
+
and cache_length + input_ids.shape[1] > max_cache_length
|
| 1406 |
+
):
|
| 1407 |
+
attention_mask = attention_mask[:, -max_cache_length:]
|
| 1408 |
+
|
| 1409 |
+
position_ids = kwargs.get("position_ids", None)
|
| 1410 |
+
if attention_mask is not None and position_ids is None:
|
| 1411 |
+
# create position_ids on the fly for batch generation
|
| 1412 |
+
position_ids = attention_mask.long().cumsum(-1) - 1
|
| 1413 |
+
position_ids.masked_fill_(attention_mask == 0, 1)
|
| 1414 |
+
if past_key_values:
|
| 1415 |
+
position_ids = position_ids[:, -input_ids.shape[1] :]
|
| 1416 |
+
|
| 1417 |
+
# if `inputs_embeds` are passed, we only want to use them in the 1st generation step
|
| 1418 |
+
if inputs_embeds is not None and past_key_values is None:
|
| 1419 |
+
model_inputs = {"inputs_embeds": inputs_embeds}
|
| 1420 |
+
else:
|
| 1421 |
+
model_inputs = {"input_ids": input_ids}
|
| 1422 |
+
|
| 1423 |
+
model_inputs.update(
|
| 1424 |
+
{
|
| 1425 |
+
"position_ids": position_ids,
|
| 1426 |
+
"past_key_values": past_key_values,
|
| 1427 |
+
"use_cache": kwargs.get("use_cache"),
|
| 1428 |
+
"attention_mask": attention_mask,
|
| 1429 |
+
}
|
| 1430 |
+
)
|
| 1431 |
+
return model_inputs
|
| 1432 |
+
|
| 1433 |
+
@staticmethod
|
| 1434 |
+
def _reorder_cache(past_key_values, beam_idx):
|
| 1435 |
+
reordered_past = ()
|
| 1436 |
+
for layer_past in past_key_values:
|
| 1437 |
+
reordered_past += (
|
| 1438 |
+
tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past),
|
| 1439 |
+
)
|
| 1440 |
+
return reordered_past
|
| 1441 |
+
|
| 1442 |
+
|
| 1443 |
+
@add_start_docstrings(
|
| 1444 |
+
"""
|
| 1445 |
+
The Qwen2 Model transformer with a sequence classification head on top (linear layer).
|
| 1446 |
+
|
| 1447 |
+
[`Qwen2ForSequenceClassification`] uses the last token in order to do the classification, as other causal models
|
| 1448 |
+
(e.g. GPT-2) do.
|
| 1449 |
+
|
| 1450 |
+
Since it does classification on the last token, it requires to know the position of the last token. If a
|
| 1451 |
+
`pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If
|
| 1452 |
+
no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the
|
| 1453 |
+
padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in
|
| 1454 |
+
each row of the batch).
|
| 1455 |
+
""",
|
| 1456 |
+
QWEN2_START_DOCSTRING,
|
| 1457 |
+
)
|
| 1458 |
+
class Qwen2ForSequenceClassification(Qwen2PreTrainedModel):
|
| 1459 |
+
def __init__(self, config):
|
| 1460 |
+
super().__init__(config)
|
| 1461 |
+
self.num_labels = config.num_labels
|
| 1462 |
+
self.model = Qwen2Model(config)
|
| 1463 |
+
self.score = nn.Linear(config.hidden_size, self.num_labels, bias=False)
|
| 1464 |
+
|
| 1465 |
+
# Initialize weights and apply final processing
|
| 1466 |
+
self.post_init()
|
| 1467 |
+
|
| 1468 |
+
def get_input_embeddings(self):
|
| 1469 |
+
return self.model.embed_tokens
|
| 1470 |
+
|
| 1471 |
+
def set_input_embeddings(self, value):
|
| 1472 |
+
self.model.embed_tokens = value
|
| 1473 |
+
|
| 1474 |
+
@add_start_docstrings_to_model_forward(QWEN2_INPUTS_DOCSTRING)
|
| 1475 |
+
def forward(
|
| 1476 |
+
self,
|
| 1477 |
+
input_ids: torch.LongTensor = None,
|
| 1478 |
+
attention_mask: Optional[torch.Tensor] = None,
|
| 1479 |
+
position_ids: Optional[torch.LongTensor] = None,
|
| 1480 |
+
past_key_values: Optional[List[torch.FloatTensor]] = None,
|
| 1481 |
+
inputs_embeds: Optional[torch.FloatTensor] = None,
|
| 1482 |
+
labels: Optional[torch.LongTensor] = None,
|
| 1483 |
+
use_cache: Optional[bool] = None,
|
| 1484 |
+
output_attentions: Optional[bool] = None,
|
| 1485 |
+
output_hidden_states: Optional[bool] = None,
|
| 1486 |
+
return_dict: Optional[bool] = None,
|
| 1487 |
+
) -> Union[Tuple, SequenceClassifierOutputWithPast]:
|
| 1488 |
+
r"""
|
| 1489 |
+
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
|
| 1490 |
+
Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
|
| 1491 |
+
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
|
| 1492 |
+
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
|
| 1493 |
+
"""
|
| 1494 |
+
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
|
| 1495 |
+
|
| 1496 |
+
transformer_outputs = self.model(
|
| 1497 |
+
input_ids,
|
| 1498 |
+
attention_mask=attention_mask,
|
| 1499 |
+
position_ids=position_ids,
|
| 1500 |
+
past_key_values=past_key_values,
|
| 1501 |
+
inputs_embeds=inputs_embeds,
|
| 1502 |
+
use_cache=use_cache,
|
| 1503 |
+
output_attentions=output_attentions,
|
| 1504 |
+
output_hidden_states=output_hidden_states,
|
| 1505 |
+
return_dict=return_dict,
|
| 1506 |
+
)
|
| 1507 |
+
hidden_states = transformer_outputs[0]
|
| 1508 |
+
logits = self.score(hidden_states)
|
| 1509 |
+
|
| 1510 |
+
if input_ids is not None:
|
| 1511 |
+
batch_size = input_ids.shape[0]
|
| 1512 |
+
else:
|
| 1513 |
+
batch_size = inputs_embeds.shape[0]
|
| 1514 |
+
|
| 1515 |
+
if self.config.pad_token_id is None and batch_size != 1:
|
| 1516 |
+
raise ValueError("Cannot handle batch sizes > 1 if no padding token is defined.")
|
| 1517 |
+
if self.config.pad_token_id is None:
|
| 1518 |
+
sequence_lengths = -1
|
| 1519 |
+
else:
|
| 1520 |
+
if input_ids is not None:
|
| 1521 |
+
# if no pad token found, use modulo instead of reverse indexing for ONNX compatibility
|
| 1522 |
+
sequence_lengths = torch.eq(input_ids, self.config.pad_token_id).int().argmax(-1) - 1
|
| 1523 |
+
sequence_lengths = sequence_lengths % input_ids.shape[-1]
|
| 1524 |
+
sequence_lengths = sequence_lengths.to(logits.device)
|
| 1525 |
+
else:
|
| 1526 |
+
sequence_lengths = -1
|
| 1527 |
+
|
| 1528 |
+
pooled_logits = logits[torch.arange(batch_size, device=logits.device), sequence_lengths]
|
| 1529 |
+
|
| 1530 |
+
loss = None
|
| 1531 |
+
if labels is not None:
|
| 1532 |
+
labels = labels.to(logits.device)
|
| 1533 |
+
if self.config.problem_type is None:
|
| 1534 |
+
if self.num_labels == 1:
|
| 1535 |
+
self.config.problem_type = "regression"
|
| 1536 |
+
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
|
| 1537 |
+
self.config.problem_type = "single_label_classification"
|
| 1538 |
+
else:
|
| 1539 |
+
self.config.problem_type = "multi_label_classification"
|
| 1540 |
+
|
| 1541 |
+
if self.config.problem_type == "regression":
|
| 1542 |
+
loss_fct = MSELoss()
|
| 1543 |
+
if self.num_labels == 1:
|
| 1544 |
+
loss = loss_fct(pooled_logits.squeeze(), labels.squeeze())
|
| 1545 |
+
else:
|
| 1546 |
+
loss = loss_fct(pooled_logits, labels)
|
| 1547 |
+
elif self.config.problem_type == "single_label_classification":
|
| 1548 |
+
loss_fct = CrossEntropyLoss()
|
| 1549 |
+
loss = loss_fct(pooled_logits.view(-1, self.num_labels), labels.view(-1))
|
| 1550 |
+
elif self.config.problem_type == "multi_label_classification":
|
| 1551 |
+
loss_fct = BCEWithLogitsLoss()
|
| 1552 |
+
loss = loss_fct(pooled_logits, labels)
|
| 1553 |
+
if not return_dict:
|
| 1554 |
+
output = (pooled_logits,) + transformer_outputs[1:]
|
| 1555 |
+
return ((loss,) + output) if loss is not None else output
|
| 1556 |
+
|
| 1557 |
+
return SequenceClassifierOutputWithPast(
|
| 1558 |
+
loss=loss,
|
| 1559 |
+
logits=pooled_logits,
|
| 1560 |
+
past_key_values=transformer_outputs.past_key_values,
|
| 1561 |
+
hidden_states=transformer_outputs.hidden_states,
|
| 1562 |
+
attentions=transformer_outputs.attentions,
|
| 1563 |
+
)
|
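The `attn_mode` and `block_list` arguments are what this copy of `modeling_qwen2.py` adds on top of the stock Hugging Face `Qwen2ForCausalLM.forward`; both are threaded straight through to `self.model`, so individual attention heads can be switched off at inference time. Below is a minimal sketch of how a head-masking run might be driven, assuming `block_list` holds `(layer_idx, head_idx)` pairs (this fork's convention, not an upstream `transformers` API):

```python
# Hedged sketch: drive the patched forward with the two fork-specific arguments.
# The (layer_idx, head_idx) format of block_list is an assumption about this
# fork's convention; upstream transformers has no such parameter.
import torch
from transformers import AutoTokenizer

from faiss_attn.source.modeling_qwen2 import Qwen2ForCausalLM

path = "Qwen/Qwen2.5-7B-Instruct"  # any Qwen2-compatible checkpoint
tokenizer = AutoTokenizer.from_pretrained(path)
model = Qwen2ForCausalLM.from_pretrained(path, torch_dtype=torch.bfloat16)

inputs = tokenizer("Where in the haystack is the needle?", return_tensors="pt")
with torch.no_grad():
    # attn_mode selects the attention implementation ("flash" is the default);
    # block_list names the attention heads to knock out for this forward pass.
    outputs = model(**inputs, attn_mode="flash", block_list=[(0, 3), (12, 7)])
print(outputs.logits.shape)  # (batch, seq_len, vocab_size)
```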
faiss_attn/source/utils.py
ADDED
@@ -0,0 +1,17 @@
import glob

def load_context(fpath="eval/needle/PaulGrahamEssays/*.txt", ctx_len=100000):
    # Concatenate every matching essay file into one long haystack string.
    context = ""
    for file in glob.glob(fpath):
        with open(file, 'r') as f:
            context += f.read()
    # Truncate to roughly ctx_len tokens via an average chars-per-token ratio.
    LLAMA_CHAR_TO_TOKEN_RATIO = 3.66
    context = context[: int(ctx_len * LLAMA_CHAR_TO_TOKEN_RATIO)]
    return context

def insert_needle(context, needle, depth):
    # Split on sentence boundaries and splice the needle in at the given
    # relative depth (0.0 = start of the context, 1.0 = end).
    context = context.split(".")
    c_len = len(context)
    needle_place = int(depth * c_len)
    context = ".".join(context[:needle_place]) + "." + needle + ".".join(context[needle_place:])
    return context
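Together these two helpers build the haystack for the needle-in-a-haystack runs whose results appear below: `load_context` loads roughly `ctx_len` tokens of essay text, and `insert_needle` splices a target sentence in at a chosen relative depth. A quick usage sketch (the needle string and depth are illustrative, not taken from the eval configs):

```python
from faiss_attn.source.utils import load_context, insert_needle

# Roughly 5,000 tokens of filler, approximated via the chars-per-token ratio.
haystack = load_context(fpath="eval/needle/PaulGrahamEssays/*.txt", ctx_len=5000)

# Splice the needle in halfway through the sentence list (depth in [0, 1]).
needle = "The best thing to do in Beijing is take an evening walk in Chaoyang Park."
prompt_context = insert_needle(haystack, needle, depth=0.5)

assert needle in prompt_context
```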
head_score/.ipynb_checkpoints/qwen2_5_7b_de-checkpoint.json
ADDED
The diff for this file is too large to render.
See raw diff
head_score/phi_35_mini_inst_de.json
ADDED
The diff for this file is too large to render.
See raw diff
head_score/phi_35_mini_inst_en.json
ADDED
The diff for this file is too large to render.
See raw diff
head_score/phi_35_mini_inst_zh.json
ADDED
The diff for this file is too large to render.
See raw diff
head_score/qwen2_5_7b_de.json
ADDED
The diff for this file is too large to render.
See raw diff
head_score/qwen2_5_7b_en.json
ADDED
The diff for this file is too large to render.
See raw diff
head_score/qwen2_5_7b_zh.json
ADDED
The diff for this file is too large to render.
See raw diff
results/graph/phi_35_mini_inst_de/Phi-3_5-mini-instruct_len_0_depth_0_0_results.json
ADDED
@@ -0,0 +1 @@
{"model": "/scratch/spp9399/LLMS/Phi-3.5-mini-instruct", "context_length": 0, "depth_percent": 0.0, "version": 1, "needle": "Ein neuer Bericht der WMO zeigt, dass bei Treibhausgasemissionen, Oberfl\u00e4chentemperaturen, Meereserw\u00e4rmung und Versauerung erneut Rekorde gebrochen und in einigen F\u00e4llen sogar \u00fcbertroffen wurden.", "model_response": "Der Bericht zeigt, dass bei Treibhausgasemissionen, Oberfl\u00e4chentemperaturen, Meereserw\u00e4rmung und Versauerung Rekorde gebrochen und in einigen F\u00e4llen sogar \u00fcbertroffen wurden.", "score": 94.44444444444444, "test_duration_seconds": 6.530974864959717, "test_timestamp_utc": "2025-06-30 09:41:56+0000", "ni": 0}
results/graph/phi_35_mini_inst_de/Phi-3_5-mini-instruct_len_0_depth_0_1_results.json
ADDED
@@ -0,0 +1 @@
{"model": "/scratch/spp9399/LLMS/Phi-3.5-mini-instruct", "context_length": 0, "depth_percent": 0.0, "version": 1, "needle": "Das Beste, was man in Peking tun kann, ist, abends einen Spaziergang im Chaoyang-Park zu machen und eine Tasse Espresso zu trinken.", "model_response": "Am besten kann man abends einen Spaziergang im Chaoyang-Park machen und eine Tasse Espresso trinken.", "score": 85.71428571428571, "test_duration_seconds": 2.0521862506866455, "test_timestamp_utc": "2025-06-30 10:29:55+0000", "ni": 1}
results/graph/phi_35_mini_inst_de/Phi-3_5-mini-instruct_len_0_depth_0_2_results.json
ADDED
@@ -0,0 +1 @@
{"model": "/scratch/spp9399/LLMS/Phi-3.5-mini-instruct", "context_length": 0, "depth_percent": 0.0, "version": 1, "needle": "Herr Green wird von allen nicht gemocht, weil er ein gemeiner Mensch ist und au\u00dferdem weder reiten noch Auto fahren kann.", "model_response": "Herrn Green wird von allen nicht gemocht, weil er ein gemeiner Mensch ist und au\u00dferdem weder reiten noch Auto fahren kann.", "score": 100.0, "test_duration_seconds": 2.000372886657715, "test_timestamp_utc": "2025-06-30 11:05:43+0000", "ni": 2}
results/graph/phi_35_mini_inst_de/Phi-3_5-mini-instruct_len_0_depth_10000_1_results.json
ADDED
@@ -0,0 +1 @@
{"model": "/scratch/spp9399/LLMS/Phi-3.5-mini-instruct", "context_length": 0, "depth_percent": 100.0, "version": 1, "needle": "Das Beste, was man in Peking tun kann, ist, abends einen Spaziergang im Chaoyang-Park zu machen und eine Tasse Espresso zu trinken.", "model_response": "Am besten kann man abends einen Spaziergang im Chaoyang-Park machen und eine Tasse Espresso trinken.", "score": 85.71428571428571, "test_duration_seconds": 2.069295883178711, "test_timestamp_utc": "2025-06-30 10:30:22+0000", "ni": 1}
results/graph/phi_35_mini_inst_de/Phi-3_5-mini-instruct_len_0_depth_10000_2_results.json
ADDED
@@ -0,0 +1 @@
{"model": "/scratch/spp9399/LLMS/Phi-3.5-mini-instruct", "context_length": 0, "depth_percent": 100.0, "version": 1, "needle": "Herr Green wird von allen nicht gemocht, weil er ein gemeiner Mensch ist und au\u00dferdem weder reiten noch Auto fahren kann.", "model_response": "Herrn Green wird von allen nicht gemocht, weil er ein gemeiner Mensch ist und au\u00dferdem weder reiten noch Auto fahren kann.", "score": 100.0, "test_duration_seconds": 1.9967286586761475, "test_timestamp_utc": "2025-06-30 11:06:09+0000", "ni": 2}
results/graph/phi_35_mini_inst_de/Phi-3_5-mini-instruct_len_0_depth_1100_1_results.json
ADDED
@@ -0,0 +1 @@
{"model": "/scratch/spp9399/LLMS/Phi-3.5-mini-instruct", "context_length": 0, "depth_percent": 11.0, "version": 1, "needle": "Das Beste, was man in Peking tun kann, ist, abends einen Spaziergang im Chaoyang-Park zu machen und eine Tasse Espresso zu trinken.", "model_response": "Am besten kann man abends einen Spaziergang im Chaoyang-Park machen und eine Tasse Espresso trinken.", "score": 85.71428571428571, "test_duration_seconds": 2.057366132736206, "test_timestamp_utc": "2025-06-30 10:29:58+0000", "ni": 1}
results/graph/phi_35_mini_inst_de/Phi-3_5-mini-instruct_len_0_depth_1100_2_results.json
ADDED
@@ -0,0 +1 @@
{"model": "/scratch/spp9399/LLMS/Phi-3.5-mini-instruct", "context_length": 0, "depth_percent": 11.0, "version": 1, "needle": "Herr Green wird von allen nicht gemocht, weil er ein gemeiner Mensch ist und au\u00dferdem weder reiten noch Auto fahren kann.", "model_response": "Herrn Green wird von allen nicht gemocht, weil er ein gemeiner Mensch ist und au\u00dferdem weder reiten noch Auto fahren kann.", "score": 100.0, "test_duration_seconds": 2.003633499145508, "test_timestamp_utc": "2025-06-30 11:05:46+0000", "ni": 2}
results/graph/phi_35_mini_inst_de/Phi-3_5-mini-instruct_len_0_depth_2200_0_results.json
ADDED
@@ -0,0 +1 @@
{"model": "/scratch/spp9399/LLMS/Phi-3.5-mini-instruct", "context_length": 0, "depth_percent": 22.0, "version": 1, "needle": "Ein neuer Bericht der WMO zeigt, dass bei Treibhausgasemissionen, Oberfl\u00e4chentemperaturen, Meereserw\u00e4rmung und Versauerung erneut Rekorde gebrochen und in einigen F\u00e4llen sogar \u00fcbertroffen wurden.", "model_response": "Der Bericht zeigt, dass bei Treibhausgasemissionen, Oberfl\u00e4chentemperaturen, Meereserw\u00e4rmung und Versauerung Rekorde gebrochen und in einigen F\u00e4llen sogar \u00fcbertroffen wurden.", "score": 94.44444444444444, "test_duration_seconds": 3.2842941284179688, "test_timestamp_utc": "2025-06-30 09:42:04+0000", "ni": 0}
results/graph/phi_35_mini_inst_de/Phi-3_5-mini-instruct_len_0_depth_2200_1_results.json
ADDED
@@ -0,0 +1 @@
{"model": "/scratch/spp9399/LLMS/Phi-3.5-mini-instruct", "context_length": 0, "depth_percent": 22.0, "version": 1, "needle": "Das Beste, was man in Peking tun kann, ist, abends einen Spaziergang im Chaoyang-Park zu machen und eine Tasse Espresso zu trinken.", "model_response": "Am besten kann man abends einen Spaziergang im Chaoyang-Park machen und eine Tasse Espresso trinken.", "score": 85.71428571428571, "test_duration_seconds": 2.0797903537750244, "test_timestamp_utc": "2025-06-30 10:30:01+0000", "ni": 1}
results/graph/phi_35_mini_inst_de/Phi-3_5-mini-instruct_len_0_depth_2200_2_results.json
ADDED
@@ -0,0 +1 @@
{"model": "/scratch/spp9399/LLMS/Phi-3.5-mini-instruct", "context_length": 0, "depth_percent": 22.0, "version": 1, "needle": "Herr Green wird von allen nicht gemocht, weil er ein gemeiner Mensch ist und au\u00dferdem weder reiten noch Auto fahren kann.", "model_response": "Herrn Green wird von allen nicht gemocht, weil er ein gemeiner Mensch ist und au\u00dferdem weder reiten noch Auto fahren kann.", "score": 100.0, "test_duration_seconds": 1.9966988563537598, "test_timestamp_utc": "2025-06-30 11:05:49+0000", "ni": 2}
results/graph/phi_35_mini_inst_de/Phi-3_5-mini-instruct_len_0_depth_3300_0_results.json
ADDED
@@ -0,0 +1 @@
{"model": "/scratch/spp9399/LLMS/Phi-3.5-mini-instruct", "context_length": 0, "depth_percent": 33.0, "version": 1, "needle": "Ein neuer Bericht der WMO zeigt, dass bei Treibhausgasemissionen, Oberfl\u00e4chentemperaturen, Meereserw\u00e4rmung und Versauerung erneut Rekorde gebrochen und in einigen F\u00e4llen sogar \u00fcbertroffen wurden.", "model_response": "Der Bericht zeigt, dass bei Treibhausgasemissionen, Oberfl\u00e4chentemperaturen, Meereserw\u00e4rmung und Versauerung Rekorde gebrochen und in einigen F\u00e4llen sogar \u00fcbertroffen wurden.", "score": 94.44444444444444, "test_duration_seconds": 3.3200223445892334, "test_timestamp_utc": "2025-06-30 09:42:08+0000", "ni": 0}
results/graph/phi_35_mini_inst_de/Phi-3_5-mini-instruct_len_0_depth_3300_1_results.json
ADDED
@@ -0,0 +1 @@
{"model": "/scratch/spp9399/LLMS/Phi-3.5-mini-instruct", "context_length": 0, "depth_percent": 33.0, "version": 1, "needle": "Das Beste, was man in Peking tun kann, ist, abends einen Spaziergang im Chaoyang-Park zu machen und eine Tasse Espresso zu trinken.", "model_response": "Am besten kann man abends einen Spaziergang im Chaoyang-Park machen und eine Tasse Espresso trinken.", "score": 85.71428571428571, "test_duration_seconds": 2.0722362995147705, "test_timestamp_utc": "2025-06-30 10:30:04+0000", "ni": 1}
results/graph/phi_35_mini_inst_de/Phi-3_5-mini-instruct_len_0_depth_4400_1_results.json
ADDED
@@ -0,0 +1 @@
{"model": "/scratch/spp9399/LLMS/Phi-3.5-mini-instruct", "context_length": 0, "depth_percent": 44.0, "version": 1, "needle": "Das Beste, was man in Peking tun kann, ist, abends einen Spaziergang im Chaoyang-Park zu machen und eine Tasse Espresso zu trinken.", "model_response": "Am besten kann man abends einen Spaziergang im Chaoyang-Park machen und eine Tasse Espresso trinken.", "score": 85.71428571428571, "test_duration_seconds": 2.0712997913360596, "test_timestamp_utc": "2025-06-30 10:30:07+0000", "ni": 1}
results/graph/phi_35_mini_inst_de/Phi-3_5-mini-instruct_len_0_depth_4400_2_results.json
ADDED
@@ -0,0 +1 @@
{"model": "/scratch/spp9399/LLMS/Phi-3.5-mini-instruct", "context_length": 0, "depth_percent": 44.0, "version": 1, "needle": "Herr Green wird von allen nicht gemocht, weil er ein gemeiner Mensch ist und au\u00dferdem weder reiten noch Auto fahren kann.", "model_response": "Herrn Green wird von allen nicht gemocht, weil er ein gemeiner Mensch ist und au\u00dferdem weder reiten noch Auto fahren kann.", "score": 100.0, "test_duration_seconds": 1.9991486072540283, "test_timestamp_utc": "2025-06-30 11:05:55+0000", "ni": 2}
results/graph/phi_35_mini_inst_de/Phi-3_5-mini-instruct_len_0_depth_5600_0_results.json
ADDED
@@ -0,0 +1 @@
{"model": "/scratch/spp9399/LLMS/Phi-3.5-mini-instruct", "context_length": 0, "depth_percent": 56.0, "version": 1, "needle": "Ein neuer Bericht der WMO zeigt, dass bei Treibhausgasemissionen, Oberfl\u00e4chentemperaturen, Meereserw\u00e4rmung und Versauerung erneut Rekorde gebrochen und in einigen F\u00e4llen sogar \u00fcbertroffen wurden.", "model_response": "Der Bericht zeigt, dass bei Treibhausgasemissionen, Oberfl\u00e4chentemperaturen, Meereserw\u00e4rmung und Versauerung Rekorde gebrochen und in einigen F\u00e4llen sogar \u00fcbertroffen wurden.", "score": 94.44444444444444, "test_duration_seconds": 3.3211605548858643, "test_timestamp_utc": "2025-06-30 09:42:17+0000", "ni": 0}
results/graph/phi_35_mini_inst_de/Phi-3_5-mini-instruct_len_0_depth_6700_0_results.json
ADDED
@@ -0,0 +1 @@
{"model": "/scratch/spp9399/LLMS/Phi-3.5-mini-instruct", "context_length": 0, "depth_percent": 67.0, "version": 1, "needle": "Ein neuer Bericht der WMO zeigt, dass bei Treibhausgasemissionen, Oberfl\u00e4chentemperaturen, Meereserw\u00e4rmung und Versauerung erneut Rekorde gebrochen und in einigen F\u00e4llen sogar \u00fcbertroffen wurden.", "model_response": "Der Bericht zeigt, dass bei Treibhausgasemissionen, Oberfl\u00e4chentemperaturen, Meereserw\u00e4rmung und Versauerung Rekorde gebrochen und in einigen F\u00e4llen sogar \u00fcbertroffen wurden.", "score": 94.44444444444444, "test_duration_seconds": 3.3110311031341553, "test_timestamp_utc": "2025-06-30 09:42:21+0000", "ni": 0}
results/graph/phi_35_mini_inst_de/Phi-3_5-mini-instruct_len_0_depth_6700_1_results.json
ADDED
@@ -0,0 +1 @@
{"model": "/scratch/spp9399/LLMS/Phi-3.5-mini-instruct", "context_length": 0, "depth_percent": 67.0, "version": 1, "needle": "Das Beste, was man in Peking tun kann, ist, abends einen Spaziergang im Chaoyang-Park zu machen und eine Tasse Espresso zu trinken.", "model_response": "Am besten kann man abends einen Spaziergang im Chaoyang-Park machen und eine Tasse Espresso trinken.", "score": 85.71428571428571, "test_duration_seconds": 2.0753467082977295, "test_timestamp_utc": "2025-06-30 10:30:13+0000", "ni": 1}
results/graph/phi_35_mini_inst_de/Phi-3_5-mini-instruct_len_0_depth_7800_0_results.json
ADDED
@@ -0,0 +1 @@
{"model": "/scratch/spp9399/LLMS/Phi-3.5-mini-instruct", "context_length": 0, "depth_percent": 78.0, "version": 1, "needle": "Ein neuer Bericht der WMO zeigt, dass bei Treibhausgasemissionen, Oberfl\u00e4chentemperaturen, Meereserw\u00e4rmung und Versauerung erneut Rekorde gebrochen und in einigen F\u00e4llen sogar \u00fcbertroffen wurden.", "model_response": "Der Bericht zeigt, dass bei Treibhausgasemissionen, Oberfl\u00e4chentemperaturen, Meereserw\u00e4rmung und Versauerung Rekorde gebrochen und in einigen F\u00e4llen sogar \u00fcbertroffen wurden.", "score": 94.44444444444444, "test_duration_seconds": 3.3182666301727295, "test_timestamp_utc": "2025-06-30 09:42:25+0000", "ni": 0}
results/graph/phi_35_mini_inst_de/Phi-3_5-mini-instruct_len_0_depth_7800_1_results.json
ADDED
@@ -0,0 +1 @@
{"model": "/scratch/spp9399/LLMS/Phi-3.5-mini-instruct", "context_length": 0, "depth_percent": 78.0, "version": 1, "needle": "Das Beste, was man in Peking tun kann, ist, abends einen Spaziergang im Chaoyang-Park zu machen und eine Tasse Espresso zu trinken.", "model_response": "Am besten kann man abends einen Spaziergang im Chaoyang-Park machen und eine Tasse Espresso trinken.", "score": 85.71428571428571, "test_duration_seconds": 2.0798654556274414, "test_timestamp_utc": "2025-06-30 10:30:16+0000", "ni": 1}
results/graph/phi_35_mini_inst_de/Phi-3_5-mini-instruct_len_0_depth_7800_2_results.json
ADDED
@@ -0,0 +1 @@
{"model": "/scratch/spp9399/LLMS/Phi-3.5-mini-instruct", "context_length": 0, "depth_percent": 78.0, "version": 1, "needle": "Herr Green wird von allen nicht gemocht, weil er ein gemeiner Mensch ist und au\u00dferdem weder reiten noch Auto fahren kann.", "model_response": "Herrn Green wird von allen nicht gemocht, weil er ein gemeiner Mensch ist und au\u00dferdem weder reiten noch Auto fahren kann.", "score": 100.0, "test_duration_seconds": 1.9971694946289062, "test_timestamp_utc": "2025-06-30 11:06:03+0000", "ni": 2}
results/graph/phi_35_mini_inst_de/Phi-3_5-mini-instruct_len_0_depth_8900_0_results.json
ADDED
@@ -0,0 +1 @@
{"model": "/scratch/spp9399/LLMS/Phi-3.5-mini-instruct", "context_length": 0, "depth_percent": 89.0, "version": 1, "needle": "Ein neuer Bericht der WMO zeigt, dass bei Treibhausgasemissionen, Oberfl\u00e4chentemperaturen, Meereserw\u00e4rmung und Versauerung erneut Rekorde gebrochen und in einigen F\u00e4llen sogar \u00fcbertroffen wurden.", "model_response": "Der Bericht zeigt, dass bei Treibhausgasemissionen, Oberfl\u00e4chentemperaturen, Meereserw\u00e4rmung und Versauerung Rekorde gebrochen und in einigen F\u00e4llen sogar \u00fcbertroffen wurden.", "score": 94.44444444444444, "test_duration_seconds": 3.316357135772705, "test_timestamp_utc": "2025-06-30 09:42:29+0000", "ni": 0}
results/graph/phi_35_mini_inst_de/Phi-3_5-mini-instruct_len_0_depth_8900_1_results.json
ADDED
@@ -0,0 +1 @@
{"model": "/scratch/spp9399/LLMS/Phi-3.5-mini-instruct", "context_length": 0, "depth_percent": 89.0, "version": 1, "needle": "Das Beste, was man in Peking tun kann, ist, abends einen Spaziergang im Chaoyang-Park zu machen und eine Tasse Espresso zu trinken.", "model_response": "Am besten kann man abends einen Spaziergang im Chaoyang-Park machen und eine Tasse Espresso trinken.", "score": 85.71428571428571, "test_duration_seconds": 2.0711023807525635, "test_timestamp_utc": "2025-06-30 10:30:19+0000", "ni": 1}
results/graph/phi_35_mini_inst_de/Phi-3_5-mini-instruct_len_0_depth_8900_2_results.json
ADDED
@@ -0,0 +1 @@
{"model": "/scratch/spp9399/LLMS/Phi-3.5-mini-instruct", "context_length": 0, "depth_percent": 89.0, "version": 1, "needle": "Herr Green wird von allen nicht gemocht, weil er ein gemeiner Mensch ist und au\u00dferdem weder reiten noch Auto fahren kann.", "model_response": "Herrn Green wird von allen nicht gemocht, weil er ein gemeiner Mensch ist und au\u00dferdem weder reiten noch Auto fahren kann.", "score": 100.0, "test_duration_seconds": 1.9935648441314697, "test_timestamp_utc": "2025-06-30 11:06:06+0000", "ni": 2}
results/graph/phi_35_mini_inst_de/Phi-3_5-mini-instruct_len_10526_depth_0_0_results.json
ADDED
@@ -0,0 +1 @@
{"model": "/scratch/spp9399/LLMS/Phi-3.5-mini-instruct", "context_length": 10526, "depth_percent": 0.0, "version": 1, "needle": "Ein neuer Bericht der WMO zeigt, dass bei Treibhausgasemissionen, Oberfl\u00e4chentemperaturen, Meereserw\u00e4rmung und Versauerung erneut Rekorde gebrochen und in einigen F\u00e4llen sogar \u00fcbertroffen wurden.", "model_response": "Der neue Bericht der WMO zeigt, dass bei Treibhausgasemissionen, Oberfl\u00e4chentemperaturen, Meereserw\u00e4rmung und Versauerung Rekorde gebrochen und in einigen F\u00e4llen sogar \u00fcbertroffen wurden.", "score": 94.44444444444444, "test_duration_seconds": 6.4423508644104, "test_timestamp_utc": "2025-06-30 09:45:52+0000", "ni": 0}
results/graph/phi_35_mini_inst_de/Phi-3_5-mini-instruct_len_10526_depth_0_2_results.json
ADDED
@@ -0,0 +1 @@
{"model": "/scratch/spp9399/LLMS/Phi-3.5-mini-instruct", "context_length": 10526, "depth_percent": 0.0, "version": 1, "needle": "Herr Green wird von allen nicht gemocht, weil er ein gemeiner Mensch ist und au\u00dferdem weder reiten noch Auto fahren kann.", "model_response": "Herrn Green wird von allen nicht gemocht, weil er als gemeiner Mensch beschrieben wird und au\u00dferdem nicht in der Lage ist, zu reiten oder ein Auto zu fahren. Diese Eigenschaften k\u00f6nnten als unangenehm oder unbehaglich f\u00fcr die anderen Personen wahrgenommen werden, weshalb er in der Gruppe un", "score": 80.0, "test_duration_seconds": 7.826849699020386, "test_timestamp_utc": "2025-06-30 11:09:01+0000", "ni": 2}
results/graph/phi_35_mini_inst_de/Phi-3_5-mini-instruct_len_10526_depth_10000_0_results.json
ADDED
@@ -0,0 +1 @@
{"model": "/scratch/spp9399/LLMS/Phi-3.5-mini-instruct", "context_length": 10526, "depth_percent": 100.0, "version": 1, "needle": "Ein neuer Bericht der WMO zeigt, dass bei Treibhausgasemissionen, Oberfl\u00e4chentemperaturen, Meereserw\u00e4rmung und Versauerung erneut Rekorde gebrochen und in einigen F\u00e4llen sogar \u00fcbertroffen wurden.", "model_response": "Der Bericht der Weltmeteorologischen Organisation (WMO) zeigt, dass bei Treibhausgasemissionen, Oberfl\u00e4chentemperaturen, Meereserw\u00e4rmung und Versauerung erneut Rekorde gebrochen und in einigen F\u00e4llen sogar \u00fcbertroffen wurden. Der Begriff \u201eKunststoff\u201c wird", "score": 100.0, "test_duration_seconds": 7.850459337234497, "test_timestamp_utc": "2025-06-30 09:47:02+0000", "ni": 0}
results/graph/phi_35_mini_inst_de/Phi-3_5-mini-instruct_len_10526_depth_1100_0_results.json
ADDED
@@ -0,0 +1 @@
{"model": "/scratch/spp9399/LLMS/Phi-3.5-mini-instruct", "context_length": 10526, "depth_percent": 11.0, "version": 1, "needle": "Ein neuer Bericht der WMO zeigt, dass bei Treibhausgasemissionen, Oberfl\u00e4chentemperaturen, Meereserw\u00e4rmung und Versauerung erneut Rekorde gebrochen und in einigen F\u00e4llen sogar \u00fcbertroffen wurden.", "model_response": "Ein neuer Bericht der WMO zeigt, dass bei Treibhausgasemissionen, Oberfl\u00e4chentemperaturen, Meereserw\u00e4rmung und Versauerung erneut Rekorde gebrochen und in einigen F\u00e4llen sogar \u00fcbertroffen wurden.", "score": 100.0, "test_duration_seconds": 6.551539421081543, "test_timestamp_utc": "2025-06-30 09:45:59+0000", "ni": 0}
results/graph/phi_35_mini_inst_de/Phi-3_5-mini-instruct_len_10526_depth_1100_1_results.json
ADDED
@@ -0,0 +1 @@
{"model": "/scratch/spp9399/LLMS/Phi-3.5-mini-instruct", "context_length": 10526, "depth_percent": 11.0, "version": 1, "needle": "Das Beste, was man in Peking tun kann, ist, abends einen Spaziergang im Chaoyang-Park zu machen und eine Tasse Espresso zu trinken.", "model_response": "Abends einen Spaziergang im Chaoyang-Park machen und eine Tasse Espresso trinken.", "score": 85.71428571428571, "test_duration_seconds": 3.7770957946777344, "test_timestamp_utc": "2025-06-30 10:32:59+0000", "ni": 1}
results/graph/phi_35_mini_inst_de/Phi-3_5-mini-instruct_len_10526_depth_1100_2_results.json
ADDED
@@ -0,0 +1 @@
{"model": "/scratch/spp9399/LLMS/Phi-3.5-mini-instruct", "context_length": 10526, "depth_percent": 11.0, "version": 1, "needle": "Herr Green wird von allen nicht gemocht, weil er ein gemeiner Mensch ist und au\u00dferdem weder reiten noch Auto fahren kann.", "model_response": "The text does not provide a specific reason why nobody likes Mr. Green. It only states that \"Herr Green is not liked by all.\" Without further context or details, it's impossible to determine why this is the case.", "score": 0.0, "test_duration_seconds": 5.5665223598480225, "test_timestamp_utc": "2025-06-30 11:09:08+0000", "ni": 2}
results/graph/phi_35_mini_inst_de/Phi-3_5-mini-instruct_len_10526_depth_2200_0_results.json
ADDED
@@ -0,0 +1 @@
{"model": "/scratch/spp9399/LLMS/Phi-3.5-mini-instruct", "context_length": 10526, "depth_percent": 22.0, "version": 1, "needle": "Ein neuer Bericht der WMO zeigt, dass bei Treibhausgasemissionen, Oberfl\u00e4chentemperaturen, Meereserw\u00e4rmung und Versauerung erneut Rekorde gebrochen und in einigen F\u00e4llen sogar \u00fcbertroffen wurden.", "model_response": "Rekorde bei Treibhausgasemissionen, Oberfl\u00e4chentemperaturen, Meereserw\u00e4rmung und Versauerung", "score": 44.44444444444444, "test_duration_seconds": 3.9250869750976562, "test_timestamp_utc": "2025-06-30 09:46:04+0000", "ni": 0}
results/graph/phi_35_mini_inst_de/Phi-3_5-mini-instruct_len_10526_depth_3300_0_results.json
ADDED
@@ -0,0 +1 @@
{"model": "/scratch/spp9399/LLMS/Phi-3.5-mini-instruct", "context_length": 10526, "depth_percent": 33.0, "version": 1, "needle": "Ein neuer Bericht der WMO zeigt, dass bei Treibhausgasemissionen, Oberfl\u00e4chentemperaturen, Meereserw\u00e4rmung und Versauerung erneut Rekorde gebrochen und in einigen F\u00e4llen sogar \u00fcbertroffen wurden.", "model_response": "Der Bericht der Weltorganisation f\u00fcr Meteorologie (WMO) zeigt, dass bei Treibhausgasemissionen, Oberfl\u00e4chentemperaturen, Meereserw\u00e4rmung und Versauerung erneut Rekorde gebrochen und in einigen F\u00e4llen sogar \u00fcbertroffen wurden.", "score": 100.0, "test_duration_seconds": 7.012622356414795, "test_timestamp_utc": "2025-06-30 09:46:12+0000", "ni": 0}
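Each file under `results/graph/` holds one run as a single JSON object keyed by, among other fields, `context_length`, `depth_percent`, and `score`. A small sketch of rolling those per-run files up into the per-cell means behind a retrieval heatmap (the aggregation itself is illustrative, not a script shipped in this commit):

```python
import glob
import json
from collections import defaultdict

# Group run scores by (context length, needle depth) cell.
scores = defaultdict(list)
for path in glob.glob("results/graph/phi_35_mini_inst_de/*_results.json"):
    with open(path) as f:
        run = json.load(f)
    scores[(run["context_length"], run["depth_percent"])].append(run["score"])

# Mean score per cell, in context-length order.
for (ctx_len, depth), vals in sorted(scores.items()):
    print(f"len={ctx_len:>6}  depth={depth:>5.1f}%  mean={sum(vals) / len(vals):5.1f}")
```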