Commit b30c1d8 · Parent: 7fa4822

This view is limited to 50 files because the commit contains too many changes.
- .gitattributes +3 -35
- LICENSE +3 -0
- README.md +256 -3
- cog.yaml +3 -0
- config.json +3 -0
- dataset/Objaverse/8192_npy.zip +3 -0
- dataset/Objaverse/PointLLM_brief_description_val_200_GT.json +3 -0
- dataset/Objaverse/PointLLM_complex_50k_brief_40k_all_90k.json +3 -0
- dataset/T3D/stage_1/brief_1M_caption.json +3 -0
- dataset/T3D/stage_2/stage_2_data_210k.json +3 -0
- dataset/T3D/stage_2/stage_2_data_5M.json +3 -0
- dataset/modelnet40_data/modelnet40_test_8192pts_fps.dat +3 -0
- envInstall.sh +3 -0
- lava-vicuna_2024_4_Phi-3-mini-4k-instruct/.gitattributes +3 -0
- lava-vicuna_2024_4_Phi-3-mini-4k-instruct/CODE_OF_CONDUCT.md +9 -0
- lava-vicuna_2024_4_Phi-3-mini-4k-instruct/LICENSE +3 -0
- lava-vicuna_2024_4_Phi-3-mini-4k-instruct/NOTICE.md +38 -0
- lava-vicuna_2024_4_Phi-3-mini-4k-instruct/README.md +256 -0
- lava-vicuna_2024_4_Phi-3-mini-4k-instruct/SECURITY.md +41 -0
- lava-vicuna_2024_4_Phi-3-mini-4k-instruct/added_tokens.json +3 -0
- lava-vicuna_2024_4_Phi-3-mini-4k-instruct/config.json +3 -0
- lava-vicuna_2024_4_Phi-3-mini-4k-instruct/configuration_phi3.py +213 -0
- lava-vicuna_2024_4_Phi-3-mini-4k-instruct/generation_config.json +3 -0
- lava-vicuna_2024_4_Phi-3-mini-4k-instruct/model-00001-of-00002.safetensors +3 -0
- lava-vicuna_2024_4_Phi-3-mini-4k-instruct/model-00002-of-00002.safetensors +3 -0
- lava-vicuna_2024_4_Phi-3-mini-4k-instruct/model.safetensors.index.json +3 -0
- lava-vicuna_2024_4_Phi-3-mini-4k-instruct/modeling_phi3.py +1606 -0
- lava-vicuna_2024_4_Phi-3-mini-4k-instruct/sample_finetune.py +217 -0
- lava-vicuna_2024_4_Phi-3-mini-4k-instruct/special_tokens_map.json +3 -0
- lava-vicuna_2024_4_Phi-3-mini-4k-instruct/tokenizer.json +3 -0
- lava-vicuna_2024_4_Phi-3-mini-4k-instruct/tokenizer.model +3 -0
- lava-vicuna_2024_4_Phi-3-mini-4k-instruct/tokenizer_config.json +3 -0
- llava/__init__.py +1 -0
- llava/__pycache__/__init__.cpython-310.pyc +3 -0
- llava/__pycache__/constants.cpython-310.pyc +3 -0
- llava/__pycache__/conversation.cpython-310.pyc +3 -0
- llava/__pycache__/mm_utils.cpython-310.pyc +3 -0
- llava/bpe_simple_vocab_16e6.txt.gz +3 -0
- llava/constants.py +14 -0
- llava/conversation.py +422 -0
- llava/eval/eval_gpt_review.py +113 -0
- llava/eval/eval_gpt_review_bench.py +121 -0
- llava/eval/eval_gpt_review_visual.py +118 -0
- llava/eval/eval_pope.py +81 -0
- llava/eval/eval_science_qa.py +114 -0
- llava/eval/eval_science_qa_gpt4.py +104 -0
- llava/eval/eval_science_qa_gpt4_requery.py +149 -0
- llava/eval/eval_textvqa.py +65 -0
- llava/eval/generate_webpage_data_from_table.py +111 -0
- llava/eval/m4c_evaluator.py +334 -0
.gitattributes
CHANGED
@@ -1,35 +1,3 @@
-*.7z filter=lfs diff=lfs merge=lfs -text
-*.arrow filter=lfs diff=lfs merge=lfs -text
-*.bin filter=lfs diff=lfs merge=lfs -text
-*.bz2 filter=lfs diff=lfs merge=lfs -text
-*.ckpt filter=lfs diff=lfs merge=lfs -text
-*.ftz filter=lfs diff=lfs merge=lfs -text
-*.gz filter=lfs diff=lfs merge=lfs -text
-*.h5 filter=lfs diff=lfs merge=lfs -text
-*.joblib filter=lfs diff=lfs merge=lfs -text
-*.lfs.* filter=lfs diff=lfs merge=lfs -text
-*.mlmodel filter=lfs diff=lfs merge=lfs -text
-*.model filter=lfs diff=lfs merge=lfs -text
-*.msgpack filter=lfs diff=lfs merge=lfs -text
-*.npy filter=lfs diff=lfs merge=lfs -text
-*.npz filter=lfs diff=lfs merge=lfs -text
-*.onnx filter=lfs diff=lfs merge=lfs -text
-*.ot filter=lfs diff=lfs merge=lfs -text
-*.parquet filter=lfs diff=lfs merge=lfs -text
-*.pb filter=lfs diff=lfs merge=lfs -text
-*.pickle filter=lfs diff=lfs merge=lfs -text
-*.pkl filter=lfs diff=lfs merge=lfs -text
-*.pt filter=lfs diff=lfs merge=lfs -text
-*.pth filter=lfs diff=lfs merge=lfs -text
-*.rar filter=lfs diff=lfs merge=lfs -text
-*.safetensors filter=lfs diff=lfs merge=lfs -text
-saved_model/**/* filter=lfs diff=lfs merge=lfs -text
-*.tar.* filter=lfs diff=lfs merge=lfs -text
-*.tar filter=lfs diff=lfs merge=lfs -text
-*.tflite filter=lfs diff=lfs merge=lfs -text
-*.tgz filter=lfs diff=lfs merge=lfs -text
-*.wasm filter=lfs diff=lfs merge=lfs -text
-*.xz filter=lfs diff=lfs merge=lfs -text
-*.zip filter=lfs diff=lfs merge=lfs -text
-*.zst filter=lfs diff=lfs merge=lfs -text
-*tfevents* filter=lfs diff=lfs merge=lfs -text
+version https://git-lfs.github.com/spec/v1
+oid sha256:8628eb71bc7c80d0709a04c69c25570f006ef9ca0dbb27bf2b1a3be74605edda
+size 1619
LICENSE
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c71d239df91726fc519c6eb72d318ec65820627232b2f796219e87dcf35d0ab4
+size 11357
README.md
CHANGED
@@ -1,3 +1,256 @@

<h1 align="center"><strong>More Text, Less Point: Towards 3D Data-Efficient Point-Language Understanding</strong></h1>
<p align="center">
Yuan Tang*  Xu Han*  Xianzhi Li<sup>✝</sup>  Qiao Yu  Jinfeng Xu  Yixue Hao  Long Hu  Min Chen
<br>
Huazhong University of Science and Technology · South China University of Technology
</p>

<p align="center">
  <a><strong>AAAI 2025</strong></a>
  <a href='https://arxiv.org/pdf/2408.15966'><img src='https://img.shields.io/badge/Paper-Arxiv-red'></a>
  <a href='https://huggingface.co/YuanTang96/GreenPLM'><img src='https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-Model-blue'></a>
</p>


<!-- contents with emoji -->
## 📋 Contents

- [🔍 Overview](#-overview)
- [📦 Training and Evaluation](#-training-and-evaluation)
- [🔗 Citation](#-citation)
- [📄 License](#-license)
- [📚 Related Work](#-related-work)
- [👏 Acknowledgements](#-acknowledgements)

## 🔍 Overview




- We introduce a new task of 3D data-efficient point-language understanding, aiming to enable LLMs to achieve robust 3D understanding with minimal 3D data.
- We propose GreenPLM to tackle this 3D data-limited task from a novel perspective: enhancing point-LLM alignment with more free-text data.
- We introduce a 6M-sample T3D dataset, design a 3-stage training strategy, and present a 0M-Pooling module for token pooling.
- We introduce the Accuracy-to-3D-Data Ratio (A3DR) to measure the efficiency of 3D data usage, and establish an evaluation benchmark based on open-source LLMs.
- GreenPLM outperforms previous models while using only 12% of their 3D data, and even surpasses GPT4Point (660K 3D samples) using text alone, demonstrating superior 3D data efficiency (a loose numerical illustration follows below).
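As rough intuition for the A3DR idea (the exact metric is defined in the paper; the snippet below is only our illustration, not the paper's formula), 3D data efficiency can be read as accuracy earned per unit of 3D training data:

```python
# Illustrative only -- NOT the paper's A3DR definition.
# Reads "efficiency" as accuracy earned per 3D training sample.
def accuracy_per_3d_sample(accuracy: float, num_3d_samples: int) -> float:
    return accuracy / max(num_3d_samples, 1)

# Hypothetical accuracies; the sample counts echo the overview
# (~12% of 660K vs. GPT4Point's 660K 3D samples).
print(accuracy_per_3d_sample(accuracy=55.0, num_3d_samples=79_200))
print(accuracy_per_3d_sample(accuracy=50.0, num_3d_samples=660_000))
```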

## 📦 Training and Evaluation

### Download Project
The **code, weights, and dataset** for this project have been uploaded to [Hugging Face](https://huggingface.co/YuanTang96/GreenPLM). Download them once to get started.

### Install Environment
Enter the project directory and run:
```bash
conda create -n greenplm python=3.10 -y
conda activate greenplm
bash envInstall.sh
```

### Project Directory Introduction
- `./greenplm/release` contains the paper's weights, training scripts, and testing scripts.
- `./pretrained_weight` stores the pre-trained weights required for the training and testing phases of the project.
- `./lava-vicuna_2024_4_Phi-3-mini-4k-instruct` is the weight directory for Phi-3.
- `./dataset/T3D` is the 6M-sample dataset proposed in this project.
  - `./dataset/T3D/stage_1/brief_1M_caption.json` is the dataset for Stage I.
  - `./dataset/T3D/stage_2/stage_2_data_210k.json` is the dataset for Stage II.

### Dataset Preparation

`./dataset/Objaverse/8192_npy.zip` contains the point cloud data from Objaverse required by this project. To unzip the dataset:

```bash
unzip ./dataset/Objaverse/8192_npy.zip -d ./dataset/Objaverse/
```
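After extraction, each object's point cloud should be a `.npy` file; the archive name suggests 8,192 points per object. A minimal inspection sketch (the folder layout and filename are assumptions — substitute a real file from the extracted directory; the per-point channel count may be 3 (xyz) or 6 (xyz + rgb)):

```python
import numpy as np

# Hypothetical path: substitute a real object ID from the extracted folder.
points = np.load("./dataset/Objaverse/8192_npy/OBJECT_ID.npy")
print(points.shape, points.dtype)  # expected roughly (8192, C) with C = 3 or 6
```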

### Inference

#### Paper Weights
##### GreenPLM-0
The model trained only on text data, i.e., Stage I & Stage II.

```bash
bash ./release/paper/scripts/test/release_stage_2.sh
```
The output JSON results are saved in `./release/paper/result_json/stage_2`.
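Before handing a result file to the evaluators below, it can be sanity-checked like this (the glob pattern and the assumption that the file holds a JSON collection are ours — adjust to whatever the script actually wrote):

```python
import glob
import json

# Hypothetical: pick whichever result file the test script produced.
path = sorted(glob.glob("./release/paper/result_json/stage_2/*.json"))[0]
with open(path) as f:
    results = json.load(f)
print(path, type(results), len(results))
```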

##### GreenPLM
The model trained on a small amount of 3D data, i.e., Stage I & Stage II & Stage III.

```bash
bash ./release/paper/scripts/test/release_stage_3.sh
```
The output JSON results are saved in `./release/paper/result_json/stage_3`.


#### Weights Using the Full T3D Dataset
<details>
<summary>We also provide weights trained on the entire T3D dataset, i.e., using 5M samples from T3D in Stage II instead of the 210k used in the paper. (click to expand)</summary>

##### GreenPLM-0
The model trained only on text data, i.e., Stage I & Stage II.

```bash
bash ./release/5M_data_seting/scripts/test/release_5M_stage_2.sh
```
The output JSON results are saved in `./release/5M_data_seting/result_json/stage_2`.

##### GreenPLM
The model trained on a small amount of 3D data, i.e., Stage I & Stage II & Stage III.

```bash
bash ./release/5M_data_seting/scripts/test/release_5M_stage_3.sh
```
The output JSON results are saved in `./release/5M_data_seting/result_json/stage_3`.

</details>

### Evaluation
#### Using an LLM

- You can get a **DASHSCOPE_API_KEY** from [Aliyun](https://bailian.console.aliyun.com/?apiKey=1#/api-key). The evaluation costs about 9 CNY (~1.3 USD).
- If you have enough GPU resources, you can instead host your own Qwen2-72B-Instruct service, following the [Qwen2 repository](https://github.com/QwenLM/Qwen2?tab=readme-ov-file), and evaluate the results for free!

1. Evaluate open-vocabulary classification on Objaverse (the two commands differ only in the prompt index; a combined wrapper follows after the second block):
```bash
export PYTHONPATH=$PWD
export DASHSCOPE_API_KEY=sk-xxx
python ./pointllm/eval/evaluator_opensource_llm_QwenAPI.py \
    --results_path /path/to/evaluation/PointLLM_brief_description_val_200_GT_Objaverse_classification_prompt0.json \
    --eval_type open-free-form-classification \
    --model_type qwen2-72b-instruct \
    --parallel --num_workers 4
```

```bash
export PYTHONPATH=$PWD
export DASHSCOPE_API_KEY=sk-xxx
python ./pointllm/eval/evaluator_opensource_llm_QwenAPI.py \
    --results_path /path/to/evaluation/PointLLM_brief_description_val_200_GT_Objaverse_classification_prompt1.json \
    --eval_type open-free-form-classification \
    --model_type qwen2-72b-instruct \
    --parallel --num_workers 4
```
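A small convenience wrapper (ours, not part of the repo) that runs both prompt variants in one go, reusing exactly the documented flags:

```python
# Convenience wrapper (not part of the repo): run the evaluator for both Objaverse prompts.
import os
import subprocess

env = {**os.environ, "PYTHONPATH": os.getcwd()}  # DASHSCOPE_API_KEY must already be exported
for p in (0, 1):
    subprocess.run(
        [
            "python", "./pointllm/eval/evaluator_opensource_llm_QwenAPI.py",
            "--results_path",
            f"/path/to/evaluation/PointLLM_brief_description_val_200_GT_Objaverse_classification_prompt{p}.json",
            "--eval_type", "open-free-form-classification",
            "--model_type", "qwen2-72b-instruct",
            "--parallel", "--num_workers", "4",
        ],
        check=True,
        env=env,
    )
```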

2. Evaluate close-set zero-shot classification on ModelNet40:

```bash
export PYTHONPATH=$PWD
export DASHSCOPE_API_KEY=sk-xxx
python ./pointllm/eval/evaluator_opensource_llm_QwenAPI.py \
    --results_path /path/to/evaluation/ModelNet_classification_prompt0.json \
    --eval_type modelnet-close-set-classification \
    --model_type qwen2-72b-instruct \
    --parallel --num_workers 4
```

```bash
export PYTHONPATH=$PWD
export DASHSCOPE_API_KEY=sk-xxx
python ./pointllm/eval/evaluator_opensource_llm_QwenAPI.py \
    --results_path /path/to/evaluation/ModelNet_classification_prompt1.json \
    --eval_type modelnet-close-set-classification \
    --model_type qwen2-72b-instruct \
    --parallel --num_workers 4
```

3. Evaluate object captioning on Objaverse:

```bash
export PYTHONPATH=$PWD
export DASHSCOPE_API_KEY=sk-xxx
python ./pointllm/eval/evaluator_opensource_llm_QwenAPI.py \
    --results_path /path/to/evaluation/PointLLM_brief_description_val_200_GT_Objaverse_captioning_prompt2.json \
    --eval_type object-captioning \
    --model_type qwen2-72b-instruct \
    --parallel --num_workers 4
```

#### Traditional Metric Evaluation
For the object captioning task, run the following command to evaluate model outputs with the traditional metrics Sentence-BERT and SimCSE:

```bash
CUDA_VISIBLE_DEVICES=0 python pointllm/eval/traditional_evaluator.py --results_path /path/to/evaluation/PointLLM_brief_description_val_200_GT_Objaverse_captioning_prompt2.json
```

## Training

**Stage I**
```bash
bash ./release/paper/scripts/train/1.sh
```

**Stage II**: GreenPLM-0
```bash
bash ./release/paper/scripts/train/2.sh
```

**Stage III**: GreenPLM
```bash
bash ./release/paper/scripts/train/3.sh
```

<details>
<summary>We also provide training scripts for the entire T3D dataset, i.e., using 5M samples from T3D in Stage II instead of the 210k used in the paper. (click to expand)</summary>

**Stage II**: GreenPLM-0
```bash
bash ./release/5M_data_seting/scripts/train/2.sh
```

**Stage III**: GreenPLM
```bash
bash ./release/5M_data_seting/scripts/train/3.sh
```

</details>

**Note**: You can modify the `--output_dir` argument in the scripts to set the output directory for the trained weights.

## 🔗 Citation
If you find our work helpful, please consider citing:
```bibtex
@inproceedings{tang2025more,
  title={More text, less point: Towards 3d data-efficient point-language understanding},
  author={Tang, Yuan and Han, Xu and Li, Xianzhi and Yu, Qiao and Xu, Jinfeng and Hao, Yixue and Hu, Long and Chen, Min},
  booktitle={Proceedings of the AAAI Conference on Artificial Intelligence},
  volume={39},
  number={7},
  pages={7284--7292},
  year={2025}
}
```

## 📄 License
<a rel="license" href="http://creativecommons.org/licenses/by-nc-sa/4.0/"><img alt="Creative Commons License" style="border-width:0" src="https://i.creativecommons.org/l/by-nc-sa/4.0/80x15.png" /></a>
<br />
This work is licensed under the <a rel="license" href="http://creativecommons.org/licenses/by-nc-sa/4.0/">Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International License</a>.

## 📚 Related Work
Together, let's make LLMs for 3D great!
- [Point-Bind & Point-LLM](https://arxiv.org/abs/2309.00615): aligns point clouds with ImageBind to reason over multi-modality input without 3D-instruction data training.
- [3D-LLM](https://arxiv.org/abs/2307.12981): employs 2D foundation models to encode multi-view images of 3D point clouds.
- [PointLLM](https://arxiv.org/abs/2308.16911): connects 3D point clouds with LLaVA.
- [ShapeLLM](http://arxiv.org/abs/2402.17766): combines a powerful point cloud encoder with an LLM for embodied scenes.
- [MiniGPT-3D](https://arxiv.org/pdf/2405.01413): takes the first step toward efficient 3D-LLMs, requiring only a single RTX 3090 GPU and one day of training.

## 👏 Acknowledgements
We would like to thank the authors of [PointLLM](https://github.com/OpenRobotLab/PointLLM), [Uni3D](https://github.com/baaivision/Uni3D), [Phi-3](https://huggingface.co/microsoft/Phi-3-mini-4k-instruct), and [LLaVA-pp](https://github.com/mbzuai-oryx/LLaVA-pp) for their great work and repos.
cog.yaml
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e090913eb14a1290a253f8b3ecbd1f6bc24fb1c373afefbbf7896446961953c1
+size 981

config.json
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:63700c458c2b4e1b461cef0f51ff7604c8d024e0edbe36c8b8419c23bb6bac2d
+size 71

dataset/Objaverse/8192_npy.zip
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cf2bb1d3c7ff97d8d5bc62e5b623231adffc23e542a0e35ed8dd191fbd0dc542
+size 6352316008

dataset/Objaverse/PointLLM_brief_description_val_200_GT.json
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cc0dabd9767a574f8acf22a6f1791689c567e49102b275e92c8e0bd083327955
+size 65315

dataset/Objaverse/PointLLM_complex_50k_brief_40k_all_90k.json
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c8543396bfd46dbb3bce4ea67907fb3b68e5cbf2aa0cb1cc03b4c413ccdb8f48
+size 43683846

dataset/T3D/stage_1/brief_1M_caption.json
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0cf3a03d17a06d14488d6a6234de1fbc5c0970811b53ac88ed4b02a607459713
+size 646557180

dataset/T3D/stage_2/stage_2_data_210k.json
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a3565382fa139d4e8aca746f33aa74592c99f990a1a1d4dd141ab3571afe72a1
+size 168421688

dataset/T3D/stage_2/stage_2_data_5M.json
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:268913bd44e0be2d58177713b6740e4cd2b2054e410d4479643fadff5f1c2c04
+size 3966817060

dataset/modelnet40_data/modelnet40_test_8192pts_fps.dat
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d85b2287a683bbb53842e712db1f77f5e772c371868381726d2541e33bd5cf87
+size 485526630

envInstall.sh
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:be91e45733f958b25c51e976a9260f114b5b6614c8948ddd9cb70589b8eca071
+size 212

lava-vicuna_2024_4_Phi-3-mini-4k-instruct/.gitattributes
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:11ad7efa24975ee4b0c3c3a38ed18737f0658a5f75a0a96787b576a78a023361
+size 1519
lava-vicuna_2024_4_Phi-3-mini-4k-instruct/CODE_OF_CONDUCT.md
ADDED
@@ -0,0 +1,9 @@

# Microsoft Open Source Code of Conduct

This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/).

Resources:

- [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/)
- [Microsoft Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/)
- Contact [[email protected]](mailto:[email protected]) with questions or concerns
lava-vicuna_2024_4_Phi-3-mini-4k-instruct/LICENSE
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fa8235e5b48faca34e3ca98cf4f694ef08bd216d28b58071a1f85b1d50cb814d
+size 1084
lava-vicuna_2024_4_Phi-3-mini-4k-instruct/NOTICE.md
ADDED
@@ -0,0 +1,38 @@

NOTICES AND INFORMATION
Do Not Translate or Localize

This software incorporates material from third parties.

**Component.** https://github.com/Dao-AILab/flash-attention

**Open Source License/Copyright Notice.**

BSD 3-Clause License

Copyright (c) 2022, the respective contributors, as shown by the AUTHORS file.
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

* Redistributions of source code must retain the above copyright notice, this
  list of conditions and the following disclaimer.

* Redistributions in binary form must reproduce the above copyright notice,
  this list of conditions and the following disclaimer in the documentation
  and/or other materials provided with the distribution.

* Neither the name of the copyright holder nor the names of its
  contributors may be used to endorse or promote products derived from
  this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
lava-vicuna_2024_4_Phi-3-mini-4k-instruct/README.md
ADDED
@@ -0,0 +1,256 @@

---
license: mit
license_link: https://huggingface.co/microsoft/Phi-3-mini-4k-instruct/resolve/main/LICENSE

language:
- en
pipeline_tag: text-generation
tags:
- nlp
- code
inference:
  parameters:
    temperature: 0.7
widget:
  - messages:
      - role: user
        content: Can you provide ways to eat combinations of bananas and dragonfruits?
---

## Model Summary

Phi-3-Mini-4K-Instruct is a 3.8B-parameter, lightweight, state-of-the-art open model trained with the Phi-3 datasets, which include both synthetic data and filtered publicly available website data, with a focus on high-quality, reasoning-dense properties.
The model belongs to the Phi-3 family; the Mini version comes in two variants, [4K](https://huggingface.co/microsoft/Phi-3-mini-4k-instruct) and [128K](https://huggingface.co/microsoft/Phi-3-mini-128k-instruct), which denote the context length (in tokens) each can support.

The model has undergone a post-training process that incorporates both supervised fine-tuning and direct preference optimization for instruction following and safety.
When assessed against benchmarks testing common sense, language understanding, math, code, long context, and logical reasoning, Phi-3 Mini-4K-Instruct showcased robust, state-of-the-art performance among models with fewer than 13 billion parameters.

Resources and Technical Documentation:

+ [Phi-3 Microsoft Blog](https://aka.ms/phi3blog-april)
+ [Phi-3 Technical Report](https://aka.ms/phi3-tech-report)
+ [Phi-3 on Azure AI Studio](https://aka.ms/phi3-azure-ai)
+ Phi-3 GGUF: [4K](https://aka.ms/Phi3-mini-4k-instruct-gguf)
+ Phi-3 ONNX: [4K](https://aka.ms/Phi3-mini-4k-instruct-onnx)

## Intended Uses

**Primary use cases**

The model is intended for commercial and research use in English, for applications that require:

1) Memory/compute constrained environments
2) Latency bound scenarios
3) Strong reasoning (especially code, math and logic)

Our model is designed to accelerate research on language and multimodal models, for use as a building block for generative AI powered features.

**Use case considerations**

Our models are not specifically designed or evaluated for all downstream purposes. Developers should consider common limitations of language models as they select use cases, and evaluate and mitigate for accuracy, safety, and fairness before using them within a specific downstream use case, particularly for high-risk scenarios. Developers should be aware of and adhere to applicable laws or regulations (including privacy, trade compliance laws, etc.) that are relevant to their use case.

Nothing contained in this Model Card should be interpreted as or deemed a restriction or modification to the license the model is released under.

## How to Use

Phi-3 Mini-4K-Instruct has been integrated into the development version (4.41.0.dev0) of `transformers`. Until the official version is released through `pip`, ensure that you are doing one of the following:

* When loading the model, ensure that `trust_remote_code=True` is passed as an argument to the `from_pretrained()` function.

* Update your local `transformers` to the development version: `pip uninstall -y transformers && pip install git+https://github.com/huggingface/transformers`. The previous command is an alternative to cloning and installing from source.

The current `transformers` version can be verified with: `pip list | grep transformers`.

Phi-3 Mini-4K-Instruct is also available in [HuggingChat](https://aka.ms/try-phi3-hf-chat).

### Tokenizer

Phi-3 Mini-4K-Instruct supports a vocabulary size of up to `32064` tokens. The [tokenizer files](https://huggingface.co/microsoft/Phi-3-mini-4k-instruct/blob/main/added_tokens.json) already provide placeholder tokens that can be used for downstream fine-tuning, but they can also be extended up to the model's vocabulary size.

### Chat Format

Given the nature of the training data, the Phi-3 Mini-4K-Instruct model is best suited for prompts using the chat format below.
You can provide the prompt as a question with a generic template as follows:
```markdown
<|user|>\nQuestion <|end|>\n<|assistant|>
```
For example:
```markdown
<|user|>
How to explain Internet for a medieval knight?<|end|>
<|assistant|>
```

where the model generates the text after `<|assistant|>`. For few-shot prompts, the prompt can be formatted as follows:

```markdown
<|user|>
I am going to Paris, what should I see?<|end|>
<|assistant|>
Paris, the capital of France, is known for its stunning architecture, art museums, historical landmarks, and romantic atmosphere. Here are some of the top attractions to see in Paris:\n\n1. The Eiffel Tower: The iconic Eiffel Tower is one of the most recognizable landmarks in the world and offers breathtaking views of the city.\n2. The Louvre Museum: The Louvre is one of the world's largest and most famous museums, housing an impressive collection of art and artifacts, including the Mona Lisa.\n3. Notre-Dame Cathedral: This beautiful cathedral is one of the most famous landmarks in Paris and is known for its Gothic architecture and stunning stained glass windows.\n\nThese are just a few of the many attractions that Paris has to offer. With so much to see and do, it's no wonder that Paris is one of the most popular tourist destinations in the world."<|end|>
<|user|>
What is so great about #1?<|end|>
<|assistant|>
```

### Sample inference code

This code snippet shows how to quickly get started with running the model on a GPU:

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline

torch.random.manual_seed(0)

model = AutoModelForCausalLM.from_pretrained(
    "microsoft/Phi-3-mini-4k-instruct",
    device_map="cuda",
    torch_dtype="auto",
    trust_remote_code=True,
)
tokenizer = AutoTokenizer.from_pretrained("microsoft/Phi-3-mini-4k-instruct")

messages = [
    {"role": "user", "content": "Can you provide ways to eat combinations of bananas and dragonfruits?"},
    {"role": "assistant", "content": "Sure! Here are some ways to eat bananas and dragonfruits together: 1. Banana and dragonfruit smoothie: Blend bananas and dragonfruits together with some milk and honey. 2. Banana and dragonfruit salad: Mix sliced bananas and dragonfruits together with some lemon juice and honey."},
    {"role": "user", "content": "What about solving an 2x + 3 = 7 equation?"},
]

pipe = pipeline(
    "text-generation",
    model=model,
    tokenizer=tokenizer,
)

generation_args = {
    "max_new_tokens": 500,
    "return_full_text": False,
    "temperature": 0.0,
    "do_sample": False,
}

output = pipe(messages, **generation_args)
print(output[0]['generated_text'])
```

*Some applications/frameworks might not include a BOS token (`<s>`) at the start of the conversation. Please ensure that it is included, since it provides more reliable results.*
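To verify the rendered prompt (including the BOS token and the `<|user|>`/`<|assistant|>` markers), the standard `transformers` chat-template API can be used:

```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("microsoft/Phi-3-mini-4k-instruct")
messages = [{"role": "user", "content": "How to explain Internet for a medieval knight?"}]
# tokenize=False returns the rendered string so the chat markers and BOS handling can be inspected.
prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
print(prompt)
```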

## Responsible AI Considerations

Like other language models, the Phi series models can potentially behave in ways that are unfair, unreliable, or offensive. Some of the limiting behaviors to be aware of include:

+ Quality of Service: the Phi models are trained primarily on English text. Languages other than English will experience worse performance. English language varieties with less representation in the training data might experience worse performance than standard American English.
+ Representation of Harms & Perpetuation of Stereotypes: These models can over- or under-represent groups of people, erase representation of some groups, or reinforce demeaning or negative stereotypes. Despite safety post-training, these limitations may still be present due to differing levels of representation of different groups or the prevalence of examples of negative stereotypes in training data that reflect real-world patterns and societal biases.
+ Inappropriate or Offensive Content: these models may produce other types of inappropriate or offensive content, which may make it inappropriate to deploy for sensitive contexts without additional mitigations that are specific to the use case.
+ Information Reliability: Language models can generate nonsensical content or fabricate content that might sound reasonable but is inaccurate or outdated.
+ Limited Scope for Code: The majority of Phi-3 training data is based on Python and uses common packages such as "typing, math, random, collections, datetime, itertools". If the model generates Python scripts that utilize other packages or scripts in other languages, we strongly recommend users manually verify all API uses.

Developers should apply responsible AI best practices and are responsible for ensuring that a specific use case complies with relevant laws and regulations (e.g. privacy, trade, etc.). Important areas for consideration include:

+ Allocation: Models may not be suitable for scenarios that could have consequential impact on legal status or the allocation of resources or life opportunities (ex: housing, employment, credit, etc.) without further assessments and additional debiasing techniques.
+ High-Risk Scenarios: Developers should assess the suitability of using models in high-risk scenarios where unfair, unreliable or offensive outputs might be extremely costly or lead to harm. This includes providing advice in sensitive or expert domains where accuracy and reliability are critical (ex: legal or health advice). Additional safeguards should be implemented at the application level according to the deployment context.
+ Misinformation: Models may produce inaccurate information. Developers should follow transparency best practices and inform end-users they are interacting with an AI system. At the application level, developers can build feedback mechanisms and pipelines to ground responses in use-case-specific, contextual information, a technique known as Retrieval Augmented Generation (RAG).
+ Generation of Harmful Content: Developers should assess outputs for their context and use available safety classifiers or custom solutions appropriate for their use case.
+ Misuse: Other forms of misuse such as fraud, spam, or malware production may be possible, and developers should ensure that their applications do not violate applicable laws and regulations.


## Training

### Model

* Architecture: Phi-3 Mini-4K-Instruct has 3.8B parameters and is a dense decoder-only Transformer model. The model is fine-tuned with Supervised Fine-Tuning (SFT) and Direct Preference Optimization (DPO) to ensure alignment with human preferences and safety guidelines.
* Inputs: Text. It is best suited for prompts using the chat format.
* Context length: 4K tokens
* GPUs: 512 H100-80G
* Training time: 7 days
* Training data: 3.3T tokens
* Outputs: Generated text in response to the input
* Dates: Our models were trained between February and April 2024
* Status: This is a static model trained on an offline dataset with a cutoff date of October 2023. Future versions of the tuned models may be released as we improve models.

### Datasets

Our training data includes a wide variety of sources, totaling 3.3 trillion tokens, and is a combination of
1) publicly available documents filtered rigorously for quality, selected high-quality educational data, and code;
2) newly created synthetic, “textbook-like” data for the purpose of teaching math, coding, common sense reasoning, and general knowledge of the world (science, daily activities, theory of mind, etc.);
3) high-quality chat-format supervised data covering various topics to reflect human preferences on different aspects such as instruction-following, truthfulness, honesty and helpfulness.

### Fine-tuning

A basic example of multi-GPU supervised fine-tuning (SFT) with TRL and Accelerate modules is provided [here](https://huggingface.co/microsoft/Phi-3-mini-4k-instruct/resolve/main/sample_finetune.py).

## Benchmarks

We report the results for Phi-3-Mini-4K-Instruct on standard open-source benchmarks measuring the model's reasoning ability (both common sense reasoning and logical reasoning). We compare to Phi-2, Mistral-7b-v0.1, Mixtral-8x7b, Gemma 7B, Llama-3-8B-Instruct, and GPT-3.5.

All the reported numbers are produced with the exact same pipeline to ensure that the numbers are comparable. These numbers might differ from other published numbers due to slightly different choices in the evaluation.

As is now standard, we use few-shot prompts to evaluate the models, at temperature 0.
The prompts and number of shots are part of a Microsoft internal tool to evaluate language models, and in particular we did no optimization of the pipeline for Phi-3.
More specifically, we do not change prompts, pick different few-shot examples, change the prompt format, or do any other form of optimization for the model.

The number of k-shot examples is listed per benchmark.

| | Phi-3-Mini-4K-In<br>3.8b | Phi-3-Small<br>7b (preview) | Phi-3-Medium<br>14b (preview) | Phi-2<br>2.7b | Mistral<br>7b | Gemma<br>7b | Llama-3-In<br>8b | Mixtral<br>8x7b | GPT-3.5<br>version 1106 |
|---|---|---|---|---|---|---|---|---|---|
| MMLU <br>5-Shot | 68.8 | 75.3 | 78.2 | 56.3 | 61.7 | 63.6 | 66.5 | 68.4 | 71.4 |
| HellaSwag <br>5-Shot | 76.7 | 78.7 | 83.2 | 53.6 | 58.5 | 49.8 | 71.1 | 70.4 | 78.8 |
| ANLI <br>7-Shot | 52.8 | 55.0 | 58.7 | 42.5 | 47.1 | 48.7 | 57.3 | 55.2 | 58.1 |
| GSM-8K <br>0-Shot; CoT | 82.5 | 86.4 | 90.8 | 61.1 | 46.4 | 59.8 | 77.4 | 64.7 | 78.1 |
| MedQA <br>2-Shot | 53.8 | 58.2 | 69.8 | 40.9 | 49.6 | 50.0 | 60.5 | 62.2 | 63.4 |
| AGIEval <br>0-Shot | 37.5 | 45.0 | 49.7 | 29.8 | 35.1 | 42.1 | 42.0 | 45.2 | 48.4 |
| TriviaQA <br>5-Shot | 64.0 | 59.1 | 73.3 | 45.2 | 72.3 | 75.2 | 67.7 | 82.2 | 85.8 |
| Arc-C <br>10-Shot | 84.9 | 90.7 | 91.9 | 75.9 | 78.6 | 78.3 | 82.8 | 87.3 | 87.4 |
| Arc-E <br>10-Shot | 94.6 | 97.1 | 98.0 | 88.5 | 90.6 | 91.4 | 93.4 | 95.6 | 96.3 |
| PIQA <br>5-Shot | 84.2 | 87.8 | 88.2 | 60.2 | 77.7 | 78.1 | 75.7 | 86.0 | 86.6 |
| SociQA <br>5-Shot | 76.6 | 79.0 | 79.4 | 68.3 | 74.6 | 65.5 | 73.9 | 75.9 | 68.3 |
| BigBench-Hard <br>0-Shot | 71.7 | 75.0 | 82.5 | 59.4 | 57.3 | 59.6 | 51.5 | 69.7 | 68.32 |
| WinoGrande <br>5-Shot | 70.8 | 82.5 | 81.2 | 54.7 | 54.2 | 55.6 | 65 | 62.0 | 68.8 |
| OpenBookQA <br>10-Shot | 83.2 | 88.4 | 86.6 | 73.6 | 79.8 | 78.6 | 82.6 | 85.8 | 86.0 |
| BoolQ <br>0-Shot | 77.6 | 82.9 | 86.5 | -- | 72.2 | 66.0 | 80.9 | 77.6 | 79.1 |
| CommonSenseQA <br>10-Shot | 80.2 | 80.3 | 82.6 | 69.3 | 72.6 | 76.2 | 79 | 78.1 | 79.6 |
| TruthfulQA <br>10-Shot | 65.0 | 68.1 | 74.8 | -- | 52.1 | 53.0 | 63.2 | 60.1 | 85.8 |
| HumanEval <br>0-Shot | 59.1 | 59.1 | 54.7 | 47.0 | 28.0 | 34.1 | 60.4 | 37.8 | 62.2 |
| MBPP <br>3-Shot | 53.8 | 71.4 | 73.7 | 60.6 | 50.8 | 51.5 | 67.7 | 60.2 | 77.8 |

## Software

* [PyTorch](https://github.com/pytorch/pytorch)
* [DeepSpeed](https://github.com/microsoft/DeepSpeed)
* [Transformers](https://github.com/huggingface/transformers)
* [Flash-Attention](https://github.com/HazyResearch/flash-attention)

## Hardware
Note that by default, the Phi-3-mini model uses flash attention, which requires certain types of GPU hardware to run. We have tested on the following GPU types:
* NVIDIA A100
* NVIDIA A6000
* NVIDIA H100

If you want to run the model on:
* NVIDIA V100 or earlier generation GPUs: call `AutoModelForCausalLM.from_pretrained()` with `attn_implementation="eager"` (see the sketch after this list)
* CPU: use the **GGUF** quantized models [4K](https://aka.ms/Phi3-mini-4k-instruct-gguf)
* Optimized inference on GPU, CPU, and Mobile: use the **ONNX** models [4K](https://aka.ms/Phi3-mini-4k-instruct-onnx)
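A concrete sketch of the V100/eager fallback mentioned in the list above (`attn_implementation` is the standard `transformers` argument; dtype and `trust_remote_code` follow the earlier snippet):

```python
from transformers import AutoModelForCausalLM

# Fall back to the eager attention implementation on GPUs without flash-attention support (e.g. V100).
model = AutoModelForCausalLM.from_pretrained(
    "microsoft/Phi-3-mini-4k-instruct",
    torch_dtype="auto",
    trust_remote_code=True,
    attn_implementation="eager",
)
```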

## Cross Platform Support

The ONNX Runtime ecosystem now supports Phi-3 Mini models across platforms and hardware. You can find the optimized Phi-3 Mini-4K-Instruct ONNX model [here](https://aka.ms/phi3-mini-4k-instruct-onnx).

Optimized Phi-3 models are also published here in ONNX format, to run with ONNX Runtime on CPU and GPU across devices, including server platforms, Windows, Linux and Mac desktops, and mobile CPUs, with the precision best suited to each of these targets. DirectML support lets developers bring hardware acceleration to Windows devices at scale across AMD, Intel, and NVIDIA GPUs.
Along with DirectML, ONNX Runtime provides cross-platform support for Phi-3 across a range of devices (CPU, GPU, and mobile).

Here are some of the optimized configurations we have added:

1. ONNX models for int4 DML: quantized to int4 via AWQ
2. ONNX model for fp16 CUDA
3. ONNX model for int4 CUDA: quantized to int4 via RTN
4. ONNX model for int4 CPU and Mobile: quantized to int4 via RTN

## License

The model is licensed under the [MIT license](https://huggingface.co/microsoft/Phi-3-mini-4k/resolve/main/LICENSE).

## Trademarks

This project may contain trademarks or logos for projects, products, or services. Authorized use of Microsoft trademarks or logos is subject to and must follow [Microsoft’s Trademark & Brand Guidelines](https://www.microsoft.com/en-us/legal/intellectualproperty/trademarks). Use of Microsoft trademarks or logos in modified versions of this project must not cause confusion or imply Microsoft sponsorship. Any use of third-party trademarks or logos is subject to those third parties’ policies.
lava-vicuna_2024_4_Phi-3-mini-4k-instruct/SECURITY.md
ADDED
@@ -0,0 +1,41 @@

<!-- BEGIN MICROSOFT SECURITY.MD V0.0.9 BLOCK -->

## Security

Microsoft takes the security of our software products and services seriously, which includes all source code repositories managed through our GitHub organizations, which include [Microsoft](https://github.com/Microsoft), [Azure](https://github.com/Azure), [DotNet](https://github.com/dotnet), [AspNet](https://github.com/aspnet) and [Xamarin](https://github.com/xamarin).

If you believe you have found a security vulnerability in any Microsoft-owned repository that meets [Microsoft's definition of a security vulnerability](https://aka.ms/security.md/definition), please report it to us as described below.

## Reporting Security Issues

**Please do not report security vulnerabilities through public GitHub issues.**

Instead, please report them to the Microsoft Security Response Center (MSRC) at [https://msrc.microsoft.com/create-report](https://aka.ms/security.md/msrc/create-report).

If you prefer to submit without logging in, send email to [[email protected]](mailto:[email protected]). If possible, encrypt your message with our PGP key; please download it from the [Microsoft Security Response Center PGP Key page](https://aka.ms/security.md/msrc/pgp).

You should receive a response within 24 hours. If for some reason you do not, please follow up via email to ensure we received your original message. Additional information can be found at [microsoft.com/msrc](https://www.microsoft.com/msrc).

Please include the requested information listed below (as much as you can provide) to help us better understand the nature and scope of the possible issue:

* Type of issue (e.g. buffer overflow, SQL injection, cross-site scripting, etc.)
* Full paths of source file(s) related to the manifestation of the issue
* The location of the affected source code (tag/branch/commit or direct URL)
* Any special configuration required to reproduce the issue
* Step-by-step instructions to reproduce the issue
* Proof-of-concept or exploit code (if possible)
* Impact of the issue, including how an attacker might exploit the issue

This information will help us triage your report more quickly.

If you are reporting for a bug bounty, more complete reports can contribute to a higher bounty award. Please visit our [Microsoft Bug Bounty Program](https://aka.ms/security.md/msrc/bounty) page for more details about our active programs.

## Preferred Languages

We prefer all communications to be in English.

## Policy

Microsoft follows the principle of [Coordinated Vulnerability Disclosure](https://aka.ms/security.md/cvd).

<!-- END MICROSOFT SECURITY.MD BLOCK -->
lava-vicuna_2024_4_Phi-3-mini-4k-instruct/added_tokens.json
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8f5b652d997cf841b1a79f2557e235b33140006dac569e76edb8ab3437d138c3
+size 293

lava-vicuna_2024_4_Phi-3-mini-4k-instruct/config.json
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d81d79531772a0cafe4615997c659cbf53b831622daed49494b4a6937eef9dd3
+size 904
lava-vicuna_2024_4_Phi-3-mini-4k-instruct/configuration_phi3.py
ADDED
@@ -0,0 +1,213 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# coding=utf-8
|
2 |
+
# Copyright 2024 Microsoft and the HuggingFace Inc. team. All rights reserved.
|
3 |
+
#
|
4 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
5 |
+
# you may not use this file except in compliance with the License.
|
6 |
+
# You may obtain a copy of the License at
|
7 |
+
#
|
8 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
9 |
+
#
|
10 |
+
# Unless required by applicable law or agreed to in writing, software
|
11 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
12 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
13 |
+
# See the License for the specific language governing permissions and
|
14 |
+
# limitations under the License.
|
15 |
+
|
16 |
+
""" Phi-3 model configuration"""
|
17 |
+
|
18 |
+
|
19 |
+
from transformers.configuration_utils import PretrainedConfig
|
20 |
+
from transformers.utils import logging
|
21 |
+
|
22 |
+
|
23 |
+
logger = logging.get_logger(__name__)
|
24 |
+
|
25 |
+
PHI3_PRETRAINED_CONFIG_ARCHIVE_MAP = {
|
26 |
+
"microsoft/Phi-3-mini-4k-instruct": "https://huggingface.co/microsoft/Phi-3-mini-4k-instruct/resolve/main/config.json",
|
27 |
+
"microsoft/Phi-3-mini-128k-instruct": "https://huggingface.co/microsoft/Phi-3-mini-128k-instruct/resolve/main/config.json",
|
28 |
+
}
|
29 |
+
|
30 |
+
|
31 |
+
class Phi3Config(PretrainedConfig):
|
32 |
+
r"""
|
33 |
+
This is the configuration class to store the configuration of a [`Phi3Model`]. It is used to instantiate a Phi-3
|
34 |
+
model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
|
35 |
+
defaults will yield a similar configuration to that of the
|
36 |
+
[microsoft/Phi-3-mini-4k-instruct](https://huggingface.co/microsoft/Phi-3-mini-4k-instruct).
|
37 |
+
|
38 |
+
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
|
39 |
+
documentation from [`PretrainedConfig`] for more information.
|
40 |
+
|
41 |
+
Args:
|
42 |
+
vocab_size (`int`, *optional*, defaults to 32064):
|
43 |
+
Vocabulary size of the Phi-3 model. Defines the number of different tokens that can be represented by the
|
44 |
+
`inputs_ids` passed when calling [`Phi3Model`].
|
45 |
+
hidden_size (`int`, *optional*, defaults to 3072):
|
46 |
+
Dimension of the hidden representations.
|
47 |
+
intermediate_size (`int`, *optional*, defaults to 8192):
|
48 |
+
Dimension of the MLP representations.
|
49 |
+
num_hidden_layers (`int`, *optional*, defaults to 32):
|
50 |
+
Number of hidden layers in the Transformer decoder.
|
51 |
+
num_attention_heads (`int`, *optional*, defaults to 32):
|
52 |
+
Number of attention heads for each attention layer in the Transformer decoder.
|
53 |
+
num_key_value_heads (`int`, *optional*):
|
54 |
+
This is the number of key_value heads that should be used to implement Grouped Query Attention. If
|
55 |
+
`num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
|
56 |
+
`num_key_value_heads=1 the model will use Multi Query Attention (MQA) otherwise GQA is used. When
|
57 |
+
converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
|
58 |
+
by meanpooling all the original heads within that group. For more details checkout [this
|
59 |
+
paper](https://arxiv.org/pdf/2305.13245.pdf). If it is not specified, will default to
|
60 |
+
`num_attention_heads`.
|
61 |
+
resid_pdrop (`float`, *optional*, defaults to 0.0):
|
62 |
+
Dropout probability for mlp outputs.
|
63 |
+
embd_pdrop (`int`, *optional*, defaults to 0.0):
|
64 |
+
The dropout ratio for the embeddings.
|
65 |
+
attention_dropout (`float`, *optional*, defaults to 0.0):
|
66 |
+
The dropout ratio after computing the attention scores.
|
67 |
+
hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
|
68 |
+
The non-linear activation function (function or string) in the decoder.
|
69 |
+
max_position_embeddings (`int`, *optional*, defaults to 4096):
|
70 |
+
The maximum sequence length that this model might ever be used with.
|
71 |
+
original_max_position_embeddings (`int`, *optional*, defaults to 4096):
|
72 |
+
The maximum sequence length that this model was trained with. This is used to determine the size of the
|
73 |
+
original RoPE embeddings when using long scaling.
|
74 |
+
initializer_range (`float`, *optional*, defaults to 0.02):
|
75 |
+
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
|
76 |
+
rms_norm_eps (`float`, *optional*, defaults to 1e-05):
|
77 |
+
The epsilon value used for the RMSNorm.
|
78 |
+
use_cache (`bool`, *optional*, defaults to `True`):
|
79 |
+
Whether or not the model should return the last key/values attentions (not used by all models). Only
|
80 |
+
relevant if `config.is_decoder=True`. Whether to tie weight embeddings or not.
|
81 |
+
tie_word_embeddings (`bool`, *optional*, defaults to `False`):
|
82 |
+
Whether to tie weight embeddings
|
83 |
+
rope_theta (`float`, *optional*, defaults to 10000.0):
|
84 |
+
The base period of the RoPE embeddings.
|
85 |
+
rope_scaling (`dict`, *optional*):
|
86 |
+
The scaling strategy for the RoPE embeddings. If `None`, no scaling is applied. If a dictionary, it must
|
87 |
+
contain the following keys: `type`, `short_factor` and `long_factor`. The `type` must be either `su` or `yarn` and
|
88 |
+
the `short_factor` and `long_factor` must be lists of numbers with the same length as the hidden size
|
89 |
+
divided by the number of attention heads divided by 2.
|
90 |
+
bos_token_id (`int`, *optional*, defaults to 1):
|
91 |
+
The id of the "beginning-of-sequence" token.
|
92 |
+
eos_token_id (`int`, *optional*, defaults to 32000):
|
93 |
+
The id of the "end-of-sequence" token.
|
94 |
+
pad_token_id (`int`, *optional*, defaults to 32000):
|
95 |
+
The id of the padding token.
|
96 |
+
sliding_window (`int`, *optional*):
|
97 |
+
Sliding window attention window size. If `None`, no sliding window is applied.
|
98 |
+
|
99 |
+
Example:
|
100 |
+
|
101 |
+
```python
|
102 |
+
>>> from transformers import Phi3Model, Phi3Config
|
103 |
+
|
104 |
+
>>> # Initializing a Phi-3 style configuration
|
105 |
+
>>> configuration = Phi3Config.from_pretrained("microsoft/Phi-3-mini-4k-instruct")
|
106 |
+
|
107 |
+
>>> # Initializing a model from the configuration
|
108 |
+
>>> model = Phi3Model(configuration)
|
109 |
+
|
110 |
+
>>> # Accessing the model configuration
|
111 |
+
>>> configuration = model.config
|
112 |
+
```"""
|

    model_type = "phi3"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=32064,
        hidden_size=3072,
        intermediate_size=8192,
        num_hidden_layers=32,
        num_attention_heads=32,
        num_key_value_heads=None,
        resid_pdrop=0.0,
        embd_pdrop=0.0,
        attention_dropout=0.0,
        hidden_act="silu",
        max_position_embeddings=4096,
        original_max_position_embeddings=4096,
        initializer_range=0.02,
        rms_norm_eps=1e-5,
        use_cache=True,
        tie_word_embeddings=False,
        rope_theta=10000.0,
        rope_scaling=None,
        bos_token_id=1,
        eos_token_id=32000,
        pad_token_id=32000,
        sliding_window=None,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads

        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads

        self.num_key_value_heads = num_key_value_heads
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attention_dropout = attention_dropout
        self.hidden_act = hidden_act
        self.max_position_embeddings = max_position_embeddings
        self.original_max_position_embeddings = original_max_position_embeddings
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.use_cache = use_cache
        self.rope_theta = rope_theta
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()
        self.sliding_window = sliding_window

        super().__init__(
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            pad_token_id=pad_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )

    def _rope_scaling_validation(self):
        """
        Validate the `rope_scaling` configuration.
        """
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 3:
            raise ValueError(
                "`rope_scaling` must be a dictionary with three fields, `type`, `short_factor` and `long_factor`, "
                f"got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_short_factor = self.rope_scaling.get("short_factor", None)
        rope_scaling_long_factor = self.rope_scaling.get("long_factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["su", "yarn"]:
            raise ValueError(f"`rope_scaling`'s type field must be one of ['su', 'yarn'], got {rope_scaling_type}")
        if not (
            isinstance(rope_scaling_short_factor, list)
            and all(isinstance(x, (int, float)) for x in rope_scaling_short_factor)
        ):
            raise ValueError(
                f"`rope_scaling`'s short_factor field must be a list of numbers, got {rope_scaling_short_factor}"
            )
        if not len(rope_scaling_short_factor) == self.hidden_size // self.num_attention_heads // 2:
            raise ValueError(
                f"`rope_scaling`'s short_factor field must have length {self.hidden_size // self.num_attention_heads // 2}, got {len(rope_scaling_short_factor)}"
            )
        if not (
            isinstance(rope_scaling_long_factor, list)
            and all(isinstance(x, (int, float)) for x in rope_scaling_long_factor)
        ):
            raise ValueError(
                f"`rope_scaling`'s long_factor field must be a list of numbers, got {rope_scaling_long_factor}"
            )
        if not len(rope_scaling_long_factor) == self.hidden_size // self.num_attention_heads // 2:
            raise ValueError(
                f"`rope_scaling`'s long_factor field must have length {self.hidden_size // self.num_attention_heads // 2}, got {len(rope_scaling_long_factor)}"
            )
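
A minimal sketch of a `rope_scaling` dictionary that passes the validation above, assuming the default geometry (hidden_size=3072, num_attention_heads=32, so 3072 // 32 // 2 = 48 entries per factor list) and that the module is importable from this checkout; the uniform factor values are illustrative placeholders, not the tuned factors shipped with the released long-context checkpoints:

    from configuration_phi3 import Phi3Config  # assumed local import path

    half_head_dim = 3072 // 32 // 2  # 48 entries required per factor list
    config = Phi3Config(
        max_position_embeddings=131072,
        original_max_position_embeddings=4096,
        rope_scaling={
            "type": "su",
            "short_factor": [1.0] * half_head_dim,  # placeholder values
            "long_factor": [1.5] * half_head_dim,   # placeholder values
        },
    )
    assert config.rope_scaling["type"] == "su"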
lava-vicuna_2024_4_Phi-3-mini-4k-instruct/generation_config.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:466c2f0dc6eb59aa1593d3dd30e5d6614b8bf9e5d0c3b94f268ce4e341345009
size 172
lava-vicuna_2024_4_Phi-3-mini-4k-instruct/model-00001-of-00002.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:f95c89449ba404df51c3f633df65c52cabea9dbfae7e21977d32b5daa397cc91
size 4972489328
lava-vicuna_2024_4_Phi-3-mini-4k-instruct/model-00002-of-00002.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:5de695669421c1e12d2d1588a4795de6a5900bd1513c06be357b65a041b3590e
size 2669692552
lava-vicuna_2024_4_Phi-3-mini-4k-instruct/model.safetensors.index.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:6eec78ee2a442445d9d342a7cd9a763da40a49bd91d8611b7db5e9a29c90a428
size 16331
lava-vicuna_2024_4_Phi-3-mini-4k-instruct/modeling_phi3.py
ADDED
@@ -0,0 +1,1606 @@
# coding=utf-8
# Copyright 2024 Microsoft and the HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

""" PyTorch Phi-3 model."""

import inspect
import math
import warnings
from typing import List, Optional, Tuple, Union

import torch
import torch.nn.functional as F
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss

from transformers.activations import ACT2FN
from transformers.cache_utils import Cache, DynamicCache
from transformers.modeling_attn_mask_utils import _prepare_4d_causal_attention_mask
from transformers.modeling_outputs import (
    BaseModelOutputWithPast,
    CausalLMOutputWithPast,
    SequenceClassifierOutputWithPast,
    TokenClassifierOutput,
)
from transformers.modeling_utils import PreTrainedModel
from transformers.utils import (
    add_code_sample_docstrings,
    add_start_docstrings,
    add_start_docstrings_to_model_forward,
    is_flash_attn_2_available,
    is_flash_attn_greater_or_equal_2_10,
    logging,
    replace_return_docstrings,
)
from .configuration_phi3 import Phi3Config


logger = logging.get_logger(__name__)

# Transformers scans dependencies in the modeling file, causing issues on conditional loading. The regex only ignores
# try/except blocks, but not `if` statements, so the usual guard below is replaced by the try/except that follows:
# if is_flash_attn_2_available():
_flash_supports_window_size = False
try:
    from flash_attn import flash_attn_func, flash_attn_varlen_func
    from flash_attn.bert_padding import index_first_axis, pad_input, unpad_input  # noqa

    _flash_supports_window_size = "window_size" in list(inspect.signature(flash_attn_func).parameters)
except ImportError as error:
    logger.warning(
        f"`flash-attention` package not found, consider installing for better performance: {error}."
    )
    if not _flash_supports_window_size:
        logger.warning(
            "Current `flash-attention` does not support `window_size`. Either upgrade or use `attn_implementation='eager'`."
        )

_CHECKPOINT_FOR_DOC = "microsoft/Phi-3-mini-4k-instruct"
_CONFIG_FOR_DOC = "Phi3Config"

PHI3_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "microsoft/Phi-3-mini-4k-instruct",
    "microsoft/Phi-3-mini-128k-instruct",
    # See all Phi-3 models at https://huggingface.co/models?filter=Phi-3
]

# Copied from transformers.models.llama.modeling_llama.LlamaRMSNorm with Llama->Phi3
class Phi3RMSNorm(nn.Module):
    def __init__(self, hidden_size, eps=1e-6):
        """
        Phi3RMSNorm is equivalent to T5LayerNorm
        """
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.variance_epsilon = eps

    def forward(self, hidden_states):
        input_dtype = hidden_states.dtype
        hidden_states = hidden_states.to(torch.float32)
        variance = hidden_states.pow(2).mean(-1, keepdim=True)
        hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
        return self.weight * hidden_states.to(input_dtype)

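
# A quick numerical sanity check of the normalization above (an editor's illustrative sketch,
# not part of the original file): RMSNorm rescales each vector by its root-mean-square over
# the last dimension, with no mean subtraction, so a constant vector maps to the all-ones
# vector (times `weight`, which initializes to ones):
#
#     norm = Phi3RMSNorm(hidden_size=4)
#     x = torch.full((1, 1, 4), 3.0)
#     out = norm(x)  # rms(x) == 3.0, so out is (approximately) all ones
#     assert torch.allclose(out, torch.ones_like(out), atol=1e-5)
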
# Copied from transformers.models.llama.modeling_llama._get_unpad_data
def _get_unpad_data(attention_mask):
    seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32)
    indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten()
    max_seqlen_in_batch = seqlens_in_batch.max().item()
    cu_seqlens = F.pad(torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.int32), (1, 0))
    return (
        indices,
        cu_seqlens,
        max_seqlen_in_batch,
    )


# Copied from transformers.models.gemma.modeling_gemma.GemmaRotaryEmbedding with gemma->phi3, Gemma->Phi3
class Phi3RotaryEmbedding(nn.Module):
    def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None):
        super().__init__()

        self.dim = dim
        self.max_position_embeddings = max_position_embeddings
        self.base = base
        self.register_buffer("inv_freq", None, persistent=False)

    @torch.no_grad()
    def forward(self, x, position_ids, seq_len=None):
        # x: [bs, num_attention_heads, seq_len, head_size]
        if self.inv_freq is None:
            self.inv_freq = 1.0 / (
                self.base ** (torch.arange(0, self.dim, 2, dtype=torch.int64, device=x.device).float() / self.dim)
            )
        inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1)
        position_ids_expanded = position_ids[:, None, :].float()
        # Force float32 since bfloat16 loses precision on long contexts
        # See https://github.com/huggingface/transformers/pull/29285
        device_type = x.device.type
        device_type = device_type if isinstance(device_type, str) and device_type != "mps" else "cpu"
        with torch.autocast(device_type=device_type, enabled=False):
            freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
            emb = torch.cat((freqs, freqs), dim=-1)
            cos = emb.cos()
            sin = emb.sin()
        return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)

class Phi3SuScaledRotaryEmbedding(Phi3RotaryEmbedding):
    def __init__(self, dim, config, device=None):
        super().__init__(dim, config.max_position_embeddings, config.rope_theta, device)

        self.short_factor = config.rope_scaling["short_factor"]
        self.long_factor = config.rope_scaling["long_factor"]
        self.original_max_position_embeddings = config.original_max_position_embeddings

    @torch.no_grad()
    def forward(self, x, position_ids, seq_len=None):
        seq_len = torch.max(position_ids) + 1
        if seq_len > self.original_max_position_embeddings:
            ext_factors = torch.tensor(self.long_factor, dtype=torch.float32, device=x.device)
        else:
            ext_factors = torch.tensor(self.short_factor, dtype=torch.float32, device=x.device)

        inv_freq_shape = torch.arange(0, self.dim, 2, dtype=torch.int64, device=x.device).float() / self.dim
        self.inv_freq = 1.0 / (ext_factors * self.base**inv_freq_shape)

        inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1)
        position_ids_expanded = position_ids[:, None, :].float()

        # Force float32 since bfloat16 loses precision on long contexts
        # See https://github.com/huggingface/transformers/pull/29285
        device_type = x.device.type
        device_type = device_type if isinstance(device_type, str) and device_type != "mps" else "cpu"
        with torch.autocast(device_type=device_type, enabled=False):
            freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
            emb = torch.cat((freqs, freqs), dim=-1)

            scale = self.max_position_embeddings / self.original_max_position_embeddings
            if scale <= 1.0:
                scaling_factor = 1.0
            else:
                scaling_factor = math.sqrt(1 + math.log(scale) / math.log(self.original_max_position_embeddings))

            cos = emb.cos() * scaling_factor
            sin = emb.sin() * scaling_factor
        return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)

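
# Illustrative arithmetic for the attention scaling above (an editor's sketch, not part of
# the original file): for a 128k-context variant with a 4k original window,
# scale = 131072 / 4096 = 32, so scaling_factor = sqrt(1 + ln(32) / ln(4096)) which is
# approximately 1.19, i.e. cos/sin are inflated by roughly 19%:
#
#     import math
#     scale = 131072 / 4096
#     print(math.sqrt(1 + math.log(scale) / math.log(4096)))  # ~1.1902
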
class Phi3YarnScaledRotaryEmbedding(Phi3RotaryEmbedding):
    def __init__(self, dim, config, device=None):
        super().__init__(dim, config.max_position_embeddings, config.rope_theta, device)

        self.short_factor = config.rope_scaling["short_factor"]
        self.long_factor = config.rope_scaling["long_factor"]
        self.original_max_position_embeddings = config.original_max_position_embeddings

    @torch.no_grad()
    def forward(self, x, position_ids, seq_len=None):
        seq_len = torch.max(position_ids) + 1
        if seq_len > self.original_max_position_embeddings:
            ext_factors = torch.tensor(self.long_factor, dtype=torch.float32, device=x.device)
        else:
            ext_factors = torch.tensor(self.short_factor, dtype=torch.float32, device=x.device)

        inv_freq_shape = torch.arange(0, self.dim, 2, dtype=torch.int64, device=x.device).float() / self.dim
        self.inv_freq = 1.0 / (ext_factors * self.base**inv_freq_shape)

        inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1)
        position_ids_expanded = position_ids[:, None, :].float()

        # Force float32 since bfloat16 loses precision on long contexts
        # See https://github.com/huggingface/transformers/pull/29285
        device_type = x.device.type
        device_type = device_type if isinstance(device_type, str) and device_type != "mps" else "cpu"
        with torch.autocast(device_type=device_type, enabled=False):
            freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
            emb = torch.cat((freqs, freqs), dim=-1)

            scale = self.max_position_embeddings / self.original_max_position_embeddings
            if scale <= 1.0:
                scaling_factor = 1.0
            else:
                scaling_factor = 0.1 * math.log(scale) + 1.0

            cos = emb.cos() * scaling_factor
            sin = emb.sin() * scaling_factor
        return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)


# Copied from transformers.models.llama.modeling_llama.rotate_half
def rotate_half(x):
    """Rotates half the hidden dims of the input."""
    x1 = x[..., : x.shape[-1] // 2]
    x2 = x[..., x.shape[-1] // 2 :]
    return torch.cat((-x2, x1), dim=-1)


# Copied from transformers.models.llama.modeling_llama.apply_rotary_pos_emb
def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
    """Applies Rotary Position Embedding to the query and key tensors.

    Args:
        q (`torch.Tensor`): The query tensor.
        k (`torch.Tensor`): The key tensor.
        cos (`torch.Tensor`): The cosine part of the rotary embedding.
        sin (`torch.Tensor`): The sine part of the rotary embedding.
        position_ids (`torch.Tensor`, *optional*):
            Deprecated and unused.
        unsqueeze_dim (`int`, *optional*, defaults to 1):
            The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
            sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
            that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
            k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
            cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
            the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
    Returns:
        `tuple(torch.Tensor)` comprising the query and key tensors rotated using the Rotary Position Embedding.
    """
    cos = cos.unsqueeze(unsqueeze_dim)
    sin = sin.unsqueeze(unsqueeze_dim)
    q_embed = (q * cos) + (rotate_half(q) * sin)
    k_embed = (k * cos) + (rotate_half(k) * sin)
    return q_embed, k_embed

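
# A minimal shape walkthrough for apply_rotary_pos_emb (an editor's sketch, not part of the
# original file): with q, k laid out as [batch, heads, seq_len, head_dim] and cos/sin as
# [batch, seq_len, head_dim], unsqueeze_dim=1 inserts the missing heads axis so the
# element-wise products broadcast:
#
#     rope = Phi3RotaryEmbedding(dim=96, max_position_embeddings=4096)
#     q = torch.randn(2, 32, 10, 96)
#     k = torch.randn(2, 32, 10, 96)
#     position_ids = torch.arange(10).unsqueeze(0).expand(2, -1)
#     cos, sin = rope(q, position_ids)                  # each [2, 10, 96]
#     q_rot, k_rot = apply_rotary_pos_emb(q, k, cos, sin)
#     assert q_rot.shape == q.shape and k_rot.shape == k.shape
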
class Phi3MLP(nn.Module):
    def __init__(self, config):
        super().__init__()

        self.config = config
        self.gate_up_proj = nn.Linear(config.hidden_size, 2 * config.intermediate_size, bias=False)
        self.down_proj = nn.Linear(config.intermediate_size, config.hidden_size, bias=False)

        self.activation_fn = ACT2FN[config.hidden_act]

    def forward(self, hidden_states: torch.FloatTensor) -> torch.FloatTensor:
        up_states = self.gate_up_proj(hidden_states)

        gate, up_states = up_states.chunk(2, dim=-1)
        up_states = up_states * self.activation_fn(gate)

        return self.down_proj(up_states)

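
# Note on the fused projection above (an editor's sketch, not part of the original file):
# gate_up_proj packs the SwiGLU gate and up projections into one matmul, and chunk(2, dim=-1)
# splits the result back into the two halves, equivalent to two separate Linear layers that
# share the split weight. With the default hidden_size 3072 and intermediate_size 8192:
#
#     mlp = Phi3MLP(config)                    # assuming `config` is a default Phi3Config
#     x = torch.randn(1, 10, 3072)
#     assert mlp(x).shape == (1, 10, 3072)     # gate_up output is (1, 10, 16384) before the chunk
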
# Copied from transformers.models.llama.modeling_llama.repeat_kv with llama->phi
def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
    """
    This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
    num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
    """
    batch, num_key_value_heads, slen, head_dim = hidden_states.shape
    if n_rep == 1:
        return hidden_states
    hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
    return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)

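
# Shape sketch for repeat_kv (an editor's illustration, not part of the original file): with
# 8 KV heads serving 32 query heads, n_rep = 4 and each KV head is tiled contiguously along
# the head axis:
#
#     kv = torch.randn(2, 8, 10, 96)            # (batch, num_key_value_heads, seq, head_dim)
#     assert repeat_kv(kv, 4).shape == (2, 32, 10, 96)
#     assert repeat_kv(kv, 1) is kv             # n_rep == 1 is a no-op
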
class Phi3Attention(nn.Module):
    """Multi-headed attention from 'Attention Is All You Need' paper"""

    def __init__(self, config: Phi3Config, layer_idx: Optional[int] = None):
        super().__init__()
        self.config = config
        self.layer_idx = layer_idx
        if layer_idx is None:
            logger.warning_once(
                f"Instantiating {self.__class__.__name__} without passing a `layer_idx` is not recommended and will "
                "lead to errors during the forward call if caching is used. Please make sure to provide a `layer_idx` "
                "when creating this class."
            )

        self.attention_dropout = config.attention_dropout
        self.hidden_size = config.hidden_size
        self.num_heads = config.num_attention_heads
        self.head_dim = self.hidden_size // self.num_heads
        self.num_key_value_heads = config.num_key_value_heads
        self.num_key_value_groups = self.num_heads // self.num_key_value_heads
        self.max_position_embeddings = config.max_position_embeddings
        self.original_max_position_embeddings = config.original_max_position_embeddings
        self.rope_theta = config.rope_theta
        self.rope_scaling = config.rope_scaling
        self.is_causal = True

        if (self.head_dim * self.num_heads) != self.hidden_size:
            raise ValueError(
                f"hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size}"
                f" and `num_heads`: {self.num_heads})."
            )

        op_size = self.num_heads * self.head_dim + 2 * (self.num_key_value_heads * self.head_dim)
        self.o_proj = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=False)
        self.qkv_proj = nn.Linear(self.hidden_size, op_size, bias=False)
        self._init_rope()

    def _init_rope(self):
        if self.rope_scaling is None:
            self.rotary_emb = Phi3RotaryEmbedding(
                self.head_dim,
                max_position_embeddings=self.max_position_embeddings,
                base=self.rope_theta,
            )
        else:
            scaling_type = self.config.rope_scaling["type"]
            if scaling_type == "su":
                self.rotary_emb = Phi3SuScaledRotaryEmbedding(self.head_dim, self.config)
            elif scaling_type == "yarn":
                self.rotary_emb = Phi3YarnScaledRotaryEmbedding(self.head_dim, self.config)
            else:
                raise ValueError(f"Unknown RoPE scaling type {scaling_type}")

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_value: Optional[Cache] = None,
        output_attentions: bool = False,
        use_cache: bool = False,
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
        logger.warning_once("You are not running the flash-attention implementation, expect numerical differences.")

        bsz, q_len, _ = hidden_states.size()

        qkv = self.qkv_proj(hidden_states)
        query_pos = self.num_heads * self.head_dim
        query_states = qkv[..., :query_pos]
        key_states = qkv[..., query_pos : query_pos + self.num_key_value_heads * self.head_dim]
        value_states = qkv[..., query_pos + self.num_key_value_heads * self.head_dim :]

        query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
        key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
        value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)

        kv_seq_len = key_states.shape[-2]
        if past_key_value is not None:
            if self.layer_idx is None:
                raise ValueError(
                    f"The cache structure has changed since version v4.36. If you are using {self.__class__.__name__} "
                    "for auto-regressive decoding with k/v caching, please make sure to initialize the attention class "
                    "with a layer index."
                )
            kv_seq_len += past_key_value.get_usable_length(kv_seq_len, self.layer_idx)
        cos, sin = self.rotary_emb(value_states, position_ids, seq_len=kv_seq_len)

        query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids)

        if past_key_value is not None:
            cache_kwargs = {"sin": sin, "cos": cos}  # Specific to RoPE models
            key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)

        # repeat k/v heads if n_kv_heads < n_heads
        key_states = repeat_kv(key_states, self.num_key_value_groups)
        value_states = repeat_kv(value_states, self.num_key_value_groups)

        attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim)

        if attn_weights.size() != (bsz, self.num_heads, q_len, kv_seq_len):
            raise ValueError(
                f"Attention weights should be of size {(bsz, self.num_heads, q_len, kv_seq_len)}, but is"
                f" {attn_weights.size()}"
            )

        if attention_mask is not None:
            if attention_mask.size() != (bsz, 1, q_len, kv_seq_len):
                raise ValueError(
                    f"Attention mask should be of size {(bsz, 1, q_len, kv_seq_len)}, but is {attention_mask.size()}"
                )
            attn_weights = attn_weights + attention_mask

        # upcast attention to fp32
        attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(value_states.dtype)
        attn_weights = nn.functional.dropout(attn_weights, p=self.attention_dropout, training=self.training)

        attn_output = torch.matmul(attn_weights, value_states)

        if attn_output.size() != (bsz, self.num_heads, q_len, self.head_dim):
            raise ValueError(
                f"`attn_output` should be of size {(bsz, self.num_heads, q_len, self.head_dim)}, but is"
                f" {attn_output.size()}"
            )

        attn_output = attn_output.transpose(1, 2).contiguous()
        attn_output = attn_output.reshape(bsz, q_len, self.hidden_size)

        attn_output = self.o_proj(attn_output)

        if not output_attentions:
            attn_weights = None

        return attn_output, attn_weights, past_key_value

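
# Layout note for the fused qkv_proj above (an editor's sketch, not part of the original
# file): the projection output concatenates [queries | keys | values] along the last
# dimension, so with 32 query heads, 32 KV heads and head_dim 96 the split points are:
#
#     query_pos = 32 * 96                    # 3072: queries occupy qkv[..., :3072]
#     kv_width = 32 * 96                     # keys occupy qkv[..., 3072:6144], values the rest
#     op_size = query_pos + 2 * kv_width     # 9216 output features in total
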
class Phi3FlashAttention2(Phi3Attention):
    """
    Phi-3 flash attention module. This module inherits from `Phi3Attention` as the weights of the module stay
    untouched. The only required change would be on the forward pass where it needs to correctly call the public API of
    flash attention and deal with padding tokens in case the input contains any of them.
    """

    # Copied from transformers.models.llama.modeling_llama.LlamaFlashAttention2.__init__
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        # TODO: Should be removed once Flash Attention for RoCm is bumped to 2.1.
        # flash_attn<2.1 generates a top-left aligned causal mask, while what is needed here is bottom-right alignment,
        # which was made the default for flash_attn>=2.1. This attribute is used to handle this difference. Reference:
        # https://github.com/Dao-AILab/flash-attention/releases/tag/v2.1.0.
        # Beware that with flash_attn<2.1, using q_seqlen != k_seqlen (except for the case q_seqlen == 1) produces a
        # wrong mask (top-left).
        self._flash_attn_uses_top_left_mask = not is_flash_attn_greater_or_equal_2_10()

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.LongTensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_value: Optional[Cache] = None,
        output_attentions: bool = False,
        use_cache: bool = False,
        **kwargs,
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
        # Phi3FlashAttention2 attention does not support output_attentions

        if not _flash_supports_window_size:
            logger.warning_once(
                "The current flash attention version does not support sliding window attention. Please use "
                "`attn_implementation='eager'` or upgrade the flash-attn library."
            )
            raise ValueError("The current flash attention version does not support sliding window attention.")

        output_attentions = False

        if "padding_mask" in kwargs:
            warnings.warn(
                "Passing `padding_mask` is deprecated and will be removed in v4.37. Please make sure to use "
                "`attention_mask` instead."
            )

            # overwrite attention_mask with padding_mask
            attention_mask = kwargs.pop("padding_mask")

        bsz, q_len, _ = hidden_states.size()

        qkv = self.qkv_proj(hidden_states)
        query_pos = self.num_heads * self.head_dim
        query_states = qkv[..., :query_pos]
        key_states = qkv[..., query_pos : query_pos + self.num_key_value_heads * self.head_dim]
        value_states = qkv[..., query_pos + self.num_key_value_heads * self.head_dim :]

        # Flash attention requires the input to have the shape
        # batch_size x seq_length x head_dim x hidden_dim
        # therefore we just need to keep the original shape
        query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
        key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
        value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)

        kv_seq_len = key_states.shape[-2]
        if past_key_value is not None:
            if self.layer_idx is None:
                raise ValueError(
                    f"The cache structure has changed since version v4.36. If you are using {self.__class__.__name__} "
                    "for auto-regressive decoding with k/v caching, please make sure to initialize the attention class "
                    "with a layer index."
                )
            kv_seq_len += past_key_value.get_usable_length(kv_seq_len, self.layer_idx)

        # Because the input can be padded, the absolute sequence length depends on the max position id.
        rotary_seq_len = max(kv_seq_len, position_ids[:, -1].max().item()) + 1
        cos, sin = self.rotary_emb(value_states, position_ids, seq_len=rotary_seq_len)

        query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids)

        use_sliding_windows = (
            _flash_supports_window_size
            and getattr(self.config, "sliding_window", None) is not None
            and kv_seq_len > self.config.sliding_window
        )

        if past_key_value is not None:
            # Activate slicing cache only if the config has a `sliding_window` attribute
            cache_has_contents = past_key_value.get_seq_length(self.layer_idx) > 0
            if (
                getattr(self.config, "sliding_window", None) is not None
                and kv_seq_len > self.config.sliding_window
                and cache_has_contents
            ):
                slicing_tokens = 1 - self.config.sliding_window

                past_key = past_key_value[self.layer_idx][0]
                past_value = past_key_value[self.layer_idx][1]

                past_key = past_key[:, :, slicing_tokens:, :].contiguous()
                past_value = past_value[:, :, slicing_tokens:, :].contiguous()

                if past_key.shape[-2] != self.config.sliding_window - 1:
                    raise ValueError(
                        f"past key must have a shape of (`batch_size, num_heads, self.config.sliding_window-1, head_dim`), got"
                        f" {past_key.shape}"
                    )

                if attention_mask is not None:
                    attention_mask = attention_mask[:, slicing_tokens:]
                    attention_mask = torch.cat([attention_mask, torch.ones_like(attention_mask[:, -1:])], dim=-1)

            cache_kwargs = {"sin": sin, "cos": cos}  # Specific to RoPE models
            key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)

        # repeat k/v heads if n_kv_heads < n_heads
        key_states = repeat_kv(key_states, self.num_key_value_groups)
        value_states = repeat_kv(value_states, self.num_key_value_groups)

        attn_dropout = self.attention_dropout if self.training else 0.0

        # In PEFT, usually we cast the layer norms in float32 for training stability reasons,
        # therefore the input hidden states get silently cast to float32. Hence, we need to
        # cast them back to the correct dtype just to be sure everything works as expected.
        # This might slow down training & inference so it is recommended to not cast the
        # LayerNorms to fp32.

        if query_states.dtype == torch.float32:
            if torch.is_autocast_enabled():
                target_dtype = torch.get_autocast_gpu_dtype()
            # Handle the case where the model is quantized
            elif hasattr(self.config, "_pre_quantization_dtype"):
                target_dtype = self.config._pre_quantization_dtype
            else:
                target_dtype = self.qkv_proj.weight.dtype

            logger.warning_once(
                f"The input hidden states seem to be silently cast to float32; this might be related to"
                f" the fact you have upcasted embedding or layer norm layers in float32. We will cast back the input to"
                f" {target_dtype}."
            )

            query_states = query_states.to(target_dtype)
            key_states = key_states.to(target_dtype)
            value_states = value_states.to(target_dtype)

        # Reshape to the expected shape for Flash Attention
        query_states = query_states.transpose(1, 2)
        key_states = key_states.transpose(1, 2)
        value_states = value_states.transpose(1, 2)

        attn_output = self._flash_attention_forward(
            query_states,
            key_states,
            value_states,
            attention_mask,
            q_len,
            dropout=attn_dropout,
            use_sliding_windows=use_sliding_windows,
        )

        attn_output = attn_output.reshape(bsz, q_len, self.hidden_size).contiguous()
        attn_output = self.o_proj(attn_output)

        if not output_attentions:
            attn_weights = None

        return attn_output, attn_weights, past_key_value

    # Copied from transformers.models.mistral.modeling_mistral.MistralFlashAttention2._flash_attention_forward
    def _flash_attention_forward(
        self,
        query_states,
        key_states,
        value_states,
        attention_mask,
        query_length,
        dropout=0.0,
        softmax_scale=None,
        use_sliding_windows=False,
    ):
        """
        Calls the forward method of Flash Attention - if the input hidden states contain at least one padding token,
        first unpad the input, then compute the attention scores and pad the final attention scores.

        Args:
            query_states (`torch.Tensor`):
                Input query states to be passed to Flash Attention API
            key_states (`torch.Tensor`):
                Input key states to be passed to Flash Attention API
            value_states (`torch.Tensor`):
                Input value states to be passed to Flash Attention API
            attention_mask (`torch.Tensor`):
                The padding mask - corresponds to a tensor of size `(batch_size, seq_len)` where 0 stands for the
                position of padding tokens and 1 for the position of non-padding tokens.
            dropout (`float`):
                Attention dropout
            softmax_scale (`float`, *optional*):
                The scaling of QK^T before applying softmax. Defaults to 1 / sqrt(head_dim)
            use_sliding_windows (`bool`, *optional*):
                Whether to activate sliding window attention.
        """
        if not self._flash_attn_uses_top_left_mask:
            causal = self.is_causal
        else:
            # TODO: Remove the `query_length != 1` check once Flash Attention for RoCm is bumped to 2.1. For details,
            # please see the comment in LlamaFlashAttention2 __init__.
            causal = self.is_causal and query_length != 1

        # Contains at least one padding token in the sequence
        if attention_mask is not None:
            batch_size = query_states.shape[0]
            query_states, key_states, value_states, indices_q, cu_seq_lens, max_seq_lens = self._upad_input(
                query_states, key_states, value_states, attention_mask, query_length
            )

            cu_seqlens_q, cu_seqlens_k = cu_seq_lens
            max_seqlen_in_batch_q, max_seqlen_in_batch_k = max_seq_lens

            if not use_sliding_windows:
                attn_output_unpad = flash_attn_varlen_func(
                    query_states,
                    key_states,
                    value_states,
                    cu_seqlens_q=cu_seqlens_q,
                    cu_seqlens_k=cu_seqlens_k,
                    max_seqlen_q=max_seqlen_in_batch_q,
                    max_seqlen_k=max_seqlen_in_batch_k,
                    dropout_p=dropout,
                    softmax_scale=softmax_scale,
                    causal=causal,
                )
            else:
                attn_output_unpad = flash_attn_varlen_func(
                    query_states,
                    key_states,
                    value_states,
                    cu_seqlens_q=cu_seqlens_q,
                    cu_seqlens_k=cu_seqlens_k,
                    max_seqlen_q=max_seqlen_in_batch_q,
                    max_seqlen_k=max_seqlen_in_batch_k,
                    dropout_p=dropout,
                    softmax_scale=softmax_scale,
                    causal=causal,
                    window_size=(self.config.sliding_window, self.config.sliding_window),
                )

            attn_output = pad_input(attn_output_unpad, indices_q, batch_size, query_length)
        else:
            if not use_sliding_windows:
                attn_output = flash_attn_func(
                    query_states,
                    key_states,
                    value_states,
                    dropout,
                    softmax_scale=softmax_scale,
                    causal=causal,
                )
            else:
                attn_output = flash_attn_func(
                    query_states,
                    key_states,
                    value_states,
                    dropout,
                    softmax_scale=softmax_scale,
                    causal=causal,
                    window_size=(self.config.sliding_window, self.config.sliding_window),
                )

        return attn_output

    # Copied from transformers.models.mistral.modeling_mistral.MistralFlashAttention2._upad_input
    def _upad_input(self, query_layer, key_layer, value_layer, attention_mask, query_length):
        batch_size, kv_seq_len, num_heads, head_dim = key_layer.shape

        # On the first iteration we need to properly re-create the padding mask
        # by slicing it on the proper place
        if kv_seq_len != attention_mask.shape[-1]:
            attention_mask_num_tokens = attention_mask.shape[-1]
            attention_mask = attention_mask[:, attention_mask_num_tokens - kv_seq_len :]

        indices_k, cu_seqlens_k, max_seqlen_in_batch_k = _get_unpad_data(attention_mask)

        key_layer = index_first_axis(key_layer.reshape(batch_size * kv_seq_len, num_heads, head_dim), indices_k)
        value_layer = index_first_axis(value_layer.reshape(batch_size * kv_seq_len, num_heads, head_dim), indices_k)

        if query_length == kv_seq_len:
            query_layer = index_first_axis(
                query_layer.reshape(batch_size * kv_seq_len, num_heads, head_dim), indices_k
            )
            cu_seqlens_q = cu_seqlens_k
            max_seqlen_in_batch_q = max_seqlen_in_batch_k
            indices_q = indices_k
        elif query_length == 1:
            max_seqlen_in_batch_q = 1
            cu_seqlens_q = torch.arange(
                batch_size + 1, dtype=torch.int32, device=query_layer.device
            )  # There is a memcpy here, that is very bad.
            indices_q = cu_seqlens_q[:-1]
            query_layer = query_layer.squeeze(1)
        else:
            # The -q_len: slice assumes left padding.
            attention_mask = attention_mask[:, -query_length:]
            query_layer, indices_q, cu_seqlens_q, max_seqlen_in_batch_q = unpad_input(query_layer, attention_mask)

        return (
            query_layer,
            key_layer,
            value_layer,
            indices_q,
            (cu_seqlens_q, cu_seqlens_k),
            (max_seqlen_in_batch_q, max_seqlen_in_batch_k),
        )

# copied from transformers.models.llama.modeling_llama.LlamaSdpaAttention with Llama->Phi3
# TODO @Arthur no longer copied from Llama after static cache
class Phi3SdpaAttention(Phi3Attention):
    """
    Phi3 attention module using torch.nn.functional.scaled_dot_product_attention. This module inherits from
    `Phi3Attention` as the weights of the module stay untouched. The only changes are on the forward pass to adapt to
    the SDPA API.
    """

    # Adapted from Phi3Attention.forward
    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_value: Optional[Cache] = None,
        output_attentions: bool = False,
        use_cache: bool = False,
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
        if output_attentions:
            # TODO: Improve this warning with e.g. `model.config.attn_implementation = "manual"` once this is implemented.
            logger.warning_once(
                "Phi3Model is using Phi3SdpaAttention, but `torch.nn.functional.scaled_dot_product_attention` does not "
                "support `output_attentions=True`. Falling back to the manual attention implementation, "
                "but specifying the manual implementation will be required from Transformers version v5.0.0 onwards. "
                'This warning can be removed using the argument `attn_implementation="eager"` when loading the model.'
            )
            return super().forward(
                hidden_states=hidden_states,
                attention_mask=attention_mask,
                position_ids=position_ids,
                past_key_value=past_key_value,
                output_attentions=output_attentions,
                use_cache=use_cache,
            )

        bsz, q_len, _ = hidden_states.size()

        qkv = self.qkv_proj(hidden_states)
        query_pos = self.num_heads * self.head_dim
        query_states = qkv[..., :query_pos]
        key_states = qkv[..., query_pos : query_pos + self.num_key_value_heads * self.head_dim]
        value_states = qkv[..., query_pos + self.num_key_value_heads * self.head_dim :]

        query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
        key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
        value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)

        kv_seq_len = key_states.shape[-2]
        if past_key_value is not None:
            kv_seq_len += past_key_value.get_usable_length(kv_seq_len, self.layer_idx)
        cos, sin = self.rotary_emb(value_states, position_ids, seq_len=kv_seq_len)

        query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids)

        if past_key_value is not None:
            cache_kwargs = {"sin": sin, "cos": cos}  # Specific to RoPE models
            key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)

        key_states = repeat_kv(key_states, self.num_key_value_groups)
        value_states = repeat_kv(value_states, self.num_key_value_groups)

        if attention_mask is not None:
            if attention_mask.size() != (bsz, 1, q_len, kv_seq_len):
                raise ValueError(
                    f"Attention mask should be of size {(bsz, 1, q_len, kv_seq_len)}, but is {attention_mask.size()}"
                )

        # SDPA with memory-efficient backend is currently (torch==2.1.2) bugged with non-contiguous inputs with custom
        # attn_mask. Reference: https://github.com/pytorch/pytorch/issues/112577.
        if query_states.device.type == "cuda" and attention_mask is not None:
            query_states = query_states.contiguous()
            key_states = key_states.contiguous()
            value_states = value_states.contiguous()

        attn_output = torch.nn.functional.scaled_dot_product_attention(
            query_states,
            key_states,
            value_states,
            attn_mask=attention_mask,
            dropout_p=self.attention_dropout if self.training else 0.0,
            # The q_len > 1 is necessary to match with AttentionMaskConverter.to_causal_4d that does not create a
            # causal mask in case q_len == 1.
            is_causal=self.is_causal and attention_mask is None and q_len > 1,
        )

        attn_output = attn_output.transpose(1, 2).contiguous()
        attn_output = attn_output.view(bsz, q_len, self.hidden_size)

        attn_output = self.o_proj(attn_output)

        return attn_output, None, past_key_value


PHI3_ATTENTION_CLASSES = {
    "eager": Phi3Attention,
    "flash_attention_2": Phi3FlashAttention2,
    "sdpa": Phi3SdpaAttention,
}

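
# Usage sketch for the dispatch table above (an editor's illustration, not part of the
# original file): Phi3DecoderLayer below picks its attention class via
# config._attn_implementation, which `from_pretrained` populates from the
# `attn_implementation` argument, e.g.:
#
#     model = AutoModelForCausalLM.from_pretrained(
#         "microsoft/Phi-3-mini-4k-instruct",
#         attn_implementation="flash_attention_2",  # or "eager"
#         trust_remote_code=True,
#     )
#
# Note that Phi3PreTrainedModel below sets `_supports_sdpa = False`, so the "sdpa" entry is
# registered here but not reachable through the standard loading path.
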
class Phi3DecoderLayer(nn.Module):
    def __init__(self, config: Phi3Config, layer_idx: int):
        super().__init__()

        self.config = config
        self.self_attn = PHI3_ATTENTION_CLASSES[config._attn_implementation](config, layer_idx=layer_idx)

        self.mlp = Phi3MLP(config)
        self.input_layernorm = Phi3RMSNorm(config.hidden_size, eps=config.rms_norm_eps)

        self.resid_attn_dropout = nn.Dropout(config.resid_pdrop)
        self.resid_mlp_dropout = nn.Dropout(config.resid_pdrop)
        self.post_attention_layernorm = Phi3RMSNorm(config.hidden_size, eps=config.rms_norm_eps)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_value: Optional[Tuple[torch.Tensor]] = None,
        output_attentions: Optional[bool] = False,
        use_cache: Optional[bool] = False,
        **kwargs,
    ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
        """
        Args:
            hidden_states (`torch.FloatTensor`):
                input to the layer of shape `(batch, seq_len, embed_dim)`
            attention_mask (`torch.FloatTensor`, *optional*): attention mask of size
                `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
            position_ids (`torch.LongTensor` of shape `({0})`, *optional*):
                Indices of positions of each input sequence tokens in the position embeddings. Selected in the range
                `[0, config.n_positions - 1]`. [What are position IDs?](../glossary#position-ids)
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
            use_cache (`bool`, *optional*):
                If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
                (see `past_key_values`).
            past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states
        """
        if "padding_mask" in kwargs:
            warnings.warn(
                "Passing `padding_mask` is deprecated and will be removed in v4.37. Please make sure to use "
                "`attention_mask` instead."
            )

        residual = hidden_states

        hidden_states = self.input_layernorm(hidden_states)

        # Self Attention
        attn_outputs, self_attn_weights, present_key_value = self.self_attn(
            hidden_states=hidden_states,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_value=past_key_value,
            output_attentions=output_attentions,
            use_cache=use_cache,
        )

        hidden_states = residual + self.resid_attn_dropout(attn_outputs)

        residual = hidden_states
        hidden_states = self.post_attention_layernorm(hidden_states)
        hidden_states = self.mlp(hidden_states)
        hidden_states = residual + self.resid_mlp_dropout(hidden_states)

        outputs = (hidden_states,)

        if output_attentions:
            outputs += (self_attn_weights,)

        if use_cache:
            outputs += (present_key_value,)

        return outputs


PHI3_START_DOCSTRING = r"""
    This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
    library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
    etc.)

    This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
    Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage
    and behavior.

    Parameters:
        config ([`Phi3Config`]):
            Model configuration class with all the parameters of the model. Initializing with a config file does not
            load the weights associated with the model, only the configuration. Check out the
            [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""


@add_start_docstrings(
    "The bare Phi-3 model outputting raw hidden-states without any specific head on top.",
    PHI3_START_DOCSTRING,
)
class Phi3PreTrainedModel(PreTrainedModel):
    config_class = Phi3Config
    base_model_prefix = "model"
    supports_gradient_checkpointing = True
    _no_split_modules = ["Phi3DecoderLayer"]
    _skip_keys_device_placement = "past_key_values"
    _supports_flash_attn_2 = True
    _supports_sdpa = False
    _supports_cache_class = True

    _version = "0.0.5"

    def _init_weights(self, module):
        std = self.config.initializer_range
        if isinstance(module, nn.Linear):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.padding_idx is not None:
                module.weight.data[module.padding_idx].zero_()


PHI3_INPUTS_DOCSTRING = r"""
    Args:
        input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
            Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
            it.

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            [What are input IDs?](../glossary#input-ids)
        attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
            Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.

            [What are attention masks?](../glossary#attention-mask)

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            If `past_key_values` is used, optionally only the last `input_ids` have to be input (see
            `past_key_values`).

            If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`]
            and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more
            information on the default strategy.

            - 1 indicates the head is **not masked**,
            - 0 indicates the head is **masked**.
        position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
            config.n_positions - 1]`.

            [What are position IDs?](../glossary#position-ids)
        past_key_values (`Cache` or `tuple(tuple(torch.FloatTensor))`, *optional*):
            Pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
            blocks) that can be used to speed up sequential decoding. This typically consists of the `past_key_values`
            returned by the model at a previous stage of decoding, when `use_cache=True` or `config.use_cache=True`.

            Two formats are allowed:
            - a [`~cache_utils.Cache`] instance;
            - Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of
            shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`). This is also known as the legacy
            cache format.

            The model will output the same cache format that is fed as input. If no `past_key_values` are passed, the
            legacy cache format will be returned.

            If `past_key_values` are used, the user can optionally input only the last `input_ids` (those that don't
            have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `input_ids`
            of shape `(batch_size, sequence_length)`.
        inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
            Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
            is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
            model's internal embedding lookup matrix.
        use_cache (`bool`, *optional*):
            If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
            `past_key_values`).
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
            tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
|
1021 |
+
more detail.
|
1022 |
+
return_dict (`bool`, *optional*):
|
1023 |
+
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
|
1024 |
+
"""
|
1025 |
+
|
1026 |
+
|
1027 |
+
@add_start_docstrings(
|
1028 |
+
"The bare Phi-3 model outputting raw hidden-states without any specific head on top.",
|
1029 |
+
PHI3_START_DOCSTRING,
|
1030 |
+
)
|
1031 |
+
class Phi3Model(Phi3PreTrainedModel):
|
1032 |
+
"""
|
1033 |
+
Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`Phi3DecoderLayer`]
|
1034 |
+
|
1035 |
+
Args:
|
1036 |
+
config: Phi3Config
|
1037 |
+
"""
|
1038 |
+
|
1039 |
+
def __init__(self, config: Phi3Config):
|
1040 |
+
super().__init__(config)
|
1041 |
+
self.padding_idx = config.pad_token_id
|
1042 |
+
self.vocab_size = config.vocab_size
|
1043 |
+
|
1044 |
+
self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
|
1045 |
+
self.embed_dropout = nn.Dropout(config.embd_pdrop)
|
1046 |
+
self.layers = nn.ModuleList(
|
1047 |
+
[Phi3DecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
|
1048 |
+
)
|
1049 |
+
self._attn_implementation = config._attn_implementation
|
1050 |
+
self.norm = Phi3RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
|
1051 |
+
|
1052 |
+
self.gradient_checkpointing = False
|
1053 |
+
# Initialize weights and apply final processing
|
1054 |
+
self.post_init()
|
1055 |
+
|
1056 |
+
def get_input_embeddings(self):
|
1057 |
+
return self.embed_tokens
|
1058 |
+
|
1059 |
+
def set_input_embeddings(self, value):
|
1060 |
+
self.embed_tokens = value
|
1061 |
+
|
1062 |
+
@add_start_docstrings_to_model_forward(PHI3_INPUTS_DOCSTRING)
|
1063 |
+
def forward(
|
1064 |
+
self,
|
1065 |
+
input_ids: torch.LongTensor = None,
|
1066 |
+
attention_mask: Optional[torch.Tensor] = None,
|
1067 |
+
position_ids: Optional[torch.LongTensor] = None,
|
1068 |
+
past_key_values: Optional[List[torch.FloatTensor]] = None,
|
1069 |
+
inputs_embeds: Optional[torch.FloatTensor] = None,
|
1070 |
+
use_cache: Optional[bool] = None,
|
1071 |
+
output_attentions: Optional[bool] = None,
|
1072 |
+
output_hidden_states: Optional[bool] = None,
|
1073 |
+
return_dict: Optional[bool] = None,
|
1074 |
+
) -> Union[Tuple, BaseModelOutputWithPast]:
|
1075 |
+
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
|
1076 |
+
output_hidden_states = (
|
1077 |
+
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
|
1078 |
+
)
|
1079 |
+
use_cache = use_cache if use_cache is not None else self.config.use_cache
|
1080 |
+
|
1081 |
+
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
|
1082 |
+
|
1083 |
+
# retrieve input_ids and inputs_embeds
|
1084 |
+
if input_ids is not None and inputs_embeds is not None:
|
1085 |
+
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
|
1086 |
+
elif input_ids is not None:
|
1087 |
+
batch_size, seq_length = input_ids.shape[:2]
|
1088 |
+
elif inputs_embeds is not None:
|
1089 |
+
batch_size, seq_length = inputs_embeds.shape[:2]
|
1090 |
+
else:
|
1091 |
+
raise ValueError("You have to specify either input_ids or inputs_embeds")
|
1092 |
+
|
1093 |
+
past_key_values_length = 0
|
1094 |
+
|
1095 |
+
if self.gradient_checkpointing and self.training:
|
1096 |
+
if use_cache:
|
1097 |
+
logger.warning_once(
|
1098 |
+
"`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
|
1099 |
+
)
|
1100 |
+
use_cache = False
|
1101 |
+
|
1102 |
+
if use_cache:
|
1103 |
+
use_legacy_cache = not isinstance(past_key_values, Cache)
|
1104 |
+
if use_legacy_cache:
|
1105 |
+
past_key_values = DynamicCache.from_legacy_cache(past_key_values)
|
1106 |
+
past_key_values_length = past_key_values.get_usable_length(seq_length)
|
1107 |
+
|
1108 |
+
if position_ids is None:
|
1109 |
+
device = input_ids.device if input_ids is not None else inputs_embeds.device
|
1110 |
+
position_ids = torch.arange(
|
1111 |
+
past_key_values_length, seq_length + past_key_values_length, dtype=torch.long, device=device
|
1112 |
+
)
|
1113 |
+
position_ids = position_ids.unsqueeze(0).view(-1, seq_length)
|
1114 |
+
else:
|
1115 |
+
position_ids = position_ids.view(-1, seq_length).long()
|
1116 |
+
|
1117 |
+
if inputs_embeds is None:
|
1118 |
+
inputs_embeds = self.embed_tokens(input_ids)
|
1119 |
+
|
1120 |
+
if attention_mask is not None and self._attn_implementation == "flash_attention_2" and use_cache:
|
1121 |
+
is_padding_right = attention_mask[:, -1].sum().item() != batch_size
|
1122 |
+
if is_padding_right:
|
1123 |
+
raise ValueError(
|
1124 |
+
"You are attempting to perform batched generation with padding_side='right'"
|
1125 |
+
" this may lead to unexpected behaviour for Flash Attention version of Phi3. Make sure to "
|
1126 |
+
" call `tokenizer.padding_side = 'left'` before tokenizing the input. "
|
1127 |
+
)
|
1128 |
+
|
1129 |
+
if self._attn_implementation == "flash_attention_2":
|
1130 |
+
# 2d mask is passed through the layers
|
1131 |
+
attention_mask = attention_mask if (attention_mask is not None and 0 in attention_mask) else None
|
1132 |
+
else:
|
1133 |
+
# 4d mask is passed through the layers
|
1134 |
+
attention_mask = _prepare_4d_causal_attention_mask(
|
1135 |
+
attention_mask,
|
1136 |
+
(batch_size, seq_length),
|
1137 |
+
inputs_embeds,
|
1138 |
+
past_key_values_length,
|
1139 |
+
sliding_window=self.config.sliding_window,
|
1140 |
+
)
|
1141 |
+
|
1142 |
+
hidden_states = inputs_embeds
|
1143 |
+
|
1144 |
+
# decoder layers
|
1145 |
+
all_hidden_states = () if output_hidden_states else None
|
1146 |
+
all_self_attns = () if output_attentions else None
|
1147 |
+
next_decoder_cache = None
|
1148 |
+
|
1149 |
+
for decoder_layer in self.layers:
|
1150 |
+
if output_hidden_states:
|
1151 |
+
all_hidden_states += (hidden_states,)
|
1152 |
+
|
1153 |
+
if self.gradient_checkpointing and self.training:
|
1154 |
+
layer_outputs = self._gradient_checkpointing_func(
|
1155 |
+
decoder_layer.__call__,
|
1156 |
+
hidden_states,
|
1157 |
+
attention_mask,
|
1158 |
+
position_ids,
|
1159 |
+
past_key_values,
|
1160 |
+
output_attentions,
|
1161 |
+
use_cache,
|
1162 |
+
)
|
1163 |
+
else:
|
1164 |
+
layer_outputs = decoder_layer(
|
1165 |
+
hidden_states,
|
1166 |
+
attention_mask=attention_mask,
|
1167 |
+
position_ids=position_ids,
|
1168 |
+
past_key_value=past_key_values,
|
1169 |
+
output_attentions=output_attentions,
|
1170 |
+
use_cache=use_cache,
|
1171 |
+
)
|
1172 |
+
|
1173 |
+
hidden_states = layer_outputs[0]
|
1174 |
+
|
1175 |
+
if use_cache:
|
1176 |
+
next_decoder_cache = layer_outputs[2 if output_attentions else 1]
|
1177 |
+
|
1178 |
+
if output_attentions:
|
1179 |
+
all_self_attns += (layer_outputs[1],)
|
1180 |
+
|
1181 |
+
hidden_states = self.norm(hidden_states)
|
1182 |
+
|
1183 |
+
# add hidden states from the last decoder layer
|
1184 |
+
if output_hidden_states:
|
1185 |
+
all_hidden_states += (hidden_states,)
|
1186 |
+
|
1187 |
+
next_cache = None
|
1188 |
+
if use_cache:
|
1189 |
+
next_cache = next_decoder_cache.to_legacy_cache() if use_legacy_cache else next_decoder_cache
|
1190 |
+
if not return_dict:
|
1191 |
+
return tuple(v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns] if v is not None)
|
1192 |
+
return BaseModelOutputWithPast(
|
1193 |
+
last_hidden_state=hidden_states,
|
1194 |
+
past_key_values=next_cache,
|
1195 |
+
hidden_states=all_hidden_states,
|
1196 |
+
attentions=all_self_attns,
|
1197 |
+
)
|
1198 |
+
|
1199 |
+
|
1200 |
+
class Phi3ForCausalLM(Phi3PreTrainedModel):
|
1201 |
+
_tied_weights_keys = ["lm_head.weight"]
|
1202 |
+
|
1203 |
+
# Copied from transformers.models.llama.modeling_llama.LlamaForCausalLM.__init__ with Llama->Phi3
|
1204 |
+
def __init__(self, config):
|
1205 |
+
super().__init__(config)
|
1206 |
+
self.model = Phi3Model(config)
|
1207 |
+
self.vocab_size = config.vocab_size
|
1208 |
+
self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
|
1209 |
+
|
1210 |
+
# Initialize weights and apply final processing
|
1211 |
+
self.post_init()
|
1212 |
+
|
1213 |
+
# Copied from transformers.models.llama.modeling_llama.LlamaForCausalLM.get_input_embeddings
|
1214 |
+
def get_input_embeddings(self):
|
1215 |
+
return self.model.embed_tokens
|
1216 |
+
|
1217 |
+
# Copied from transformers.models.llama.modeling_llama.LlamaForCausalLM.set_input_embeddings
|
1218 |
+
def set_input_embeddings(self, value):
|
1219 |
+
self.model.embed_tokens = value
|
1220 |
+
|
1221 |
+
# Copied from transformers.models.llama.modeling_llama.LlamaForCausalLM.get_output_embeddings
|
1222 |
+
def get_output_embeddings(self):
|
1223 |
+
return self.lm_head
|
1224 |
+
|
1225 |
+
# Copied from transformers.models.llama.modeling_llama.LlamaForCausalLM.set_output_embeddings
|
1226 |
+
def set_output_embeddings(self, new_embeddings):
|
1227 |
+
self.lm_head = new_embeddings
|
1228 |
+
|
1229 |
+
# Copied from transformers.models.llama.modeling_llama.LlamaForCausalLM.set_decoder
|
1230 |
+
def set_decoder(self, decoder):
|
1231 |
+
self.model = decoder
|
1232 |
+
|
1233 |
+
# Copied from transformers.models.llama.modeling_llama.LlamaForCausalLM.get_decoder
|
1234 |
+
def get_decoder(self):
|
1235 |
+
return self.model
|
1236 |
+
|
1237 |
+
# Ignore copy
|
1238 |
+
@add_start_docstrings_to_model_forward(PHI3_INPUTS_DOCSTRING)
|
1239 |
+
@replace_return_docstrings(output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC)
|
1240 |
+
def forward(
|
1241 |
+
self,
|
1242 |
+
input_ids: torch.LongTensor = None,
|
1243 |
+
attention_mask: Optional[torch.Tensor] = None,
|
1244 |
+
position_ids: Optional[torch.LongTensor] = None,
|
1245 |
+
past_key_values: Optional[List[torch.FloatTensor]] = None,
|
1246 |
+
inputs_embeds: Optional[torch.FloatTensor] = None,
|
1247 |
+
labels: Optional[torch.LongTensor] = None,
|
1248 |
+
use_cache: Optional[bool] = None,
|
1249 |
+
output_attentions: Optional[bool] = None,
|
1250 |
+
output_hidden_states: Optional[bool] = None,
|
1251 |
+
return_dict: Optional[bool] = None,
|
1252 |
+
) -> Union[Tuple, CausalLMOutputWithPast]:
|
1253 |
+
r"""
|
1254 |
+
Args:
|
1255 |
+
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
|
1256 |
+
Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
|
1257 |
+
config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
|
1258 |
+
(masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
|
1259 |
+
|
1260 |
+
Returns:
|
1261 |
+
|
1262 |
+
Example:
|
1263 |
+
|
1264 |
+
```python
|
1265 |
+
>>> from transformers import AutoTokenizer, Phi3ForCausalLM
|
1266 |
+
|
1267 |
+
>>> model = Phi3ForCausalLM.from_pretrained("microsoft/phi-3-mini-4k-instruct")
|
1268 |
+
>>> tokenizer = AutoTokenizer.from_pretrained("microsoft/phi-3-mini-4k-instruct")
|
1269 |
+
|
1270 |
+
>>> prompt = "This is an example script ."
|
1271 |
+
>>> inputs = tokenizer(prompt, return_tensors="pt")
|
1272 |
+
|
1273 |
+
>>> # Generate
|
1274 |
+
>>> generate_ids = model.generate(inputs.input_ids, max_length=30)
|
1275 |
+
>>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
|
1276 |
+
'This is an example script .\n Certainly! Below is a sample script that demonstrates a simple task, such as calculating the sum'
|
1277 |
+
```"""
|
1278 |
+
|
1279 |
+
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
|
1280 |
+
output_hidden_states = (
|
1281 |
+
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
|
1282 |
+
)
|
1283 |
+
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
|
1284 |
+
|
1285 |
+
# decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
|
1286 |
+
outputs = self.model(
|
1287 |
+
input_ids=input_ids,
|
1288 |
+
attention_mask=attention_mask,
|
1289 |
+
position_ids=position_ids,
|
1290 |
+
past_key_values=past_key_values,
|
1291 |
+
inputs_embeds=inputs_embeds,
|
1292 |
+
use_cache=use_cache,
|
1293 |
+
output_attentions=output_attentions,
|
1294 |
+
output_hidden_states=output_hidden_states,
|
1295 |
+
return_dict=return_dict,
|
1296 |
+
)
|
1297 |
+
|
1298 |
+
hidden_states = outputs[0]
|
1299 |
+
logits = self.lm_head(hidden_states)
|
1300 |
+
logits = logits.float()
|
1301 |
+
|
1302 |
+
loss = None
|
1303 |
+
if labels is not None:
|
1304 |
+
# Shift so that tokens < n predict n
|
1305 |
+
shift_logits = logits[..., :-1, :].contiguous()
|
1306 |
+
shift_labels = labels[..., 1:].contiguous()
|
1307 |
+
# Flatten the tokens
|
1308 |
+
loss_fct = CrossEntropyLoss()
|
1309 |
+
shift_logits = shift_logits.view(-1, self.config.vocab_size)
|
1310 |
+
shift_labels = shift_labels.view(-1)
|
1311 |
+
# Enable model parallelism
|
1312 |
+
shift_labels = shift_labels.to(shift_logits.device)
|
1313 |
+
loss = loss_fct(shift_logits, shift_labels)
|
1314 |
+
|
1315 |
+
if not return_dict:
|
1316 |
+
output = (logits,) + outputs[1:]
|
1317 |
+
return (loss,) + output if loss is not None else output
|
1318 |
+
|
1319 |
+
return CausalLMOutputWithPast(
|
1320 |
+
loss=loss,
|
1321 |
+
logits=logits,
|
1322 |
+
past_key_values=outputs.past_key_values,
|
1323 |
+
hidden_states=outputs.hidden_states,
|
1324 |
+
attentions=outputs.attentions,
|
1325 |
+
)
|
1326 |
+
|
1327 |
+
# Copied from transformers.models.persimmon.modeling_persimmon.PersimmonForCausalLM.prepare_inputs_for_generation
|
1328 |
+
def prepare_inputs_for_generation(
|
1329 |
+
self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, **kwargs
|
1330 |
+
):
|
1331 |
+
if past_key_values is not None:
|
1332 |
+
if isinstance(past_key_values, Cache):
|
1333 |
+
cache_length = past_key_values.get_seq_length()
|
1334 |
+
past_length = past_key_values.seen_tokens
|
1335 |
+
max_cache_length = past_key_values.get_max_length()
|
1336 |
+
else:
|
1337 |
+
cache_length = past_length = past_key_values[0][0].shape[2]
|
1338 |
+
max_cache_length = None
|
1339 |
+
|
1340 |
+
# Keep only the unprocessed tokens:
|
1341 |
+
# 1 - If the length of the attention_mask exceeds the length of input_ids, then we are in a setting where
|
1342 |
+
# some of the inputs are exclusively passed as part of the cache (e.g. when passing input_embeds as
|
1343 |
+
# input)
|
1344 |
+
if attention_mask is not None and attention_mask.shape[1] > input_ids.shape[1]:
|
1345 |
+
input_ids = input_ids[:, -(attention_mask.shape[1] - past_length) :]
|
1346 |
+
# 2 - If the past_length is smaller than input_ids', then input_ids holds all input tokens. We can discard
|
1347 |
+
# input_ids based on the past_length.
|
1348 |
+
elif past_length < input_ids.shape[1]:
|
1349 |
+
input_ids = input_ids[:, past_length:]
|
1350 |
+
# 3 - Otherwise (past_length >= input_ids.shape[1]), let's assume input_ids only has unprocessed tokens.
|
1351 |
+
|
1352 |
+
# If we are about to go beyond the maximum cache length, we need to crop the input attention mask.
|
1353 |
+
if (
|
1354 |
+
max_cache_length is not None
|
1355 |
+
and attention_mask is not None
|
1356 |
+
and cache_length + input_ids.shape[1] > max_cache_length
|
1357 |
+
):
|
1358 |
+
attention_mask = attention_mask[:, -max_cache_length:]
|
1359 |
+
|
1360 |
+
position_ids = kwargs.get("position_ids", None)
|
1361 |
+
if attention_mask is not None and position_ids is None:
|
1362 |
+
# create position_ids on the fly for batch generation
|
1363 |
+
position_ids = attention_mask.long().cumsum(-1) - 1
|
1364 |
+
position_ids.masked_fill_(attention_mask == 0, 1)
|
1365 |
+
if past_key_values:
|
1366 |
+
position_ids = position_ids[:, -input_ids.shape[1] :]
|
1367 |
+
|
1368 |
+
# if `inputs_embeds` are passed, we only want to use them in the 1st generation step
|
1369 |
+
if inputs_embeds is not None and past_key_values is None:
|
1370 |
+
model_inputs = {"inputs_embeds": inputs_embeds}
|
1371 |
+
else:
|
1372 |
+
model_inputs = {"input_ids": input_ids}
|
1373 |
+
|
1374 |
+
model_inputs.update(
|
1375 |
+
{
|
1376 |
+
"position_ids": position_ids,
|
1377 |
+
"past_key_values": past_key_values,
|
1378 |
+
"use_cache": kwargs.get("use_cache"),
|
1379 |
+
"attention_mask": attention_mask,
|
1380 |
+
}
|
1381 |
+
)
|
1382 |
+
return model_inputs
|
1383 |
+
|
1384 |
+
@staticmethod
|
1385 |
+
# Copied from transformers.models.llama.modeling_llama.LlamaForCausalLM._reorder_cache
|
1386 |
+
def _reorder_cache(past_key_values, beam_idx):
|
1387 |
+
reordered_past = ()
|
1388 |
+
for layer_past in past_key_values:
|
1389 |
+
reordered_past += (
|
1390 |
+
tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past),
|
1391 |
+
)
|
1392 |
+
return reordered_past
|
1393 |
+
|
1394 |
+
|
1395 |
+
@add_start_docstrings(
|
1396 |
+
"""
|
1397 |
+
The [`Phi3Model`] with a sequence classification head on top (linear layer).
|
1398 |
+
|
1399 |
+
[`Phi3ForSequenceClassification`] uses the last token in order to do the classification, as other causal models
|
1400 |
+
(e.g. GPT-2) do.
|
1401 |
+
|
1402 |
+
Since it does classification on the last token, it requires to know the position of the last token. If a
|
1403 |
+
`pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If
|
1404 |
+
no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the
|
1405 |
+
padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in
|
1406 |
+
each row of the batch).
|
1407 |
+
""",
|
1408 |
+
PHI3_START_DOCSTRING,
|
1409 |
+
)
|
1410 |
+
# Copied from transformers.models.llama.modeling_llama.LlamaForSequenceClassification with Llama->Phi3, LLAMA->PHI3, self.transformer->self.model, transformer_outputs->model_outputs
|
1411 |
+
class Phi3ForSequenceClassification(Phi3PreTrainedModel):
|
1412 |
+
def __init__(self, config):
|
1413 |
+
super().__init__(config)
|
1414 |
+
self.num_labels = config.num_labels
|
1415 |
+
self.model = Phi3Model(config)
|
1416 |
+
self.score = nn.Linear(config.hidden_size, self.num_labels, bias=False)
|
1417 |
+
|
1418 |
+
# Initialize weights and apply final processing
|
1419 |
+
self.post_init()
|
1420 |
+
|
1421 |
+
def get_input_embeddings(self):
|
1422 |
+
return self.model.embed_tokens
|
1423 |
+
|
1424 |
+
def set_input_embeddings(self, value):
|
1425 |
+
self.model.embed_tokens = value
|
1426 |
+
|
1427 |
+
@add_start_docstrings_to_model_forward(PHI3_INPUTS_DOCSTRING)
|
1428 |
+
def forward(
|
1429 |
+
self,
|
1430 |
+
input_ids: torch.LongTensor = None,
|
1431 |
+
attention_mask: Optional[torch.Tensor] = None,
|
1432 |
+
position_ids: Optional[torch.LongTensor] = None,
|
1433 |
+
past_key_values: Optional[List[torch.FloatTensor]] = None,
|
1434 |
+
inputs_embeds: Optional[torch.FloatTensor] = None,
|
1435 |
+
labels: Optional[torch.LongTensor] = None,
|
1436 |
+
use_cache: Optional[bool] = None,
|
1437 |
+
output_attentions: Optional[bool] = None,
|
1438 |
+
output_hidden_states: Optional[bool] = None,
|
1439 |
+
return_dict: Optional[bool] = None,
|
1440 |
+
) -> Union[Tuple, SequenceClassifierOutputWithPast]:
|
1441 |
+
r"""
|
1442 |
+
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
|
1443 |
+
Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
|
1444 |
+
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
|
1445 |
+
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
|
1446 |
+
"""
|
1447 |
+
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
|
1448 |
+
|
1449 |
+
model_outputs = self.model(
|
1450 |
+
input_ids,
|
1451 |
+
attention_mask=attention_mask,
|
1452 |
+
position_ids=position_ids,
|
1453 |
+
past_key_values=past_key_values,
|
1454 |
+
inputs_embeds=inputs_embeds,
|
1455 |
+
use_cache=use_cache,
|
1456 |
+
output_attentions=output_attentions,
|
1457 |
+
output_hidden_states=output_hidden_states,
|
1458 |
+
return_dict=return_dict,
|
1459 |
+
)
|
1460 |
+
hidden_states = model_outputs[0]
|
1461 |
+
logits = self.score(hidden_states)
|
1462 |
+
|
1463 |
+
if input_ids is not None:
|
1464 |
+
batch_size = input_ids.shape[0]
|
1465 |
+
else:
|
1466 |
+
batch_size = inputs_embeds.shape[0]
|
1467 |
+
|
1468 |
+
if self.config.pad_token_id is None and batch_size != 1:
|
1469 |
+
raise ValueError("Cannot handle batch sizes > 1 if no padding token is defined.")
|
1470 |
+
if self.config.pad_token_id is None:
|
1471 |
+
sequence_lengths = -1
|
1472 |
+
else:
|
1473 |
+
if input_ids is not None:
|
1474 |
+
# if no pad token found, use modulo instead of reverse indexing for ONNX compatibility
|
1475 |
+
sequence_lengths = torch.eq(input_ids, self.config.pad_token_id).int().argmax(-1) - 1
|
1476 |
+
sequence_lengths = sequence_lengths % input_ids.shape[-1]
|
1477 |
+
sequence_lengths = sequence_lengths.to(logits.device)
|
1478 |
+
else:
|
1479 |
+
sequence_lengths = -1
|
1480 |
+
|
1481 |
+
pooled_logits = logits[torch.arange(batch_size, device=logits.device), sequence_lengths]
|
1482 |
+
|
1483 |
+
loss = None
|
1484 |
+
if labels is not None:
|
1485 |
+
labels = labels.to(logits.device)
|
1486 |
+
if self.config.problem_type is None:
|
1487 |
+
if self.num_labels == 1:
|
1488 |
+
self.config.problem_type = "regression"
|
1489 |
+
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
|
1490 |
+
self.config.problem_type = "single_label_classification"
|
1491 |
+
else:
|
1492 |
+
self.config.problem_type = "multi_label_classification"
|
1493 |
+
|
1494 |
+
if self.config.problem_type == "regression":
|
1495 |
+
loss_fct = MSELoss()
|
1496 |
+
if self.num_labels == 1:
|
1497 |
+
loss = loss_fct(pooled_logits.squeeze(), labels.squeeze())
|
1498 |
+
else:
|
1499 |
+
loss = loss_fct(pooled_logits, labels)
|
1500 |
+
elif self.config.problem_type == "single_label_classification":
|
1501 |
+
loss_fct = CrossEntropyLoss()
|
1502 |
+
loss = loss_fct(pooled_logits.view(-1, self.num_labels), labels.view(-1))
|
1503 |
+
elif self.config.problem_type == "multi_label_classification":
|
1504 |
+
loss_fct = BCEWithLogitsLoss()
|
1505 |
+
loss = loss_fct(pooled_logits, labels)
|
1506 |
+
if not return_dict:
|
1507 |
+
output = (pooled_logits,) + model_outputs[1:]
|
1508 |
+
return ((loss,) + output) if loss is not None else output
|
1509 |
+
|
1510 |
+
return SequenceClassifierOutputWithPast(
|
1511 |
+
loss=loss,
|
1512 |
+
logits=pooled_logits,
|
1513 |
+
past_key_values=model_outputs.past_key_values,
|
1514 |
+
hidden_states=model_outputs.hidden_states,
|
1515 |
+
attentions=model_outputs.attentions,
|
1516 |
+
)
|
1517 |
+
|
1518 |
+
|
1519 |
+
@add_start_docstrings(
|
1520 |
+
"""
|
1521 |
+
[`Phi3Model`] with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for
|
1522 |
+
Named-Entity-Recognition (NER) tasks.
|
1523 |
+
""",
|
1524 |
+
PHI3_START_DOCSTRING,
|
1525 |
+
)
|
1526 |
+
# Copied from transformers.models.mpt.modeling_mpt.MptForTokenClassification with Mpt->Phi3,MPT->PHI3,self.transformer->self.model,transformer_outputs->model_outputs
|
1527 |
+
class Phi3ForTokenClassification(Phi3PreTrainedModel):
|
1528 |
+
def __init__(self, config: Phi3Config):
|
1529 |
+
super().__init__(config)
|
1530 |
+
self.num_labels = config.num_labels
|
1531 |
+
|
1532 |
+
self.model = Phi3Model(config)
|
1533 |
+
if hasattr(config, "classifier_dropout") and config.classifier_dropout is not None:
|
1534 |
+
classifier_dropout = config.classifier_dropout
|
1535 |
+
elif hasattr(config, "hidden_dropout") and config.hidden_dropout is not None:
|
1536 |
+
classifier_dropout = config.hidden_dropout
|
1537 |
+
else:
|
1538 |
+
classifier_dropout = 0.1
|
1539 |
+
self.dropout = nn.Dropout(classifier_dropout)
|
1540 |
+
self.classifier = nn.Linear(config.hidden_size, config.num_labels)
|
1541 |
+
|
1542 |
+
# Initialize weights and apply final processing
|
1543 |
+
self.post_init()
|
1544 |
+
|
1545 |
+
@add_start_docstrings_to_model_forward(PHI3_INPUTS_DOCSTRING)
|
1546 |
+
@add_code_sample_docstrings(
|
1547 |
+
checkpoint=_CHECKPOINT_FOR_DOC,
|
1548 |
+
output_type=TokenClassifierOutput,
|
1549 |
+
config_class=_CONFIG_FOR_DOC,
|
1550 |
+
)
|
1551 |
+
def forward(
|
1552 |
+
self,
|
1553 |
+
input_ids: Optional[torch.LongTensor] = None,
|
1554 |
+
past_key_values: Optional[Tuple[Tuple[torch.Tensor, torch.Tensor], ...]] = None,
|
1555 |
+
attention_mask: Optional[torch.Tensor] = None,
|
1556 |
+
inputs_embeds: Optional[torch.Tensor] = None,
|
1557 |
+
labels: Optional[torch.Tensor] = None,
|
1558 |
+
use_cache: Optional[bool] = None,
|
1559 |
+
output_attentions: Optional[bool] = None,
|
1560 |
+
output_hidden_states: Optional[bool] = None,
|
1561 |
+
return_dict: Optional[bool] = None,
|
1562 |
+
**deprecated_arguments,
|
1563 |
+
) -> Union[Tuple[torch.Tensor], TokenClassifierOutput]:
|
1564 |
+
r"""
|
1565 |
+
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
|
1566 |
+
Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
|
1567 |
+
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
|
1568 |
+
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
|
1569 |
+
"""
|
1570 |
+
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
|
1571 |
+
|
1572 |
+
model_outputs = self.model(
|
1573 |
+
input_ids,
|
1574 |
+
past_key_values=past_key_values,
|
1575 |
+
attention_mask=attention_mask,
|
1576 |
+
inputs_embeds=inputs_embeds,
|
1577 |
+
use_cache=use_cache,
|
1578 |
+
output_attentions=output_attentions,
|
1579 |
+
output_hidden_states=output_hidden_states,
|
1580 |
+
return_dict=return_dict,
|
1581 |
+
)
|
1582 |
+
|
1583 |
+
hidden_states = model_outputs[0]
|
1584 |
+
hidden_states = self.dropout(hidden_states)
|
1585 |
+
logits = self.classifier(hidden_states)
|
1586 |
+
|
1587 |
+
loss = None
|
1588 |
+
if labels is not None:
|
1589 |
+
# move labels to correct device to enable model parallelism
|
1590 |
+
labels = labels.to(logits.device)
|
1591 |
+
batch_size, seq_length = labels.shape
|
1592 |
+
loss_fct = CrossEntropyLoss()
|
1593 |
+
loss = loss_fct(
|
1594 |
+
logits.view(batch_size * seq_length, self.num_labels), labels.view(batch_size * seq_length)
|
1595 |
+
)
|
1596 |
+
|
1597 |
+
if not return_dict:
|
1598 |
+
output = (logits,) + model_outputs[2:]
|
1599 |
+
return ((loss,) + output) if loss is not None else output
|
1600 |
+
|
1601 |
+
return TokenClassifierOutput(
|
1602 |
+
loss=loss,
|
1603 |
+
logits=logits,
|
1604 |
+
hidden_states=model_outputs.hidden_states,
|
1605 |
+
attentions=model_outputs.attentions,
|
1606 |
+
)
|
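Because the checkpoint folder in this commit bundles `modeling_phi3.py`, the model can be loaded through `trust_remote_code` without a transformers release that ships Phi-3. A minimal usage sketch, assuming the local folder path from this commit; the prompt mirrors the docstring example above, and `max_new_tokens` is an arbitrary choice:

```python
# Minimal sketch: load the checkpoint so that the bundled modeling_phi3.py above
# is picked up via trust_remote_code. Folder path and generation length are
# illustrative assumptions, not part of the commit itself.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

path = "lava-vicuna_2024_4_Phi-3-mini-4k-instruct"  # local checkpoint folder in this repo
tokenizer = AutoTokenizer.from_pretrained(path)
model = AutoModelForCausalLM.from_pretrained(
    path, torch_dtype=torch.bfloat16, trust_remote_code=True
)

inputs = tokenizer("This is an example script .", return_tensors="pt")
generate_ids = model.generate(inputs.input_ids, max_new_tokens=30)
print(tokenizer.batch_decode(generate_ids, skip_special_tokens=True)[0])
```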
lava-vicuna_2024_4_Phi-3-mini-4k-instruct/sample_finetune.py
ADDED
@@ -0,0 +1,217 @@
import sys
import logging

import datasets
from datasets import load_dataset
from peft import LoraConfig
import torch
import transformers
from trl import SFTTrainer
from transformers import AutoModelForCausalLM, AutoTokenizer, TrainingArguments, BitsAndBytesConfig

"""
A simple example of using SFTTrainer and Accelerate to finetune Phi-3 models. For
a more advanced example, please follow HF alignment-handbook/scripts/run_sft.py.
This example has utilized DeepSpeed ZeRO3 offload to reduce the memory usage. The
script can be run on V100 or later generation GPUs. Here are some suggestions on
further reducing memory consumption:
    - reduce batch size
    - decrease lora dimension
    - restrict lora target modules
Please follow these steps to run the script:
1. Install dependencies:
    conda install -c conda-forge accelerate
    pip3 install -i https://pypi.org/simple/ bitsandbytes
    pip3 install peft transformers trl datasets
    pip3 install deepspeed
2. Setup accelerate and deepspeed config based on the machine used:
    accelerate config
Here is a sample config for deepspeed zero3:
    compute_environment: LOCAL_MACHINE
    debug: false
    deepspeed_config:
      gradient_accumulation_steps: 1
      offload_optimizer_device: none
      offload_param_device: none
      zero3_init_flag: true
      zero3_save_16bit_model: true
      zero_stage: 3
    distributed_type: DEEPSPEED
    downcast_bf16: 'no'
    enable_cpu_affinity: false
    machine_rank: 0
    main_training_function: main
    mixed_precision: bf16
    num_machines: 1
    num_processes: 4
    rdzv_backend: static
    same_network: true
    tpu_env: []
    tpu_use_cluster: false
    tpu_use_sudo: false
    use_cpu: false
3. check accelerate config:
    accelerate env
4. Run the code:
    accelerate launch sample_finetune.py
"""

logger = logging.getLogger(__name__)


###################
# Hyper-parameters
###################
training_config = {
    "bf16": True,
    "do_eval": False,
    "learning_rate": 5.0e-06,
    "log_level": "info",
    "logging_steps": 20,
    "logging_strategy": "steps",
    "lr_scheduler_type": "cosine",
    "num_train_epochs": 1,
    "max_steps": -1,
    "output_dir": "./checkpoint_dir",
    "overwrite_output_dir": True,
    "per_device_eval_batch_size": 4,
    "per_device_train_batch_size": 4,
    "remove_unused_columns": True,
    "save_steps": 100,
    "save_total_limit": 1,
    "seed": 0,
    "gradient_checkpointing": True,
    "gradient_checkpointing_kwargs": {"use_reentrant": False},
    "gradient_accumulation_steps": 1,
    "warmup_ratio": 0.2,
}

peft_config = {
    "r": 16,
    "lora_alpha": 32,
    "lora_dropout": 0.05,
    "bias": "none",
    "task_type": "CAUSAL_LM",
    "target_modules": "all-linear",
    "modules_to_save": None,
}
train_conf = TrainingArguments(**training_config)
peft_conf = LoraConfig(**peft_config)


###############
# Setup logging
###############
logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
    datefmt="%Y-%m-%d %H:%M:%S",
    handlers=[logging.StreamHandler(sys.stdout)],
)
log_level = train_conf.get_process_log_level()
logger.setLevel(log_level)
datasets.utils.logging.set_verbosity(log_level)
transformers.utils.logging.set_verbosity(log_level)
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()

# Log on each process a small summary
logger.warning(
    f"Process rank: {train_conf.local_rank}, device: {train_conf.device}, n_gpu: {train_conf.n_gpu}"
    + f" distributed training: {bool(train_conf.local_rank != -1)}, 16-bits training: {train_conf.fp16}"
)
logger.info(f"Training/evaluation parameters {train_conf}")
logger.info(f"PEFT parameters {peft_conf}")


################
# Model Loading
################
checkpoint_path = "microsoft/Phi-3-mini-4k-instruct"
# checkpoint_path = "microsoft/Phi-3-mini-128k-instruct"
model_kwargs = dict(
    use_cache=False,
    trust_remote_code=True,
    attn_implementation="flash_attention_2",  # loading the model with flash-attention support
    torch_dtype=torch.bfloat16,
    device_map=None
)
model = AutoModelForCausalLM.from_pretrained(checkpoint_path, **model_kwargs)
tokenizer = AutoTokenizer.from_pretrained(checkpoint_path)
tokenizer.model_max_length = 2048
tokenizer.pad_token = tokenizer.unk_token  # use unk rather than eos token to prevent endless generation
tokenizer.pad_token_id = tokenizer.convert_tokens_to_ids(tokenizer.pad_token)
tokenizer.padding_side = 'right'


##################
# Data Processing
##################
def apply_chat_template(
    example,
    tokenizer,
):
    messages = example["messages"]
    # Add an empty system message if there is none
    if messages[0]["role"] != "system":
        messages.insert(0, {"role": "system", "content": ""})
    example["text"] = tokenizer.apply_chat_template(
        messages, tokenize=False, add_generation_prompt=False)
    return example

raw_dataset = load_dataset("HuggingFaceH4/ultrachat_200k")
train_dataset = raw_dataset["train_sft"]
test_dataset = raw_dataset["test_sft"]
column_names = list(train_dataset.features)

processed_train_dataset = train_dataset.map(
    apply_chat_template,
    fn_kwargs={"tokenizer": tokenizer},
    num_proc=10,
    remove_columns=column_names,
    desc="Applying chat template to train_sft",
)

processed_test_dataset = test_dataset.map(
    apply_chat_template,
    fn_kwargs={"tokenizer": tokenizer},
    num_proc=10,
    remove_columns=column_names,
    desc="Applying chat template to test_sft",
)


###########
# Training
###########
trainer = SFTTrainer(
    model=model,
    args=train_conf,
    peft_config=peft_conf,
    train_dataset=processed_train_dataset,
    eval_dataset=processed_test_dataset,
    max_seq_length=2048,
    dataset_text_field="text",
    tokenizer=tokenizer,
    packing=True
)
train_result = trainer.train()
metrics = train_result.metrics
trainer.log_metrics("train", metrics)
trainer.save_metrics("train", metrics)
trainer.save_state()


#############
# Evaluation
#############
tokenizer.padding_side = 'left'
metrics = trainer.evaluate()
metrics["eval_samples"] = len(processed_test_dataset)
trainer.log_metrics("eval", metrics)
trainer.save_metrics("eval", metrics)


# ############
# # Save model
# ############
trainer.save_model(train_conf.output_dir)
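After `trainer.save_model`, the LoRA adapter lands in `./checkpoint_dir`. A minimal sketch of reloading it with PEFT for inference; the merge step is an assumption about downstream use, not something the script above performs:

```python
# Minimal sketch, assuming the adapter was written to ./checkpoint_dir by the
# finetuning script above.
import torch
from peft import PeftModel
from transformers import AutoModelForCausalLM, AutoTokenizer

base = AutoModelForCausalLM.from_pretrained(
    "microsoft/Phi-3-mini-4k-instruct", torch_dtype=torch.bfloat16, trust_remote_code=True
)
model = PeftModel.from_pretrained(base, "./checkpoint_dir")  # attach the LoRA weights
model = model.merge_and_unload()  # optionally fold the adapter into the base weights
tokenizer = AutoTokenizer.from_pretrained("microsoft/Phi-3-mini-4k-instruct")
```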
lava-vicuna_2024_4_Phi-3-mini-4k-instruct/special_tokens_map.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:51d7c72bbb0e5dbc001ba6cb799c53dee0539303d4e9c483583cf12e9fe48e48
size 568
lava-vicuna_2024_4_Phi-3-mini-4k-instruct/tokenizer.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:0bbddd4b39b594027b022cf22c47669dcd9e05ffc3b6d4a972b39a713750f823
size 1844409
lava-vicuna_2024_4_Phi-3-mini-4k-instruct/tokenizer.model
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:9e556afd44213b6bd1be2b850ebbbd98f5481437a8021afaf58ee7fb1818d347
size 499723
lava-vicuna_2024_4_Phi-3-mini-4k-instruct/tokenizer_config.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:441a655644c244ab6fb6aae4320e5b01793bc5a9ef03dd94e9f6dedf337ec01b
size 3169
llava/__init__.py
ADDED
@@ -0,0 +1 @@
from .model import LlavaPhiForCausalLM
llava/__pycache__/__init__.cpython-310.pyc
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:477a627025b05218587d4685fdce9a89d1188091f8ff133905d99cbb0c91ed76
size 191
llava/__pycache__/constants.cpython-310.pyc
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:dea6328de8a9b83efa7e659a28a8f2a63e9f6c2d919ce1163b018759aa706ce9
size 534
llava/__pycache__/conversation.cpython-310.pyc
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:d16ca7eeefdfe3a131acacd089db4ca20c5a43e8cde8067e8a2c9ab76e0fc4f2
size 10902
llava/__pycache__/mm_utils.cpython-310.pyc
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:8af51e494f02363457a814aad8fb0e89241e3fcb5dbf56514ac2f242a52c6d8e
size 8773
llava/bpe_simple_vocab_16e6.txt.gz
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:924691ac288e54409236115652ad4aa250f48203de50a9e4722a6ecd48d6804a
size 1356917
llava/constants.py
ADDED
@@ -0,0 +1,14 @@
CONTROLLER_HEART_BEAT_EXPIRATION = 30
WORKER_HEART_BEAT_INTERVAL = 15

LOGDIR = "."

# Model Constants
IGNORE_INDEX = -100
IMAGE_TOKEN_INDEX = -200
DEFAULT_PC_TOKEN = "<point>"
DEFAULT_IMAGE_TOKEN = "<image>"
DEFAULT_IMAGE_PATCH_TOKEN = "<im_patch>"
DEFAULT_IM_START_TOKEN = "<im_start>"
DEFAULT_IM_END_TOKEN = "<im_end>"
IMAGE_PLACEHOLDER = "<image-placeholder>"
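`IMAGE_TOKEN_INDEX = -200` is a sentinel that cannot collide with any real vocabulary id: LLaVA-style preprocessing splices it into `input_ids` wherever the `<image>` placeholder appears, and the model later swaps it for vision features. A minimal, self-contained sketch of that splicing; this is an illustration, not the repo's `mm_utils` implementation:

```python
# Illustrative sketch only: how a "<image>" placeholder becomes the -200 sentinel.
IMAGE_TOKEN_INDEX = -200
DEFAULT_IMAGE_TOKEN = "<image>"

def splice_image_token(prompt, encode):
    """encode: any callable mapping text -> list of ints (e.g. a tokenizer)."""
    chunks = [encode(c) for c in prompt.split(DEFAULT_IMAGE_TOKEN)]
    ids = list(chunks[0])
    for chunk in chunks[1:]:
        ids += [IMAGE_TOKEN_INDEX] + list(chunk)  # sentinel marks the image slot
    return ids

# Toy encoder keeps the example dependency-free:
toy_encode = lambda s: [ord(ch) for ch in s]
assert IMAGE_TOKEN_INDEX in splice_image_token("describe <image> please", toy_encode)
```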
llava/conversation.py
ADDED
@@ -0,0 +1,422 @@
# Modified from LLaVA: https://github.com/haotian-liu/LLaVA.git
import dataclasses
from enum import auto, Enum
from typing import List, Tuple
import base64
from io import BytesIO
from PIL import Image


class SeparatorStyle(Enum):
    """Different separator style."""
    SINGLE = auto()
    TWO = auto()
    MPT = auto()
    PLAIN = auto()
    LLAMA_2 = auto()


@dataclasses.dataclass
class Conversation:
    """A class that keeps all conversation history."""
    system: str
    roles: List[str]
    messages: List[List[str]]
    offset: int
    sep_style: SeparatorStyle = SeparatorStyle.SINGLE
    sep: str = "###"
    sep2: str = None
    version: str = "Unknown"

    skip_next: bool = False

    def get_prompt(self):
        messages = self.messages
        if len(messages) > 0 and type(messages[0][1]) is tuple:
            messages = self.messages.copy()
            init_role, init_msg = messages[0].copy()
            init_msg = init_msg[0].replace("<image>", "").strip()
            if 'mmtag' in self.version:
                messages[0] = (init_role, init_msg)
                messages.insert(0, (self.roles[0], "<Image><image></Image>"))
                messages.insert(1, (self.roles[1], "Received."))
            else:
                messages[0] = (init_role, "<image>\n" + init_msg)

        if self.sep_style == SeparatorStyle.SINGLE:
            ret = self.system + self.sep
            for role, message in messages:
                if message:
                    if type(message) is tuple:
                        message, _, _ = message
                    ret += role + ": " + message + self.sep
                else:
                    ret += role + ":"
        elif self.sep_style == SeparatorStyle.TWO:
            seps = [self.sep, self.sep2]
            ret = self.system + seps[0]
            for i, (role, message) in enumerate(messages):
                if message:
                    if type(message) is tuple:
                        message, _, _ = message
                    ret += role + ": " + message + seps[i % 2]
                else:
                    ret += role + ":"
        elif self.sep_style == SeparatorStyle.MPT:
            ret = self.system + self.sep
            for role, message in messages:
                if message:
                    if type(message) is tuple:
                        message, _, _ = message
                    ret += role + message + self.sep
                else:
                    ret += role
        elif self.sep_style == SeparatorStyle.LLAMA_2:
            wrap_sys = lambda msg: f"<<SYS>>\n{msg}\n<</SYS>>\n\n" if len(msg) > 0 else msg
            wrap_inst = lambda msg: f"[INST] {msg} [/INST]"
            ret = ""

            for i, (role, message) in enumerate(messages):
                if i == 0:
                    assert message, "first message should not be none"
                    assert role == self.roles[0], "first message should come from user"
                if message:
                    if type(message) is tuple:
                        message, _, _ = message
                    if i == 0: message = wrap_sys(self.system) + message
                    if i % 2 == 0:
                        message = wrap_inst(message)
                        ret += self.sep + message
                    else:
                        ret += " " + message + " " + self.sep2
                else:
                    ret += ""
            ret = ret.lstrip(self.sep)
        elif self.sep_style == SeparatorStyle.PLAIN:
            seps = [self.sep, self.sep2]
            ret = self.system
            for i, (role, message) in enumerate(messages):
                if message:
                    if type(message) is tuple:
                        message, _, _ = message
                    ret += message + seps[i % 2]
                else:
                    ret += ""
        else:
            raise ValueError(f"Invalid style: {self.sep_style}")

        return ret

    def append_message(self, role, message):
        self.messages.append([role, message])

    def process_image(self, image, image_process_mode, return_pil=False, image_format='PNG', max_len=1344, min_len=672):
        if image_process_mode == "Pad":
            def expand2square(pil_img, background_color=(122, 116, 104)):
                width, height = pil_img.size
                if width == height:
                    return pil_img
                elif width > height:
                    result = Image.new(pil_img.mode, (width, width), background_color)
                    result.paste(pil_img, (0, (width - height) // 2))
                    return result
                else:
                    result = Image.new(pil_img.mode, (height, height), background_color)
                    result.paste(pil_img, ((height - width) // 2, 0))
                    return result
            image = expand2square(image)
        elif image_process_mode in ["Default", "Crop"]:
            pass
        elif image_process_mode == "Resize":
            image = image.resize((336, 336))
        else:
            raise ValueError(f"Invalid image_process_mode: {image_process_mode}")
        if max(image.size) > max_len:
            max_hw, min_hw = max(image.size), min(image.size)
            aspect_ratio = max_hw / min_hw
            shortest_edge = int(min(max_len / aspect_ratio, min_len, min_hw))
            longest_edge = int(shortest_edge * aspect_ratio)
            W, H = image.size
            if H > W:
                H, W = longest_edge, shortest_edge
            else:
                H, W = shortest_edge, longest_edge
            image = image.resize((W, H))
        if return_pil:
            return image
        else:
            buffered = BytesIO()
            image.save(buffered, format=image_format)
            img_b64_str = base64.b64encode(buffered.getvalue()).decode()
            return img_b64_str

    def get_images(self, return_pil=False):
        images = []
        for i, (role, msg) in enumerate(self.messages[self.offset:]):
            if i % 2 == 0:
                if type(msg) is tuple:
                    msg, image, image_process_mode = msg
                    image = self.process_image(image, image_process_mode, return_pil=return_pil)
                    images.append(image)
        return images

    def to_gradio_chatbot(self):
        ret = []
        for i, (role, msg) in enumerate(self.messages[self.offset:]):
            if i % 2 == 0:
                if type(msg) is tuple:
                    msg, image, image_process_mode = msg
                    img_b64_str = self.process_image(
                        image, "Default", return_pil=False,
                        image_format='JPEG')
                    img_str = f'<img src="data:image/jpeg;base64,{img_b64_str}" alt="user upload image" />'
                    msg = img_str + msg.replace('<image>', '').strip()
                    ret.append([msg, None])
                else:
                    ret.append([msg, None])
            else:
                ret[-1][-1] = msg
        return ret

    def copy(self):
        return Conversation(
            system=self.system,
            roles=self.roles,
            messages=[[x, y] for x, y in self.messages],
            offset=self.offset,
            sep_style=self.sep_style,
            sep=self.sep,
            sep2=self.sep2,
            version=self.version)

    def dict(self):
        if len(self.get_images()) > 0:
            return {
                "system": self.system,
                "roles": self.roles,
                "messages": [[x, y[0] if type(y) is tuple else y] for x, y in self.messages],
                "offset": self.offset,
                "sep": self.sep,
                "sep2": self.sep2,
            }
        return {
            "system": self.system,
            "roles": self.roles,
            "messages": self.messages,
            "offset": self.offset,
            "sep": self.sep,
            "sep2": self.sep2,
        }


conv_vicuna_v0 = Conversation(
    system="A chat between a curious human and an artificial intelligence assistant. "
           "The assistant gives helpful, detailed, and polite answers to the human's questions.",
    roles=("Human", "Assistant"),
    messages=(
        ("Human", "What are the key differences between renewable and non-renewable energy sources?"),
        ("Assistant",
         "Renewable energy sources are those that can be replenished naturally in a relatively "
         "short amount of time, such as solar, wind, hydro, geothermal, and biomass. "
         "Non-renewable energy sources, on the other hand, are finite and will eventually be "
         "depleted, such as coal, oil, and natural gas. Here are some key differences between "
         "renewable and non-renewable energy sources:\n"
         "1. Availability: Renewable energy sources are virtually inexhaustible, while non-renewable "
         "energy sources are finite and will eventually run out.\n"
         "2. Environmental impact: Renewable energy sources have a much lower environmental impact "
         "than non-renewable sources, which can lead to air and water pollution, greenhouse gas emissions, "
         "and other negative effects.\n"
         "3. Cost: Renewable energy sources can be more expensive to initially set up, but they typically "
         "have lower operational costs than non-renewable sources.\n"
         "4. Reliability: Renewable energy sources are often more reliable and can be used in more remote "
         "locations than non-renewable sources.\n"
         "5. Flexibility: Renewable energy sources are often more flexible and can be adapted to different "
         "situations and needs, while non-renewable sources are more rigid and inflexible.\n"
         "6. Sustainability: Renewable energy sources are more sustainable over the long term, while "
         "non-renewable sources are not, and their depletion can lead to economic and social instability.\n")
    ),
    offset=2,
    sep_style=SeparatorStyle.SINGLE,
    sep="###",
)

conv_vicuna_v1 = Conversation(
    system="A chat between a curious user and an artificial intelligence assistant. "
           "The assistant gives helpful, detailed, and polite answers to the user's questions.",
    roles=("USER", "ASSISTANT"),
    version="v1",
    messages=(),
    offset=0,
    sep_style=SeparatorStyle.TWO,
    sep=" ",
    sep2="</s>",
)

conv_llama_2 = Conversation(
    system="""You are a helpful, respectful and honest assistant. Always answer as helpfully as possible, while being safe. Your answers should not include any harmful, unethical, racist, sexist, toxic, dangerous, or illegal content. Please ensure that your responses are socially unbiased and positive in nature.

If a question does not make any sense, or is not factually coherent, explain why instead of answering something not correct. If you don't know the answer to a question, please don't share false information.""",
    roles=("USER", "ASSISTANT"),
    version="llama_v2",
    messages=(),
    offset=0,
    sep_style=SeparatorStyle.LLAMA_2,
    sep="<s>",
    sep2="</s>",
)

conv_llava_llama_2 = Conversation(
    system="You are a helpful language and vision assistant. "
           "You are able to understand the visual content that the user provides, "
           "and assist the user with a variety of tasks using natural language.",
    roles=("USER", "ASSISTANT"),
    version="llama_v2",
    messages=(),
    offset=0,
    sep_style=SeparatorStyle.LLAMA_2,
    sep="<s>",
    sep2="</s>",
)

conv_mpt = Conversation(
    system="""<|im_start|>system
A conversation between a user and an LLM-based AI assistant. The assistant gives helpful and honest answers.""",
    roles=("<|im_start|>user\n", "<|im_start|>assistant\n"),
    version="mpt",
    messages=(),
    offset=0,
    sep_style=SeparatorStyle.MPT,
    sep="<|im_end|>",
)

conv_llava_plain = Conversation(
    system="",
    roles=("", ""),
    messages=(
    ),
    offset=0,
    sep_style=SeparatorStyle.PLAIN,
    sep="\n",
)

conv_llava_v0 = Conversation(
    system="A chat between a curious human and an artificial intelligence assistant. "
           "The assistant gives helpful, detailed, and polite answers to the human's questions.",
    roles=("Human", "Assistant"),
    messages=(
    ),
    offset=0,
    sep_style=SeparatorStyle.SINGLE,
    sep="###",
)

conv_llava_v0_mmtag = Conversation(
    system="A chat between a curious user and an artificial intelligence assistant. "
           "The assistant is able to understand the visual content that the user provides, and assist the user with a variety of tasks using natural language."
           "The visual content will be provided with the following format: <Image>visual content</Image>.",
    roles=("Human", "Assistant"),
    messages=(
    ),
    offset=0,
    sep_style=SeparatorStyle.SINGLE,
    sep="###",
    version="v0_mmtag",
)

conv_llava_v1 = Conversation(
    system="A chat between a curious human and an artificial intelligence assistant. "
           "The assistant gives helpful, detailed, and polite answers to the human's questions.",
    roles=("USER", "ASSISTANT"),
    version="v1",
    messages=(),
    offset=0,
    sep_style=SeparatorStyle.TWO,
    sep=" ",
    sep2="</s>",
)

conv_llava_v1_mmtag = Conversation(
    system="A chat between a curious user and an artificial intelligence assistant. "
           "The assistant is able to understand the visual content that the user provides, and assist the user with a variety of tasks using natural language."
           "The visual content will be provided with the following format: <Image>visual content</Image>.",
    roles=("USER", "ASSISTANT"),
    messages=(),
    offset=0,
    sep_style=SeparatorStyle.TWO,
    sep=" ",
    sep2="</s>",
    version="v1_mmtag",
)

conv_mistral_instruct = Conversation(
    system="",
    roles=("USER", "ASSISTANT"),
    version="llama_v2",
    messages=(),
    offset=0,
    sep_style=SeparatorStyle.LLAMA_2,
    sep="",
    sep2="</s>",
)

conv_chatml_direct = Conversation(
    system="""<|im_start|>system
Answer the questions.""",
    roles=("<|im_start|>user\n", "<|im_start|>assistant\n"),
    version="mpt",
    messages=(),
    offset=0,
    sep_style=SeparatorStyle.MPT,
    sep="<|im_end|>",
)

conv_phi3_instruct = Conversation(
    system="""<|system|>\nYou are a helpful AI assistant.""",
    roles=("\n<|user|>\n", "\n<|assistant|>\n"),
    version="phi3",
    messages=(),
    offset=0,
    sep_style=SeparatorStyle.MPT,
    sep="<|end|>",
)


conv_phi3_instruct_v2 = Conversation(
    system="""<|system|>\nProvide a detailed answer,Provide a detailed answer,Provide a detailed answer""",
    roles=("\n<|user|>\n", "\n<|assistant|>\n"),
    version="phi3",
    messages=(),
    offset=0,
    sep_style=SeparatorStyle.MPT,
    sep="<|end|>",
)


default_conversation = conv_vicuna_v1
conv_templates = {
    "default": conv_vicuna_v0,
    "v0": conv_vicuna_v0,
    "v1": conv_vicuna_v1,
    "vicuna_v1": conv_vicuna_v1,
    "llama_2": conv_llama_2,
    "mistral_instruct": conv_mistral_instruct,
    "chatml_direct": conv_chatml_direct,
    "mistral_direct": conv_chatml_direct,

    "plain": conv_llava_plain,
    "v0_plain": conv_llava_plain,
    "llava_v0": conv_llava_v0,
    "v0_mmtag": conv_llava_v0_mmtag,
    "llava_v1": conv_llava_v1,
    "v1_mmtag": conv_llava_v1_mmtag,
    "llava_llama_2": conv_llava_llama_2,
|
414 |
+
"phi3_instruct": conv_phi3_instruct,
|
415 |
+
"phi3_instruct_v2": conv_phi3_instruct_v2,
|
416 |
+
|
417 |
+
"mpt": conv_mpt,
|
418 |
+
}
|
419 |
+
|
420 |
+
|
421 |
+
if __name__ == "__main__":
|
422 |
+
print(default_conversation.get_prompt())
|
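For orientation, here is a minimal usage sketch of how these templates are typically consumed elsewhere in the repo, assuming the `copy()`, `append_message()`, and `get_prompt()` methods that `Conversation` defines earlier in this file:

# Minimal usage sketch; assumes the Conversation API defined earlier in this file.
from llava.conversation import conv_templates

conv = conv_templates["phi3_instruct"].copy()   # copy so the shared template is not mutated
conv.append_message(conv.roles[0], "Describe this object.")
conv.append_message(conv.roles[1], None)        # None marks the slot the model will fill
prompt = conv.get_prompt()                      # serialized with the template's sep_style/sep tokens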
llava/eval/eval_gpt_review.py
ADDED
@@ -0,0 +1,113 @@
import argparse
import json
import os

import openai
import tqdm
import ray
import time

NUM_SECONDS_TO_SLEEP = 3

@ray.remote(num_cpus=4)
def get_eval(content: str, max_tokens: int):
    while True:
        try:
            response = openai.ChatCompletion.create(
                model='gpt-4',
                messages=[{
                    'role': 'system',
                    'content': 'You are a helpful and precise assistant for checking the quality of the answer.'
                }, {
                    'role': 'user',
                    'content': content,
                }],
                temperature=0.2,  # TODO: figure out which temperature is best for evaluation
                max_tokens=max_tokens,
            )
            break
        except openai.error.RateLimitError:
            pass
        except Exception as e:
            print(e)
        time.sleep(NUM_SECONDS_TO_SLEEP)

    print('success!')
    return response['choices'][0]['message']['content']


def parse_score(review):
    try:
        score_pair = review.split('\n')[0]
        score_pair = score_pair.replace(',', ' ')
        sp = score_pair.split(' ')
        if len(sp) == 2:
            return [float(sp[0]), float(sp[1])]
        else:
            print('error', review)
            return [-1, -1]
    except Exception as e:
        print(e)
        print('error', review)
        return [-1, -1]


if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='ChatGPT-based QA evaluation.')
    parser.add_argument('-q', '--question')
    # parser.add_argument('-a', '--answer')
    parser.add_argument('-a', '--answer-list', nargs='+', default=[])
    parser.add_argument('-r', '--rule')
    parser.add_argument('-o', '--output')
    parser.add_argument('--max-tokens', type=int, default=1024, help='maximum number of tokens produced in the output')
    args = parser.parse_args()

    ray.init()

    f_q = open(os.path.expanduser(args.question))
    f_ans1 = open(os.path.expanduser(args.answer_list[0]))
    f_ans2 = open(os.path.expanduser(args.answer_list[1]))
    rule_dict = json.load(open(os.path.expanduser(args.rule), 'r'))

    review_file = open(f'{args.output}', 'w')

    js_list = []
    handles = []
    idx = 0
    for ques_js, ans1_js, ans2_js in zip(f_q, f_ans1, f_ans2):
        # if idx == 1:
        #     break

        ques = json.loads(ques_js)
        ans1 = json.loads(ans1_js)
        ans2 = json.loads(ans2_js)

        category = json.loads(ques_js)['category']
        if category in rule_dict:
            rule = rule_dict[category]
        else:
            rule = rule_dict['default']
        prompt = rule['prompt']
        role = rule['role']
        content = (f'[Question]\n{ques["text"]}\n\n'
                   f'[{role} 1]\n{ans1["text"]}\n\n[End of {role} 1]\n\n'
                   f'[{role} 2]\n{ans2["text"]}\n\n[End of {role} 2]\n\n'
                   f'[System]\n{prompt}\n\n')
        js_list.append({
            'id': idx+1,
            'question_id': ques['question_id'],
            'answer1_id': ans1['answer_id'],
            'answer2_id': ans2['answer_id'],
            'category': category})
        idx += 1
        handles.append(get_eval.remote(content, args.max_tokens))
        # To avoid the rate limit set by OpenAI
        time.sleep(NUM_SECONDS_TO_SLEEP)

    reviews = ray.get(handles)
    for idx, review in enumerate(reviews):
        scores = parse_score(review)
        js_list[idx]['content'] = review
        js_list[idx]['tuple'] = scores
        review_file.write(json.dumps(js_list[idx]) + '\n')
    review_file.close()
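Note that `parse_score` assumes the judge's first output line is a comma- or space-separated pair of numbers; anything else falls back to `[-1, -1]`. A small illustration (the review strings below are invented):

# Invented judge outputs, for illustration only.
good = "8, 7\nAssistant 1 gave the more detailed and accurate answer..."
bad = "Assistant 1 was better overall."

parse_score(good)  # -> [8.0, 7.0]  (first line parsed as two floats)
parse_score(bad)   # -> [-1, -1]    (unparseable first line)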
llava/eval/eval_gpt_review_bench.py
ADDED
@@ -0,0 +1,121 @@
import argparse
import json
import os

import openai
import time

NUM_SECONDS_TO_SLEEP = 0.5


def get_eval(content: str, max_tokens: int):
    while True:
        try:
            response = openai.ChatCompletion.create(
                model='gpt-4-0314',
                messages=[{
                    'role': 'system',
                    'content': 'You are a helpful and precise assistant for checking the quality of the answer.'
                }, {
                    'role': 'user',
                    'content': content,
                }],
                temperature=0.2,  # TODO: figure out which temperature is best for evaluation
                max_tokens=max_tokens,
            )
            break
        except openai.error.RateLimitError:
            pass
        except Exception as e:
            print(e)
        time.sleep(NUM_SECONDS_TO_SLEEP)

    return response['choices'][0]['message']['content']


def parse_score(review):
    try:
        score_pair = review.split('\n')[0]
        score_pair = score_pair.replace(',', ' ')
        sp = score_pair.split(' ')
        if len(sp) == 2:
            return [float(sp[0]), float(sp[1])]
        else:
            print('error', review)
            return [-1, -1]
    except Exception as e:
        print(e)
        print('error', review)
        return [-1, -1]


if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='ChatGPT-based QA evaluation.')
    parser.add_argument('-q', '--question')
    parser.add_argument('-c', '--context')
    parser.add_argument('-a', '--answer-list', nargs='+', default=[])
    parser.add_argument('-r', '--rule')
    parser.add_argument('-o', '--output')
    parser.add_argument('--max-tokens', type=int, default=1024, help='maximum number of tokens produced in the output')
    args = parser.parse_args()

    f_q = open(os.path.expanduser(args.question))
    f_ans1 = open(os.path.expanduser(args.answer_list[0]))
    f_ans2 = open(os.path.expanduser(args.answer_list[1]))
    rule_dict = json.load(open(os.path.expanduser(args.rule), 'r'))

    if os.path.isfile(os.path.expanduser(args.output)):
        cur_reviews = [json.loads(line) for line in open(os.path.expanduser(args.output))]
    else:
        cur_reviews = []

    review_file = open(f'{args.output}', 'a')

    context_list = [json.loads(line) for line in open(os.path.expanduser(args.context))]
    image_to_context = {context['image']: context for context in context_list}

    handles = []
    idx = 0
    for ques_js, ans1_js, ans2_js in zip(f_q, f_ans1, f_ans2):
        ques = json.loads(ques_js)
        ans1 = json.loads(ans1_js)
        ans2 = json.loads(ans2_js)

        inst = image_to_context[ques['image']]

        if isinstance(inst['caption'], list):
            cap_str = '\n'.join(inst['caption'])
        else:
            cap_str = inst['caption']

        category = 'llava_bench_' + json.loads(ques_js)['category']
        if category in rule_dict:
            rule = rule_dict[category]
        else:
            assert False, f"Visual QA category not found in rule file: {category}."
        prompt = rule['prompt']
        role = rule['role']
        content = (f'[Context]\n{cap_str}\n\n'
                   f'[Question]\n{ques["text"]}\n\n'
                   f'[{role} 1]\n{ans1["text"]}\n\n[End of {role} 1]\n\n'
                   f'[{role} 2]\n{ans2["text"]}\n\n[End of {role} 2]\n\n'
                   f'[System]\n{prompt}\n\n')
        cur_js = {
            'id': idx+1,
            'question_id': ques['question_id'],
            'answer1_id': ans1.get('answer_id', ans1['question_id']),
            'answer2_id': ans2.get('answer_id', ans2['question_id']),  # fall back to question_id, mirroring ans1
            'category': category
        }
        if idx >= len(cur_reviews):
            review = get_eval(content, args.max_tokens)
            scores = parse_score(review)
            cur_js['content'] = review
            cur_js['tuple'] = scores
            review_file.write(json.dumps(cur_js) + '\n')
            review_file.flush()
        else:
            print(f'Skipping {idx} as we already have it.')
        idx += 1
        print(idx)
    review_file.close()
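A brief note on the resume logic above: reviews are appended one JSON object per line and flushed immediately, so a re-run skips the first `len(cur_reviews)` items. The same pattern in isolation, as a sketch (`work_items` and `review` are hypothetical placeholders):

import json
import os

output = 'reviews.jsonl'  # hypothetical output path
done = sum(1 for _ in open(output)) if os.path.isfile(output) else 0

with open(output, 'a') as out:
    for idx, item in enumerate(work_items):          # work_items: hypothetical iterable of pending jobs
        if idx < done:
            continue                                 # already scored on a previous run
        out.write(json.dumps(review(item)) + '\n')   # review: hypothetical scoring call
        out.flush()                                  # flush per line so a crash loses at most one item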
llava/eval/eval_gpt_review_visual.py
ADDED
@@ -0,0 +1,118 @@
import argparse
import json
import os

import openai
import time

NUM_SECONDS_TO_SLEEP = 0.5


def get_eval(content: str, max_tokens: int):
    while True:
        try:
            response = openai.ChatCompletion.create(
                model='gpt-4-0314',
                messages=[{
                    'role': 'system',
                    'content': 'You are a helpful and precise assistant for checking the quality of the answer.'
                }, {
                    'role': 'user',
                    'content': content,
                }],
                temperature=0.2,  # TODO: figure out which temperature is best for evaluation
                max_tokens=max_tokens,
            )
            break
        except openai.error.RateLimitError:
            pass
        except Exception as e:
            print(e)
        time.sleep(NUM_SECONDS_TO_SLEEP)

    return response['choices'][0]['message']['content']


def parse_score(review):
    try:
        score_pair = review.split('\n')[0]
        score_pair = score_pair.replace(',', ' ')
        sp = score_pair.split(' ')
        if len(sp) == 2:
            return [float(sp[0]), float(sp[1])]
        else:
            print('error', review)
            return [-1, -1]
    except Exception as e:
        print(e)
        print('error', review)
        return [-1, -1]


if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='ChatGPT-based QA evaluation.')
    parser.add_argument('-q', '--question')
    parser.add_argument('-c', '--context')
    parser.add_argument('-a', '--answer-list', nargs='+', default=[])
    parser.add_argument('-r', '--rule')
    parser.add_argument('-o', '--output')
    parser.add_argument('--max-tokens', type=int, default=1024, help='maximum number of tokens produced in the output')
    args = parser.parse_args()

    f_q = open(os.path.expanduser(args.question))
    f_ans1 = open(os.path.expanduser(args.answer_list[0]))
    f_ans2 = open(os.path.expanduser(args.answer_list[1]))
    rule_dict = json.load(open(os.path.expanduser(args.rule), 'r'))

    if os.path.isfile(os.path.expanduser(args.output)):
        cur_reviews = [json.loads(line) for line in open(os.path.expanduser(args.output))]
    else:
        cur_reviews = []

    review_file = open(f'{args.output}', 'a')

    context_list = [json.loads(line) for line in open(os.path.expanduser(args.context))]
    image_to_context = {context['image']: context for context in context_list}

    handles = []
    idx = 0
    for ques_js, ans1_js, ans2_js in zip(f_q, f_ans1, f_ans2):
        ques = json.loads(ques_js)
        ans1 = json.loads(ans1_js)
        ans2 = json.loads(ans2_js)

        inst = image_to_context[ques['image']]
        cap_str = '\n'.join(inst['captions'])
        box_str = '\n'.join([f'{instance["category"]}: {instance["bbox"]}' for instance in inst['instances']])

        category = json.loads(ques_js)['category']
        if category in rule_dict:
            rule = rule_dict[category]
        else:
            assert False, f"Visual QA category not found in rule file: {category}."
        prompt = rule['prompt']
        role = rule['role']
        content = (f'[Context]\n{cap_str}\n\n{box_str}\n\n'
                   f'[Question]\n{ques["text"]}\n\n'
                   f'[{role} 1]\n{ans1["text"]}\n\n[End of {role} 1]\n\n'
                   f'[{role} 2]\n{ans2["text"]}\n\n[End of {role} 2]\n\n'
                   f'[System]\n{prompt}\n\n')
        cur_js = {
            'id': idx+1,
            'question_id': ques['question_id'],
            'answer1_id': ans1.get('answer_id', ans1['question_id']),
            'answer2_id': ans2.get('answer_id', ans2['question_id']),  # fall back to question_id, mirroring ans1
            'category': category
        }
        if idx >= len(cur_reviews):
            review = get_eval(content, args.max_tokens)
            scores = parse_score(review)
            cur_js['content'] = review
            cur_js['tuple'] = scores
            review_file.write(json.dumps(cur_js) + '\n')
            review_file.flush()
        else:
            print(f'Skipping {idx} as we already have it.')
        idx += 1
        print(idx)
    review_file.close()
llava/eval/eval_pope.py
ADDED
@@ -0,0 +1,81 @@
import os
import json
import argparse

def eval_pope(answers, label_file):
    label_list = [json.loads(q)['label'] for q in open(label_file, 'r')]

    for answer in answers:
        text = answer['text']

        # Only keep the first sentence
        if text.find('.') != -1:
            text = text.split('.')[0]

        text = text.replace(',', '')
        words = text.split(' ')
        if 'No' in words or 'not' in words or 'no' in words:
            answer['text'] = 'no'
        else:
            answer['text'] = 'yes'

    for i in range(len(label_list)):
        if label_list[i] == 'no':
            label_list[i] = 0
        else:
            label_list[i] = 1

    pred_list = []
    for answer in answers:
        if answer['text'] == 'no':
            pred_list.append(0)
        else:
            pred_list.append(1)

    pos = 1
    neg = 0
    yes_ratio = pred_list.count(1) / len(pred_list)

    TP, TN, FP, FN = 0, 0, 0, 0
    for pred, label in zip(pred_list, label_list):
        if pred == pos and label == pos:
            TP += 1
        elif pred == pos and label == neg:
            FP += 1
        elif pred == neg and label == neg:
            TN += 1
        elif pred == neg and label == pos:
            FN += 1

    print('TP\tFP\tTN\tFN\t')
    print('{}\t{}\t{}\t{}'.format(TP, FP, TN, FN))

    precision = float(TP) / float(TP + FP)
    recall = float(TP) / float(TP + FN)
    f1 = 2*precision*recall / (precision + recall)
    acc = (TP + TN) / (TP + TN + FP + FN)
    print('Accuracy: {}'.format(acc))
    print('Precision: {}'.format(precision))
    print('Recall: {}'.format(recall))
    print('F1 score: {}'.format(f1))
    print('Yes ratio: {}'.format(yes_ratio))
    print('%.3f, %.3f, %.3f, %.3f, %.3f' % (f1, acc, precision, recall, yes_ratio))

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--annotation-dir", type=str)
    parser.add_argument("--question-file", type=str)
    parser.add_argument("--result-file", type=str)
    args = parser.parse_args()

    questions = [json.loads(line) for line in open(args.question_file)]
    questions = {question['question_id']: question for question in questions}
    answers = [json.loads(q) for q in open(args.result_file)]
    for file in os.listdir(args.annotation_dir):
        assert file.startswith('coco_pope_')
        assert file.endswith('.json')
        category = file[10:-5]
        cur_answers = [x for x in answers if questions[x['question_id']]['category'] == category]
        print('Category: {}, # samples: {}'.format(category, len(cur_answers)))
        eval_pope(cur_answers, os.path.join(args.annotation_dir, file))
        print("====================================")
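To make the metric arithmetic concrete, here is a worked micro-example of the confusion-matrix computation in `eval_pope` (toy values, assumed purely for illustration):

# Toy data: 1 = "yes", 0 = "no"; values assumed for illustration only.
pred_list  = [1, 1, 0, 0, 1]
label_list = [1, 0, 0, 1, 1]

TP = sum(p == 1 and l == 1 for p, l in zip(pred_list, label_list))  # 2
FP = sum(p == 1 and l == 0 for p, l in zip(pred_list, label_list))  # 1
TN = sum(p == 0 and l == 0 for p, l in zip(pred_list, label_list))  # 1
FN = sum(p == 0 and l == 1 for p, l in zip(pred_list, label_list))  # 1

precision = TP / (TP + FP)                          # 2/3
recall    = TP / (TP + FN)                          # 2/3
f1 = 2 * precision * recall / (precision + recall)  # 2/3
acc = (TP + TN) / (TP + TN + FP + FN)               # 3/5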
llava/eval/eval_science_qa.py
ADDED
@@ -0,0 +1,114 @@
import argparse
import json
import os
import re
import random


def get_args():
    parser = argparse.ArgumentParser()
    parser.add_argument('--base-dir', type=str)
    parser.add_argument('--result-file', type=str)
    parser.add_argument('--output-file', type=str)
    parser.add_argument('--output-result', type=str)
    parser.add_argument('--split', type=str, default='test')
    parser.add_argument('--options', type=list, default=["A", "B", "C", "D", "E"])
    return parser.parse_args()


def convert_caps(results):
    fakecaps = []
    for result in results:
        image_id = result['question_id']
        caption = result['text']
        fakecaps.append({"image_id": int(image_id), "caption": caption})
    return fakecaps


def get_pred_idx(prediction, choices, options):
    """
    Get the index (e.g. 2) from the prediction (e.g. 'C')
    """
    if prediction in options[:len(choices)]:
        return options.index(prediction)
    else:
        return -1


if __name__ == "__main__":
    args = get_args()

    base_dir = args.base_dir
    split_indices = json.load(open(os.path.join(base_dir, "pid_splits.json")))[args.split]
    problems = json.load(open(os.path.join(base_dir, "problems.json")))
    predictions = [json.loads(line) for line in open(args.result_file)]
    predictions = {pred['question_id']: pred for pred in predictions}
    split_problems = {idx: problems[idx] for idx in split_indices}

    results = {'correct': [], 'incorrect': []}
    sqa_results = {}
    sqa_results['acc'] = None
    sqa_results['correct'] = None
    sqa_results['count'] = None
    sqa_results['results'] = {}
    sqa_results['outputs'] = {}

    for prob_id, prob in split_problems.items():
        if prob_id not in predictions:
            pred = {'text': 'FAILED', 'prompt': 'Unknown'}
            pred_text = 'FAILED'
        else:
            pred = predictions[prob_id]
            pred_text = pred['text']

        if pred_text in args.options:
            answer = pred_text
        elif len(pred_text) >= 3 and pred_text[0] in args.options and pred_text[1:3] == ". ":
            answer = pred_text[0]
        else:
            pattern = re.compile(r'The answer is ([A-Z]).')
            res = pattern.findall(pred_text)
            if len(res) == 1:
                answer = res[0]  # 'A', 'B', ...
            else:
                answer = "FAILED"

        pred_idx = get_pred_idx(answer, prob['choices'], args.options)

        analysis = {
            'question_id': prob_id,
            'parsed_ans': answer,
            'ground_truth': args.options[prob['answer']],
            'question': pred['prompt'],
            'pred': pred_text,
            'is_multimodal': '<image>' in pred['prompt'],
        }

        sqa_results['results'][prob_id] = get_pred_idx(answer, prob['choices'], args.options)
        sqa_results['outputs'][prob_id] = pred_text

        if pred_idx == prob['answer']:
            results['correct'].append(analysis)
        else:
            results['incorrect'].append(analysis)

    correct = len(results['correct'])
    total = len(results['correct']) + len(results['incorrect'])

    ###### IMG ######
    multimodal_correct = len([x for x in results['correct'] if x['is_multimodal']])
    multimodal_incorrect = len([x for x in results['incorrect'] if x['is_multimodal']])
    multimodal_total = multimodal_correct + multimodal_incorrect
    ###### IMG ######

    print(f'Total: {total}, Correct: {correct}, Accuracy: {correct / total * 100:.2f}%, IMG-Accuracy: {multimodal_correct / multimodal_total * 100:.2f}%')

    sqa_results['acc'] = correct / total * 100
    sqa_results['correct'] = correct
    sqa_results['count'] = total

    with open(args.output_file, 'w') as f:
        json.dump(results, f, indent=2)
    with open(args.output_result, 'w') as f:
        json.dump(sqa_results, f, indent=2)
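For reference, the three-way answer parse above accepts a bare option letter, a letter followed by ". ", or a "The answer is X." phrase; everything else is marked FAILED. A short illustration with invented predictions:

import re

options = ["A", "B", "C", "D", "E"]
pattern = re.compile(r'The answer is ([A-Z]).')

# Invented model outputs, for illustration only.
for pred_text in ["B", "C. Because water expands.", "So, The answer is D.", "no idea"]:
    if pred_text in options:
        answer = pred_text                       # bare option letter
    elif len(pred_text) >= 3 and pred_text[0] in options and pred_text[1:3] == ". ":
        answer = pred_text[0]                    # "C. <explanation>" style
    else:
        res = pattern.findall(pred_text)
        answer = res[0] if len(res) == 1 else "FAILED"
    print(pred_text, '->', answer)               # B, C, D, FAILED respectively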
llava/eval/eval_science_qa_gpt4.py
ADDED
@@ -0,0 +1,104 @@
import argparse
import json
import os
import re
import random
from collections import defaultdict


def get_args():
    parser = argparse.ArgumentParser()
    parser.add_argument('--base-dir', type=str)
    parser.add_argument('--gpt4-result', type=str)
    parser.add_argument('--our-result', type=str)
    parser.add_argument('--split', type=str, default='test')
    parser.add_argument('--options', type=list, default=["A", "B", "C", "D", "E"])
    return parser.parse_args()


def convert_caps(results):
    fakecaps = []
    for result in results:
        image_id = result['question_id']
        caption = result['text']
        fakecaps.append({"image_id": int(image_id), "caption": caption})
    return fakecaps


def get_pred_idx(prediction, choices, options):
    """
    Get the index (e.g. 2) from the prediction (e.g. 'C')
    """
    if prediction in options[:len(choices)]:
        return options.index(prediction)
    else:
        return random.choice(range(len(choices)))


if __name__ == "__main__":
    args = get_args()

    base_dir = args.base_dir
    split_indices = json.load(open(os.path.join(base_dir, "pid_splits.json")))[args.split]
    problems = json.load(open(os.path.join(base_dir, "problems.json")))
    our_predictions = [json.loads(line) for line in open(args.our_result)]
    our_predictions = {pred['question_id']: pred for pred in our_predictions}
    split_problems = {idx: problems[idx] for idx in split_indices}

    gpt4_predictions = json.load(open(args.gpt4_result))['outputs']

    results = defaultdict(lambda: 0)

    for prob_id, prob in split_problems.items():
        if prob_id not in our_predictions:
            continue
        if prob_id not in gpt4_predictions:
            continue
        our_pred = our_predictions[prob_id]['text']
        gpt4_pred = gpt4_predictions[prob_id]

        pattern = re.compile(r'The answer is ([A-Z]).')
        our_res = pattern.findall(our_pred)
        if len(our_res) == 1:
            our_answer = our_res[0]  # 'A', 'B', ...
        else:
            our_answer = "FAILED"
        gpt4_res = pattern.findall(gpt4_pred)
        if len(gpt4_res) == 1:
            gpt4_answer = gpt4_res[0]  # 'A', 'B', ...
        else:
            gpt4_answer = "FAILED"

        our_pred_idx = get_pred_idx(our_answer, prob['choices'], args.options)
        gpt4_pred_idx = get_pred_idx(gpt4_answer, prob['choices'], args.options)

        if gpt4_answer == 'FAILED':
            results['gpt4_failed'] += 1
            # continue
            gpt4_pred_idx = our_pred_idx
            # if our_pred_idx != prob['answer']:
            #     print(our_predictions[prob_id]['prompt'])
            #     print('-----------------')
            #     print(f'LECTURE: {prob["lecture"]}')
            #     print(f'SOLUTION: {prob["solution"]}')
            #     print('=====================')
        else:
            # continue
            pass
        # gpt4_pred_idx = our_pred_idx

        if gpt4_pred_idx == prob['answer']:
            results['correct'] += 1
        else:
            results['incorrect'] += 1

        if gpt4_pred_idx == prob['answer'] or our_pred_idx == prob['answer']:
            results['correct_upperbound'] += 1

    correct = results['correct']
    total = results['correct'] + results['incorrect']
    print(f'Total: {total}, Correct: {correct}, Accuracy: {correct / total * 100:.2f}%')
    print(f'Total: {total}, Correct (upper): {results["correct_upperbound"]}, Accuracy: {results["correct_upperbound"] / total * 100:.2f}%')
    print(f'Total: {total}, GPT-4 NO-ANS (RANDOM): {results["gpt4_failed"]}, Percentage: {results["gpt4_failed"] / total * 100:.2f}%')
llava/eval/eval_science_qa_gpt4_requery.py
ADDED
@@ -0,0 +1,149 @@
import argparse
import json
import os
import re
import random
from collections import defaultdict


def get_args():
    parser = argparse.ArgumentParser()
    parser.add_argument('--base-dir', type=str)
    parser.add_argument('--gpt4-result', type=str)
    parser.add_argument('--requery-result', type=str)
    parser.add_argument('--our-result', type=str)
    parser.add_argument('--output-result', type=str)
    parser.add_argument('--split', type=str, default='test')
    parser.add_argument('--options', type=list, default=["A", "B", "C", "D", "E"])
    return parser.parse_args()


def convert_caps(results):
    fakecaps = []
    for result in results:
        image_id = result['question_id']
        caption = result['text']
        fakecaps.append({"image_id": int(image_id), "caption": caption})
    return fakecaps


def get_pred_idx(prediction, choices, options):
    """
    Get the index (e.g. 2) from the prediction (e.g. 'C')
    """
    if prediction in options[:len(choices)]:
        return options.index(prediction)
    else:
        return random.choice(range(len(choices)))


if __name__ == "__main__":
    args = get_args()

    base_dir = args.base_dir
    split_indices = json.load(open(os.path.join(base_dir, "pid_splits.json")))[args.split]
    problems = json.load(open(os.path.join(base_dir, "problems.json")))
    our_predictions = [json.loads(line) for line in open(args.our_result)]
    our_predictions = {pred['question_id']: pred for pred in our_predictions}
    split_problems = {idx: problems[idx] for idx in split_indices}

    requery_predictions = [json.loads(line) for line in open(args.requery_result)]
    requery_predictions = {pred['question_id']: pred for pred in requery_predictions}

    gpt4_predictions = json.load(open(args.gpt4_result))['outputs']

    results = defaultdict(lambda: 0)

    sqa_results = {}
    sqa_results['acc'] = None
    sqa_results['correct'] = None
    sqa_results['count'] = None
    sqa_results['results'] = {}
    sqa_results['outputs'] = {}

    for prob_id, prob in split_problems.items():
        if prob_id not in our_predictions:
            assert False
        if prob_id not in gpt4_predictions:
            assert False
        our_pred = our_predictions[prob_id]['text']
        gpt4_pred = gpt4_predictions[prob_id]
        if prob_id not in requery_predictions:
            results['missing_requery'] += 1
            requery_pred = "MISSING"
        else:
            requery_pred = requery_predictions[prob_id]['text']

        pattern = re.compile(r'The answer is ([A-Z]).')
        our_res = pattern.findall(our_pred)
        if len(our_res) == 1:
            our_answer = our_res[0]  # 'A', 'B', ...
        else:
            our_answer = "FAILED"

        requery_res = pattern.findall(requery_pred)
        if len(requery_res) == 1:
            requery_answer = requery_res[0]  # 'A', 'B', ...
        else:
            requery_answer = "FAILED"

        gpt4_res = pattern.findall(gpt4_pred)
        if len(gpt4_res) == 1:
            gpt4_answer = gpt4_res[0]  # 'A', 'B', ...
        else:
            gpt4_answer = "FAILED"

        our_pred_idx = get_pred_idx(our_answer, prob['choices'], args.options)
        gpt4_pred_idx = get_pred_idx(gpt4_answer, prob['choices'], args.options)
        requery_pred_idx = get_pred_idx(requery_answer, prob['choices'], args.options)

        results['total'] += 1

        if gpt4_answer == 'FAILED':
            results['gpt4_failed'] += 1
            if gpt4_pred_idx == prob['answer']:
                results['gpt4_correct'] += 1
            if our_pred_idx == prob['answer']:
                results['gpt4_ourvisual_correct'] += 1
        elif gpt4_pred_idx == prob['answer']:
            results['gpt4_correct'] += 1
            results['gpt4_ourvisual_correct'] += 1

        if our_pred_idx == prob['answer']:
            results['our_correct'] += 1

        if requery_answer == 'FAILED':
            sqa_results['results'][prob_id] = our_pred_idx
            if our_pred_idx == prob['answer']:
                results['requery_correct'] += 1
        else:
            sqa_results['results'][prob_id] = requery_pred_idx
            if requery_pred_idx == prob['answer']:
                results['requery_correct'] += 1
            else:
                # The separator was originally a stray print() nested inside this
                # f-string literal; it is emitted as part of the message instead.
                print(f"""
Question ({args.options[prob['answer']]}): {our_predictions[prob_id]['prompt']}
Our ({our_answer}): {our_pred}
GPT-4 ({gpt4_answer}): {gpt4_pred}
Requery ({requery_answer}): {requery_pred}
=====================================
""")

        if gpt4_pred_idx == prob['answer'] or our_pred_idx == prob['answer']:
            results['correct_upperbound'] += 1

    total = results['total']
    print(f'Total: {total}, Our-Correct: {results["our_correct"]}, Accuracy: {results["our_correct"] / total * 100:.2f}%')
    print(f'Total: {total}, GPT-4-Correct: {results["gpt4_correct"]}, Accuracy: {results["gpt4_correct"] / total * 100:.2f}%')
    print(f'Total: {total}, GPT-4 NO-ANS (RANDOM): {results["gpt4_failed"]}, Percentage: {results["gpt4_failed"] / total * 100:.2f}%')
    print(f'Total: {total}, GPT-4-OursVisual-Correct: {results["gpt4_ourvisual_correct"]}, Accuracy: {results["gpt4_ourvisual_correct"] / total * 100:.2f}%')
    print(f'Total: {total}, Requery-Correct: {results["requery_correct"]}, Accuracy: {results["requery_correct"] / total * 100:.2f}%')
    print(f'Total: {total}, Correct upper: {results["correct_upperbound"]}, Accuracy: {results["correct_upperbound"] / total * 100:.2f}%')

    sqa_results['acc'] = results["requery_correct"] / total * 100
    sqa_results['correct'] = results["requery_correct"]
    sqa_results['count'] = total

    with open(args.output_result, 'w') as f:
        json.dump(sqa_results, f, indent=2)
llava/eval/eval_textvqa.py
ADDED
@@ -0,0 +1,65 @@
import os
import argparse
import json
import re

from llava.eval.m4c_evaluator import TextVQAAccuracyEvaluator


def get_args():
    parser = argparse.ArgumentParser()
    parser.add_argument('--annotation-file', type=str)
    parser.add_argument('--result-file', type=str)
    parser.add_argument('--result-dir', type=str)
    return parser.parse_args()


def prompt_processor(prompt):
    if prompt.startswith('OCR tokens: '):
        pattern = r"Question: (.*?) Short answer:"
        match = re.search(pattern, prompt, re.DOTALL)
        question = match.group(1)
    elif 'Reference OCR token: ' in prompt and len(prompt.split('\n')) == 3:
        if prompt.startswith('Reference OCR token:'):
            question = prompt.split('\n')[1]
        else:
            question = prompt.split('\n')[0]
    elif len(prompt.split('\n')) == 2:
        question = prompt.split('\n')[0]
    else:
        assert False

    return question.lower()


def eval_single(annotation_file, result_file):
    experiment_name = os.path.splitext(os.path.basename(result_file))[0]
    print(experiment_name)
    annotations = json.load(open(annotation_file))['data']
    annotations = {(annotation['image_id'], annotation['question'].lower()): annotation for annotation in annotations}
    results = [json.loads(line) for line in open(result_file)]

    pred_list = []
    for result in results:
        annotation = annotations[(result['question_id'], prompt_processor(result['prompt']))]
        pred_list.append({
            "pred_answer": result['text'],
            "gt_answers": annotation['answers'],
        })

    evaluator = TextVQAAccuracyEvaluator()
    print('Samples: {}\nAccuracy: {:.2f}%\n'.format(len(pred_list), 100. * evaluator.eval_pred_list(pred_list)))


if __name__ == "__main__":
    args = get_args()

    if args.result_file is not None:
        eval_single(args.annotation_file, args.result_file)

    if args.result_dir is not None:
        for result_file in sorted(os.listdir(args.result_dir)):
            if not result_file.endswith('.jsonl'):
                print(f'Skipping {result_file}')
                continue
            eval_single(args.annotation_file, os.path.join(args.result_dir, result_file))
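A hedged illustration of the three prompt shapes `prompt_processor` accepts (the prompt strings below are invented placeholders; real ones come from the result files):

# Invented prompts, shown only to illustrate the accepted formats.
p1 = "OCR tokens: stop, 7 Question: What does the sign say? Short answer:"
p2 = "What does the sign say?\nReference OCR token: stop, 7\nAnswer the question."
p3 = "What does the sign say?\nAnswer the question using a single word or phrase."

for p in (p1, p2, p3):
    print(prompt_processor(p))   # -> "what does the sign say?" in each case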
llava/eval/generate_webpage_data_from_table.py
ADDED
@@ -0,0 +1,111 @@
"""Generate json file for webpage."""
import json
import os
import re

# models = ['llama', 'alpaca', 'gpt35', 'bard']
models = ['vicuna']


def read_jsonl(path: str, key: str=None):
    data = []
    with open(os.path.expanduser(path)) as f:
        for line in f:
            if not line:
                continue
            data.append(json.loads(line))
    if key is not None:
        data.sort(key=lambda x: x[key])
        data = {item[key]: item for item in data}
    return data


def trim_hanging_lines(s: str, n: int) -> str:
    s = s.strip()
    for _ in range(n):
        s = s.split('\n', 1)[1].strip()
    return s


if __name__ == '__main__':
    questions = read_jsonl('table/question.jsonl', key='question_id')

    # alpaca_answers = read_jsonl('table/answer/answer_alpaca-13b.jsonl', key='question_id')
    # bard_answers = read_jsonl('table/answer/answer_bard.jsonl', key='question_id')
    # gpt35_answers = read_jsonl('table/answer/answer_gpt35.jsonl', key='question_id')
    # llama_answers = read_jsonl('table/answer/answer_llama-13b.jsonl', key='question_id')
    vicuna_answers = read_jsonl('table/answer/answer_vicuna-13b.jsonl', key='question_id')
    ours_answers = read_jsonl('table/results/llama-13b-hf-alpaca.jsonl', key='question_id')

    review_vicuna = read_jsonl('table/review/review_vicuna-13b_llama-13b-hf-alpaca.jsonl', key='question_id')
    # review_alpaca = read_jsonl('table/review/review_alpaca-13b_vicuna-13b.jsonl', key='question_id')
    # review_bard = read_jsonl('table/review/review_bard_vicuna-13b.jsonl', key='question_id')
    # review_gpt35 = read_jsonl('table/review/review_gpt35_vicuna-13b.jsonl', key='question_id')
    # review_llama = read_jsonl('table/review/review_llama-13b_vicuna-13b.jsonl', key='question_id')

    records = []
    for qid in questions.keys():
        r = {
            'id': qid,
            'category': questions[qid]['category'],
            'question': questions[qid]['text'],
            'answers': {
                # 'alpaca': alpaca_answers[qid]['text'],
                # 'llama': llama_answers[qid]['text'],
                # 'bard': bard_answers[qid]['text'],
                # 'gpt35': gpt35_answers[qid]['text'],
                'vicuna': vicuna_answers[qid]['text'],
                'ours': ours_answers[qid]['text'],
            },
            'evaluations': {
                # 'alpaca': review_alpaca[qid]['text'],
                # 'llama': review_llama[qid]['text'],
                # 'bard': review_bard[qid]['text'],
                'vicuna': review_vicuna[qid]['content'],
                # 'gpt35': review_gpt35[qid]['text'],
            },
            'scores': {
                'vicuna': review_vicuna[qid]['tuple'],
                # 'alpaca': review_alpaca[qid]['score'],
                # 'llama': review_llama[qid]['score'],
                # 'bard': review_bard[qid]['score'],
                # 'gpt35': review_gpt35[qid]['score'],
            },
        }

        # cleanup data
        cleaned_evals = {}
        for k, v in r['evaluations'].items():
            v = v.strip()
            lines = v.split('\n')
            # trim the first line if it's a pair of numbers
            if re.match(r'\d+[, ]+\d+', lines[0]):
                lines = lines[1:]
            v = '\n'.join(lines)
            cleaned_evals[k] = v.replace('Assistant 1', "**Assistant 1**").replace('Assistant 2', '**Assistant 2**')

        r['evaluations'] = cleaned_evals
        records.append(r)

    # Reorder the records, this is optional
    for r in records:
        if r['id'] <= 20:
            r['id'] += 60
        else:
            r['id'] -= 20
    for r in records:
        if r['id'] <= 50:
            r['id'] += 10
        elif 50 < r['id'] <= 60:
            r['id'] -= 50
    for r in records:
        if r['id'] == 7:
            r['id'] = 1
        elif r['id'] < 7:
            r['id'] += 1

    records.sort(key=lambda x: x['id'])

    # Write to file
    with open('webpage/data.json', 'w') as f:
        json.dump({'questions': records, 'models': models}, f, indent=2)
llava/eval/m4c_evaluator.py
ADDED
@@ -0,0 +1,334 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Copyright (c) Facebook, Inc. and its affiliates.
|
2 |
+
import re
|
3 |
+
|
4 |
+
from tqdm import tqdm
|
5 |
+
|
6 |
+
|
7 |
+
class EvalAIAnswerProcessor:
|
8 |
+
"""
|
9 |
+
Processes an answer similar to Eval AI
|
10 |
+
copied from
|
11 |
+
https://github.com/facebookresearch/mmf/blob/c46b3b3391275b4181567db80943473a89ab98ab/pythia/tasks/processors.py#L897
|
12 |
+
"""
|
13 |
+
|
14 |
+
CONTRACTIONS = {
|
15 |
+
"aint": "ain't",
|
16 |
+
"arent": "aren't",
|
17 |
+
"cant": "can't",
|
18 |
+
"couldve": "could've",
|
19 |
+
"couldnt": "couldn't",
|
20 |
+
"couldn'tve": "couldn't've",
|
21 |
+
"couldnt've": "couldn't've",
|
22 |
+
"didnt": "didn't",
|
23 |
+
"doesnt": "doesn't",
|
24 |
+
"dont": "don't",
|
25 |
+
"hadnt": "hadn't",
|
26 |
+
"hadnt've": "hadn't've",
|
27 |
+
"hadn'tve": "hadn't've",
|
28 |
+
"hasnt": "hasn't",
|
29 |
+
"havent": "haven't",
|
30 |
+
"hed": "he'd",
|
31 |
+
"hed've": "he'd've",
|
32 |
+
"he'dve": "he'd've",
|
33 |
+
"hes": "he's",
|
34 |
+
"howd": "how'd",
|
35 |
+
"howll": "how'll",
|
36 |
+
"hows": "how's",
|
37 |
+
"Id've": "I'd've",
|
38 |
+
"I'dve": "I'd've",
|
39 |
+
"Im": "I'm",
|
40 |
+
"Ive": "I've",
|
41 |
+
"isnt": "isn't",
|
42 |
+
"itd": "it'd",
|
43 |
+
"itd've": "it'd've",
|
44 |
+
"it'dve": "it'd've",
|
45 |
+
"itll": "it'll",
|
46 |
+
"let's": "let's",
|
47 |
+
"maam": "ma'am",
|
48 |
+
"mightnt": "mightn't",
|
49 |
+
"mightnt've": "mightn't've",
|
50 |
+
"mightn'tve": "mightn't've",
|
51 |
+
"mightve": "might've",
|
52 |
+
"mustnt": "mustn't",
|
53 |
+
"mustve": "must've",
|
54 |
+
"neednt": "needn't",
|
55 |
+
"notve": "not've",
|
56 |
+
"oclock": "o'clock",
|
57 |
+
"oughtnt": "oughtn't",
|
58 |
+
"ow's'at": "'ow's'at",
|
59 |
+
"'ows'at": "'ow's'at",
|
60 |
+
"'ow'sat": "'ow's'at",
|
61 |
+
"shant": "shan't",
|
62 |
+
"shed've": "she'd've",
|
63 |
+
"she'dve": "she'd've",
|
64 |
+
"she's": "she's",
|
65 |
+
"shouldve": "should've",
|
66 |
+
"shouldnt": "shouldn't",
|
67 |
+
"shouldnt've": "shouldn't've",
|
68 |
+
"shouldn'tve": "shouldn't've",
|
69 |
+
"somebody'd": "somebodyd",
|
70 |
+
"somebodyd've": "somebody'd've",
|
71 |
+
"somebody'dve": "somebody'd've",
|
72 |
+
"somebodyll": "somebody'll",
|
73 |
+
"somebodys": "somebody's",
|
74 |
+
"someoned": "someone'd",
|
75 |
+
"someoned've": "someone'd've",
|
76 |
+
"someone'dve": "someone'd've",
|
77 |
+
"someonell": "someone'll",
|
78 |
+
"someones": "someone's",
|
79 |
+
"somethingd": "something'd",
|
80 |
+
"somethingd've": "something'd've",
|
81 |
+
"something'dve": "something'd've",
|
82 |
+
"somethingll": "something'll",
|
83 |
+
"thats": "that's",
|
84 |
+
"thered": "there'd",
|
85 |
+
"thered've": "there'd've",
|
86 |
+
"there'dve": "there'd've",
|
87 |
+
"therere": "there're",
|
88 |
+
"theres": "there's",
|
89 |
+
"theyd": "they'd",
|
90 |
+
"theyd've": "they'd've",
|
91 |
+
"they'dve": "they'd've",
|
92 |
+
"theyll": "they'll",
|
93 |
+
"theyre": "they're",
|
94 |
+
"theyve": "they've",
|
95 |
+
"twas": "'twas",
|
96 |
+
"wasnt": "wasn't",
|
97 |
+
"wed've": "we'd've",
|
98 |
+
"we'dve": "we'd've",
|
99 |
+
"weve": "we've",
|
100 |
+
"werent": "weren't",
|
101 |
+
"whatll": "what'll",
|
102 |
+
"whatre": "what're",
|
103 |
+
"whats": "what's",
|
104 |
+
"whatve": "what've",
|
105 |
+
"whens": "when's",
|
106 |
+
"whered": "where'd",
|
107 |
+
"wheres": "where's",
|
108 |
+
"whereve": "where've",
|
109 |
+
"whod": "who'd",
|
110 |
+
"whod've": "who'd've",
|
111 |
+
"who'dve": "who'd've",
|
112 |
+
"wholl": "who'll",
|
113 |
+
"whos": "who's",
|
114 |
+
"whove": "who've",
|
115 |
+
"whyll": "why'll",
|
116 |
+
"whyre": "why're",
|
117 |
+
"whys": "why's",
|
118 |
+
"wont": "won't",
|
119 |
+
"wouldve": "would've",
|
120 |
+
"wouldnt": "wouldn't",
|
121 |
+
"wouldnt've": "wouldn't've",
|
122 |
+
"wouldn'tve": "wouldn't've",
|
123 |
+
"yall": "y'all",
|
124 |
+
"yall'll": "y'all'll",
|
125 |
+
"y'allll": "y'all'll",
|
126 |
+
"yall'd've": "y'all'd've",
|
127 |
+
"y'alld've": "y'all'd've",
|
128 |
+
"y'all'dve": "y'all'd've",
|
129 |
+
"youd": "you'd",
|
130 |
+
"youd've": "you'd've",
|
131 |
+
"you'dve": "you'd've",
|
132 |
+
"youll": "you'll",
|
133 |
+
"youre": "you're",
|
134 |
+
"youve": "you've",
|
135 |
+
}
|
136 |
+
|
137 |
+
NUMBER_MAP = {
|
138 |
+
"none": "0",
|
139 |
+
"zero": "0",
|
140 |
+
"one": "1",
|
141 |
+
"two": "2",
|
142 |
+
"three": "3",
|
143 |
+
"four": "4",
|
144 |
+
"five": "5",
|
145 |
+
"six": "6",
|
146 |
+
"seven": "7",
|
147 |
+
"eight": "8",
|
148 |
+
"nine": "9",
|
149 |
+
"ten": "10",
|
150 |
+
}
|
151 |
+
ARTICLES = ["a", "an", "the"]
|
152 |
+
PERIOD_STRIP = re.compile(r"(?!<=\d)(\.)(?!\d)")
|
153 |
+
COMMA_STRIP = re.compile(r"(?<=\d)(\,)+(?=\d)")
|
154 |
+
PUNCTUATIONS = [
|
155 |
+
";",
|
156 |
+
r"/",
|
157 |
+
"[",
|
158 |
+
"]",
|
159 |
+
'"',
|
160 |
+
"{",
|
161 |
+
"}",
|
162 |
+
"(",
|
163 |
+
")",
|
164 |
+
"=",
|
165 |
+
"+",
|
166 |
+
"\\",
|
167 |
+
"_",
|
168 |
+
"-",
|
169 |
+
">",
|
170 |
+
"<",
|
171 |
+
"@",
|
172 |
+
"`",
|
173 |
+
",",
|
174 |
+
"?",
|
175 |
+
"!",
|
176 |
+
]
|
177 |
+
|
178 |
+
def __init__(self, *args, **kwargs):
|
179 |
+
pass
|
180 |
+
|
181 |
+
def word_tokenize(self, word):
|
182 |
+
word = word.lower()
|
183 |
+
word = word.replace(",", "").replace("?", "").replace("'s", " 's")
|
184 |
+
return word.strip()
|
185 |
+
|
186 |
+
def process_punctuation(self, in_text):
|
187 |
+
out_text = in_text
|
188 |
+
for p in self.PUNCTUATIONS:
|
189 |
+
if (p + " " in in_text or " " + p in in_text) or (
|
190 |
+
re.search(self.COMMA_STRIP, in_text) is not None
|
191 |
+
):
|
192 |
+
out_text = out_text.replace(p, "")
|
193 |
+
else:
|
194 |
+
out_text = out_text.replace(p, " ")
|
195 |
+
out_text = self.PERIOD_STRIP.sub("", out_text, re.UNICODE)
|
196 |
+
return out_text
|
197 |
+
|
198 |
+
def process_digit_article(self, in_text):
|
199 |
+
out_text = []
|
200 |
+
temp_text = in_text.lower().split()
|
201 |
+
for word in temp_text:
|
202 |
+
word = self.NUMBER_MAP.setdefault(word, word)
|
203 |
+
if word not in self.ARTICLES:
|
204 |
+
out_text.append(word)
|
205 |
+
else:
|
206 |
+
pass
|
207 |
+
for word_id, word in enumerate(out_text):
|
208 |
+
if word in self.CONTRACTIONS:
|
209 |
+
out_text[word_id] = self.CONTRACTIONS[word]
|
210 |
+
out_text = " ".join(out_text)
|
211 |
+
return out_text
|
212 |
+
|
213 |
+
def __call__(self, item):
|
214 |
+
item = self.word_tokenize(item)
|
215 |
+
item = item.replace("\n", " ").replace("\t", " ").strip()
|
216 |
+
item = self.process_punctuation(item)
|
217 |
+
item = self.process_digit_article(item)
|
218 |
+
return item
|
219 |
+
|
220 |
+
|
221 |
+
class TextVQAAccuracyEvaluator:
|
222 |
+
def __init__(self):
|
223 |
+
self.answer_processor = EvalAIAnswerProcessor()
|
224 |
+
|
225 |
+
def _compute_answer_scores(self, raw_answers):
|
226 |
+
"""
|
227 |
+
compute the accuracy (soft score) of human answers
|
228 |
+
"""
|
229 |
+
answers = [self.answer_processor(a) for a in raw_answers]
|
230 |
+
assert len(answers) == 10
|
231 |
+
gt_answers = list(enumerate(answers))
|
232 |
+
unique_answers = set(answers)
|
233 |
+
unique_answer_scores = {}
|
234 |
+
|
235 |
+
for unique_answer in unique_answers:
|
236 |
+
accs = []
|
237 |
+
for gt_answer in gt_answers:
|
238 |
+
other_answers = [item for item in gt_answers if item != gt_answer]
|
239 |
+
matching_answers = [
|
240 |
+
item for item in other_answers if item[1] == unique_answer
|
241 |
+
]
|
242 |
+
acc = min(1, float(len(matching_answers)) / 3)
|
243 |
+
accs.append(acc)
|
244 |
+
unique_answer_scores[unique_answer] = sum(accs) / len(accs)
|
245 |
+
|
246 |
+
return unique_answer_scores
|
247 |
+
|
248 |
+
def eval_pred_list(self, pred_list):
|
249 |
+
pred_scores = []
|
250 |
+
for entry in tqdm(pred_list):
|
251 |
+
pred_answer = self.answer_processor(entry["pred_answer"])
|
252 |
+
unique_answer_scores = self._compute_answer_scores(entry["gt_answers"])
|
253 |
+
score = unique_answer_scores.get(pred_answer, 0.0)
|
254 |
+
pred_scores.append(score)
|
255 |
+
|
256 |
+
accuracy = sum(pred_scores) / len(pred_scores)
|
257 |
+
return accuracy
|
258 |
+
|
259 |
+
|
260 |
+
class STVQAAccuracyEvaluator:
|
261 |
+
def __init__(self):
|
262 |
+
self.answer_processor = EvalAIAnswerProcessor()
|
263 |
+
|
264 |
+
def eval_pred_list(self, pred_list):
|
265 |
+
pred_scores = []
|
266 |
+
for entry in pred_list:
|
267 |
+
pred_answer = self.answer_processor(entry["pred_answer"])
|
268 |
+
gts = [self.answer_processor(a) for a in entry["gt_answers"]]
|
269 |
+
score = 1.0 if pred_answer in gts else 0.0
|
270 |
+
pred_scores.append(score)
|
271 |
+
|
272 |
+
accuracy = sum(pred_scores) / len(pred_scores)
|
273 |
+
return accuracy
|
274 |
+
|
275 |
+
|
276 |
+
class STVQAANLSEvaluator:
|
277 |
+
def __init__(self):
|
278 |
+
import editdistance # install with `pip install editdistance`
|
279 |
+
|
280 |
+
self.get_edit_distance = editdistance.eval
|
281 |
+
|
282 |
+
def get_anls(self, s1, s2):
|
283 |
+
s1 = s1.lower().strip()
|
284 |
+
s2 = s2.lower().strip()
|
285 |
+
iou = 1 - self.get_edit_distance(s1, s2) / max(len(s1), len(s2))
|
286 |
+
anls = iou if iou >= 0.5 else 0.0
|
287 |
+
return anls
|
288 |
+
|
289 |
+
def eval_pred_list(self, pred_list):
|
290 |
+
pred_scores = []
|
291 |
+
for entry in pred_list:
|
292 |
+
anls = max(
|
293 |
+
self.get_anls(entry["pred_answer"], gt) for gt in entry["gt_answers"]
|
294 |
+
)
|
295 |
+
pred_scores.append(anls)
|
296 |
+
|
297 |
+
accuracy = sum(pred_scores) / len(pred_scores)
|
298 |
+
return accuracy
|
299 |
+
|
300 |
+
|
301 |
+
class TextCapsBleu4Evaluator:
|
302 |
+
def __init__(self):
|
303 |
+
# The following script requires Java 1.8.0 and pycocotools installed.
|
304 |
+
# The pycocoevalcap can be installed with pip as
|
305 |
+
# pip install git+https://github.com/ronghanghu/coco-caption.git@python23
|
306 |
+
# Original pycocoevalcap code is at https://github.com/tylin/coco-caption
|
307 |
+
# but has no python3 support yet.
|
308 |
+
try:
|
309 |
+
from pycocoevalcap.bleu.bleu import Bleu
|
310 |
+
from pycocoevalcap.tokenizer.ptbtokenizer import PTBTokenizer
|
311 |
+
except ModuleNotFoundError:
|
312 |
+
print(
|
313 |
+
"Please install pycocoevalcap module using "
|
314 |
+
"pip install git+https://github.com/ronghanghu/coco-caption.git@python23" # noqa
|
315 |
+
)
|
316 |
+
raise
|
317 |
+
|
318 |
+
self.tokenizer = PTBTokenizer()
|
319 |
+
self.scorer = Bleu(4)
|
320 |
+
|
321 |
+
def eval_pred_list(self, pred_list):
|
322 |
+
# Create reference and hypotheses captions.
|
323 |
+
gts = {}
|
324 |
+
res = {}
|
325 |
+
for idx, entry in enumerate(pred_list):
|
326 |
+
gts[idx] = [{"caption": a} for a in entry["gt_answers"]]
|
327 |
+
res[idx] = [{"caption": entry["pred_answer"]}]
|
328 |
+
|
329 |
+
gts = self.tokenizer.tokenize(gts)
|
330 |
+
res = self.tokenizer.tokenize(res)
|
331 |
+
score, _ = self.scorer.compute_score(gts, res)
|
332 |
+
|
333 |
+
bleu4 = score[3] # score is (Bleu-1, Bleu-2, Bleu-3, Bleu-4)
|
334 |
+
return bleu4
|