Migrated from GitHub
- data/LICENSE.md +43 -0
- data/docs/resource_listing.md +101 -0
- data/images/example_workflow/1.PNG +3 -0
- data/images/example_workflow/10.png +3 -0
- data/images/example_workflow/11.png +3 -0
- data/images/example_workflow/12.png +3 -0
- data/images/example_workflow/13.png +3 -0
- data/images/example_workflow/14.PNG +3 -0
- data/images/example_workflow/2.PNG +3 -0
- data/images/example_workflow/3.PNG +3 -0
- data/images/example_workflow/4.PNG +3 -0
- data/images/example_workflow/5.PNG +3 -0
- data/images/example_workflow/6.PNG +3 -0
- data/images/example_workflow/7.PNG +3 -0
- data/images/example_workflow/8.PNG +3 -0
- data/images/example_workflow/9.png +3 -0
- data/images/github_banner.png +3 -0
- data/images/protify_logo.png +3 -0
- data/images/synthyra_logo.png +3 -0
- data/probe_package_colab.ipynb +497 -0
- data/pyproject.toml +41 -0
- data/requirements.txt +20 -0
- data/setup_bioenv.sh +45 -0
- data/src/protify/base_models/__init__.py +14 -0
- data/src/protify/base_models/amplify.py +3 -0
- data/src/protify/base_models/ankh.py +78 -0
- data/src/protify/base_models/base_tokenizer.py +36 -0
- data/src/protify/base_models/dplm.py +388 -0
- data/src/protify/base_models/esm2.py +84 -0
- data/src/protify/base_models/esm3.py +3 -0
- data/src/protify/base_models/esmc.py +84 -0
- data/src/protify/base_models/get_base_models.py +215 -0
- data/src/protify/base_models/glm.py +90 -0
- data/src/protify/base_models/protbert.py +79 -0
- data/src/protify/base_models/proteinvec.py +3 -0
- data/src/protify/base_models/prott5.py +83 -0
- data/src/protify/base_models/random.py +62 -0
- data/src/protify/base_models/t5.py +150 -0
- data/src/protify/base_models/utils.py +19 -0
- data/src/protify/data/__init__.py +15 -0
- data/src/protify/data/data_collators.py +295 -0
- data/src/protify/data/data_mixin.py +472 -0
- data/src/protify/data/dataset_classes.py +319 -0
- data/src/protify/data/dataset_utils.py +101 -0
- data/src/protify/data/supported_datasets.py +99 -0
- data/src/protify/data/utils.py +31 -0
- data/src/protify/embedder.py +426 -0
- data/src/protify/github_banner.png +3 -0
- data/src/protify/gui.py +1046 -0
- data/src/protify/logger.py +287 -0
data/LICENSE.md
ADDED
@@ -0,0 +1,43 @@
Protify License (BSD 3‑Clause with Additional Restrictions)

Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:

1. Redistributions of source code must retain the above copyright notice, this list of conditions, and the following disclaimers in full.

2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions, and the following disclaimers in the documentation and/or other materials provided with the distribution.

3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.

4. **Commercial Modification & Sale Restriction** – The Software, in whole or in part, may **not** be modified *or* sold for commercial purposes. Commercial entities and individuals may, however, utilize the unmodified Software for internal or external commercial activities, subject to compliance with all other terms of this license.

5. **Attribution for Generated Molecules** – Any molecule or derivative generated—whether for commercial or non‑commercial use—using the outputs of the Protify system must include clear and conspicuous credit to **“Protify”** (e.g., “Molecule generated with the Protify system”). Such credit must appear in any publication, disclosure, promotional material, or commercial documentation in which the molecule is referenced, sold, transferred, or otherwise made available.

6. **No Removal of Notices** – You must not remove, obscure, or alter any proprietary notices (including attribution or copyright notices) that appear in or on the Software or that accompany the outputs of the Protify system.

---

### DISCLAIMER & USER AGREEMENT

Deep‑learning models, including those contained in the Protify system, generate outputs via advanced probabilistic algorithms that may be inaccurate, incomplete, or otherwise unsuitable for any given purpose. **By downloading, installing, or using the Software (including running any of its models), you acknowledge and agree that:**

* **Assumption of Risk** – You assume full responsibility for verifying the accuracy, fitness, and safety of all outputs produced by the Software—including, without limitation, any molecular structures, sequences, annotations, or recommendations.
* **No Professional Advice** – Outputs are provided for informational purposes only and do not constitute professional, scientific, medical, legal, or other advice. You must perform your own independent checks before relying on any output.
* **Indemnity** – To the maximum extent permitted by law, you agree to indemnify, defend, and hold harmless the copyright holder and contributors from and against any and all claims, damages, losses, liabilities, costs, or expenses (including reasonable attorneys’ fees) arising out of or related to your use of the Software or its outputs.
* **Compliance with Law** – You are solely responsible for ensuring that your use of the Software and its outputs complies with all applicable laws, regulations, and industry standards, including those relating to export control, intellectual‑property rights, and biosafety/biosecurity.

---

### WARRANTY DISCLAIMER

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS **“AS IS”** AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. WITHOUT LIMITING THE GENERALITY OF THE FOREGOING, NO WARRANTY IS MADE THAT THE SOFTWARE OR ITS OUTPUTS WILL BE ACCURATE, COMPLETE, NON‑INFRINGING, OR FREE FROM DEFECTS. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, PROFITS, OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE OR ITS OUTPUTS, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

---

### TERMINATION

Failure to comply with any of the above conditions automatically terminates your rights under this license. Upon termination, you must cease all use, distribution, and reproduction of the Software and permanently delete or destroy all copies in your possession or control.

---

Except as expressly modified above, all terms of the standard BSD 3‑Clause License remain in full force and effect.
data/docs/resource_listing.md
ADDED
@@ -0,0 +1,101 @@
# Listing Supported Models and Datasets

Protify provides several ways to view and explore the supported models and datasets. This documentation explains how to use these features.

## Using the README Toggle Sections

The main README.md file contains expandable toggle sections for both models and datasets:

- **Currently Supported Models**: Click the toggle to expand and see a complete table of models with their descriptions, sizes, and types.
- **Currently Supported Datasets**: Click the toggle to expand and see a complete table of datasets with their descriptions, types, and tasks.

## Command-Line Listing

Protify provides command-line utilities for listing models and datasets with detailed information:

### Listing Models

To list all supported models with their descriptions:

```bash
# List all supported models
python -m src.protify.base_models.get_base_models --list

# To download standard models
python -m src.protify.base_models.get_base_models --download
```

### Listing Datasets

To list all supported datasets with their descriptions:

```bash
# List all datasets
python -m src.protify.data.dataset_utils --list

# Get information about a specific dataset
python -m src.protify.data.dataset_utils --info EC
```

### Combined Listing

For a combined view of both models and datasets:

```bash
# List both models and datasets
python -m src.protify.resource_info --all

# List only standard models and datasets
python -m src.protify.resource_info --all --standard-only

# List only models
python -m src.protify.resource_info --models

# List only datasets
python -m src.protify.resource_info --datasets
```

## Programmatic Access

You can also access model and dataset information programmatically:

```python
# For models
from src.protify.resource_info import model_descriptions
from src.protify.base_models.get_base_models import currently_supported_models, standard_models

# Get information about a specific model
model_info = model_descriptions.get('ESM2-150', {})
print("Model: ESM2-150")
print(f"Description: {model_info.get('description', 'N/A')}")
print(f"Size: {model_info.get('size', 'N/A')}")
print(f"Type: {model_info.get('type', 'N/A')}")

# For datasets
from src.protify.resource_info import dataset_descriptions
from src.protify.data.supported_datasets import supported_datasets

# Get information about a specific dataset
dataset_info = dataset_descriptions.get('EC', {})
print("Dataset: EC")
print(f"Description: {dataset_info.get('description', 'N/A')}")
print(f"Type: {dataset_info.get('type', 'N/A')}")
print(f"Task: {dataset_info.get('task', 'N/A')}")
```

## Model Group Types

Models in Protify are generally grouped into the following categories:

1. **Protein Language Models**: Pre-trained models that have learned protein properties from large-scale sequence data (e.g., ESM2, ProtBert)
2. **Baseline Controls**: Models with random weights for comparison (e.g., Random, Random-Transformer)

## Dataset Group Types

Datasets are categorized by their task types:

1. **Multi-label Classification**: Datasets where each protein can have multiple labels (e.g., EC, GO-CC)
2. **Classification**: Binary or multi-class classification tasks (e.g., DeepLoc-2, DeepLoc-10)
3. **Regression**: Prediction of continuous values (e.g., enzyme-kcat, optimal-temperature)
4. **Protein-Protein Interaction**: Tasks focused on protein interactions (e.g., human-ppi, gold-ppi)
5. **Token-wise Classification/Regression**: Residue-level prediction tasks (e.g., SecondaryStructure-3)
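As an editorial illustration of putting these categories to use, the sketch below groups the supported datasets by task type via `dataset_descriptions`, imported as in the programmatic examples above. It assumes each entry carries a `'task'` key, as those examples suggest; adjust the key name if your version differs.

```python
# Sketch: group supported datasets by their task type.
# Assumes dataset_descriptions maps name -> {'description', 'type', 'task', ...}
# as in the examples above.
from collections import defaultdict
from src.protify.resource_info import dataset_descriptions

by_task = defaultdict(list)
for name, info in dataset_descriptions.items():
    by_task[info.get('task', 'unknown')].append(name)

for task, names in sorted(by_task.items()):
    print(f"{task}: {', '.join(sorted(names))}")
```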
data/images/example_workflow/1.PNG
ADDED (Git LFS)
data/images/example_workflow/10.png
ADDED (Git LFS)
data/images/example_workflow/11.png
ADDED (Git LFS)
data/images/example_workflow/12.png
ADDED (Git LFS)
data/images/example_workflow/13.png
ADDED (Git LFS)
data/images/example_workflow/14.PNG
ADDED (Git LFS)
data/images/example_workflow/2.PNG
ADDED (Git LFS)
data/images/example_workflow/3.PNG
ADDED (Git LFS)
data/images/example_workflow/4.PNG
ADDED (Git LFS)
data/images/example_workflow/5.PNG
ADDED (Git LFS)
data/images/example_workflow/6.PNG
ADDED (Git LFS)
data/images/example_workflow/7.PNG
ADDED (Git LFS)
data/images/example_workflow/8.PNG
ADDED (Git LFS)
data/images/example_workflow/9.png
ADDED (Git LFS)
data/images/github_banner.png
ADDED (Git LFS)
data/images/protify_logo.png
ADDED (Git LFS)
data/images/synthyra_logo.png
ADDED (Git LFS)
data/probe_package_colab.ipynb
ADDED
@@ -0,0 +1,497 @@
{
  "cells": [
    {
      "cell_type": "markdown",
      "metadata": {},
      "source": [
        "# OUT OF DATE - NEEDS TO BE UPDATED"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": 1,
      "metadata": {
        "id": "mQ__d_petycW"
      },
      "outputs": [],
      "source": [
        "#@title **1. Setup**\n",
        "\n",
        "#@markdown ### Identification\n",
        "huggingface_username = \"Synthyra\" #@param {type:\"string\"}\n",
        "#@markdown ---\n",
        "huggingface_token = \"\" #@param {type:\"string\"}\n",
        "#@markdown ---\n",
        "wandb_api_key = \"\" #@param {type:\"string\"}\n",
        "#@markdown ---\n",
        "synthyra_api_key = \"\" #@param {type:\"string\"}\n",
        "#@markdown ---\n",
        "github_token = \"\" #@param {type:\"string\"}\n",
        "#@markdown ---\n",
        "\n",
        "\n",
        "github_clone_path = f\"https://{github_token}@github.com/Synthyra/ProbePackageHolder.git\"\n",
        "# !git clone {github_clone_path}\n",
        "# %cd ProbePackageHolder\n",
        "# !pip install -r requirements.txt --quiet\n"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {},
      "outputs": [],
      "source": [
        "#@title **2. Session/Directory Settings**\n",
        "\n",
        "import torch\n",
        "import argparse\n",
        "from types import SimpleNamespace\n",
        "from base_models.get_base_models import BaseModelArguments, standard_benchmark\n",
        "from data.hf_data import HFDataArguments\n",
        "from data.supported_datasets import supported_datasets\n",
        "from embedder import EmbeddingArguments\n",
        "from probes.get_probe import ProbeArguments\n",
        "from probes.trainers import TrainerArguments\n",
        "from main import MainProcess\n",
        "\n",
        "\n",
        "main = MainProcess(argparse.Namespace(), GUI=True)\n",
        "\n",
        "#@markdown **Paths**\n",
        "\n",
        "#@markdown These will be created automatically if they don't exist\n",
        "\n",
        "#@markdown **Log Directory**\n",
        "log_dir = \"logs\" #@param {type:\"string\"}\n",
        "#@markdown ---\n",
        "\n",
        "#@markdown **Results Directory**\n",
        "results_dir = \"results\" #@param {type:\"string\"}\n",
        "#@markdown ---\n",
        "\n",
        "#@markdown **Model Save Directory**\n",
        "model_save_dir = \"weights\" #@param {type:\"string\"}\n",
        "#@markdown ---\n",
        "\n",
        "#@markdown **Embedding Save Directory**\n",
        "embedding_save_dir = \"embeddings\" #@param {type:\"string\"}\n",
        "#@markdown ---\n",
        "\n",
        "#@markdown **Download Directory**\n",
        "#@markdown - The Hugging Face repository that embeddings are downloaded from\n",
        "download_dir = \"Synthyra/mean_pooled_embeddings\" #@param {type:\"string\"}\n",
        "#@markdown ---\n",
        "\n",
        "\n",
        "main.full_args.hf_token = huggingface_token\n",
        "main.full_args.wandb_api_key = wandb_api_key\n",
        "main.full_args.synthyra_api_key = synthyra_api_key\n",
        "main.full_args.log_dir = log_dir\n",
        "main.full_args.results_dir = results_dir\n",
        "main.full_args.model_save_dir = model_save_dir\n",
        "main.full_args.embedding_save_dir = embedding_save_dir\n",
        "main.full_args.download_dir = download_dir\n",
        "main.full_args.replay_path = None\n",
        "main.logger_args = SimpleNamespace(**main.full_args.__dict__)\n",
        "main.start_log_gui()\n",
        "\n",
        "#@markdown Press play to set up the session:"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "FFgNDvDAt0xp"
      },
      "outputs": [],
      "source": [
        "#@title **3. Data Settings**\n",
        "\n",
        "#@markdown **Max Sequence Length**\n",
        "max_length = 2048 #@param {type:\"integer\"}\n",
        "#@markdown ---\n",
        "\n",
        "#@markdown **Trim Sequences**\n",
        "#@markdown - If true, sequences are removed if they are longer than the maximum length\n",
        "#@markdown - If false, sequences are truncated to the maximum length\n",
        "trim = False #@param {type:\"boolean\"}\n",
        "#@markdown ---\n",
        "\n",
        "#@markdown **Dataset Names**\n",
        "#@markdown Valid options (comma-separated):\n",
        "\n",
        "#@markdown *Multi-label classification:*\n",
        "\n",
        "#@markdown - EC, GO-CC, GO-BP, GO-MF\n",
        "\n",
        "#@markdown *Single-label classification:*\n",
        "\n",
        "#@markdown - MB, DeepLoc-2, DeepLoc-10, solubility, localization, material-production, cloning-clf, number-of-folds\n",
        "\n",
        "#@markdown *Regression:*\n",
        "\n",
        "#@markdown - enzyme-kcat, temperature-stability, optimal-temperature, optimal-ph, fitness-prediction, stability-prediction, fluorescence-prediction\n",
        "\n",
        "#@markdown *PPI:*\n",
        "\n",
        "#@markdown - human-ppi, peptide-HLA-MHC-affinity\n",
        "\n",
        "#@markdown *Tokenwise:*\n",
        "\n",
        "#@markdown - SecondaryStructure-3, SecondaryStructure-8\n",
        "dataset_names = \"EC, DeepLoc-2, DeepLoc-10, enzyme-kcat\" #@param {type:\"string\"}\n",
        "#@markdown ---\n",
        "\n",
        "data_paths = [supported_datasets[name.strip()] for name in dataset_names.split(\",\") if name.strip()]\n",
        "\n",
        "main.full_args.data_paths = data_paths\n",
        "main.full_args.max_length = max_length\n",
        "main.full_args.trim = trim\n",
        "main.data_args = HFDataArguments(**main.full_args.__dict__)\n",
        "args_dict = {k: v for k, v in main.full_args.__dict__.items() if k != 'all_seqs' and 'token' not in k.lower() and 'api' not in k.lower()}\n",
        "main.logger_args = SimpleNamespace(**args_dict)\n",
        "main.get_datasets()\n",
        "\n",
        "#@markdown Press play to load datasets:"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": 5,
      "metadata": {
        "id": "D1iMWkLzt8QM"
      },
      "outputs": [],
      "source": [
        "#@title **4. Model Selection**\n",
        "\n",
        "#@markdown Comma-separated model names.\n",
        "#@markdown If empty, defaults to `standard_benchmark`.\n",
        "#@markdown Valid options (comma-separated):\n",
        "#@markdown - `ESM2-8, ESM2-35, ESM2-150, ESM2-650`\n",
        "#@markdown - `ESMC-300, ESMC-600`\n",
        "#@markdown - `Random, Random-Transformer`\n",
        "model_names = \"ESMC-300\" #@param {type:\"string\"}\n",
        "#@markdown ---\n",
        "\n",
        "selected_models = [name.strip() for name in model_names.split(\",\") if name.strip()]\n",
        "\n",
        "if not selected_models:\n",
        "    selected_models = standard_benchmark\n",
        "\n",
        "main.full_args.model_names = selected_models\n",
        "main.model_args = BaseModelArguments(**main.full_args.__dict__)\n",
        "args_dict = {k: v for k, v in main.full_args.__dict__.items() if k != 'all_seqs' and 'token' not in k.lower() and 'api' not in k.lower()}\n",
        "main.logger_args = SimpleNamespace(**args_dict)\n",
        "main._write_args()\n",
        "\n",
        "#@markdown *Press play to choose models:*\n"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "qHCDeczNt20y"
      },
      "outputs": [],
      "source": [
        "#@title **5. Embedding Settings**\n",
        "#@markdown **Batch size**\n",
        "batch_size = 4 #@param {type:\"integer\"}\n",
        "#@markdown ---\n",
        "\n",
        "#@markdown **Number of dataloader workers**\n",
        "#@markdown - We recommend 0 for small sets of sequences, but 4-8 for larger sets\n",
        "num_workers = 0 #@param {type:\"integer\"}\n",
        "#@markdown ---\n",
        "\n",
        "#@markdown **Download embeddings from Hugging Face**\n",
        "#@markdown - If there is a precomputed embedding type that's useful to you, it is probably faster to download it\n",
        "#@markdown - HIGHLY recommended for CPU users\n",
        "download_embeddings = False #@param {type:\"boolean\"}\n",
        "#@markdown ---\n",
        "\n",
        "#@markdown **Full residue embeddings**\n",
        "#@markdown - If true, embeddings are saved as a matrix of shape `(L, d)`\n",
        "#@markdown - If false, embeddings are pooled to `(d,)`\n",
        "matrix_embed = False #@param {type:\"boolean\"}\n",
        "#@markdown ---\n",
        "\n",
        "#@markdown **Embedding Pooling Types**\n",
        "#@markdown - If more than one is passed, embeddings are concatenated\n",
        "#@markdown Valid options (comma-separated):\n",
        "#@markdown - `mean, max, norm, median, std, var, cls, parti`\n",
        "#@markdown - `parti` (pool parti) must be used on its own\n",
        "embedding_pooling_types = \"mean, std\" #@param {type:\"string\"}\n",
        "#@markdown ---\n",
        "\n",
        "#@markdown **Embedding Data Type**\n",
        "#@markdown - Embeddings are cast to this data type for storage\n",
        "embed_dtype = \"float32\" #@param [\"float32\",\"float16\",\"bfloat16\",\"float8_e4m3fn\",\"float8_e5m2\"]\n",
        "#@markdown ---\n",
        "\n",
        "#@markdown **Save embeddings to SQLite**\n",
        "#@markdown - If true, embeddings are saved to a SQLite database\n",
        "#@markdown - They will be accessed on the fly by the trainer\n",
        "#@markdown - This is HIGHLY recommended for matrix embeddings\n",
        "#@markdown - If false, embeddings are saved to a .pth file but loaded all at once\n",
        "sql = False #@param {type:\"boolean\"}\n",
        "#@markdown ---\n",
        "\n",
        "main.full_args.all_seqs = main.all_seqs\n",
        "main.full_args.batch_size = batch_size\n",
        "main.full_args.num_workers = num_workers\n",
        "main.full_args.download_embeddings = download_embeddings\n",
        "main.full_args.matrix_embed = matrix_embed\n",
        "main.full_args.embedding_pooling_types = [p.strip() for p in embedding_pooling_types.split(\",\") if p.strip()]\n",
        "if embed_dtype == \"float32\": main.embed_dtype = torch.float32\n",
        "elif embed_dtype == \"float16\": main.embed_dtype = torch.float16\n",
        "elif embed_dtype == \"bfloat16\": main.embed_dtype = torch.bfloat16\n",
        "elif embed_dtype == \"float8_e4m3fn\": main.embed_dtype = torch.float8_e4m3fn\n",
        "elif embed_dtype == \"float8_e5m2\": main.embed_dtype = torch.float8_e5m2\n",
        "else:\n",
        "    print(f\"Invalid embedding dtype: {embed_dtype}. Using float32.\")\n",
        "    main.embed_dtype = torch.float32\n",
        "main.sql = sql\n",
        "\n",
        "\n",
        "main.embedding_args = EmbeddingArguments(**main.full_args.__dict__)\n",
        "args_dict = {k: v for k, v in main.full_args.__dict__.items() if k != 'all_seqs' and 'token' not in k.lower() and 'api' not in k.lower()}\n",
        "main.logger_args = SimpleNamespace(**args_dict)\n",
        "main.save_embeddings_to_disk()\n",
        "\n",
        "#@markdown *Press play to embed sequences:*\n"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": 7,
      "metadata": {
        "id": "K7R-Htvit9Ti"
      },
      "outputs": [],
      "source": [
        "#@title **6. Probe Settings**\n",
        "\n",
        "#@markdown **Probe Type**\n",
        "#@markdown - `linear`: an MLP for pooled embeddings\n",
        "#@markdown - `transformer`: a transformer model for matrix embeddings\n",
        "#@markdown - `retrievalnet`: custom combination of cross-attention and convolution for matrix embeddings\n",
        "probe_type = \"linear\" #@param [\"linear\", \"transformer\", \"retrievalnet\"]\n",
        "#@markdown ---\n",
        "\n",
        "#@markdown **Tokenwise**\n",
        "#@markdown - If true, the objective is to predict a property of each token (matrix embeddings only)\n",
        "#@markdown - If false, the objective is to predict a property of the entire sequence (pooled embeddings OR matrix embeddings)\n",
        "tokenwise = False #@param {type:\"boolean\"}\n",
        "#@markdown ---\n",
        "\n",
        "#@markdown **Pre-LayerNorm**\n",
        "#@markdown - If true, a LayerNorm is applied as the first layer of the probe\n",
        "#@markdown - Typically improves performance\n",
        "pre_ln = True #@param {type:\"boolean\"}\n",
        "#@markdown ---\n",
        "\n",
        "#@markdown **Number of layers**\n",
        "#@markdown - Number of hidden layers in the probe\n",
        "#@markdown - Linear probes have 1 input layer and 2 output layers, so `n_layers = 1` yields a 4-layer MLP\n",
        "#@markdown - This refers to how many transformer blocks are used in the transformer probe\n",
        "#@markdown - Same for retrievalnet probes\n",
        "n_layers = 1 #@param {type:\"integer\"}\n",
        "#@markdown ---\n",
        "\n",
        "#@markdown **Hidden dimension**\n",
        "#@markdown - The hidden dimension of the model\n",
        "#@markdown - 2048 - 8192 is recommended for linear probes, 384 - 1536 is recommended for transformer probes\n",
        "hidden_dim = 8192 #@param {type:\"integer\"}\n",
        "#@markdown ---\n",
        "\n",
        "#@markdown **Dropout**\n",
        "#@markdown - Dropout rate for the probe\n",
        "#@markdown - 0.2 is recommended for linear, 0.1 otherwise\n",
        "dropout = 0.2 #@param {type:\"number\"}\n",
        "#@markdown ---\n",
        "\n",
        "#@markdown **Classifier dimension**\n",
        "#@markdown - The dimension of the classifier layer (transformer, retrievalnet probes only)\n",
        "classifier_dim = 4096 #@param {type:\"integer\"}\n",
        "#@markdown ---\n",
        "\n",
        "#@markdown **Classifier Dropout**\n",
        "#@markdown - Dropout rate for the classifier layer\n",
        "classifier_dropout = 0.2 #@param {type:\"number\"}\n",
        "#@markdown ---\n",
        "\n",
        "#@markdown **Number of heads**\n",
        "#@markdown - Number of attention heads in models with attention\n",
        "#@markdown - Between `hidden_dim // 128` and `hidden_dim // 32` is recommended\n",
        "n_heads = 4 #@param {type:\"integer\"}\n",
        "#@markdown ---\n",
        "\n",
        "#@markdown **Rotary Embeddings**\n",
        "#@markdown - If true, rotary embeddings are used with attention layers\n",
        "rotary = True #@param {type:\"boolean\"}\n",
        "#@markdown ---\n",
        "\n",
        "#@markdown **Probe Pooling Types**\n",
        "#@markdown - If more than one is passed, embeddings are concatenated\n",
        "#@markdown Valid options (comma-separated):\n",
        "#@markdown - `mean, max, norm, median, std, var, cls`\n",
        "#@markdown - How the transformer or retrievalnet embeddings are pooled for sequence-wise tasks\n",
        "probe_pooling_types_str = \"mean, cls\" #@param {type:\"string\"}\n",
        "\n",
        "probe_pooling_types = [p.strip() for p in probe_pooling_types_str.split(\",\") if p.strip()]\n",
        "\n",
        "main.full_args.probe_type = probe_type\n",
        "main.full_args.tokenwise = tokenwise\n",
        "main.full_args.pre_ln = pre_ln\n",
        "main.full_args.n_layers = n_layers\n",
        "main.full_args.hidden_dim = hidden_dim\n",
        "main.full_args.dropout = dropout\n",
        "main.full_args.classifier_dim = classifier_dim\n",
        "main.full_args.classifier_dropout = classifier_dropout\n",
        "main.full_args.n_heads = n_heads\n",
        "main.full_args.rotary = rotary\n",
        "main.full_args.probe_pooling_types = probe_pooling_types\n",
        "\n",
        "main.probe_args = ProbeArguments(**main.full_args.__dict__)\n",
        "args_dict = {k: v for k, v in main.full_args.__dict__.items() if k != 'all_seqs' and 'token' not in k.lower() and 'api' not in k.lower()}\n",
        "main.logger_args = SimpleNamespace(**args_dict)\n",
        "main._write_args()\n",
        "\n",
        "#@markdown ---\n",
        "#@markdown Press play to configure the probe:\n"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": 8,
      "metadata": {
        "id": "8W4OYnn4uIyU"
      },
      "outputs": [],
      "source": [
        "#@title **7. Training Settings**\n",
        "\n",
        "#@markdown **Use LoRA**\n",
        "#@markdown - If true, LoRA is applied to the base model\n",
        "use_lora = False #@param {type:\"boolean\"}\n",
        "#@markdown ---\n",
        "\n",
        "#@markdown **Hybrid Probe**\n",
        "#@markdown - If true, the probe is trained on frozen embeddings\n",
        "#@markdown - Then, the base model is finetuned alongside the probe\n",
        "hybrid_probe = False #@param {type:\"boolean\"}\n",
        "#@markdown ---\n",
        "\n",
        "#@markdown **Full Finetuning**\n",
        "#@markdown - If true, the base model is finetuned for the task\n",
        "full_finetuning = False #@param {type:\"boolean\"}\n",
        "#@markdown ---\n",
        "\n",
        "#@markdown **Number of epochs**\n",
        "num_epochs = 200 #@param {type:\"integer\"}\n",
        "#@markdown ---\n",
        "\n",
        "#@markdown **Trainer Batch Size**\n",
        "#@markdown - The batch size for probe training\n",
        "#@markdown - We recommend an effective batch size between 32 and 256, from some combination of this and gradient accumulation steps\n",
        "trainer_batch_size = 64 #@param {type:\"integer\"}\n",
        "#@markdown ---\n",
        "\n",
        "#@markdown **Gradient Accumulation Steps**\n",
        "gradient_accumulation_steps = 1 #@param {type:\"integer\"}\n",
        "#@markdown ---\n",
        "\n",
        "#@markdown **Learning Rate**\n",
        "lr = 0.0001 #@param {type:\"number\"}\n",
        "#@markdown ---\n",
        "\n",
        "#@markdown **Weight Decay**\n",
        "#@markdown - If you are having issues with overfitting, try increasing this\n",
        "weight_decay = 0.0 #@param {type:\"number\"}\n",
        "#@markdown ---\n",
        "\n",
        "#@markdown **Early Stopping Patience**\n",
        "#@markdown - We recommend keeping the number of epochs high and using this to gauge convergence\n",
        "patience = 10 #@param {type:\"integer\"}\n",
        "#@markdown ---\n",
        "\n",
        "main.full_args.use_lora = use_lora\n",
        "main.full_args.hybrid_probe = hybrid_probe\n",
        "main.full_args.full_finetuning = full_finetuning\n",
        "main.full_args.num_epochs = num_epochs\n",
        "main.full_args.trainer_batch_size = trainer_batch_size\n",
        "main.full_args.gradient_accumulation_steps = gradient_accumulation_steps\n",
        "main.full_args.lr = lr\n",
        "main.full_args.weight_decay = weight_decay\n",
        "main.full_args.patience = patience\n",
        "\n",
        "main.trainer_args = TrainerArguments(**main.full_args.__dict__)\n",
        "args_dict = {k: v for k, v in main.full_args.__dict__.items() if k != 'all_seqs' and 'token' not in k.lower() and 'api' not in k.lower()}\n",
        "main.logger_args = SimpleNamespace(**args_dict)\n",
        "main._write_args()\n",
        "\n",
        "#@markdown ---\n",
        "#@markdown Press play to run the trainer:\n",
        "main.run_nn_probe()"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "GdAk8wxWuJWO"
      },
      "outputs": [],
      "source": [
        "#@title **8. Log Replay**\n",
        "\n",
        "#@markdown **Replay Path**\n",
        "#@markdown - Replay everything from a log by passing the path to the log file\n",
        "replay_path = \"\" #@param {type:\"string\"}\n",
        "#@markdown ---\n",
        "\n",
        "from logger import LogReplayer\n",
        "replayer = LogReplayer(replay_path)\n",
        "replay_args = replayer.parse_log()\n",
        "replay_args.replay_path = replay_path\n",
        "\n",
        "for key, value in replay_args.__dict__.items():\n",
        "    if key in main.full_args.__dict__:\n",
        "        setattr(main.full_args, key, value)  # Namespace does not support item assignment\n",
        "\n",
        "replayer.run_replay(main)\n",
        "\n",
        "#@markdown ---\n",
        "#@markdown Press play to replay logs:\n"
      ]
    }
  ],
  "metadata": {
    "colab": {
      "provenance": []
    },
    "kernelspec": {
      "display_name": "Python 3",
      "name": "python3"
    },
    "language_info": {
      "codemirror_mode": {
        "name": "ipython",
        "version": 3
      },
      "file_extension": ".py",
      "mimetype": "text/x-python",
      "name": "python",
      "nbconvert_exporter": "python",
      "pygments_lexer": "ipython3",
      "version": "3.11.8"
    }
  },
  "nbformat": 4,
  "nbformat_minor": 0
}
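For reference outside Colab, here is a minimal script-form sketch of the flow this notebook walks through (session setup, data, models, embeddings, probe, training). Every class, attribute, and value below is taken from the notebook itself; since the notebook is flagged as out of date, treat this as an outline of the intended call sequence, not a tested recipe.

```python
# Script-form outline of the notebook's embed-then-probe flow. All names come
# from the notebook above, which is marked OUT OF DATE: verify against the
# current Protify API before relying on this.
import argparse
import torch
from types import SimpleNamespace

from base_models.get_base_models import BaseModelArguments
from data.hf_data import HFDataArguments
from data.supported_datasets import supported_datasets
from embedder import EmbeddingArguments
from probes.get_probe import ProbeArguments
from probes.trainers import TrainerArguments
from main import MainProcess

main = MainProcess(argparse.Namespace(), GUI=True)

# Session/directory settings
for key, value in dict(
    hf_token='', wandb_api_key='', synthyra_api_key='',
    log_dir='logs', results_dir='results', model_save_dir='weights',
    embedding_save_dir='embeddings',
    download_dir='Synthyra/mean_pooled_embeddings', replay_path=None,
).items():
    setattr(main.full_args, key, value)
main.logger_args = SimpleNamespace(**main.full_args.__dict__)
main.start_log_gui()

# Data
main.full_args.data_paths = [supported_datasets[n] for n in ('EC', 'DeepLoc-2')]
main.full_args.max_length = 2048
main.full_args.trim = False
main.data_args = HFDataArguments(**main.full_args.__dict__)
main.get_datasets()

# Models
main.full_args.model_names = ['ESMC-300']
main.model_args = BaseModelArguments(**main.full_args.__dict__)

# Embeddings (mean-pooled, float32, stored as a .pth file)
main.full_args.all_seqs = main.all_seqs
main.full_args.batch_size = 4
main.full_args.num_workers = 0
main.full_args.download_embeddings = False
main.full_args.matrix_embed = False
main.full_args.embedding_pooling_types = ['mean']
main.embed_dtype = torch.float32
main.sql = False
main.embedding_args = EmbeddingArguments(**main.full_args.__dict__)
main.save_embeddings_to_disk()

# Probe
main.full_args.probe_type = 'linear'
main.full_args.tokenwise = False
main.full_args.pre_ln = True
main.full_args.n_layers = 1
main.full_args.hidden_dim = 8192
main.full_args.dropout = 0.2
main.full_args.probe_pooling_types = ['mean']
main.probe_args = ProbeArguments(**main.full_args.__dict__)

# Training
main.full_args.use_lora = False
main.full_args.hybrid_probe = False
main.full_args.full_finetuning = False
main.full_args.num_epochs = 200
main.full_args.trainer_batch_size = 64
main.full_args.gradient_accumulation_steps = 1
main.full_args.lr = 1e-4
main.full_args.weight_decay = 0.0
main.full_args.patience = 10
main.trainer_args = TrainerArguments(**main.full_args.__dict__)
main.run_nn_probe()
```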
data/pyproject.toml
ADDED
@@ -0,0 +1,41 @@
[tool.poetry]
name = "Protify"
version = "0.0.5"
description = "Low code molecular property prediction"
authors = ["Synthyra <[email protected]>"]
keywords = ["plm", "protein", "transformer"]
repository = "https://github.com/Synthyra/Protify"
license = "Protify License"
readme = "README.md"
include = [
    "LICENSE.md",
    "README.md",
]

packages = [
    { include = "protify", from = "src" },
]

[tool.poetry.dependencies]
python = "^3.8"
torch = ">=2.5.1"
torchvision = "*"
transformers = ">=4.47"
accelerate = ">=1.1.0"
tf-keras = "*"
tensorflow = "*"
torchinfo = "*"
torchmetrics = "*"
scikit-learn = "*"
scipy = "*"
datasets = "*"
einops = "*"
numpy = "==1.26.1"
networkx = ">=3.4.2"
xgboost = "*"
lightgbm = "*"
pyfiglet = "*"

[build-system]
requires = ["poetry-core>=1.0.0"]
build-backend = "poetry.core.masonry.api"
data/requirements.txt
ADDED
@@ -0,0 +1,20 @@
torch>=2.5.1
torchvision
transformers>=4.47
accelerate>=1.1.0
peft
tf-keras
tensorflow
torchinfo
torchmetrics
scikit-learn==1.5.0
scipy>=1.13.1
datasets
einops
numpy==1.26.4
networkx>=3.4.2
xgboost
lightgbm
pyfiglet
seaborn>=0.13.2
matplotlib>=3.9.0
data/setup_bioenv.sh
ADDED
@@ -0,0 +1,45 @@
#!/bin/bash

# chmod +x setup_bioenv.sh
# ./setup_bioenv.sh

# Set up error handling
set -e  # Exit immediately if a command exits with a non-zero status

echo "Setting up Python virtual environment for Protify..."

# Create virtual environment
python3 -m venv ~/bioenv

# Activate virtual environment
source ~/bioenv/bin/activate

# Update pip and setuptools
echo "Upgrading pip and setuptools..."
pip install --upgrade pip setuptools

# Install torch and torchvision (CUDA 12.6 wheels)
echo "Installing torch and torchvision..."
pip install --force-reinstall torch torchvision --index-url https://download.pytorch.org/whl/cu126

# Install the remaining requirements
echo "Installing requirements"
pip install -r requirements.txt

# List installed packages for verification
echo -e "\nInstalled packages:"
pip list

# Instructions for future use
echo -e "\n======================="
echo "Setup complete!"
echo "======================="
echo "To activate this environment in the future, run:"
echo "  source ~/bioenv/bin/activate"
echo ""
echo "To deactivate the environment, simply run:"
echo "  deactivate"
echo ""
echo "Your virtual environment is located at: ~/bioenv"
echo "======================="
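Since the script installs CUDA 12.6 wheels, a quick sanity check after activation (an editorial addition, not part of the script) is to confirm that torch actually sees a GPU:

```python
# Run inside the activated ~/bioenv environment after setup_bioenv.sh completes.
import torch
print(torch.__version__, torch.version.cuda)  # expect a +cu126 build
print(torch.cuda.is_available())              # True if a compatible GPU/driver is present
```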
data/src/protify/base_models/__init__.py
ADDED
@@ -0,0 +1,14 @@
from .get_base_models import (
    get_base_model,
    get_base_model_for_training,
    get_tokenizer,
    currently_supported_models,
    standard_models,
    experimental_models,
    BaseModelArguments
)

# model_descriptions is optional; fall back to an empty dict if the module is absent.
try:
    from .model_descriptions import model_descriptions
except ImportError:
    model_descriptions = {}
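A minimal sketch of this export surface from the consumer side, assuming the package is installed as `protify` (per the pyproject.toml above) and that `currently_supported_models` is a list of model names, as the command-line listing suggests:

```python
# Sketch: inspect the exported model listings. Assumes Protify is installed as
# the `protify` package and currently_supported_models is a list of names.
from protify.base_models import currently_supported_models, standard_models, model_descriptions

print(standard_models)
for name in currently_supported_models:
    print(name, model_descriptions.get(name, {}).get('description', 'N/A'))
```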
data/src/protify/base_models/amplify.py
ADDED
@@ -0,0 +1,3 @@
"""
AMPLIFY models are not currently supported.
"""
data/src/protify/base_models/ankh.py
ADDED
@@ -0,0 +1,78 @@
import torch
import torch.nn as nn
from typing import Optional, Union, List, Dict
from transformers import T5EncoderModel, AutoTokenizer

from .base_tokenizer import BaseSequenceTokenizer
from .t5 import T5ForSequenceClassification, T5ForTokenClassification


presets = {
    'ANKH-Base': 'Synthyra/ANKH_base',
    'ANKH-Large': 'Synthyra/ANKH_large',
    'ANKH2-Large': 'Synthyra/ANKH2_large',
}


class ANKHTokenizerWrapper(BaseSequenceTokenizer):
    def __init__(self, tokenizer):
        super().__init__(tokenizer)

    def __call__(self, sequences: Union[str, List[str]], **kwargs) -> Dict[str, torch.Tensor]:
        if isinstance(sequences, str):
            sequences = [sequences]
        kwargs.setdefault('return_tensors', 'pt')
        kwargs.setdefault('padding', 'longest')
        kwargs.setdefault('add_special_tokens', True)
        tokenized = self.tokenizer(sequences, **kwargs)
        return tokenized


class AnkhForEmbedding(nn.Module):
    def __init__(self, model_path: str):
        super().__init__()
        self.plm = T5EncoderModel.from_pretrained(model_path)

    def forward(
        self,
        input_ids: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
    ) -> torch.Tensor:
        if output_attentions:
            out = self.plm(input_ids, attention_mask=attention_mask, output_attentions=output_attentions)
            return out.last_hidden_state, out.attentions
        else:
            return self.plm(input_ids, attention_mask=attention_mask).last_hidden_state


def get_ankh_tokenizer(preset: str):
    # All ANKH presets share the ANKH_base tokenizer, so `preset` is unused here.
    return ANKHTokenizerWrapper(AutoTokenizer.from_pretrained('Synthyra/ANKH_base'))


def build_ankh_model(preset: str):
    model_path = presets[preset]
    model = AnkhForEmbedding(model_path).eval()
    tokenizer = get_ankh_tokenizer(preset)
    return model, tokenizer


def get_ankh_for_training(preset: str, tokenwise: bool = False, num_labels: int = None, hybrid: bool = False):
    model_path = presets[preset]
    if hybrid:
        model = T5EncoderModel.from_pretrained(model_path).eval()
    else:
        if tokenwise:
            model = T5ForTokenClassification.from_pretrained(model_path, num_labels=num_labels).eval()
        else:
            model = T5ForSequenceClassification.from_pretrained(model_path, num_labels=num_labels).eval()
    tokenizer = get_ankh_tokenizer(preset)
    return model, tokenizer


if __name__ == '__main__':
    # py -m src.protify.base_models.ankh
    model, tokenizer = build_ankh_model('ANKH-Base')
    print(model)
    print(tokenizer)
    print(tokenizer('MEKVQYLTRSAIRRASTIEMPQQARQKLQNLFINFCLILICBBOLLICIIVMLL'))
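As a usage note (an editorial sketch, not part of the file): building a classification variant with the helpers above might look like the following. The label count and toy sequences are illustrative, and the `.logits` attribute assumes the custom T5 heads in `t5.py` follow the usual Hugging Face output convention.

```python
# Sketch: ANKH with a 2-class sequence-classification head, using the helpers above.
import torch

model, tokenizer = get_ankh_for_training('ANKH-Base', tokenwise=False, num_labels=2)
batch = tokenizer(['MEKVQYLTRSAIRRASTIE', 'MPQQARQKLQNLF'])
with torch.no_grad():
    out = model(**batch)
print(out.logits.shape)  # expected (2, 2) if the head follows HF conventions
```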
data/src/protify/base_models/base_tokenizer.py
ADDED
@@ -0,0 +1,36 @@
import torch
from typing import List, Dict, Union


class BaseSequenceTokenizer:
    def __init__(self, tokenizer):
        if tokenizer is None:
            raise ValueError("Tokenizer cannot be None.")
        self.tokenizer = tokenizer

    def __call__(self, sequences: Union[str, List[str]], **kwargs) -> Dict[str, torch.Tensor]:
        # Default tokenizer args if not provided
        kwargs.setdefault('return_tensors', 'pt')
        kwargs.setdefault('padding', 'max_length')
        kwargs.setdefault('add_special_tokens', True)

        return self.tokenizer(sequences, **kwargs)

    @property
    def vocab_size(self):
        return self.tokenizer.vocab_size

    @property
    def pad_token_id(self):
        return self.tokenizer.pad_token_id

    @property
    def eos_token_id(self):
        return self.tokenizer.eos_token_id

    @property
    def cls_token_id(self):
        return self.tokenizer.cls_token_id

    def save_pretrained(self, save_dir: str):
        self.tokenizer.save_pretrained(save_dir)
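A minimal sketch of wrapping an off-the-shelf Hugging Face tokenizer with this class (the checkpoint name is only an example):

```python
# Sketch: wrap a Hugging Face tokenizer to pick up the shared defaults above.
from transformers import AutoTokenizer

hf_tok = AutoTokenizer.from_pretrained('facebook/esm2_t6_8M_UR50D')
wrapped = BaseSequenceTokenizer(hf_tok)
batch = wrapped(['MEKVQYLTRSAIRRASTIE'], padding='longest')  # override the max_length default
print(batch['input_ids'].shape, wrapped.pad_token_id)
```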
data/src/protify/base_models/dplm.py
ADDED
@@ -0,0 +1,388 @@
# Copyright (c) 2024 Bytedance Ltd. and/or its affiliates
# SPDX-License-Identifier: Apache-2.0

"""
This is a modified version of the DPLM model from https://github.com/bytedance/dplm/blob/main/src/byprot/models/lm/esm_dplm.py
"""
import torch
import torch.nn as nn
from torch.nn import functional as F
from typing import List, Optional, Tuple, Union, Dict
from transformers.models.esm.modeling_esm import (
    EsmAttention,
    EsmSelfAttention,
    EsmLayer,
    EsmEncoder,
    EsmModel,
    EsmEmbeddings,
    EsmPooler,
    EsmContactPredictionHead,
    EsmIntermediate,
    EsmOutput,
    EsmLMHead,
    EsmForMaskedLM,
    EsmSelfOutput,
    EsmPreTrainedModel,
    EsmForSequenceClassification,
    EsmForTokenClassification,
)
from transformers import EsmTokenizer, AutoTokenizer
from transformers.modeling_outputs import BaseModelOutputWithPoolingAndCrossAttentions

from .base_tokenizer import BaseSequenceTokenizer


# Simple name -> class registry; classes register themselves via the decorator below.
MODEL_REGISTRY = {}

def register_model(name):
    def decorator(cls):
        MODEL_REGISTRY[name] = cls
        return cls
    return decorator


class ModifiedEsmSelfAttention(EsmSelfAttention):
    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.FloatTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        encoder_hidden_states: Optional[torch.FloatTensor] = None,
        encoder_attention_mask: Optional[torch.FloatTensor] = None,
        past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
        output_attentions: Optional[bool] = False,
    ) -> Tuple[torch.Tensor]:
        mixed_query_layer = self.query(hidden_states)

        # If this is instantiated as a cross-attention module, the keys
        # and values come from an encoder; the attention mask needs to be
        # such that the encoder's padding tokens are not attended to.
        is_cross_attention = encoder_hidden_states is not None

        if is_cross_attention and past_key_value is not None:
            # reuse k,v, cross_attentions
            key_layer = past_key_value[0]
            value_layer = past_key_value[1]
            attention_mask = encoder_attention_mask
        elif is_cross_attention:
            key_layer = self.transpose_for_scores(self.key(encoder_hidden_states))
            value_layer = self.transpose_for_scores(self.value(encoder_hidden_states))
            attention_mask = encoder_attention_mask
        elif past_key_value is not None:
            key_layer = self.transpose_for_scores(self.key(hidden_states))
            value_layer = self.transpose_for_scores(self.value(hidden_states))
            key_layer = torch.cat([past_key_value[0], key_layer], dim=2)
            value_layer = torch.cat([past_key_value[1], value_layer], dim=2)
        else:
            key_layer = self.transpose_for_scores(self.key(hidden_states))
            value_layer = self.transpose_for_scores(self.value(hidden_states))

        query_layer = self.transpose_for_scores(mixed_query_layer)

        # Queries are pre-scaled here, so scaled_dot_product_attention is called with scale=1.0 below.
        query_layer = query_layer * self.attention_head_size**-0.5

        if self.is_decoder:
            past_key_value = (key_layer, value_layer)

        if self.position_embedding_type == "rotary":
            query_layer, key_layer = self.rotary_embeddings(query_layer, key_layer)

        if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
            raise NotImplementedError

        # Mask heads if we want to
        if head_mask is not None:
            raise NotImplementedError

        query_layer = query_layer.contiguous()
        key_layer = key_layer.contiguous()
        value_layer = value_layer.contiguous()
        context_layer = F.scaled_dot_product_attention(query_layer, key_layer, value_layer, attn_mask=attention_mask, scale=1.0)

        context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
        new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
        context_layer = context_layer.view(new_context_layer_shape)

        # outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
        outputs = (context_layer,)

        if self.is_decoder:
            outputs = outputs + (past_key_value,)
        return outputs


class ModifiedEsmAttention(EsmAttention):
    def __init__(self, config):
        nn.Module.__init__(self)
        self.self = ModifiedEsmSelfAttention(config)
        self.output = EsmSelfOutput(config)
        self.pruned_heads = set()
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)


class ModifiedEsmLayer(EsmLayer):
    def __init__(self, config):
        nn.Module.__init__(self)
        self.chunk_size_feed_forward = config.chunk_size_feed_forward
        self.seq_len_dim = 1
        self.attention = ModifiedEsmAttention(config)
        self.is_decoder = config.is_decoder
        self.add_cross_attention = config.add_cross_attention
        if self.add_cross_attention:
            if not self.is_decoder:
                raise RuntimeError(f"{self} should be used as a decoder model if cross attention is added")
            self.crossattention = ModifiedEsmAttention(config)
        self.intermediate = EsmIntermediate(config)
        self.output = EsmOutput(config)
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)


class ModifiedEsmEncoder(EsmEncoder):
    def __init__(self, config):
        nn.Module.__init__(self)
        self.config = config
        self.layer = nn.ModuleList([ModifiedEsmLayer(config) for _ in range(config.num_hidden_layers)])
        self.emb_layer_norm_after = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.gradient_checkpointing = False


class ModifiedEsmModel(EsmModel):
    def __init__(self, config, add_pooling_layer=True):
        EsmPreTrainedModel.__init__(self, config)
        self.config = config

        self.embeddings = EsmEmbeddings(config)
        self.encoder = ModifiedEsmEncoder(config)

        self.pooler = EsmPooler(config) if add_pooling_layer else None

        self.contact_head = EsmContactPredictionHead(
            in_features=config.num_hidden_layers * config.num_attention_heads, bias=True
        )

        # Initialize weights and apply final processing
        self.post_init()

    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.Tensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        encoder_hidden_states: Optional[torch.Tensor] = None,
        encoder_attention_mask: Optional[torch.Tensor] = None,
        past_key_values: Optional[List[torch.FloatTensor]] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPoolingAndCrossAttentions]:
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if self.config.is_decoder:
            use_cache = use_cache if use_cache is not None else self.config.use_cache
        else:
            use_cache = False

        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            input_shape = input_ids.size()
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        batch_size, seq_length = input_shape
        device = input_ids.device if input_ids is not None else inputs_embeds.device

        # past_key_values_length
        past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0

        if attention_mask is None:
            attention_mask = torch.ones(((batch_size, seq_length + past_key_values_length)), device=device)

        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape)

        # If a 2D or 3D attention mask is provided for the cross-attention
        # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
        if self.config.is_decoder and encoder_hidden_states is not None:
            encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
            encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
            if encoder_attention_mask is None:
                encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
            encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
        else:
            # encoder_extended_attention_mask = None
            encoder_extended_attention_mask = encoder_attention_mask

        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)

        embedding_output = self.embeddings(
            input_ids=input_ids,
            position_ids=position_ids,
            attention_mask=attention_mask,
            inputs_embeds=inputs_embeds,
            past_key_values_length=past_key_values_length,
        )
        encoder_outputs = self.encoder(
            embedding_output,
            attention_mask=extended_attention_mask,
            head_mask=head_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_extended_attention_mask,
            past_key_values=past_key_values,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output = encoder_outputs[0]
        pooled_output = self.pooler(sequence_output) if self.pooler is not None else None

        if not return_dict:
            return (sequence_output, pooled_output) + encoder_outputs[1:]

        return BaseModelOutputWithPoolingAndCrossAttentions(
|
260 |
+
last_hidden_state=sequence_output,
|
261 |
+
pooler_output=pooled_output,
|
262 |
+
past_key_values=encoder_outputs.past_key_values,
|
263 |
+
hidden_states=encoder_outputs.hidden_states,
|
264 |
+
attentions=encoder_outputs.attentions,
|
265 |
+
cross_attentions=encoder_outputs.cross_attentions,
|
266 |
+
)
|
267 |
+
|
268 |
+
|
269 |
+
@register_model('mlm_esm')
|
270 |
+
class EsmForDPLM(EsmForMaskedLM):
|
271 |
+
def __init__(self, config, dropout=0.1):
|
272 |
+
tokenizer = AutoTokenizer.from_pretrained(config._name_or_path)
|
273 |
+
config.hidden_dropout_prob = dropout
|
274 |
+
|
275 |
+
EsmPreTrainedModel.__init__(self, config)
|
276 |
+
self.esm = ModifiedEsmModel(config, add_pooling_layer=False)
|
277 |
+
self.lm_head = EsmLMHead(config)
|
278 |
+
|
279 |
+
self.init_weights()
|
280 |
+
|
281 |
+
self.mask_id = tokenizer.mask_token_id
|
282 |
+
self.pad_id = tokenizer.pad_token_id
|
283 |
+
self.bos_id = tokenizer.cls_token_id
|
284 |
+
self.eos_id = tokenizer.eos_token_id
|
285 |
+
self.x_id = tokenizer._token_to_id['X']
|
286 |
+
|
287 |
+
self.contact_head = None
|
288 |
+
self.tokenizer = tokenizer
|
289 |
+
|
290 |
+
def forward(self,
|
291 |
+
input_ids,
|
292 |
+
attention_mask=None,
|
293 |
+
inputs_embeds=None,
|
294 |
+
decoder_input_ids=None,
|
295 |
+
decoder_attention_mask=None,
|
296 |
+
decoder_inputs_embeds=None,
|
297 |
+
labels=None,
|
298 |
+
output_attentions=None,
|
299 |
+
output_hidden_states=None,
|
300 |
+
return_dict=None,
|
301 |
+
encoder_hidden_states=None,
|
302 |
+
encoder_attention_mask=None,
|
303 |
+
):
|
304 |
+
attention_mask = input_ids.ne(self.pad_id)
|
305 |
+
outputs = self.esm(
|
306 |
+
input_ids,
|
307 |
+
attention_mask=attention_mask,
|
308 |
+
encoder_hidden_states=encoder_hidden_states,
|
309 |
+
encoder_attention_mask=encoder_attention_mask,
|
310 |
+
)
|
311 |
+
sequence_output = outputs[0]
|
312 |
+
logits = self.lm_head(sequence_output)
|
313 |
+
|
314 |
+
result = {
|
315 |
+
"logits": logits,
|
316 |
+
"last_hidden_state": sequence_output,
|
317 |
+
}
|
318 |
+
return result
|
319 |
+
|
320 |
+
|
321 |
+
presets = {
|
322 |
+
'DPLM-150': 'airkingbd/dplm_150m',
|
323 |
+
'DPLM-650': 'airkingbd/dplm_650m',
|
324 |
+
'DPLM-3B': 'airkingbd/dplm_3b',
|
325 |
+
}
|
326 |
+
|
327 |
+
|
328 |
+
class DPLMTokenizerWrapper(BaseSequenceTokenizer):
|
329 |
+
def __init__(self, tokenizer: EsmTokenizer):
|
330 |
+
super().__init__(tokenizer)
|
331 |
+
|
332 |
+
def __call__(self, sequences: Union[str, List[str]], **kwargs) -> Dict[str, torch.Tensor]:
|
333 |
+
if isinstance(sequences, str):
|
334 |
+
sequences = [sequences]
|
335 |
+
kwargs.setdefault('return_tensors', 'pt')
|
336 |
+
kwargs.setdefault('padding', 'longest')
|
337 |
+
kwargs.setdefault('add_special_tokens', True)
|
338 |
+
tokenized = self.tokenizer(sequences, **kwargs)
|
339 |
+
return tokenized
|
340 |
+
|
341 |
+
|
342 |
+
class DPLMForEmbedding(nn.Module):
|
343 |
+
def __init__(self, model_path: str):
|
344 |
+
super().__init__()
|
345 |
+
self.dplm = EsmForDPLM.from_pretrained(model_path)
|
346 |
+
|
347 |
+
def forward(
|
348 |
+
self,
|
349 |
+
input_ids: torch.Tensor,
|
350 |
+
attention_mask: Optional[torch.Tensor] = None,
|
351 |
+
output_attentions: Optional[bool] = None,
|
352 |
+
) -> torch.Tensor:
|
353 |
+
if output_attentions:
|
354 |
+
out = self.dplm(input_ids, attention_mask=attention_mask, output_attentions=output_attentions)
|
355 |
+
return out.last_hidden_state, out.attentions
|
356 |
+
else:
|
357 |
+
return self.dplm(input_ids, attention_mask=attention_mask)['last_hidden_state']
|
358 |
+
|
359 |
+
|
360 |
+
def get_dplm_tokenizer(preset: str):
|
361 |
+
return DPLMTokenizerWrapper(EsmTokenizer.from_pretrained('facebook/esm2_t6_8M_UR50D'))
|
362 |
+
|
363 |
+
|
364 |
+
def build_dplm_model(preset: str):
|
365 |
+
model = DPLMForEmbedding(presets[preset]).eval()
|
366 |
+
tokenizer = get_dplm_tokenizer(preset)
|
367 |
+
return model, tokenizer
|
368 |
+
|
369 |
+
|
370 |
+
def get_dplm_for_training(preset: str, tokenwise: bool = False, num_labels: int = None, hybrid: bool = False):
|
371 |
+
model_path = presets[preset]
|
372 |
+
if hybrid:
|
373 |
+
model = EsmForDPLM.from_pretrained(model_path).eval()
|
374 |
+
else:
|
375 |
+
if tokenwise:
|
376 |
+
model = EsmForTokenClassification.from_pretrained(model_path, num_labels=num_labels).eval()
|
377 |
+
else:
|
378 |
+
model = EsmForSequenceClassification.from_pretrained(model_path, num_labels=num_labels).eval()
|
379 |
+
tokenizer = get_dplm_tokenizer(preset)
|
380 |
+
return model, tokenizer
|
381 |
+
|
382 |
+
|
383 |
+
if __name__ == '__main__':
|
384 |
+
# py -m src.protify.base_models.dplm
|
385 |
+
model, tokenizer = build_dplm_model('DPLM-150')
|
386 |
+
print(model)
|
387 |
+
print(tokenizer)
|
388 |
+
print(tokenizer('MEKVQYLTRSAIRRASTIEMPQQARQKLQNLFINFCLILICBBOLLICIIVMLL'))
|
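For orientation, a minimal usage sketch of the helpers above (not part of the committed file): the import path follows the module's own `py -m src.protify.base_models.dplm` comment, and it assumes the `airkingbd/dplm_150m` checkpoint is reachable.

import torch
from src.protify.base_models.dplm import build_dplm_model

model, tokenizer = build_dplm_model('DPLM-150')
batch = tokenizer('MKTAYIAKQRQISFVKSHFSRQLEERLGLIEVQ')  # illustrative sequence
with torch.no_grad():
    embeddings = model(batch['input_ids'], attention_mask=batch['attention_mask'])
print(embeddings.shape)  # (1, sequence length + special tokens, hidden_size)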
data/src/protify/base_models/esm2.py
ADDED
@@ -0,0 +1,84 @@
"""
We use the FastESM2 implementation of ESM2, which is exactly equivalent but uses FlashAttention2.
"""
import torch
import torch.nn as nn
from typing import Optional, Union, List, Dict
from transformers import EsmTokenizer

from .FastPLMs.modeling_fastesm import FastEsmModel, FastEsmForSequenceClassification, FastEsmForTokenClassification
from .base_tokenizer import BaseSequenceTokenizer


presets = {
    'ESM2-8': 'Synthyra/ESM2-8M',
    'ESM2-35': 'Synthyra/ESM2-35M',
    'ESM2-150': 'Synthyra/ESM2-150M',
    'ESM2-650': 'Synthyra/ESM2-650M',
    'ESM2-3B': 'Synthyra/ESM2-3B',
    'DSM-150': 'GleghornLab/ESM_diff_150',
    'DSM-650': 'GleghornLab/ESM_diff_650',
}


class ESM2TokenizerWrapper(BaseSequenceTokenizer):
    def __init__(self, tokenizer: EsmTokenizer):
        super().__init__(tokenizer)

    def __call__(self, sequences: Union[str, List[str]], **kwargs) -> Dict[str, torch.Tensor]:
        if isinstance(sequences, str):
            sequences = [sequences]
        kwargs.setdefault('return_tensors', 'pt')
        kwargs.setdefault('padding', 'longest')
        kwargs.setdefault('add_special_tokens', True)
        tokenized = self.tokenizer(sequences, **kwargs)
        return tokenized


class FastEsmForEmbedding(nn.Module):
    def __init__(self, model_path: str):
        super().__init__()
        self.esm = FastEsmModel.from_pretrained(model_path)

    def forward(
        self,
        input_ids: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
    ) -> torch.Tensor:
        if output_attentions:
            out = self.esm(input_ids, attention_mask=attention_mask, output_attentions=output_attentions)
            return out.last_hidden_state, out.attentions
        else:
            return self.esm(input_ids, attention_mask=attention_mask).last_hidden_state


def get_esm2_tokenizer(preset: str):
    return ESM2TokenizerWrapper(EsmTokenizer.from_pretrained('facebook/esm2_t6_8M_UR50D'))


def build_esm2_model(preset: str):
    model = FastEsmForEmbedding(presets[preset]).eval()
    tokenizer = get_esm2_tokenizer(preset)
    return model, tokenizer


def get_esm2_for_training(preset: str, tokenwise: bool = False, num_labels: int = None, hybrid: bool = False):
    model_path = presets[preset]
    if hybrid:
        model = FastEsmModel.from_pretrained(model_path).eval()
    else:
        if tokenwise:
            model = FastEsmForTokenClassification.from_pretrained(model_path, num_labels=num_labels).eval()
        else:
            model = FastEsmForSequenceClassification.from_pretrained(model_path, num_labels=num_labels).eval()
    tokenizer = get_esm2_tokenizer(preset)
    return model, tokenizer


if __name__ == '__main__':
    # py -m src.protify.base_models.esm2
    model, tokenizer = build_esm2_model('ESM2-8')
    print(model)
    print(tokenizer)
    print(tokenizer('MEKVQYLTRSAIRRASTIEMPQQARQKLQNLFINFCLILICBBOLLICIIVMLL'))
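A hedged sketch of turning the per-token output of FastEsmForEmbedding into fixed-size sequence embeddings by masked mean pooling; the pooling choice is illustrative, not something this file prescribes.

import torch
from src.protify.base_models.esm2 import build_esm2_model

model, tokenizer = build_esm2_model('ESM2-8')
batch = tokenizer(['MKTAYIAK', 'MEKVQYLTRSAIRRAST'])  # padded to the longer sequence
with torch.no_grad():
    hidden = model(batch['input_ids'], attention_mask=batch['attention_mask'])
mask = batch['attention_mask'].unsqueeze(-1).float()
pooled = (hidden * mask).sum(dim=1) / mask.sum(dim=1)  # (batch, hidden_size), padding excluded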
data/src/protify/base_models/esm3.py
ADDED
@@ -0,0 +1,3 @@
"""
ESM3 is not currently supported.
"""
data/src/protify/base_models/esmc.py
ADDED
@@ -0,0 +1,84 @@
"""
We use the ESM++ implementation of ESMC, which is exactly equivalent but offers batching.
"""
import torch
import torch.nn as nn
from typing import Optional, Union, List, Dict

from .FastPLMs.modeling_esm_plusplus import (
    ESMplusplusModel,
    ESMplusplusForSequenceClassification,
    ESMplusplusForTokenClassification,
    EsmSequenceTokenizer
)
from .base_tokenizer import BaseSequenceTokenizer


presets = {
    'ESMC-300': 'Synthyra/ESMplusplus_small',
    'ESMC-600': 'Synthyra/ESMplusplus_large',
}


class ESMTokenizerWrapper(BaseSequenceTokenizer):
    def __init__(self, tokenizer: EsmSequenceTokenizer):
        super().__init__(tokenizer)

    def __call__(self, sequences: Union[str, List[str]], **kwargs) -> Dict[str, torch.Tensor]:
        if isinstance(sequences, str):
            sequences = [sequences]
        kwargs.setdefault('return_tensors', 'pt')
        kwargs.setdefault('padding', 'longest')
        kwargs.setdefault('add_special_tokens', True)
        tokenized = self.tokenizer(sequences, **kwargs)
        return tokenized


class ESMplusplusForEmbedding(nn.Module):
    def __init__(self, model_path: str):
        super().__init__()
        self.esm = ESMplusplusModel.from_pretrained(model_path)

    def forward(
        self,
        input_ids: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
    ) -> torch.Tensor:
        if output_attentions:
            out = self.esm(input_ids, attention_mask=attention_mask, output_attentions=output_attentions)
            return out.last_hidden_state, out.attentions
        else:
            return self.esm(input_ids, attention_mask=attention_mask).last_hidden_state


def get_esmc_tokenizer(preset: str):
    tokenizer = EsmSequenceTokenizer()
    return ESMTokenizerWrapper(tokenizer)


def build_esmc_model(preset: str):
    model = ESMplusplusForEmbedding(presets[preset]).eval()
    tokenizer = get_esmc_tokenizer(preset)
    return model, tokenizer


def get_esmc_for_training(preset: str, tokenwise: bool = False, num_labels: int = None, hybrid: bool = False):
    model_path = presets[preset]
    if hybrid:
        model = ESMplusplusModel.from_pretrained(model_path).eval()
    else:
        if tokenwise:
            model = ESMplusplusForTokenClassification.from_pretrained(model_path, num_labels=num_labels).eval()
        else:
            model = ESMplusplusForSequenceClassification.from_pretrained(model_path, num_labels=num_labels).eval()
    tokenizer = get_esmc_tokenizer(preset)
    return model, tokenizer


if __name__ == '__main__':
    # py -m src.protify.base_models.esmc
    model, tokenizer = build_esmc_model('ESMC-300')
    print(model)
    print(tokenizer)
    print(tokenizer('MEKVQYLTRSAIRRASTIEMPQQARQKLQNLFINFCLILICBBOLLICIIVMLL'))
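For the training path, a small sketch of get_esmc_for_training; it assumes the FastPLMs classification heads follow the usual transformers output convention (an object with a .logits field), which this diff does not show.

from src.protify.base_models.esmc import get_esmc_for_training

model, tokenizer = get_esmc_for_training('ESMC-300', tokenwise=False, num_labels=2)
batch = tokenizer('MKTAYIAKQRQISFVKSHFSRQLEERLGLIEVQ')
out = model(input_ids=batch['input_ids'], attention_mask=batch['attention_mask'])
print(out.logits.shape)  # expected: (1, 2) for a two-class sequence-level head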
data/src/protify/base_models/get_base_models.py
ADDED
@@ -0,0 +1,215 @@
from dataclasses import dataclass


currently_supported_models = [
    'ESM2-8',
    'ESM2-35',
    'ESM2-150',
    'ESM2-650',
    'ESM2-3B',
    'Random',
    'Random-Transformer',
    'Random-ESM2-8',
    'Random-ESM2-35',  # same as Random-Transformer
    'Random-ESM2-150',
    'Random-ESM2-650',
    'ESMC-300',
    'ESMC-600',
    'ESM2-diff-150',
    'ESM2-diffAV-150',
    'ProtBert',
    'ProtBert-BFD',
    'ProtT5',
    'ProtT5-XL-UniRef50-full-prec',
    'ProtT5-XXL-UniRef50',
    'ProtT5-XL-BFD',
    'ProtT5-XXL-BFD',
    'ANKH-Base',
    'ANKH-Large',
    'ANKH2-Large',
    'GLM2-150',
    'GLM2-650',
    'GLM2-GAIA',
    'DPLM-150',
    'DPLM-650',
    'DPLM-3B',
    'DSM-150',
    'DSM-650',
]

standard_models = [
    'ESM2-8',
    'ESM2-35',
    'ESM2-150',
    'ESM2-650',
    'ESM2-3B',
    'ESMC-300',
    'ESMC-600',
    'ProtBert',
    'ProtT5',
    'GLM2-150',
    'GLM2-650',
    'ANKH-Base',
    'ANKH-Large',
    'DPLM-150',
    'DPLM-650',
    'DSM-150',
    'DSM-650',
    'Random',
    'Random-Transformer',
]

experimental_models = []


@dataclass
class BaseModelArguments:
    def __init__(self, model_names: list[str] = None, **kwargs):
        # Guard against the default of None before indexing model_names[0]
        if not model_names or model_names[0] == 'standard':
            self.model_names = standard_models
        elif 'exp' in model_names[0].lower():
            self.model_names = experimental_models
        else:
            self.model_names = model_names


def get_base_model(model_name: str):
    if 'random' in model_name.lower():
        from .random import build_random_model
        return build_random_model(model_name)
    elif 'esm2' in model_name.lower() or 'dsm' in model_name.lower():
        from .esm2 import build_esm2_model
        return build_esm2_model(model_name)
    elif 'esmc' in model_name.lower():
        from .esmc import build_esmc_model
        return build_esmc_model(model_name)
    elif 'protbert' in model_name.lower():
        from .protbert import build_protbert_model
        return build_protbert_model(model_name)
    elif 'prott5' in model_name.lower():
        from .prott5 import build_prott5_model
        return build_prott5_model(model_name)
    elif 'ankh' in model_name.lower():
        from .ankh import build_ankh_model
        return build_ankh_model(model_name)
    elif 'glm' in model_name.lower():
        from .glm import build_glm2_model
        return build_glm2_model(model_name)
    elif 'dplm' in model_name.lower():
        from .dplm import build_dplm_model
        return build_dplm_model(model_name)
    else:
        raise ValueError(f"Model {model_name} not supported")


def get_base_model_for_training(model_name: str, tokenwise: bool = False, num_labels: int = None, hybrid: bool = False):
    if 'esm2' in model_name.lower() or 'dsm' in model_name.lower():
        from .esm2 import get_esm2_for_training
        return get_esm2_for_training(model_name, tokenwise, num_labels, hybrid)
    elif 'esmc' in model_name.lower():
        from .esmc import get_esmc_for_training
        return get_esmc_for_training(model_name, tokenwise, num_labels, hybrid)
    elif 'protbert' in model_name.lower():
        from .protbert import get_protbert_for_training
        return get_protbert_for_training(model_name, tokenwise, num_labels, hybrid)
    elif 'prott5' in model_name.lower():
        from .prott5 import get_prott5_for_training
        return get_prott5_for_training(model_name, tokenwise, num_labels, hybrid)
    elif 'ankh' in model_name.lower():
        from .ankh import get_ankh_for_training
        return get_ankh_for_training(model_name, tokenwise, num_labels, hybrid)
    elif 'glm' in model_name.lower():
        from .glm import get_glm2_for_training
        return get_glm2_for_training(model_name, tokenwise, num_labels, hybrid)
    elif 'dplm' in model_name.lower():
        from .dplm import get_dplm_for_training
        return get_dplm_for_training(model_name, tokenwise, num_labels, hybrid)
    else:
        raise ValueError(f"Model {model_name} not supported")


def get_tokenizer(model_name: str):
    if 'esm2' in model_name.lower() or 'random' in model_name.lower() or 'dsm' in model_name.lower():
        from .esm2 import get_esm2_tokenizer
        return get_esm2_tokenizer(model_name)
    elif 'esmc' in model_name.lower():
        from .esmc import get_esmc_tokenizer
        return get_esmc_tokenizer(model_name)
    elif 'protbert' in model_name.lower():
        from .protbert import get_protbert_tokenizer
        return get_protbert_tokenizer(model_name)
    elif 'prott5' in model_name.lower():
        from .prott5 import get_prott5_tokenizer
        return get_prott5_tokenizer(model_name)
    elif 'ankh' in model_name.lower():
        from .ankh import get_ankh_tokenizer
        return get_ankh_tokenizer(model_name)
    elif 'glm' in model_name.lower():
        from .glm import get_glm2_tokenizer
        return get_glm2_tokenizer(model_name)
    elif 'dplm' in model_name.lower():
        from .dplm import get_dplm_tokenizer
        return get_dplm_tokenizer(model_name)
    else:
        raise ValueError(f"Model {model_name} not supported")


if __name__ == '__main__':
    # py -m src.protify.base_models.get_base_models
    import sys
    import argparse

    parser = argparse.ArgumentParser(description='Download and list supported models')
    parser.add_argument('--download', action='store_true', help='Download all standard models')
    parser.add_argument('--list', action='store_true', help='List all supported models with descriptions')
    args = parser.parse_args()

    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)

    if args.list:
        try:
            from resource_info import model_descriptions
            print("\n=== Currently Supported Models ===\n")

            max_name_len = max(len(name) for name in currently_supported_models)
            max_type_len = max(len(model_descriptions.get(name, {}).get('type', 'Unknown')) for name in currently_supported_models if name in model_descriptions)
            max_size_len = max(len(model_descriptions.get(name, {}).get('size', 'Unknown')) for name in currently_supported_models if name in model_descriptions)

            # Print header
            print(f"{'Model':<{max_name_len+2}}{'Type':<{max_type_len+2}}{'Size':<{max_size_len+2}}Description")
            print("-" * (max_name_len + max_type_len + max_size_len + 50))

            for model_name in currently_supported_models:
                if model_name in model_descriptions:
                    model_info = model_descriptions[model_name]
                    print(f"{model_name:<{max_name_len+2}}{model_info.get('type', 'Unknown'):<{max_type_len+2}}{model_info.get('size', 'Unknown'):<{max_size_len+2}}{model_info.get('description', 'No description available')}")
                else:
                    print(f"{model_name:<{max_name_len+2}}{'Unknown':<{max_type_len+2}}{'Unknown':<{max_size_len+2}}No description available")

            print("\n=== Standard Models ===\n")
            for model_name in standard_models:
                print(f"- {model_name}")

        except ImportError:
            print("Model descriptions file not found. Only listing model names.")
            print("\n=== Currently Supported Models ===\n")
            for model_name in currently_supported_models:
                print(f"- {model_name}")

            print("\n=== Standard Models ===\n")
            for model_name in standard_models:
                print(f"- {model_name}")

    if args.download:
        ### This will download all standard models
        from torchinfo import summary
        from ..utils import clear_screen
        download_args = BaseModelArguments(model_names=['standard'])
        for model_name in download_args.model_names:
            model, tokenizer = get_base_model(model_name)
            print(f'Downloaded {model_name}')
            tokenized = tokenizer('MEKVQYLTRSAIRRASTIEMPQQARQKLQNLFINFCLILICLLLICIIVMLL', return_tensors='pt').input_ids
            summary(model, input_data=tokenized)
            clear_screen()
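The dispatch functions above key on substrings of the preset name, so callers only need the name. A minimal sketch, assuming the listed checkpoints download cleanly:

from src.protify.base_models.get_base_models import BaseModelArguments, get_base_model

args = BaseModelArguments(model_names=['ESM2-8', 'ESMC-300'])
for name in args.model_names:
    model, tokenizer = get_base_model(name)  # routes to esm2.py / esmc.py by substring match
    print(name, type(model).__name__)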
data/src/protify/base_models/glm.py
ADDED
@@ -0,0 +1,90 @@
import torch
import torch.nn as nn
from typing import Optional, Tuple, Union, List, Dict
from transformers import AutoTokenizer, AutoModel, AutoModelForTokenClassification, AutoModelForSequenceClassification

from .base_tokenizer import BaseSequenceTokenizer


presets = {
    'GLM2-150': 'tattabio/gLM2_150M',
    'GLM2-650': 'tattabio/gLM2_650M',
    'GLM2-GAIA': 'tattabio/gLM2_650M_embed'
}


class GLMTokenizerWrapper(BaseSequenceTokenizer):
    def __init__(self, tokenizer: AutoTokenizer):
        super().__init__(tokenizer)
        self.plus_token = "<+>"
        if self.plus_token not in self.tokenizer.vocab:
            print(f"Warning: Token '{self.plus_token}' not found in GLM tokenizer vocabulary.")

    def __call__(self, sequences: Union[str, List[str]], **kwargs) -> Dict[str, torch.Tensor]:
        if isinstance(sequences, str):
            sequences = [sequences]
        kwargs.setdefault('return_tensors', 'pt')
        kwargs.setdefault('padding', 'longest')
        kwargs.setdefault('add_special_tokens', True)
        modified_sequences = [self.plus_token + seq for seq in sequences]
        tokenized = self.tokenizer(modified_sequences, **kwargs)
        return tokenized


class gLM2ForEmbedding(nn.Module):
    def __init__(self, model_path: str):
        super().__init__()
        self.glm2 = AutoModel.from_pretrained(model_path, trust_remote_code=True)

    def forward(
        self,
        input_ids: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = False,
    ) -> torch.Tensor:
        # Neither flag is supported: the gLM2 call below passes neither one through
        assert not output_attentions and not output_hidden_states, (
            "output_attentions=True and output_hidden_states=True are not supported by gLM2ForEmbedding."
        )

        out = self.glm2(
            input_ids=input_ids,
            attention_mask=attention_mask,
        )
        return out.last_hidden_state


def get_glm2_tokenizer(preset: str):
    return GLMTokenizerWrapper(AutoTokenizer.from_pretrained(presets[preset], trust_remote_code=True))


def build_glm2_model(preset: str) -> Tuple[gLM2ForEmbedding, AutoTokenizer]:
    model_path = presets[preset]
    model = gLM2ForEmbedding(model_path).eval()
    tokenizer = get_glm2_tokenizer(preset)
    return model, tokenizer


def get_glm2_for_training(preset: str, tokenwise: bool = False, num_labels: int = None, hybrid: bool = False):
    model_path = presets[preset]
    if hybrid:
        model = AutoModel.from_pretrained(model_path, trust_remote_code=True).eval()
    else:
        if tokenwise:
            model = AutoModelForTokenClassification.from_pretrained(
                model_path, num_labels=num_labels, trust_remote_code=True
            ).eval()
        else:
            model = AutoModelForSequenceClassification.from_pretrained(
                model_path, num_labels=num_labels, trust_remote_code=True
            ).eval()
    tokenizer = get_glm2_tokenizer(preset)
    return model, tokenizer


if __name__ == '__main__':
    # py -m src.protify.base_models.glm
    model, tokenizer = build_glm2_model('GLM2-650')
    print(model)
    print(tokenizer)
    print(tokenizer('MEKVQYLTRSAIRRASTIEMPQQARQKLQNLFINFCLILICBBOLLICIIVMLL'))
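The wrapper's only GLM-specific behavior is prepending the `<+>` strand token before tokenization; a small sketch:

from src.protify.base_models.glm import get_glm2_tokenizer

tok = get_glm2_tokenizer('GLM2-150')
batch = tok('MKTAYIAK')  # tokenized internally as '<+>MKTAYIAK'
print(batch['input_ids'].shape)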
data/src/protify/base_models/protbert.py
ADDED
@@ -0,0 +1,79 @@
import torch
import torch.nn as nn
import re
from typing import Optional, Union, List, Dict
from transformers import BertModel, BertTokenizer, BertForSequenceClassification, BertForTokenClassification

from .base_tokenizer import BaseSequenceTokenizer


presets = {
    'ProtBert': 'Rostlab/prot_bert',
    'ProtBert-BFD': 'Rostlab/prot_bert_bfd',
}


class BERTTokenizerWrapper(BaseSequenceTokenizer):
    def __init__(self, tokenizer: BertTokenizer):
        super().__init__(tokenizer)

    def __call__(self, sequences: Union[str, List[str]], **kwargs) -> Dict[str, torch.Tensor]:
        if isinstance(sequences, str):
            sequences = [sequences]
        kwargs.setdefault('return_tensors', 'pt')
        kwargs.setdefault('padding', 'longest')
        kwargs.setdefault('add_special_tokens', True)
        sequences = [re.sub(r"[UZOB]", "X", seq) for seq in sequences]
        sequences = [' '.join(seq) for seq in sequences]
        tokenized = self.tokenizer(sequences, **kwargs)
        return tokenized


class ProtBertForEmbedding(nn.Module):
    def __init__(self, model_path: str):
        super().__init__()
        self.plm = BertModel.from_pretrained(model_path, attn_implementation="sdpa")

    def forward(
        self,
        input_ids: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
    ) -> torch.Tensor:
        if output_attentions:
            out = self.plm(input_ids, attention_mask=attention_mask, output_attentions=output_attentions)
            return out.last_hidden_state, out.attentions
        else:
            return self.plm(input_ids, attention_mask=attention_mask).last_hidden_state


def get_protbert_tokenizer(preset: str):
    return BERTTokenizerWrapper(BertTokenizer.from_pretrained('Rostlab/prot_bert'))


def build_protbert_model(preset: str):
    model_path = presets[preset]
    model = ProtBertForEmbedding(model_path).eval()
    tokenizer = get_protbert_tokenizer(preset)
    return model, tokenizer


def get_protbert_for_training(preset: str, tokenwise: bool = False, num_labels: int = None, hybrid: bool = False):
    model_path = presets[preset]
    if hybrid:
        model = BertModel.from_pretrained(model_path).eval()
    else:
        if tokenwise:
            model = BertForTokenClassification.from_pretrained(model_path, num_labels=num_labels).eval()
        else:
            model = BertForSequenceClassification.from_pretrained(model_path, num_labels=num_labels).eval()
    tokenizer = get_protbert_tokenizer(preset)
    return model, tokenizer


if __name__ == '__main__':
    # py -m src.protify.base_models.protbert
    model, tokenizer = build_protbert_model('ProtBert')
    print(model)
    print(tokenizer)
    print(tokenizer('MEKVQYLTRSAIRRASTIEMPQQARQKLQNLFINFCLILICBBOLLICIIVMLL'))
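The two preprocessing lines in BERTTokenizerWrapper.__call__ do the heavy lifting for ProtBert, whose vocabulary expects space-separated single residues with rare amino acids mapped to X; isolated here for clarity with an illustrative sequence:

import re

seq = 'MKTUAYZ'
seq = re.sub(r"[UZOB]", "X", seq)  # 'MKTXAYX' — U/Z/O/B become the unknown residue X
seq = ' '.join(seq)                # 'M K T X A Y X' — one vocabulary token per residue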
data/src/protify/base_models/proteinvec.py
ADDED
@@ -0,0 +1,3 @@
"""
ProteinVec is not currently supported.
"""
data/src/protify/base_models/prott5.py
ADDED
@@ -0,0 +1,83 @@
import torch
import torch.nn as nn
import re
from typing import Optional, Union, List, Dict
from transformers import T5EncoderModel, T5Tokenizer

from .t5 import T5ForSequenceClassification, T5ForTokenClassification
from .base_tokenizer import BaseSequenceTokenizer


presets = {
    'ProtT5': 'Rostlab/prot_t5_xl_half_uniref50-enc',
    'ProtT5-XL-UniRef50-full-prec': 'Rostlab/prot_t5_xl_uniref50',
    'ProtT5-XXL-UniRef50': 'Rostlab/prot_t5_xxl_uniref50',
    'ProtT5-XL-BFD': 'Rostlab/prot_t5_xl_bfd',
    'ProtT5-XXL-BFD': 'Rostlab/prot_t5_xxl_bfd',
}


class T5TokenizerWrapper(BaseSequenceTokenizer):
    def __init__(self, tokenizer: T5Tokenizer):
        super().__init__(tokenizer)

    def __call__(self, sequences: Union[str, List[str]], **kwargs) -> Dict[str, torch.Tensor]:
        if isinstance(sequences, str):
            sequences = [sequences]
        kwargs.setdefault('return_tensors', 'pt')
        kwargs.setdefault('padding', 'longest')
        kwargs.setdefault('add_special_tokens', True)
        sequences = [re.sub(r"[UZOB]", "X", seq) for seq in sequences]
        sequences = [' '.join(seq) for seq in sequences]
        tokenized = self.tokenizer(sequences, **kwargs)
        return tokenized


class Prott5ForEmbedding(nn.Module):
    def __init__(self, model_path: str):
        super().__init__()
        self.plm = T5EncoderModel.from_pretrained(model_path)

    def forward(
        self,
        input_ids: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
    ) -> torch.Tensor:
        if output_attentions:
            out = self.plm(input_ids, attention_mask=attention_mask, output_attentions=output_attentions)
            return out.last_hidden_state, out.attentions
        else:
            return self.plm(input_ids, attention_mask=attention_mask).last_hidden_state


def get_prott5_tokenizer(preset: str):
    return T5TokenizerWrapper(T5Tokenizer.from_pretrained(presets[preset]))


def build_prott5_model(preset: str):
    model_path = presets[preset]
    model = Prott5ForEmbedding(model_path).eval()
    tokenizer = get_prott5_tokenizer(preset)
    return model, tokenizer


def get_prott5_for_training(preset: str, tokenwise: bool = False, num_labels: int = None, hybrid: bool = False):
    model_path = presets[preset]
    if hybrid:
        model = T5EncoderModel.from_pretrained(model_path).eval()
    else:
        if tokenwise:
            model = T5ForTokenClassification.from_pretrained(model_path, num_labels=num_labels).eval()
        else:
            model = T5ForSequenceClassification.from_pretrained(model_path, num_labels=num_labels).eval()
    tokenizer = get_prott5_tokenizer(preset)
    return model, tokenizer


if __name__ == '__main__':
    # py -m src.protify.base_models.prott5
    model, tokenizer = build_prott5_model('ProtT5')
    print(model)
    print(tokenizer)
    print(tokenizer('MEKVQYLTRSAIRRASTIEMPQQARQKLQNLFINFCLILICBBOLLICIIVMLL'))
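A short embedding sketch for the ProtT5 wrapper; note that T5-style tokenizers append a single </s> terminator rather than a CLS/EOS pair, so the output is one token longer than the sequence.

import torch
from src.protify.base_models.prott5 import build_prott5_model

model, tokenizer = build_prott5_model('ProtT5')
batch = tokenizer('MKTAYIAK')  # illustrative sequence
with torch.no_grad():
    hidden = model(batch['input_ids'], attention_mask=batch['attention_mask'])
print(hidden.shape)  # (1, len(seq) + 1, d_model)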
data/src/protify/base_models/random.py
ADDED
@@ -0,0 +1,62 @@
import torch
import torch.nn as nn
from typing import Optional
from transformers import EsmTokenizer, EsmConfig
from model_components.transformer import TransformerForMaskedLM, TransformerConfig


presets = {
    'Random': 'random',
    'Random-Transformer': 'facebook/esm2_t12_35M_UR50D',  # default is 35M version
    'Random-ESM2-8': 'facebook/esm2_t6_8M_UR50D',
    'Random-ESM2-35': 'facebook/esm2_t12_35M_UR50D',
    'Random-ESM2-150': 'facebook/esm2_t30_150M_UR50D',
    'Random-ESM2-650': 'facebook/esm2_t36_650M_UR50D',
}


class RandomModel(nn.Module):
    def __init__(self, config: EsmConfig):
        super().__init__()
        self.config = config
        self.hidden_size = config.hidden_size
        self.holder_param = torch.nn.Parameter(torch.randn(1, 1, self.hidden_size))

    def forward(self, input_ids: torch.Tensor, attention_mask: Optional[torch.Tensor] = None) -> torch.Tensor:
        device = self.holder_param.device
        return torch.randn(input_ids.shape[0], input_ids.shape[1], self.hidden_size, device=device)


class RandomTransformer(nn.Module):
    def __init__(self, config: TransformerConfig):
        super().__init__()
        self.config = config
        self.transformer = TransformerForMaskedLM(config)

    def forward(self, input_ids: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, output_attentions: bool = False) -> torch.Tensor:
        if output_attentions:
            out = self.transformer(input_ids, attention_mask, output_attentions=output_attentions)
            return out.last_hidden_state, out.attentions
        else:
            return self.transformer(input_ids, attention_mask).last_hidden_state


def build_random_model(preset: str):
    tokenizer = EsmTokenizer.from_pretrained('facebook/esm2_t12_35M_UR50D')
    if preset == 'Random':
        model = RandomModel(EsmConfig.from_pretrained('facebook/esm2_t12_35M_UR50D'))
    else:
        esm_config = EsmConfig.from_pretrained(presets[preset])
        config = TransformerConfig()
        config.hidden_size = esm_config.hidden_size
        config.n_heads = esm_config.num_attention_heads
        config.n_layers = esm_config.num_hidden_layers
        config.vocab_size = esm_config.vocab_size
        model = RandomTransformer(config).eval()
    return model, tokenizer


if __name__ == '__main__':
    model, tokenizer = build_random_model('Random-Transformer')
    print(model)
    print(tokenizer)
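These random presets presumably serve as baselines for downstream probes: RandomModel returns fresh noise on every call, while RandomTransformer is an untrained network with real ESM2 dimensions. A quick sketch of the distinction:

from src.protify.base_models.random import build_random_model

model, tokenizer = build_random_model('Random')
ids = tokenizer('MKTAYIAK', return_tensors='pt').input_ids
noise = model(ids)  # pure noise embeddings: a performance floor for any probe
model2, _ = build_random_model('Random-ESM2-150')  # untrained weights, ESM2-150 shapes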
data/src/protify/base_models/t5.py
ADDED
@@ -0,0 +1,150 @@
import torch
import torch.nn as nn
from torch.nn import CrossEntropyLoss, MSELoss, BCEWithLogitsLoss
from typing import Optional, Tuple, Union
from transformers.modeling_outputs import SequenceClassifierOutput, TokenClassifierOutput
from transformers import T5EncoderModel, T5Config


class T5ClassificationHead(nn.Module):
    """Head for sentence-level classification tasks."""

    def __init__(self, config: T5Config):
        super().__init__()
        self.dense = nn.Linear(config.d_model, config.d_model)
        self.dropout = nn.Dropout(p=config.classifier_dropout)
        self.out_proj = nn.Linear(config.d_model, config.num_labels)

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        hidden_states = self.dropout(hidden_states)
        hidden_states = self.dense(hidden_states)
        hidden_states = torch.tanh(hidden_states)
        hidden_states = self.dropout(hidden_states)
        hidden_states = self.out_proj(hidden_states)
        return hidden_states


class T5ForSequenceClassification(T5EncoderModel):
    config_class = T5Config

    def __init__(self, config: T5Config):
        super().__init__(config)
        self.classifier = T5ClassificationHead(config)

    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.Tensor] = None,  # referenced below but missing from the original signature
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> SequenceClassifierOutput:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        encoder_outputs = self.encoder(
            input_ids=input_ids,
            attention_mask=attention_mask,
            inputs_embeds=inputs_embeds,
            head_mask=head_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        sequence_output = encoder_outputs[0]
        cls_token = sequence_output[:, 0, :]

        logits = self.classifier(cls_token)

        loss = None
        if labels is not None:
            labels = labels.to(logits.device)
            if self.config.problem_type is None:
                if self.config.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.config.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.config.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.config.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        return SequenceClassifierOutput(
            loss=loss,
            logits=logits
        )


class T5ForTokenClassification(T5EncoderModel):
    config_class = T5Config

    def __init__(self, config: T5Config):
        super().__init__(config)
        self.classifier = T5ClassificationHead(config)

    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.Tensor] = None,  # referenced below but missing from the original signature
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> TokenClassifierOutput:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        encoder_outputs = self.encoder(
            input_ids=input_ids,
            attention_mask=attention_mask,
            inputs_embeds=inputs_embeds,
            head_mask=head_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        sequence_output = encoder_outputs[0]

        logits = self.classifier(sequence_output)

        loss = None
        if labels is not None:
            labels = labels.to(logits.device)
            if self.config.problem_type is None:
                if self.config.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.config.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.config.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.config.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        return TokenClassifierOutput(
            loss=loss,
            logits=logits,
        )
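The problem_type dispatch shared by both heads above, isolated as a runnable sketch with hypothetical tensors (integer labels trigger the single-label branch):

import torch
from torch.nn import CrossEntropyLoss

logits = torch.randn(4, 3)           # (batch, num_labels=3)
labels = torch.tensor([0, 2, 1, 2])  # long dtype -> "single_label_classification"
loss = CrossEntropyLoss()(logits.view(-1, 3), labels.view(-1))
print(loss.item())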
data/src/protify/base_models/utils.py
ADDED
@@ -0,0 +1,19 @@
import torch.nn as nn
from peft import LoraConfig, LoraModel


def wrap_lora(module: nn.Module, r: int, lora_alpha: float, lora_dropout: float) -> nn.Module:
    # these modules handle ESM++ and ESM2 attention types, as well as any additional transformer blocks from Syndev
    target_modules = ["layernorm_qkv.1", "out_proj", "query", "key", "value", "dense"]
    lora_config = LoraConfig(
        r=r,
        lora_alpha=lora_alpha,
        lora_dropout=lora_dropout,
        bias="none",
        target_modules=target_modules,
    )
    module = LoraModel(module, lora_config, 'default')
    # keep any classification head fully trainable alongside the adapters
    for name, param in module.named_parameters():
        if 'classifier' in name.lower():
            param.requires_grad = True
    return module
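A hedged usage sketch for wrap_lora with illustrative (not repo-default) hyperparameters; the stock ESM2 module names 'query', 'key', 'value', and 'dense' in target_modules are what make this apply to a plain ESM2 backbone:

from transformers import EsmModel
from src.protify.base_models.utils import wrap_lora

backbone = EsmModel.from_pretrained('facebook/esm2_t6_8M_UR50D')
backbone = wrap_lora(backbone, r=8, lora_alpha=16.0, lora_dropout=0.05)
trainable = sum(p.numel() for p in backbone.parameters() if p.requires_grad)
print(f'{trainable} trainable parameters after LoRA wrapping')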
data/src/protify/data/__init__.py
ADDED
@@ -0,0 +1,15 @@
from .supported_datasets import (
    supported_datasets,
    internal_datasets,
    possible_with_vector_reps,
    standard_data_benchmark,
    testing,
    residue_wise_problems
)

try:
    from .dataset_descriptions import dataset_descriptions
except ImportError:
    dataset_descriptions = {}

from .dataset_utils import list_supported_datasets, get_dataset_info
data/src/protify/data/data_collators.py
ADDED
@@ -0,0 +1,295 @@
import torch
from typing import List, Tuple, Dict, Union
from .utils import pad_and_concatenate_dimer


def _pad_matrix_embeds(embeds: List[torch.Tensor], max_len: int) -> Tuple[torch.Tensor, torch.Tensor]:
    # pad and concatenate, return padded embeds and mask
    padded_embeds, attention_masks = [], []
    for embed in embeds:
        seq_len = embed.size(0)
        padding_size = max_len - seq_len

        # Create attention mask (1 for real tokens, 0 for padding)
        attention_mask = torch.ones(max_len, dtype=torch.long)
        if padding_size > 0:
            attention_mask[seq_len:] = 0

            # Pad along the sequence dimension (dim=0)
            padding = torch.zeros((padding_size, embed.size(1)), dtype=embed.dtype)
            padded_embed = torch.cat((embed, padding), dim=0)
        else:
            padded_embed = embed

        padded_embeds.append(padded_embed)
        attention_masks.append(attention_mask)

    return torch.stack(padded_embeds), torch.stack(attention_masks)


class StringCollator:
    def __init__(self, tokenizer, **kwargs):
        self.tokenizer = tokenizer

    def __call__(self, batch: Tuple[List[str], List[str]]) -> Dict[str, torch.Tensor]:
        batch = self.tokenizer(batch,
                               padding='longest',
                               return_tensors='pt',
                               add_special_tokens=True)
        return batch


class StringLabelsCollator:
    def __init__(self, tokenizer, task_type='tokenwise', **kwargs):
        self.tokenizer = tokenizer
        self.task_type = task_type

    def __call__(self, batch: List[Tuple[str, Union[float, int]]]) -> Dict[str, torch.Tensor]:
        seqs = [ex[0] for ex in batch]
        labels = [ex[1] for ex in batch]

        # Tokenize the sequences
        batch_encoding = self.tokenizer(
            seqs,
            padding='longest',
            truncation=False,
            return_tensors='pt',
            add_special_tokens=True
        )

        # Handle labels based on tokenwise flag
        if self.task_type == 'tokenwise':
            # For token-wise labels, we need to pad to match the tokenized sequence length
            attention_mask = batch_encoding['attention_mask']
            lengths = [torch.sum(attention_mask[i]).item() for i in range(len(batch))]
            max_length = max(lengths)

            padded_labels = []
            for label in labels:
                if not isinstance(label, torch.Tensor):
                    label = torch.tensor(label)

                label = label.flatten()
                padding_size = max_length - len(label)
                # Pad or truncate labels to match tokenized sequence length
                if padding_size > 0:
                    # Pad with -100 (ignored by loss functions)
                    padding = torch.full((padding_size,), -100, dtype=label.dtype)
                    padded_label = torch.cat((label, padding))
                else:
                    padded_label = label[:max_length]
                padded_labels.append(padded_label)

            # Stack all padded labels
            batch_encoding['labels'] = torch.stack(padded_labels)
        else:
            # For sequence-level labels, just stack them
            batch_encoding['labels'] = torch.stack([torch.tensor(ex[1]) for ex in batch])

        if self.task_type == 'multilabel':
            batch_encoding['labels'] = batch_encoding['labels'].float()
        else:
            batch_encoding['labels'] = batch_encoding['labels'].long()

        return batch_encoding
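A minimal sketch of wiring StringLabelsCollator into a DataLoader for a sequence-level task; the inline dataset and the tokenizer source are stand-ins (any tokenizer wrapper from base_models works here), and the 'sequencewise' value is illustrative since anything other than 'tokenwise' takes the sequence-level path:

from torch.utils.data import DataLoader
from src.protify.base_models.esm2 import get_esm2_tokenizer

tokenizer = get_esm2_tokenizer('ESM2-8')
data = [('MKTAYIAK', 1), ('MEKVQYLTRS', 0)]
collator = StringLabelsCollator(tokenizer, task_type='sequencewise')
loader = DataLoader(data, batch_size=2, collate_fn=collator)
batch = next(iter(loader))  # input_ids, attention_mask, labels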
97 |
+
class EmbedsLabelsCollator:
|
98 |
+
def __init__(self, full=False, task_type='tokenwise', **kwargs):
|
99 |
+
self.full = full
|
100 |
+
self.task_type = task_type
|
101 |
+
|
102 |
+
def __call__(self, batch: List[Tuple[torch.Tensor, Union[float, int]]]) -> Dict[str, torch.Tensor]:
|
103 |
+
if self.full:
|
104 |
+
embeds = [ex[0] for ex in batch]
|
105 |
+
labels = [ex[1] for ex in batch]
|
106 |
+
|
107 |
+
# Find max sequence length for padding
|
108 |
+
max_length = max(embed.size(0) for embed in embeds)
|
109 |
+
|
110 |
+
embeds, attention_mask = _pad_matrix_embeds(embeds, max_length)
|
111 |
+
|
112 |
+
# Pad labels
|
113 |
+
if self.task_type == 'tokenwise':
|
114 |
+
padded_labels = []
|
115 |
+
for label in labels:
|
116 |
+
if not isinstance(label, torch.Tensor):
|
117 |
+
label = torch.tensor(label)
|
118 |
+
|
119 |
+
label = label.flatten()
|
120 |
+
padding_size = max_length - len(label)
|
121 |
+
if padding_size > 0:
|
122 |
+
# Use -100 as padding value for labels (ignored by loss functions)
|
123 |
+
padding = torch.full((padding_size,), -100, dtype=label.dtype)
|
124 |
+
padded_label = torch.cat((label, padding))
|
125 |
+
else:
|
126 |
+
padded_label = label[:max_length]
|
127 |
+
padded_labels.append(padded_label)
|
128 |
+
else:
|
129 |
+
padded_labels = labels
|
130 |
+
|
131 |
+
labels = torch.stack(padded_labels)
|
132 |
+
|
133 |
+
if self.task_type == 'multilabel':
|
134 |
+
labels = labels.float()
|
135 |
+
else:
|
136 |
+
labels = labels.long()
|
137 |
+
|
138 |
+
return {
|
139 |
+
'embeddings': embeds,
|
140 |
+
'attention_mask': attention_mask,
|
141 |
+
'labels': labels,
|
142 |
+
}
|
143 |
+
else:
|
144 |
+
embeds = torch.stack([ex[0] for ex in batch])
|
145 |
+
labels = torch.stack([ex[1] for ex in batch])
|
146 |
+
|
147 |
+
if self.task_type == 'multilabel':
|
148 |
+
labels = labels.float()
|
149 |
+
else:
|
150 |
+
labels = labels.long()
|
151 |
+
|
152 |
+
return {
|
153 |
+
'embeddings': embeds,
|
154 |
+
'labels': labels
|
155 |
+
}
|


class PairCollator_input_ids:
    def __init__(self, tokenizer, **kwargs):
        self.tokenizer = tokenizer

    def __call__(self, batch: List[Tuple[str, str, Union[float, int]]]) -> Dict[str, torch.Tensor]:
        seqs_a, seqs_b, labels = zip(*batch)
        labels = torch.tensor(labels, dtype=torch.float)
        tokenized = self.tokenizer(
            seqs_a, seqs_b,
            padding='longest',
            return_tensors='pt'
        )
        return {
            'input_ids': tokenized['input_ids'],
            'attention_mask': tokenized['attention_mask'],
            'labels': labels
        }


class PairCollator_ab:
    def __init__(self, tokenizer, **kwargs):
        self.tokenizer = tokenizer

    def __call__(self, batch: List[Tuple[str, str, Union[float, int]]]) -> Dict[str, torch.Tensor]:
        seqs_a, seqs_b, labels = zip(*batch)
        labels = torch.tensor(labels, dtype=torch.float)
        tokenized_a = self.tokenizer(
            seqs_a,
            padding='longest',
            truncation=True,
            return_tensors='pt'
        )
        tokenized_b = self.tokenizer(
            seqs_b,
            padding='longest',
            truncation=True,
            return_tensors='pt'
        )
        return {
            'input_ids_a': tokenized_a['input_ids'],
            'input_ids_b': tokenized_b['input_ids'],
            'attention_mask_a': tokenized_a['attention_mask'],
            'attention_mask_b': tokenized_b['attention_mask'],
            'labels': labels
        }
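# Example (a minimal sketch; `tokenizer` is any Hugging Face tokenizer for protein
# sequences -- the exact checkpoint is left open here):
#
#     >>> collator = PairCollator_ab(tokenizer)
#     >>> out = collator([('MKTAYIAK', 'MSDNE', 1.0), ('MGSSHH', 'MKV', 0.0)])
#     >>> sorted(out.keys())
#     ['attention_mask_a', 'attention_mask_b', 'input_ids_a', 'input_ids_b', 'labels']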


class PairEmbedsLabelsCollator:
    def __init__(self, full=False, **kwargs):
        self.full = full

    def __call__(self, batch: List[Tuple[torch.Tensor, torch.Tensor, Union[float, int]]]) -> Dict[str, torch.Tensor]:
        if self.full:
            embeds_a = [ex[0] for ex in batch]
            embeds_b = [ex[1] for ex in batch]
            max_len_a = max(embed.size(0) for embed in embeds_a)
            max_len_b = max(embed.size(0) for embed in embeds_b)
            embeds_a, attention_mask_a = _pad_matrix_embeds(embeds_a, max_len_a)
            embeds_b, attention_mask_b = _pad_matrix_embeds(embeds_b, max_len_b)
            embeds, attention_mask = pad_and_concatenate_dimer(embeds_a, embeds_b, attention_mask_a, attention_mask_b)

            labels = torch.stack([ex[2] for ex in batch])

            return {
                'embeddings': embeds,
                'attention_mask': attention_mask,
                'labels': labels
            }
        else:
            embeds_a = torch.stack([ex[0] for ex in batch])
            embeds_b = torch.stack([ex[1] for ex in batch])
            labels = torch.stack([ex[2] for ex in batch])
            embeds = torch.cat([embeds_a, embeds_b], dim=-1)
            return {
                'embeddings': embeds,
                'labels': labels
            }


class OneHotCollator:
    def __init__(self, alphabet="ACDEFGHIKLMNPQRSTVWY"):
        # Add X for unknown amino acids, and special CLS and EOS tokens
        alphabet = alphabet + "X"
        alphabet = list(alphabet)
        alphabet.append('cls')
        alphabet.append('eos')
        self.alphabet = alphabet  # stored so __call__ can size the one-hot dimension
        self.mapping = {token: idx for idx, token in enumerate(alphabet)}

    def __call__(self, batch):
        seqs = [ex[0] for ex in batch]
        labels = torch.stack([torch.tensor(ex[1]) for ex in batch])

        # Find the longest sequence in the batch (plus 2 for CLS and EOS)
        max_len = max(len(seq) for seq in seqs) + 2

        # One-hot encode and pad each sequence
        one_hot_tensors = []
        attention_masks = []

        for seq in seqs:
            seq = ['cls'] + list(seq) + ['eos']
            # Create one-hot encoding for each sequence (including CLS and EOS)
            seq_len = len(seq)
            one_hot = torch.zeros(seq_len, len(self.alphabet))

            # Add sequence tokens in the middle
            for pos, token in enumerate(seq):
                if token in self.mapping:
                    one_hot[pos, self.mapping[token]] = 1.0
                else:
                    # For non-canonical amino acids, use the X token
                    one_hot[pos, self.mapping["X"]] = 1.0

            # Create attention mask (1 for actual tokens, 0 for padding)
            attention_mask = torch.ones(seq_len)

            # Pad to the max length in this batch
            padding_size = max_len - seq_len
            if padding_size > 0:
                padding = torch.zeros(padding_size, len(self.alphabet))
                one_hot = torch.cat([one_hot, padding], dim=0)
                # Add zeros to attention mask for padding
                mask_padding = torch.zeros(padding_size)
                attention_mask = torch.cat([attention_mask, mask_padding], dim=0)

            one_hot_tensors.append(one_hot)
            attention_masks.append(attention_mask)

        # Stack all tensors in the batch
        embeddings = torch.stack(one_hot_tensors)
        attention_masks = torch.stack(attention_masks)

        return {
            'embeddings': embeddings,
            'attention_mask': attention_masks,
            'labels': labels,
        }
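For orientation, a minimal sketch of the one-hot collator on invented toy data; the shapes follow from the 23-token alphabet (20 amino acids plus X, cls, and eos) and the CLS/EOS handling above:

collator = OneHotCollator()
out = collator([('MKV', 0), ('MK', 1)])
print(out['embeddings'].shape)   # torch.Size([2, 5, 23]) -- longest seq (3) + 2 special tokens
print(out['attention_mask'][1])  # tensor([1., 1., 1., 1., 0.]) -- 'MK' is padded by one position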
data/src/protify/data/data_mixin.py
ADDED
@@ -0,0 +1,472 @@
import torch
import numpy as np
import random
import os
import sqlite3
from typing import List, Tuple, Dict, Optional
from glob import glob
from pandas import read_csv, read_excel
from datasets import load_dataset, Dataset
from dataclasses import dataclass
from utils import print_message
from .supported_datasets import supported_datasets, standard_data_benchmark


AMINO_ACIDS = set('LAGVSERTIPDKQNFYMHWCXBUOZ* ')
CODONS = set('aA@bB#$%rRnNdDcCeEqQ^G&ghHiIj+MmlJLkK(fFpPoO=szZwSXTtxWyYuvUV]}) ')
DNA = set('ATCG ')
RNA = set('AUCG ')


@dataclass
class DataArguments:
    """
    Args:
        data_names: List[str]
            names of (or paths to) the datasets
        delimiter: str
            column delimiter for local csv/tsv/txt files
        col_names: List[str]
            column names for local files
        max_length: int
            max length of sequences
        trim: bool
            whether to trim sequences to max_length
        data_dirs: Optional[List[str]]
            local directories containing train/valid/test splits
    """
    def __init__(
        self,
        data_names: List[str],
        delimiter: str = ',',
        col_names: List[str] = ['seqs', 'labels'],
        max_length: int = 1024,
        trim: bool = False,
        data_dirs: Optional[List[str]] = [],
        **kwargs
    ):
        self.data_names = data_names
        self.data_dirs = data_dirs
        self.delimiter = delimiter
        self.col_names = col_names
        self.max_length = max_length
        self.trim = trim

        if data_names[0] == 'standard_benchmark':
            self.data_paths = [supported_datasets[data_name] for data_name in standard_data_benchmark]
        else:
            self.data_paths = []
            for data_name in data_names:
                if data_name in supported_datasets:
                    self.data_paths.append(supported_datasets[data_name])
                else:
                    self.data_paths.append(data_name)

        if data_dirs is not None:
            for dir in data_dirs:
                if os.path.exists(dir):
                    self.data_paths.append(dir)
                else:
                    raise FileNotFoundError(f'{dir} does not exist')
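# Example (a minimal sketch): names listed in supported_datasets resolve to
# Hugging Face repo paths; anything else is passed through as a literal path.
#
#     >>> args = DataArguments(data_names=['EC', 'DeepLoc-2'], max_length=512, trim=True)
#     >>> args.data_paths
#     ['GleghornLab/EC_reg', 'GleghornLab/DL2_reg']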


class DataMixin:
    def __init__(self, data_args: Optional[DataArguments] = None):
        # initialize defaults
        self._sql = False
        self._full = False
        self._max_length = 1024
        self._trim = False
        self._delimiter = ','
        self._col_names = ['seqs', 'labels']
        self.data_args = data_args

    def _not_regression(self, labels):  # not a great assumption but works most of the time
        return all(isinstance(label, (int, float)) and label == int(label) for label in labels)

    def _encode_labels(self, labels, tag2id):
        return [torch.tensor([tag2id[tag] for tag in doc], dtype=torch.long) for doc in labels]

    def _label_type_checker(self, labels):
        ex = labels[0]
        if self._not_regression(labels):
            if isinstance(ex, list):
                label_type = 'multilabel'
            elif isinstance(ex, int) or isinstance(ex, float):
                label_type = 'singlelabel'  # binary or multiclass
            elif isinstance(ex, str):
                label_type = 'string'
        else:
            label_type = 'regression'
        return label_type

    def _select_from_sql(self, c, seq, cast_to_torch=True):
        c.execute("SELECT embedding FROM embeddings WHERE sequence = ?", (seq,))
        embedding = np.frombuffer(c.fetchone()[0], dtype=np.float32).reshape(1, -1)
        if self._full:
            embedding = embedding.reshape(len(seq), -1)
        if cast_to_torch:
            embedding = torch.tensor(embedding)
        return embedding

    def _select_from_pth(self, emb_dict, seq, cast_to_np=False):
        embedding = emb_dict[seq].reshape(1, -1)
        if self._full:
            embedding = embedding.reshape(len(seq), -1)
        if cast_to_np:
            embedding = embedding.numpy()
        return embedding

    def _labels_to_numpy(self, labels):
        if isinstance(labels[0], list):
            return np.array(labels).flatten()
        else:
            return np.array([labels]).flatten()

    def _random_order(self, seq_a, seq_b):
        if random.random() < 0.5:
            return seq_a, seq_b
        else:
            return seq_b, seq_a

    def _truncate_pairs(self, ex):
        # Truncate the longest first, but if that makes it shorter than the other, truncate that one
        seq_a, seq_b = ex['SeqA'], ex['SeqB']
        trunc_a, trunc_b = seq_a, seq_b
        while len(trunc_a) + len(trunc_b) > self._max_length:
            if len(trunc_a) > len(trunc_b):
                trunc_a = trunc_a[:-1]
            else:
                trunc_b = trunc_b[:-1]
        ex['SeqA'] = trunc_a
        ex['SeqB'] = trunc_b
        return ex
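    # Worked example of the rule above with _max_length = 6:
    #   ('AAAAA', 'BBB') -> ('AAAA', 'BBB') -> ('AAA', 'BBB'); the total is now 6,
    # and the longer sequence shrinks first, so neither member is dropped entirely.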

    def process_datasets(
            self,
            hf_datasets: List[Tuple[Dataset, Dataset, Dataset, bool]],
            data_names: List[str],
    ) -> Tuple[Dict[str, Tuple[Dataset, Dataset, Dataset, int, str, bool]], List[str]]:
        max_length = self._max_length
        datasets, all_seqs = {}, set()
        for dataset, data_name in zip(hf_datasets, data_names):
            print_message(f'Processing {data_name}')
            train_set, valid_set, test_set, ppi = dataset
            if self._trim:  # trim by length if necessary
                original_train_size, original_valid_size, original_test_size = len(train_set), len(valid_set), len(test_set)
                if ppi:
                    train_set = train_set.filter(lambda x: len(x['SeqA']) + len(x['SeqB']) <= max_length)
                    valid_set = valid_set.filter(lambda x: len(x['SeqA']) + len(x['SeqB']) <= max_length)
                    test_set = test_set.filter(lambda x: len(x['SeqA']) + len(x['SeqB']) <= max_length)
                else:
                    train_set = train_set.filter(lambda x: len(x['seqs']) <= max_length)
                    valid_set = valid_set.filter(lambda x: len(x['seqs']) <= max_length)
                    test_set = test_set.filter(lambda x: len(x['seqs']) <= max_length)

                print_message(f'Trimmed {100 * round((original_train_size-len(train_set)) / original_train_size, 2)}% from train')
                print_message(f'Trimmed {100 * round((original_valid_size-len(valid_set)) / original_valid_size, 2)}% from valid')
                print_message(f'Trimmed {100 * round((original_test_size-len(test_set)) / original_test_size, 2)}% from test')

            else:  # truncate to max_length
                if ppi:
                    train_set = train_set.map(self._truncate_pairs)
                    valid_set = valid_set.map(self._truncate_pairs)
                    test_set = test_set.map(self._truncate_pairs)
                else:
                    train_set = train_set.map(lambda x: {'seqs': x['seqs'][:max_length]})
                    valid_set = valid_set.map(lambda x: {'seqs': x['seqs'][:max_length]})
                    test_set = test_set.map(lambda x: {'seqs': x['seqs'][:max_length]})

            # sanitize
            if ppi:
                train_set = train_set.map(lambda x: {'SeqA': ''.join(aa for aa in x['SeqA'] if aa in AMINO_ACIDS),
                                                     'SeqB': ''.join(aa for aa in x['SeqB'] if aa in AMINO_ACIDS)})
                valid_set = valid_set.map(lambda x: {'SeqA': ''.join(aa for aa in x['SeqA'] if aa in AMINO_ACIDS),
                                                     'SeqB': ''.join(aa for aa in x['SeqB'] if aa in AMINO_ACIDS)})
                test_set = test_set.map(lambda x: {'SeqA': ''.join(aa for aa in x['SeqA'] if aa in AMINO_ACIDS),
                                                   'SeqB': ''.join(aa for aa in x['SeqB'] if aa in AMINO_ACIDS)})
                all_seqs.update(train_set['SeqA'] + train_set['SeqB'])
                all_seqs.update(valid_set['SeqA'] + valid_set['SeqB'])
                all_seqs.update(test_set['SeqA'] + test_set['SeqB'])
            else:
                train_set = train_set.map(lambda x: {'seqs': ''.join(aa for aa in x['seqs'] if aa in AMINO_ACIDS)})
                valid_set = valid_set.map(lambda x: {'seqs': ''.join(aa for aa in x['seqs'] if aa in AMINO_ACIDS)})
                test_set = test_set.map(lambda x: {'seqs': ''.join(aa for aa in x['seqs'] if aa in AMINO_ACIDS)})
                all_seqs.update(train_set['seqs'])
                all_seqs.update(valid_set['seqs'])
                all_seqs.update(test_set['seqs'])

            # confirm the type of labels
            check_labels = valid_set['labels']
            label_type = self._label_type_checker(check_labels)

            if label_type == 'string':  # might be string or multilabel
                example = valid_set['labels'][0]
                try:
                    import ast
                    new_ex = ast.literal_eval(example)
                    if isinstance(new_ex, list):  # if ast parses it into a list, these are multilabel labels
                        label_type = 'multilabel'
                        train_set = train_set.map(lambda ex: {'labels': ast.literal_eval(ex['labels'])})
                        valid_set = valid_set.map(lambda ex: {'labels': ast.literal_eval(ex['labels'])})
                        test_set = test_set.map(lambda ex: {'labels': ast.literal_eval(ex['labels'])})
                except Exception:
                    label_type = 'string'  # if ast throws an error it is actually a string

            if label_type == 'string':  # if still string, it's for tokenwise classification
                train_labels = train_set['labels']
                unique_tags = set(tag for doc in train_labels for tag in doc)
                tag2id = {tag: id for id, tag in enumerate(sorted(unique_tags))}
                # encode string tags as integer ids
                train_set = train_set.map(lambda ex: {'labels': self._encode_labels(ex['labels'], tag2id=tag2id)})
                valid_set = valid_set.map(lambda ex: {'labels': self._encode_labels(ex['labels'], tag2id=tag2id)})
                test_set = test_set.map(lambda ex: {'labels': self._encode_labels(ex['labels'], tag2id=tag2id)})
                label_type = 'tokenwise'
                num_labels = len(unique_tags)
            else:
                if label_type == 'regression':
                    num_labels = 1
                else:  # if classification, get the total number of labels
                    try:
                        num_labels = len(train_set['labels'][0])
                    except Exception:
                        unique = np.unique(train_set['labels'])
                        max_label = max(unique)  # sometimes there are missing labels
                        full_list = np.arange(0, max_label+1)
                        num_labels = len(full_list)
            datasets[data_name] = (train_set, valid_set, test_set, num_labels, label_type, ppi)

        all_seqs = list(all_seqs)
        all_seqs = sorted(all_seqs, key=len, reverse=True)  # longest first
        return datasets, all_seqs

    def get_data(self):
        """
        Supports .csv, .tsv, .txt
        TODO fasta, fa, fna, etc.
        """
        datasets, data_names = [], []

        for data_path in self.data_args.data_paths:
            data_name = data_path.split('/')[-1]
            print_message(f'Loading {data_name}')
            dataset = load_dataset(data_path)
            ppi = 'SeqA' in dataset['train'].column_names
            print_message(f'PPI: {ppi}')
            try:
                train_set, valid_set, test_set = dataset['train'], dataset['valid'], dataset['test']
            except Exception:
                # No valid or test set: carve disjoint 80/10/10 splits out of train
                split = dataset['train'].train_test_split(test_size=0.2, seed=42)
                train_set = split['train']
                heldout = split['test'].train_test_split(test_size=0.5, seed=42)
                valid_set, test_set = heldout['train'], heldout['test']
            datasets.append((train_set, valid_set, test_set, ppi))
            data_names.append(data_name)

        for data_dir in self.data_args.data_dirs:
            data_name = data_dir.split('/')[-2]
            ppi = 'ppi' in data_dir.lower()
            train_path = glob(os.path.join(data_dir, 'train.*'))[0]
            valid_path = glob(os.path.join(data_dir, 'valid.*'))[0]
            test_path = glob(os.path.join(data_dir, 'test.*'))[0]
            if '.xlsx' in train_path:
                train_set = read_excel(train_path)
                valid_set = read_excel(valid_path)
                test_set = read_excel(test_path)
            else:
                train_set = read_csv(train_path, delimiter=self._delimiter, names=self._col_names)
                valid_set = read_csv(valid_path, delimiter=self._delimiter, names=self._col_names)
                test_set = read_csv(test_path, delimiter=self._delimiter, names=self._col_names)

            train_set = Dataset.from_pandas(train_set)
            valid_set = Dataset.from_pandas(valid_set)
            test_set = Dataset.from_pandas(test_set)
            datasets.append((train_set, valid_set, test_set, ppi))
            data_names.append(data_name)

        return self.process_datasets(hf_datasets=datasets, data_names=data_names)

    def get_embedding_dim_sql(self, save_path, test_seq):
        test_seq_len = len(test_seq) + 2

        with sqlite3.connect(save_path) as conn:
            c = conn.cursor()
            c.execute("SELECT embedding FROM embeddings WHERE sequence = ?", (test_seq,))
            test_embedding = c.fetchone()[0]
            test_embedding = torch.tensor(np.frombuffer(test_embedding, dtype=np.float32).reshape(1, -1))
            if self._full:
                try:
                    test_embedding = test_embedding.reshape(test_seq_len, -1)
                except Exception:
                    test_embedding = test_embedding.reshape(test_seq_len - 1, -1)  # some pLMs have only one special token added
            embedding_dim = test_embedding.shape[-1]
        return embedding_dim

    def get_embedding_dim_pth(self, emb_dict, test_seq):
        test_seq_len = len(test_seq) + 2

        test_embedding = emb_dict[test_seq]
        print_message(f'Test embedding shape: {test_embedding.shape}')
        if self._full:
            try:
                test_embedding = test_embedding.reshape(test_seq_len, -1)
            except Exception:
                test_embedding = test_embedding.reshape(test_seq_len - 1, -1)  # some pLMs have only one special token added
        else:
            test_embedding = test_embedding.reshape(1, -1)
        embedding_dim = test_embedding.shape[-1]
        return embedding_dim

    def build_vector_numpy_dataset_from_embeddings(
            self,
            model_name,
            train_seqs,
            valid_seqs,
            test_seqs,
    ):
        save_dir = self.embedding_args.embedding_save_dir
        train_array, valid_array, test_array = [], [], []
        if self._sql:
            save_path = os.path.join(save_dir, f'{model_name}_{self._full}.db')
            with sqlite3.connect(save_path) as conn:
                c = conn.cursor()
                for seq in train_seqs:
                    embedding = self._select_from_sql(c, seq, cast_to_torch=False)
                    train_array.append(embedding)

                for seq in valid_seqs:
                    embedding = self._select_from_sql(c, seq, cast_to_torch=False)
                    valid_array.append(embedding)

                for seq in test_seqs:
                    embedding = self._select_from_sql(c, seq, cast_to_torch=False)
                    test_array.append(embedding)
        else:
            save_path = os.path.join(save_dir, f'{model_name}_{self._full}.pth')
            emb_dict = torch.load(save_path)
            for seq in train_seqs:
                embedding = self._select_from_pth(emb_dict, seq, cast_to_np=True)
                train_array.append(embedding)

            for seq in valid_seqs:
                embedding = self._select_from_pth(emb_dict, seq, cast_to_np=True)
                valid_array.append(embedding)

            for seq in test_seqs:
                embedding = self._select_from_pth(emb_dict, seq, cast_to_np=True)
                test_array.append(embedding)
            del emb_dict

        train_array = np.concatenate(train_array, axis=0)
        valid_array = np.concatenate(valid_array, axis=0)
        test_array = np.concatenate(test_array, axis=0)

        if self._full:  # average over the length of the sequence
            train_array = np.mean(train_array, axis=1)
            valid_array = np.mean(valid_array, axis=1)
            test_array = np.mean(test_array, axis=1)

        print_message('Numpy dataset shapes')
        print_message(f'Train: {train_array.shape}')
        print_message(f'Valid: {valid_array.shape}')
        print_message(f'Test: {test_array.shape}')
        return train_array, valid_array, test_array

    def build_pair_vector_numpy_dataset_from_embeddings(
            self,
            model_name,
            train_seqs_a,
            train_seqs_b,
            valid_seqs_a,
            valid_seqs_b,
            test_seqs_a,
            test_seqs_b,
    ):
        save_dir = self.embedding_args.embedding_save_dir
        train_array, valid_array, test_array = [], [], []
        if self._sql:
            save_path = os.path.join(save_dir, f'{model_name}_{self._full}.db')
            with sqlite3.connect(save_path) as conn:
                c = conn.cursor()
                for seq_a, seq_b in zip(train_seqs_a, train_seqs_b):
                    seq_a, seq_b = self._random_order(seq_a, seq_b)
                    embedding_a = self._select_from_sql(c, seq_a, cast_to_torch=False)
                    embedding_b = self._select_from_sql(c, seq_b, cast_to_torch=False)
                    train_array.append(np.concatenate([embedding_a, embedding_b], axis=-1))

                for seq_a, seq_b in zip(valid_seqs_a, valid_seqs_b):
                    seq_a, seq_b = self._random_order(seq_a, seq_b)
                    embedding_a = self._select_from_sql(c, seq_a, cast_to_torch=False)
                    embedding_b = self._select_from_sql(c, seq_b, cast_to_torch=False)
                    valid_array.append(np.concatenate([embedding_a, embedding_b], axis=-1))

                for seq_a, seq_b in zip(test_seqs_a, test_seqs_b):
                    seq_a, seq_b = self._random_order(seq_a, seq_b)
                    embedding_a = self._select_from_sql(c, seq_a, cast_to_torch=False)
                    embedding_b = self._select_from_sql(c, seq_b, cast_to_torch=False)
                    test_array.append(np.concatenate([embedding_a, embedding_b], axis=-1))
        else:
            save_path = os.path.join(save_dir, f'{model_name}_{self._full}.pth')
            emb_dict = torch.load(save_path)
            for seq_a, seq_b in zip(train_seqs_a, train_seqs_b):
                seq_a, seq_b = self._random_order(seq_a, seq_b)
                embedding_a = self._select_from_pth(emb_dict, seq_a, cast_to_np=True)
                embedding_b = self._select_from_pth(emb_dict, seq_b, cast_to_np=True)
                train_array.append(np.concatenate([embedding_a, embedding_b], axis=-1))

            for seq_a, seq_b in zip(valid_seqs_a, valid_seqs_b):
                seq_a, seq_b = self._random_order(seq_a, seq_b)
                embedding_a = self._select_from_pth(emb_dict, seq_a, cast_to_np=True)
                embedding_b = self._select_from_pth(emb_dict, seq_b, cast_to_np=True)
                valid_array.append(np.concatenate([embedding_a, embedding_b], axis=-1))

            for seq_a, seq_b in zip(test_seqs_a, test_seqs_b):
                seq_a, seq_b = self._random_order(seq_a, seq_b)
                embedding_a = self._select_from_pth(emb_dict, seq_a, cast_to_np=True)
                embedding_b = self._select_from_pth(emb_dict, seq_b, cast_to_np=True)
                test_array.append(np.concatenate([embedding_a, embedding_b], axis=-1))
            del emb_dict

        train_array = np.concatenate(train_array, axis=0)
        valid_array = np.concatenate(valid_array, axis=0)
        test_array = np.concatenate(test_array, axis=0)

        if self._full:  # average over the length of the sequence
            train_array = np.mean(train_array, axis=1)
            valid_array = np.mean(valid_array, axis=1)
            test_array = np.mean(test_array, axis=1)

        print_message('Numpy dataset shapes')
        print_message(f'Train: {train_array.shape}')
        print_message(f'Valid: {valid_array.shape}')
        print_message(f'Test: {test_array.shape}')
        return train_array, valid_array, test_array

    def prepare_scikit_dataset(self, model_name, dataset):
        train_set, valid_set, test_set, _, label_type, ppi = dataset

        if ppi:
            X_train, X_valid, X_test = self.build_pair_vector_numpy_dataset_from_embeddings(
                model_name,
                train_set['SeqA'],
                train_set['SeqB'],
                valid_set['SeqA'],
                valid_set['SeqB'],
                test_set['SeqA'],
                test_set['SeqB'],
            )
        else:
            X_train, X_valid, X_test = self.build_vector_numpy_dataset_from_embeddings(
                model_name,
                train_set['seqs'],
                valid_set['seqs'],
                test_set['seqs'],
            )

        y_train = self._labels_to_numpy(train_set['labels'])
        y_valid = self._labels_to_numpy(valid_set['labels'])
        y_test = self._labels_to_numpy(test_set['labels'])

        print_message('Numpy dataset shapes with labels')
        print_message(f'Train: {X_train.shape}, {y_train.shape}')
        print_message(f'Valid: {X_valid.shape}, {y_valid.shape}')
        print_message(f'Test: {X_test.shape}, {y_test.shape}')
        return X_train, y_train, X_valid, y_valid, X_test, y_test, label_type
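For orientation, a minimal sketch of feeding the output into scikit-learn; `mixin`, `dataset`, and the model name are placeholders, and the embeddings are assumed to have been saved to disk already:

from sklearn.linear_model import LogisticRegression

X_train, y_train, X_valid, y_valid, X_test, y_test, label_type = \
    mixin.prepare_scikit_dataset('esm2_t6', dataset)  # placeholder names
clf = LogisticRegression(max_iter=1000).fit(X_train, y_train)
print(clf.score(X_valid, y_valid))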
data/src/protify/data/dataset_classes.py
ADDED
@@ -0,0 +1,319 @@
### imports
import random
import torch
import numpy as np
import sqlite3
import torch.nn.functional as F
from torch.utils.data import Dataset as TorchDataset
from utils import print_message
from tqdm.auto import tqdm


class PairEmbedsLabelsDatasetFromDisk(TorchDataset):
    def __init__(
        self,
        hf_dataset,
        col_a='SeqA',
        col_b='SeqB',
        label_col='labels',
        full=False,
        db_path='embeddings.db',
        batch_size=64,
        read_scaler=1000,
        input_dim=768,
        task_type='regression',
        **kwargs
    ):
        self.seqs_a, self.seqs_b, self.labels = hf_dataset[col_a], hf_dataset[col_b], hf_dataset[label_col]
        self.db_file = db_path
        self.batch_size = batch_size
        self.input_dim = input_dim
        self.full = full
        self.length = len(self.labels)
        self.read_amt = read_scaler * self.batch_size
        self.embeddings_a, self.embeddings_b, self.current_labels = [], [], []
        self.count, self.index = 0, 0
        self.task_type = task_type

    def __len__(self):
        return self.length

    def check_seqs(self, all_seqs):
        missing_seqs = [seq for seq in self.seqs_a + self.seqs_b if seq not in all_seqs]
        if missing_seqs:
            print_message(f'Sequences not found in embeddings: {missing_seqs}')
        else:
            print_message('All sequences in embeddings')

    def reset_epoch(self):
        data = list(zip(self.seqs_a, self.seqs_b, self.labels))
        random.shuffle(data)
        self.seqs_a, self.seqs_b, self.labels = zip(*data)
        self.seqs_a, self.seqs_b, self.labels = list(self.seqs_a), list(self.seqs_b), list(self.labels)
        self.embeddings_a, self.embeddings_b, self.current_labels = [], [], []
        self.count, self.index = 0, 0

    def get_embedding(self, c, seq):
        result = c.execute("SELECT embedding FROM embeddings WHERE sequence=?", (seq,))
        row = result.fetchone()
        if row is None:
            raise ValueError(f"Embedding not found for sequence: {seq}")
        emb_data = row[0]
        emb = torch.tensor(np.frombuffer(emb_data, dtype=np.float32).reshape(-1, self.input_dim))
        return emb

    def read_embeddings(self):
        embeddings_a, embeddings_b, labels = [], [], []
        self.count += self.read_amt
        if self.count >= self.length:
            self.reset_epoch()
        conn = sqlite3.connect(self.db_file)
        c = conn.cursor()
        for i in range(self.count, self.count + self.read_amt):
            if i >= self.length:
                break
            emb_a = self.get_embedding(c, self.seqs_a[i])
            emb_b = self.get_embedding(c, self.seqs_b[i])
            embeddings_a.append(emb_a)
            embeddings_b.append(emb_b)
            labels.append(self.labels[i])
        conn.close()
        self.index = 0
        self.embeddings_a = embeddings_a
        self.embeddings_b = embeddings_b
        self.current_labels = labels

    def __getitem__(self, idx):
        if self.index >= len(self.current_labels) or len(self.current_labels) == 0:
            self.read_embeddings()

        emb_a = self.embeddings_a[self.index]
        emb_b = self.embeddings_b[self.index]
        label = self.current_labels[self.index]

        self.index += 1

        # 50% chance to switch the order of a and b
        if random.random() < 0.5:
            emb_a, emb_b = emb_b, emb_a

        if self.task_type == 'multilabel' or self.task_type == 'regression':
            label = torch.tensor(label, dtype=torch.float)
        else:
            label = torch.tensor(label, dtype=torch.long)

        return emb_a, emb_b, label


class PairEmbedsLabelsDataset(TorchDataset):
    def __init__(
        self,
        hf_dataset,
        emb_dict,
        col_a='SeqA',
        col_b='SeqB',
        full=False,
        label_col='labels',
        input_dim=768,
        task_type='regression',
        **kwargs
    ):
        self.seqs_a = hf_dataset[col_a]
        self.seqs_b = hf_dataset[col_b]
        self.labels = hf_dataset[label_col]
        self.input_dim = input_dim // 2 if not full else input_dim  # already scaled if ppi
        self.task_type = task_type
        self.full = full

        # Combine seqs_a and seqs_b to find all unique sequences needed
        needed_seqs = set(hf_dataset[col_a] + hf_dataset[col_b])
        # Filter emb_dict to keep only the necessary embeddings
        self.emb_dict = {seq: emb_dict[seq] for seq in needed_seqs if seq in emb_dict}
        # Check for any missing embeddings
        missing_seqs = needed_seqs - self.emb_dict.keys()
        if missing_seqs:
            raise ValueError(f"Embeddings not found for sequences: {missing_seqs}")

    def __len__(self):
        return len(self.labels)

    def __getitem__(self, idx):
        seq_a, seq_b = self.seqs_a[idx], self.seqs_b[idx]
        emb_a = self.emb_dict.get(seq_a).reshape(-1, self.input_dim)
        emb_b = self.emb_dict.get(seq_b).reshape(-1, self.input_dim)

        # 50% chance to switch the order of a and b
        if random.random() < 0.5:
            emb_a, emb_b = emb_b, emb_a

        # Prepare the label
        if self.task_type in ['multilabel', 'regression']:
            label = torch.tensor(self.labels[idx], dtype=torch.float)
        else:
            label = torch.tensor(self.labels[idx], dtype=torch.long)

        return emb_a, emb_b, label


class EmbedsLabelsDatasetFromDisk(TorchDataset):
    def __init__(
        self,
        hf_dataset,
        col_name='seqs',
        label_col='labels',
        full=False,
        db_path='embeddings.db',
        batch_size=64,
        read_scaler=1000,
        input_dim=768,
        task_type='singlelabel',
        **kwargs
    ):
        self.seqs, self.labels = hf_dataset[col_name], hf_dataset[label_col]
        self.length = len(self.labels)
        self.max_length = len(max(self.seqs, key=len))
        print_message(f'Max length: {self.max_length}')

        self.db_file = db_path
        self.batch_size = batch_size
        self.input_dim = input_dim
        self.full = full

        self.task_type = task_type
        self.read_amt = read_scaler * self.batch_size
        self.embeddings, self.current_labels = [], []
        self.count, self.index = 0, 0

        self.reset_epoch()

    def __len__(self):
        return self.length

    def check_seqs(self, all_seqs):
        cond = False
        for seq in self.seqs:
            if seq not in all_seqs:
                cond = True
            if cond:
                break
        if cond:
            print_message('Sequences not found in embeddings')
        else:
            print_message('All sequences in embeddings')

    def reset_epoch(self):
        data = list(zip(self.seqs, self.labels))
        random.shuffle(data)
        self.seqs, self.labels = zip(*data)
        self.seqs, self.labels = list(self.seqs), list(self.labels)
        self.embeddings, self.current_labels = [], []
        self.count, self.index = 0, 0

    def read_embeddings(self):
        embeddings, labels = [], []
        self.count += self.read_amt
        if self.count >= self.length:
            self.reset_epoch()
        conn = sqlite3.connect(self.db_file)
        c = conn.cursor()
        for i in range(self.count, self.count + self.read_amt):
            if i >= self.length:
                break
            result = c.execute("SELECT embedding FROM embeddings WHERE sequence=?", (self.seqs[i],))
            row = result.fetchone()
            emb_data = row[0]
            emb = torch.tensor(np.frombuffer(emb_data, dtype=np.float32).reshape(-1, self.input_dim))
            if self.full:
                padding_needed = self.max_length - emb.size(0)
                emb = F.pad(emb, (0, 0, 0, padding_needed), value=0)
            embeddings.append(emb)
            labels.append(self.labels[i])
        conn.close()
        self.index = 0
        self.embeddings = embeddings
        self.current_labels = labels

    def __getitem__(self, idx):
        if self.index >= len(self.current_labels) or len(self.current_labels) == 0:
            self.read_embeddings()

        emb = self.embeddings[self.index]
        label = self.current_labels[self.index]

        self.index += 1

        if self.task_type == 'multilabel' or self.task_type == 'regression':
            label = torch.tensor(label, dtype=torch.float)
        else:
            label = torch.tensor(label, dtype=torch.long)

        return emb.squeeze(0), label


class EmbedsLabelsDataset(TorchDataset):
    def __init__(self, hf_dataset, emb_dict, col_name='seqs', label_col='labels', task_type='singlelabel', full=False, **kwargs):
        self.embeddings = self.get_embs(emb_dict, hf_dataset[col_name])
        self.full = full
        self.labels = hf_dataset[label_col]
        self.task_type = task_type
        self.max_length = len(max(hf_dataset[col_name], key=len))
        print_message(f'Max length: {self.max_length}')

    def __len__(self):
        return len(self.labels)

    def get_embs(self, emb_dict, seqs):
        embeddings = []
        for seq in tqdm(seqs, desc='Loading Embeddings'):
            emb = emb_dict[seq]
            embeddings.append(emb)
        return embeddings

    def __getitem__(self, idx):
        if self.task_type == 'multilabel' or self.task_type == 'regression':
            label = torch.tensor(self.labels[idx], dtype=torch.float)
        else:
            label = torch.tensor(self.labels[idx], dtype=torch.long)
        emb = self.embeddings[idx].float()
        if self.full:
            padding_needed = self.max_length - emb.size(0)
            emb = F.pad(emb, (0, 0, 0, padding_needed), value=0)
        return emb.squeeze(0), label
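# Example (a minimal sketch; `valid_set` is a Hugging Face split and `emb_dict`
# maps sequence -> tensor, both assumed to exist already). Pairs naturally with
# EmbedsLabelsCollator from data_collators.py:
#
#     >>> ds = EmbedsLabelsDataset(valid_set, emb_dict, task_type='singlelabel', full=False)
#     >>> loader = torch.utils.data.DataLoader(
#     ...     ds, batch_size=32,
#     ...     collate_fn=EmbedsLabelsCollator(full=False, task_type='singlelabel'))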


class StringLabelDataset(TorchDataset):
    def __init__(self, hf_dataset, col_name='seqs', label_col='labels', **kwargs):
        self.seqs = hf_dataset[col_name]
        self.labels = hf_dataset[label_col]
        self.lengths = [len(seq) for seq in self.seqs]

    def avg(self):
        return sum(self.lengths) / len(self.lengths)

    def __len__(self):
        return len(self.seqs)

    def __getitem__(self, idx):
        seq = self.seqs[idx]
        label = self.labels[idx]
        return seq, label


class PairStringLabelDataset(TorchDataset):
    def __init__(self, hf_dataset, col_a='SeqA', col_b='SeqB', label_col='labels', train=True, **kwargs):
        self.seqs_a, self.seqs_b = hf_dataset[col_a], hf_dataset[col_b]
        self.labels = hf_dataset[label_col]
        self.train = train

    def avg(self):
        return sum(len(seqa) + len(seqb) for seqa, seqb in zip(self.seqs_a, self.seqs_b)) / len(self.seqs_a)

    def __len__(self):
        return len(self.seqs_a)

    def __getitem__(self, idx):
        seq_a, seq_b = self.seqs_a[idx], self.seqs_b[idx]
        if self.train and random.random() < 0.5:
            seq_a, seq_b = seq_b, seq_a
        return seq_a, seq_b, self.labels[idx]
data/src/protify/data/dataset_utils.py
ADDED
@@ -0,0 +1,101 @@
from .supported_datasets import (
    supported_datasets,
    internal_datasets,
    possible_with_vector_reps,
    standard_data_benchmark,
    testing,
    residue_wise_problems
)


def list_supported_datasets(with_descriptions=True):
    """
    Lists all supported datasets with optional descriptions.

    Args:
        with_descriptions (bool): Whether to include descriptions (if available)
    """
    try:
        from .dataset_descriptions import dataset_descriptions
        has_descriptions = True
    except ImportError:
        has_descriptions = False

    if not with_descriptions or not has_descriptions:
        print("\n=== Supported Datasets ===\n")
        for dataset_name in supported_datasets:
            print(f"- {dataset_name}: {supported_datasets[dataset_name]}")
        return

    print("\n=== Supported Datasets ===\n")

    # Calculate maximum widths for formatting
    max_name_len = max(len(name) for name in supported_datasets)
    max_type_len = max(len(dataset_descriptions.get(name, {}).get('type', 'Unknown')) for name in supported_datasets if name in dataset_descriptions)
    max_task_len = max(len(dataset_descriptions.get(name, {}).get('task', 'Unknown')) for name in supported_datasets if name in dataset_descriptions)

    # Print header
    print(f"{'Dataset':<{max_name_len+2}}{'Type':<{max_type_len+2}}{'Task':<{max_task_len+2}}Description")
    print("-" * (max_name_len + max_type_len + max_task_len + 50))

    # Print dataset information
    for dataset_name in supported_datasets:
        if dataset_name in dataset_descriptions:
            dataset_info = dataset_descriptions[dataset_name]
            print(f"{dataset_name:<{max_name_len+2}}{dataset_info.get('type', 'Unknown'):<{max_type_len+2}}{dataset_info.get('task', 'Unknown'):<{max_task_len+2}}{dataset_info.get('description', 'No description available')}")
        else:
            print(f"{dataset_name:<{max_name_len+2}}{'Unknown':<{max_type_len+2}}{'Unknown':<{max_task_len+2}}No description available")

    print("\n=== Standard Benchmark Datasets ===\n")
    for dataset_name in standard_data_benchmark:
        print(f"- {dataset_name}")

    print("\n=== Residue-wise Datasets ===\n")
    for dataset_name in residue_wise_problems:
        print(f"- {dataset_name}")


def get_dataset_info(dataset_name):
    """
    Get detailed information about a specific dataset.

    Args:
        dataset_name (str): Name of the dataset

    Returns:
        dict: Dataset information or None if not found
    """
    try:
        from .dataset_descriptions import dataset_descriptions
        if dataset_name in dataset_descriptions:
            return dataset_descriptions[dataset_name]
    except ImportError:
        pass

    if dataset_name in supported_datasets:
        return {"name": dataset_name, "source": supported_datasets[dataset_name]}

    return None


if __name__ == "__main__":
    import sys
    import argparse

    parser = argparse.ArgumentParser(description='List and describe supported datasets')
    parser.add_argument('--list', action='store_true', help='List all supported datasets')
    parser.add_argument('--info', type=str, help='Get information about a specific dataset')
    args = parser.parse_args()

    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)

    if args.list:
        list_supported_datasets()

    if args.info:
        dataset_info = get_dataset_info(args.info)
        if dataset_info:
            print(f"\n=== Dataset: {args.info} ===\n")
            for key, value in dataset_info.items():
                print(f"{key.capitalize()}: {value}")
        else:
            print(f"Dataset '{args.info}' not found in supported datasets.")
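Beyond the CLI entry point above, the same information is available programmatically (a minimal sketch):

info = get_dataset_info('EC')
print(info)  # {'name': 'EC', 'source': 'GleghornLab/EC_reg'} when no descriptions file is installed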
data/src/protify/data/supported_datasets.py
ADDED
@@ -0,0 +1,99 @@

supported_datasets = {
    'EC': 'GleghornLab/EC_reg',
    'GO-CC': 'GleghornLab/CC_reg',
    'GO-BP': 'GleghornLab/BP_reg',
    'GO-MF': 'GleghornLab/MF_reg',
    'MB': 'GleghornLab/MB_reg',
    'DeepLoc-2': 'GleghornLab/DL2_reg',
    'DeepLoc-10': 'GleghornLab/DL10_reg',
    'enzyme-kcat': 'GleghornLab/enzyme_kcat',
    'solubility': 'GleghornLab/solubility_prediction',
    'localization': 'GleghornLab/localization_prediction',
    'temperature-stability': 'GleghornLab/temperature_stability',
    'peptide-HLA-MHC-affinity': 'GleghornLab/peptide_HLA_MHC_affinity_ppi',
    'optimal-temperature': 'GleghornLab/optimal_temperature',
    'optimal-ph': 'GleghornLab/optimal_ph',
    'material-production': 'GleghornLab/material_production',
    'fitness-prediction': 'GleghornLab/fitness_prediction',
    'number-of-folds': 'GleghornLab/fold_prediction',
    'cloning-clf': 'GleghornLab/cloning_clf',
    'stability-prediction': 'GleghornLab/stability_prediction',
    'human-ppi': 'GleghornLab/HPPI',
    'SecondaryStructure-3': 'GleghornLab/SS3',
    'SecondaryStructure-8': 'GleghornLab/SS8',
    'fluorescence-prediction': 'GleghornLab/fluorescence_prediction',
    'plastic': 'GleghornLab/plastic_degradation_benchmark',
    'gold-ppi': 'Synthyra/bernett_gold_ppi',
    'human-ppi-pinui': 'GleghornLab/HPPI_PiNUI',
    'yeast-ppi-pinui': 'GleghornLab/YPPI_PiNUI',
    'shs27-ppi': 'Synthyra/SHS27k',
    'shs148-ppi': 'Synthyra/SHS148k',
    'PPA-ppi': 'Synthyra/ProteinProteinAffinity',
    'synthyra-ppi': 'Synthyra/ppi_set_v5',
}

internal_datasets = {
    'synthyra-ppi': 'Synthyra/ppi_set_v5',
    'plastic': 'GleghornLab/plastic_degradation_benchmark',
}

possible_with_vector_reps = [
    # multi-label
    'EC',
    'GO-CC',
    'GO-BP',
    'GO-MF',
    # classification
    'MB',
    'DeepLoc-2',
    'DeepLoc-10',
    'solubility',
    'localization',
    'temperature-stability',
    'material-production',
    'fitness-prediction',
    'number-of-folds',
    'cloning-clf',
    'stability-prediction',
    # regression
    'enzyme-kcat',
    'optimal-temperature',
    'optimal-ph',
    # ppi
    'human-ppi',
    'PPA-ppi',
    'human-ppi-pinui',
    'yeast-ppi-pinui',
    'gold-ppi',
    'peptide-HLA-MHC-affinity',
]

standard_data_benchmark = [
    'EC',
    'GO-CC',
    'GO-BP',
    'GO-MF',
    'MB',
    'DeepLoc-2',
    'DeepLoc-10',
    'enzyme-kcat',
    'optimal-temperature',
    'optimal-ph',
    'fitness-prediction',
]

testing = [
    'EC',  # multilabel
    'DeepLoc-2',  # binary
    'DeepLoc-10',  # multiclass
    'enzyme-kcat',  # regression
    'human-ppi',  # ppi
]


residue_wise_problems = [
    'SecondaryStructure-3',
    'SecondaryStructure-8',
    'fluorescence-prediction',
]
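Names resolve to Hugging Face repository paths, so either form can be handed to `DataArguments` (a minimal sketch):

print(supported_datasets['gold-ppi'])   # Synthyra/bernett_gold_ppi
print('EC' in standard_data_benchmark)  # True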
data/src/protify/data/utils.py
ADDED
@@ -0,0 +1,31 @@
import torch
from typing import Optional, Tuple


def pad_and_concatenate_dimer(
        A: torch.Tensor,
        B: torch.Tensor,
        a_mask: Optional[torch.Tensor] = None,
        b_mask: Optional[torch.Tensor] = None) -> Tuple[torch.Tensor, torch.Tensor]:
    """
    Given two batched sequences A and B of shape (batch, L, d) with optional masks,
    drop each pair's padding, concatenate the valid tokens back to back, and
    re-pad to the longest combined length in the batch.
    """
    batch_size, L, d = A.size()
    if a_mask is None:
        a_mask = torch.ones(batch_size, L, device=A.device)
    if b_mask is None:
        b_mask = torch.ones(batch_size, L, device=A.device)
    # Compute the maximum (valid) length in the batch.
    max_len = max(
        int(a_mask[i].sum().item() + b_mask[i].sum().item())
        for i in range(batch_size)
    )
    combined = torch.zeros(batch_size, max_len, d, device=A.device)
    combined_mask = torch.zeros(batch_size, max_len, device=A.device)
    for i in range(batch_size):
        a_len = int(a_mask[i].sum().item())
        b_len = int(b_mask[i].sum().item())
        combined[i, :a_len] = A[i, :a_len]
        combined[i, a_len:a_len+b_len] = B[i, :b_len]
        combined_mask[i, :a_len+b_len] = 1
    return combined, combined_mask
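A toy shape check (values invented): two pairs with hidden size 8, where the second pair has fewer valid tokens, so the combined tensor is padded to the longest pair:

A, B = torch.randn(2, 5, 8), torch.randn(2, 5, 8)
a_mask = torch.tensor([[1, 1, 1, 1, 1], [1, 1, 1, 0, 0]]).float()
b_mask = torch.tensor([[1, 1, 1, 1, 0], [1, 1, 0, 0, 0]]).float()
combined, mask = pad_and_concatenate_dimer(A, B, a_mask, b_mask)
print(combined.shape, mask.sum(dim=1))  # torch.Size([2, 9, 8]) tensor([9., 5.])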
data/src/protify/embedder.py
ADDED
@@ -0,0 +1,426 @@
import os
import torch
import warnings
import sqlite3
import gzip
from torch.utils.data import Dataset, DataLoader
from tqdm.auto import tqdm
from dataclasses import dataclass
from typing import Optional, Callable, List, Tuple
from huggingface_hub import hf_hub_download
from base_models.get_base_models import get_base_model
from utils import torch_load, print_message


@dataclass
class EmbeddingArguments:
    def __init__(
        self,
        embedding_batch_size: int = 4,
        embedding_num_workers: int = 0,
        download_embeddings: bool = False,
        download_dir: str = 'Synthyra/mean_pooled_embeddings',
        matrix_embed: bool = False,
        embedding_pooling_types: List[str] = ['mean'],
        save_embeddings: bool = False,
        embed_dtype: torch.dtype = torch.float32,
        sql: bool = False,
        embedding_save_dir: str = 'embeddings',
        **kwargs
    ):
        self.batch_size = embedding_batch_size
        self.num_workers = embedding_num_workers
        self.download_embeddings = download_embeddings
        self.download_dir = download_dir
        self.matrix_embed = matrix_embed
        self.pooling_types = embedding_pooling_types
        self.save_embeddings = save_embeddings
        self.embed_dtype = embed_dtype
        self.sql = sql
        self.embedding_save_dir = embedding_save_dir


class Pooler:
    def __init__(self, pooling_types: List[str]):
        self.pooling_types = pooling_types
        self.pooling_options = {
            'mean': self.mean_pooling,
            'max': self.max_pooling,
            'norm': self.norm_pooling,
            'median': self.median_pooling,
            'std': self.std_pooling,
            'var': self.var_pooling,
            'cls': self.cls_pooling,
        }

    def mean_pooling(self, emb: torch.Tensor, attention_mask: Optional[torch.Tensor] = None):  # (b, L, d) -> (b, d)
        if attention_mask is None:
            return emb.mean(dim=1)
        else:
            attention_mask = attention_mask.unsqueeze(-1)
            return (emb * attention_mask).sum(dim=1) / attention_mask.sum(dim=1)

    def max_pooling(self, emb: torch.Tensor, attention_mask: Optional[torch.Tensor] = None):  # (b, L, d) -> (b, d)
        if attention_mask is None:
            return emb.max(dim=1).values
        else:
            attention_mask = attention_mask.unsqueeze(-1)
            return (emb * attention_mask).max(dim=1).values

    def norm_pooling(self, emb: torch.Tensor, attention_mask: Optional[torch.Tensor] = None):  # (b, L, d) -> (b, d)
        if attention_mask is None:
            return emb.norm(dim=1, p=2)
        else:
            attention_mask = attention_mask.unsqueeze(-1)
            return (emb * attention_mask).norm(dim=1, p=2)

    def median_pooling(self, emb: torch.Tensor, attention_mask: Optional[torch.Tensor] = None):  # (b, L, d) -> (b, d)
        if attention_mask is None:
            return emb.median(dim=1).values
        else:
            attention_mask = attention_mask.unsqueeze(-1)
            return (emb * attention_mask).median(dim=1).values

    def std_pooling(self, emb: torch.Tensor, attention_mask: Optional[torch.Tensor] = None):  # (b, L, d) -> (b, d)
        if attention_mask is None:
            return emb.std(dim=1)
        else:
            attention_mask = attention_mask.unsqueeze(-1)
            return (emb * attention_mask).std(dim=1)

    def var_pooling(self, emb: torch.Tensor, attention_mask: Optional[torch.Tensor] = None):  # (b, L, d) -> (b, d)
        if attention_mask is None:
            return emb.var(dim=1)
        else:
            attention_mask = attention_mask.unsqueeze(-1)
            return (emb * attention_mask).var(dim=1)

    def cls_pooling(self, emb: torch.Tensor, attention_mask: Optional[torch.Tensor] = None):  # (b, L, d) -> (b, d)
        return emb[:, 0, :]

    def __call__(self, emb: torch.Tensor, attention_mask: Optional[torch.Tensor] = None):  # e.g. ['mean', 'max']
        final_emb = []
        for pooling_type in self.pooling_types:
            final_emb.append(self.pooling_options[pooling_type](emb, attention_mask))  # (b, d)
        return torch.cat(final_emb, dim=-1)  # (b, n_pooling_types * d)
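# Example (a minimal sketch): pooling types are concatenated along the feature axis.
#
#     >>> pooler = Pooler(['mean', 'max'])
#     >>> emb, mask = torch.randn(2, 7, 16), torch.ones(2, 7)
#     >>> pooler(emb, mask).shape
#     torch.Size([2, 32])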
106 |
+
|
107 |
+
|
108 |
+
def pool_parti(X: torch.Tensor, attentions: Tuple[torch.Tensor], attention_mask: torch.Tensor) -> torch.Tensor:
    # X: (bs, seq_len, d)
    # attentions: num_layers of (bs, n_heads, seq_len, seq_len)
    # attention_mask: (bs, seq_len)
    bs, seq_len, _ = X.shape
    attentions = torch.stack(attentions, dim=1).float()  # (bs, n_layers, n_heads, seq_len, seq_len)
    att_mask = attention_mask[:, None, None, None, :].expand(bs, 1, 1, seq_len, seq_len)
    attentions = attentions * att_mask
    attentions = attentions.mean(dim=2)   # average over heads -> (bs, n_layers, seq_len, seq_len)
    attentions = attentions.mean(dim=1)   # average over layers -> (bs, seq_len, seq_len)
    attentions = attentions.mean(dim=-1)  # per-token attention weight -> (bs, seq_len)
    X = X * attentions.unsqueeze(-1)
    attention_mask = attention_mask.unsqueeze(-1)
    return (X * attention_mask).sum(dim=1) / attention_mask.sum(dim=1)  # (bs, d)


### Dataset for Embedding
class ProteinDataset(Dataset):
    """Simple dataset for protein sequences."""
    def __init__(self, sequences: List[str]):
        self.sequences = sequences

    def __len__(self) -> int:
        return len(self.sequences)

    def __getitem__(self, idx: int) -> str:
        return self.sequences[idx]


def build_collator(tokenizer) -> Callable[[List[str]], tuple[torch.Tensor, torch.Tensor]]:
    def _collate_fn(sequences: List[str]) -> tuple[torch.Tensor, torch.Tensor]:
        """Collate function for batching sequences."""
        return tokenizer(sequences, return_tensors="pt", padding='longest', pad_to_multiple_of=8)
    return _collate_fn


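# Illustrative usage only (not in the original source): wiring ProteinDataset,
# build_collator, and a DataLoader together. `tokenizer` stands in for any
# HF-style tokenizer; the two sequences are placeholder examples.
def _demo_dataloader(tokenizer) -> None:
    dataset = ProteinDataset(['MKTAYIAKQR', 'MSILVTRPSP'])
    loader = DataLoader(dataset, batch_size=2, collate_fn=build_collator(tokenizer))
    batch = next(iter(loader))
    # The collator pads to the longest sequence, rounded up to a multiple of 8
    print(batch['input_ids'].shape, batch['attention_mask'].shape)

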
class Embedder:
    def __init__(self, args: EmbeddingArguments, all_seqs: List[str]):
        self.args = args
        self.all_seqs = all_seqs
        self.batch_size = args.batch_size
        self.num_workers = args.num_workers
        self.matrix_embed = args.matrix_embed
        self.pooling_types = args.pooling_types
        self.download_embeddings = args.download_embeddings
        self.download_dir = args.download_dir
        self.save_embeddings = args.save_embeddings
        self.embed_dtype = args.embed_dtype
        self.sql = args.sql
        self.embedding_save_dir = args.embedding_save_dir

        self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        print_message(f'Device {self.device} found')

    def _download_embeddings(self, model_name: str):
        # download from download_dir, unzip, then move to embedding_save_dir
        try:
            local_path = hf_hub_download(
                repo_id=self.download_dir,
                filename=f'embeddings/{model_name}_{self.matrix_embed}.pth.gz',
                repo_type='dataset'
            )
        except Exception:
            print(f'No embeddings found for {model_name} in {self.download_dir}')
            return

        # unzip
        print_message(f'Unzipping {local_path}')
        with gzip.open(local_path, 'rb') as f_in:
            with open(local_path.replace('.gz', ''), 'wb') as f_out:
                f_out.write(f_in.read())
        # move to embedding_save_dir
        unzipped_path = local_path.replace('.gz', '')
        final_path = os.path.join(self.embedding_save_dir, f'{model_name}_{self.matrix_embed}.pth')

        if os.path.exists(final_path):
            print_message(f'Found existing embeddings in {final_path}')
            # Load downloaded embeddings
            downloaded_embeddings = torch_load(unzipped_path)
            existing_embeddings = torch_load(final_path)

            download_dtype = torch.float16
            if self.embed_dtype != download_dtype:
                print_message(f"Warning:\nDownloaded embeddings are {download_dtype} but the current setting is {self.embed_dtype}\nWhen combining with existing embeddings, this could result in unintended biases or reductions in performance")

            # Combine with existing embeddings
            print_message('Combining and casting')
            downloaded_embeddings.update(existing_embeddings)

            # Cast all embeddings to the correct dtype
            for seq in downloaded_embeddings:
                downloaded_embeddings[seq] = downloaded_embeddings[seq].to(self.embed_dtype)

            # Save the combined embeddings
            print_message(f'Saving combined embeddings to {final_path}')
            torch.save(downloaded_embeddings, final_path)
        else:
            print_message(f'Downloading embeddings from {self.download_dir}, no previous embeddings found')
            downloaded_embeddings = torch.load(unzipped_path)
            torch.save(downloaded_embeddings, final_path)
        return final_path

    def _read_sequences_from_db(self, db_path: str) -> set[str]:
        """Read sequences from SQLite database."""
        import sqlite3
        sequences = []
        with sqlite3.connect(db_path) as conn:
            c = conn.cursor()
            c.execute("SELECT sequence FROM embeddings")
            while True:
                row = c.fetchone()
                if row is None:
                    break
                sequences.append(row[0])
        return set(sequences)

    def _read_embeddings_from_disk(self, model_name: str):
        if self.sql:
            save_path = os.path.join(self.embedding_save_dir, f'{model_name}_{self.matrix_embed}.db')
            if os.path.exists(save_path):
                conn = sqlite3.connect(save_path)
                c = conn.cursor()
                c.execute('CREATE TABLE IF NOT EXISTS embeddings (sequence text PRIMARY KEY, embedding blob)')
                already_embedded = self._read_sequences_from_db(save_path)
                to_embed = [seq for seq in self.all_seqs if seq not in already_embedded]
                print_message(f"Loaded {len(already_embedded)} already embedded sequences from {save_path}\nEmbedding {len(to_embed)} new sequences")
                return to_embed, save_path, {}
            else:
                print_message(f"No embeddings found in {save_path}")
                return self.all_seqs, save_path, {}

        else:
            embeddings_dict = {}
            save_path = os.path.join(self.embedding_save_dir, f'{model_name}_{self.matrix_embed}.pth')
            if os.path.exists(save_path):
                print_message(f"Loading embeddings from {save_path}")
                embeddings_dict = torch_load(save_path)
                print_message(f"Loaded {len(embeddings_dict)} embeddings from {save_path}")
                # Cast existing embeddings to the specified dtype
                #for seq in embeddings_dict:
                #    embeddings_dict[seq] = embeddings_dict[seq].to(self.embed_dtype)
                to_embed = [seq for seq in self.all_seqs if seq not in embeddings_dict]
                return to_embed, save_path, embeddings_dict
            else:
                print_message(f"No embeddings found in {save_path}")
                return self.all_seqs, save_path, {}

    def _embed_sequences(
            self,
            to_embed: List[str],
            save_path: str,
            embedding_model: any,
            tokenizer: any,
            embeddings_dict: dict[str, torch.Tensor]) -> Optional[dict[str, torch.Tensor]]:
        os.makedirs(self.embedding_save_dir, exist_ok=True)
        model = embedding_model.to(self.device).eval()
        model = torch.compile(model)  # torch.compile returns the compiled module; keep the result
        device = self.device
        collate_fn = build_collator(tokenizer)
        print_message(f'Pooling types: {self.pooling_types}')
        if self.pooling_types[0] == 'parti':
            pooler = pool_parti
        elif not self.matrix_embed:
            pooler = Pooler(self.pooling_types)
        else:
            pooler = None

        def _get_embeddings(residue_embeddings: torch.Tensor, attention_mask: Optional[torch.Tensor] = None) -> torch.Tensor:
            if residue_embeddings.ndim == 2 or self.matrix_embed:  # sometimes already a vector embedding
                return residue_embeddings
            else:
                return pooler(residue_embeddings, attention_mask)

        dataset = ProteinDataset(to_embed)
        dataloader = DataLoader(dataset, batch_size=self.batch_size, num_workers=self.num_workers, collate_fn=collate_fn, shuffle=False)

        if self.sql:
            conn = sqlite3.connect(save_path)
            c = conn.cursor()
            c.execute('CREATE TABLE IF NOT EXISTS embeddings (sequence text PRIMARY KEY, embedding blob)')

        with torch.no_grad():
            for i, batch in tqdm(enumerate(dataloader), total=len(dataloader), desc='Embedding batches'):
                seqs = to_embed[i * self.batch_size:(i + 1) * self.batch_size]
                input_ids, attention_mask = batch['input_ids'].to(device), batch['attention_mask'].to(device)
                if self.pooling_types[0] == 'parti':
                    try:
                        residue_embeddings, attentions = model(input_ids, attention_mask, output_attentions=True)
                        embeddings = pooler(residue_embeddings, attentions, attention_mask).cpu()
                    except Exception as e:
                        print_message(f"Error in parti pooling: {e}\nDefaulting to mean pooling")
                        self.pooling_types = ['mean']
                        pooler = Pooler(self.pooling_types)
                        residue_embeddings = model(input_ids, attention_mask)
                        embeddings = pooler(residue_embeddings, attention_mask).cpu()
                else:
                    residue_embeddings = model(input_ids, attention_mask)
                    embeddings = _get_embeddings(residue_embeddings, attention_mask).cpu()

                for seq, emb, mask in zip(seqs, embeddings, attention_mask.cpu()):
                    if self.matrix_embed:
                        emb = emb[mask.bool()]

                    if self.sql:
                        c.execute("INSERT OR REPLACE INTO embeddings VALUES (?, ?)",
                                  (seq, emb.numpy().tobytes()))  # only supports float32
                    else:
                        embeddings_dict[seq] = emb.to(self.embed_dtype)

                if (i + 1) % 100 == 0 and self.sql:
                    conn.commit()

        if self.sql:
            conn.commit()
            conn.close()
            return None

        if self.save_embeddings:
            print_message(f"Saving embeddings to {save_path}")
            torch.save(embeddings_dict, save_path)

        return embeddings_dict

    def __call__(self, model_name: str):
        if self.download_embeddings:
            self._download_embeddings(model_name)

        if self.device == 'cpu':
            warnings.warn("Downloading embeddings is recommended for CPU usage - Embedding on CPU will be extremely slow!")
        to_embed, save_path, embeddings_dict = self._read_embeddings_from_disk(model_name)

        if len(to_embed) > 0:
            print_message(f"Embedding {len(to_embed)} sequences with {model_name}")
            model, tokenizer = get_base_model(model_name)
            return self._embed_sequences(to_embed, save_path, model, tokenizer, embeddings_dict)
        else:
            print_message(f"No sequences to embed with {model_name}")
            return None


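# Illustrative helper only (not in the original source): reading one embedding
# back out of the SQLite store written by _embed_sequences. float32 matches the
# tobytes() write path noted above; `seq` is whatever sequence key was stored.
def _demo_read_sql_embedding(db_path: str, seq: str) -> torch.Tensor:
    import numpy as np
    with sqlite3.connect(db_path) as conn:
        row = conn.execute(
            "SELECT embedding FROM embeddings WHERE sequence = ?", (seq,)
        ).fetchone()
    arr = np.frombuffer(row[0], dtype=np.float32).copy()  # copy: frombuffer is read-only
    return torch.from_numpy(arr)

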
if __name__ == '__main__':
    ### Embed all supported datasets with all supported models
    # py -m embedder
    import argparse
    from huggingface_hub import upload_file, login
    from data.supported_datasets import possible_with_vector_reps
    from data.data_mixin import DataArguments, DataMixin
    from base_models.get_base_models import BaseModelArguments, get_base_model

    os.environ['HF_HUB_DISABLE_SYMLINKS_WARNING'] = '1'  # prevent cache warning on Windows machines

    parser = argparse.ArgumentParser()
    parser.add_argument('--token', default=None, help='Huggingface token')
    parser.add_argument('--batch_size', type=int, default=4)
    parser.add_argument('--num_workers', type=int, default=0)
    parser.add_argument('--embed_dtype', type=str, default='float16')
    parser.add_argument('--embedding_save_dir', type=str, default='embeddings')
    parser.add_argument('--download_dir', type=str, default='Synthyra/mean_pooled_embeddings')
    args = parser.parse_args()

    if args.token is not None:
        login(args.token)

    if args.embed_dtype == 'float16':
        dtype = torch.float16
    elif args.embed_dtype == 'bfloat16':
        dtype = torch.bfloat16
    elif args.embed_dtype == 'float32':
        dtype = torch.float32
    else:
        raise ValueError(f"Invalid embedding dtype: {args.embed_dtype}")

    # Get data
    data_args = DataArguments(
        data_names=possible_with_vector_reps,
        max_length=1024,
        trim=False
    )
    all_seqs = DataMixin(data_args).get_data()[1]

    # Set up embedder
    embedder_args = EmbeddingArguments(
        batch_size=args.batch_size,
        num_workers=args.num_workers,
        download_embeddings=True,
        matrix_embed=False,
        pooling_types=['mean'],
        save_embeddings=True,
        embed_dtype=dtype,
        sql=False,
        embedding_save_dir='embeddings'
    )
    embedder = Embedder(embedder_args, all_seqs)

    # Embed for each model
    model_args = BaseModelArguments(model_names=['standard'])
    for model_name in model_args.model_names:
        _ = embedder(model_name)
        save_path = os.path.join(args.embedding_save_dir, f'{model_name}_False.pth')

        compressed_path = f"{save_path}.gz"
        print(f"Compressing {save_path} to {compressed_path}")
        with open(save_path, 'rb') as f_in:
            with gzip.open(compressed_path, 'wb') as f_out:
                f_out.write(f_in.read())
        upload_path = compressed_path
        path_in_repo = f'embeddings/{model_name}_False.pth.gz'

        upload_file(
            path_or_fileobj=upload_path,
            path_in_repo=path_in_repo,
            repo_id=args.download_dir,
            repo_type='dataset'
        )

    print('Done')

data/src/protify/github_banner.png
ADDED
[binary image tracked with Git LFS]
data/src/protify/gui.py
ADDED
@@ -0,0 +1,1046 @@
import torch
import tkinter as tk
import argparse
import queue
import traceback
import webbrowser
import os
from types import SimpleNamespace
from tkinter import ttk, messagebox, filedialog
from base_models.get_base_models import BaseModelArguments, standard_models
from data.supported_datasets import supported_datasets, standard_data_benchmark, internal_datasets
from embedder import EmbeddingArguments
from probes.get_probe import ProbeArguments
from probes.trainers import TrainerArguments
from main import MainProcess
from concurrent.futures import ThreadPoolExecutor
from data.data_mixin import DataArguments
from probes.scikit_classes import ScikitArguments
from utils import print_message, print_done, print_title
from visualization.plot_result import create_plots


os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"
os.environ["TF_ENABLE_ONEDNN_OPTS"] = "0"


class BackgroundTask:
    def __init__(self, target, *args, **kwargs):
        self.target = target
        self.args = args
        self.kwargs = kwargs
        self.result = None
        self.error = None
        self._complete = False

    def run(self):
        try:
            self.result = self.target(*self.args, **self.kwargs)
        except Exception as e:
            self.error = e
            print_message(f"Error in background task: {str(e)}")
            traceback.print_exc()
        finally:
            self._complete = True

    @property
    def complete(self):
        return self._complete


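# Illustrative only (not in the original source): the lifecycle of a
# BackgroundTask outside the GUI. Normally `run` is submitted to the
# ThreadPoolExecutor and `complete` is polled from the Tk event loop.
def _demo_background_task() -> None:
    task = BackgroundTask(sum, [1, 2, 3])
    task.run()
    assert task.complete and task.error is None and task.result == 6

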
class GUI(MainProcess):
    def __init__(self, master):
        super().__init__(argparse.Namespace(), GUI=True)  # Initialize MainProcess with an empty namespace
        self.master = master
        self.master.title("Settings GUI")
        self.master.geometry("600x800")

        icon = tk.PhotoImage(file="protify_logo.png")
        # Set the window icon
        self.master.iconphoto(True, icon)

        # Dictionary to store Tkinter variables for settings
        self.settings_vars = {}

        # Create the Notebook widget
        self.notebook = ttk.Notebook(master)
        self.notebook.pack(fill='both', expand=True)

        # Create frames for each settings tab
        self.info_tab = ttk.Frame(self.notebook)
        self.data_tab = ttk.Frame(self.notebook)
        self.embed_tab = ttk.Frame(self.notebook)
        self.model_tab = ttk.Frame(self.notebook)
        self.probe_tab = ttk.Frame(self.notebook)
        self.trainer_tab = ttk.Frame(self.notebook)
        self.scikit_tab = ttk.Frame(self.notebook)
        self.replay_tab = ttk.Frame(self.notebook)
        self.viz_tab = ttk.Frame(self.notebook)

        # Add tabs to the notebook
        self.notebook.add(self.info_tab, text="Info")
        self.notebook.add(self.model_tab, text="Model")
        self.notebook.add(self.data_tab, text="Data")
        self.notebook.add(self.embed_tab, text="Embedding")
        self.notebook.add(self.probe_tab, text="Probe")
        self.notebook.add(self.trainer_tab, text="Trainer")
        self.notebook.add(self.scikit_tab, text="Scikit")
        self.notebook.add(self.replay_tab, text="Replay")
        self.notebook.add(self.viz_tab, text="Visualization")

        # Set up background task handling
        self.task_queue = queue.Queue()
        self.thread_pool = ThreadPoolExecutor(max_workers=1)
        self.current_task = None

        # Start the queue checker
        self.check_task_queue()

        # Build each tab
        self.build_info_tab()
        self.build_model_tab()
        self.build_data_tab()
        self.build_embed_tab()
        self.build_probe_tab()
        self.build_trainer_tab()
        self.build_scikit_tab()
        self.build_replay_tab()
        self.build_viz_tab()

    def check_task_queue(self):
        """Periodically check for completed background tasks"""
        if self.current_task and self.current_task.complete:
            if self.current_task.error:
                print_message(f"Task failed: {self.current_task.error}")
            self.current_task = None

        if not self.current_task and not self.task_queue.empty():
            self.current_task = self.task_queue.get()
            self.thread_pool.submit(self.current_task.run)

        # Schedule next check
        self.master.after(100, self.check_task_queue)

    def run_in_background(self, target, *args, **kwargs):
        """Queue a task to run in background"""
        task = BackgroundTask(target, *args, **kwargs)
        self.task_queue.put(task)
        return task

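    # Note on the design (added comment): Tkinter widgets are not thread-safe,
    # so worker threads never touch the UI directly. Tasks run on the
    # single-worker ThreadPoolExecutor, and the Tk event loop polls their
    # completion every 100 ms via master.after() in check_task_queue above.
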
    def _open_url(self, url):
        """Open a URL in the default web browser"""
        webbrowser.open_new_tab(url)

    def build_info_tab(self):
        # Create a frame for IDs
        id_frame = ttk.LabelFrame(self.info_tab, text="Identification")
        id_frame.pack(fill="x", padx=10, pady=5)

        # Huggingface Username
        ttk.Label(id_frame, text="Huggingface Username:").grid(row=0, column=0, padx=10, pady=5, sticky="w")
        self.settings_vars["huggingface_username"] = tk.StringVar(value="Synthyra")
        entry_huggingface_username = ttk.Entry(id_frame, textvariable=self.settings_vars["huggingface_username"], width=30)
        entry_huggingface_username.grid(row=0, column=1, padx=10, pady=5)
        self.add_help_button(id_frame, 0, 2, "Your Hugging Face username for model downloads and uploads.")

        # Huggingface token
        ttk.Label(id_frame, text="Huggingface Token:").grid(row=1, column=0, padx=10, pady=5, sticky="w")
        self.settings_vars["huggingface_token"] = tk.StringVar(value="")
        entry_huggingface_token = ttk.Entry(id_frame, textvariable=self.settings_vars["huggingface_token"], width=30)
        entry_huggingface_token.grid(row=1, column=1, padx=10, pady=5)
        self.add_help_button(id_frame, 1, 2, "Your Hugging Face API token for accessing gated or private models.")

        # Wandb API key
        ttk.Label(id_frame, text="Wandb API Key:").grid(row=2, column=0, padx=10, pady=5, sticky="w")
        self.settings_vars["wandb_api_key"] = tk.StringVar(value="")
        entry_wandb_api_key = ttk.Entry(id_frame, textvariable=self.settings_vars["wandb_api_key"], width=30)
        entry_wandb_api_key.grid(row=2, column=1, padx=10, pady=5)
        self.add_help_button(id_frame, 2, 2, "Your Weights & Biases API key for experiment tracking.")

        # Synthyra API key
        ttk.Label(id_frame, text="Synthyra API Key:").grid(row=3, column=0, padx=10, pady=5, sticky="w")
        self.settings_vars["synthyra_api_key"] = tk.StringVar(value="")
        entry_synthyra_api_key = ttk.Entry(id_frame, textvariable=self.settings_vars["synthyra_api_key"], width=30)
        entry_synthyra_api_key.grid(row=3, column=1, padx=10, pady=5)
        self.add_help_button(id_frame, 3, 2, "Your Synthyra API key for accessing premium features.")

        # Create a frame for paths
        paths_frame = ttk.LabelFrame(self.info_tab, text="Paths")
        paths_frame.pack(fill="x", padx=10, pady=5)

        ttk.Label(paths_frame, text='Home Directory:').grid(row=0, column=0, padx=10, pady=5, sticky="w")
        self.settings_vars["home_dir"] = tk.StringVar(value=os.getcwd())
        entry_home_dir = ttk.Entry(paths_frame, textvariable=self.settings_vars["home_dir"], width=30)
        entry_home_dir.grid(row=0, column=1, padx=10, pady=5)
        self.add_help_button(paths_frame, 0, 2, "Home directory for Protify.")

        # Log directory
        ttk.Label(paths_frame, text="Log Directory:").grid(row=1, column=0, padx=10, pady=5, sticky="w")
        self.settings_vars["log_dir"] = tk.StringVar(value="logs")
        entry_log_dir = ttk.Entry(paths_frame, textvariable=self.settings_vars["log_dir"], width=30)
        entry_log_dir.grid(row=1, column=1, padx=10, pady=5)
        self.add_help_button(paths_frame, 1, 2, "Directory where log files will be stored.")

        # Results directory
        ttk.Label(paths_frame, text="Results Directory:").grid(row=2, column=0, padx=10, pady=5, sticky="w")
        self.settings_vars["results_dir"] = tk.StringVar(value="results")
        entry_results_dir = ttk.Entry(paths_frame, textvariable=self.settings_vars["results_dir"], width=30)
        entry_results_dir.grid(row=2, column=1, padx=10, pady=5)
        self.add_help_button(paths_frame, 2, 2, "Directory where results data will be stored.")

        # Model save directory
        ttk.Label(paths_frame, text="Model Save Directory:").grid(row=3, column=0, padx=10, pady=5, sticky="w")
        self.settings_vars["model_save_dir"] = tk.StringVar(value="weights")
        entry_model_save = ttk.Entry(paths_frame, textvariable=self.settings_vars["model_save_dir"], width=30)
        entry_model_save.grid(row=3, column=1, padx=10, pady=5)
        self.add_help_button(paths_frame, 3, 2, "Directory where trained models will be saved.")

        ttk.Label(paths_frame, text="Plots Directory:").grid(row=4, column=0, padx=10, pady=5, sticky="w")
        self.settings_vars["plots_dir"] = tk.StringVar(value="plots")
        entry_plots_dir = ttk.Entry(paths_frame, textvariable=self.settings_vars["plots_dir"], width=30)
        entry_plots_dir.grid(row=4, column=1, padx=10, pady=5)
        self.add_help_button(paths_frame, 4, 2, "Directory where plots and visualizations will be saved.")

        # Embedding save directory
        ttk.Label(paths_frame, text="Embedding Save Directory:").grid(row=5, column=0, padx=10, pady=5, sticky="w")
        self.settings_vars["embedding_save_dir"] = tk.StringVar(value="embeddings")
        entry_embed_save = ttk.Entry(paths_frame, textvariable=self.settings_vars["embedding_save_dir"], width=30)
        entry_embed_save.grid(row=5, column=1, padx=10, pady=5)
        self.add_help_button(paths_frame, 5, 2, "Directory where computed embeddings will be saved.")

        # Download directory
        ttk.Label(paths_frame, text="Download Directory:").grid(row=6, column=0, padx=10, pady=5, sticky="w")
        self.settings_vars["download_dir"] = tk.StringVar(value="Synthyra/mean_pooled_embeddings")
        entry_download = ttk.Entry(paths_frame, textvariable=self.settings_vars["download_dir"], width=30)
        entry_download.grid(row=6, column=1, padx=10, pady=5)
        self.add_help_button(paths_frame, 6, 2, "HuggingFace repository path for downloading pre-computed embeddings.")

        # button to start logging
        start_logging_button = ttk.Button(self.info_tab, text="Start session", command=self._session_start)
        start_logging_button.pack(pady=10)

        # Add logo and website link at the bottom of the info tab
        try:
            original_logo = tk.PhotoImage(file="synthyra_logo.png")
            # Make logo even smaller (subsample by factor of 3)
            logo = original_logo.subsample(3, 3)

            # Create frame to hold logo and button side by side
            bottom_frame = ttk.Frame(self.info_tab)
            bottom_frame.pack(pady=(10, 20), fill="x")

            # Place logo on the left side
            logo_label = ttk.Label(bottom_frame, image=logo, cursor="hand2")
            logo_label.image = logo  # Keep a reference to prevent garbage collection
            logo_label.pack(side=tk.LEFT, padx=(20, 10))
            # Bind click event to the logo
            logo_label.bind("<Button-1>", lambda e: self._open_url("https://synthyra.com"))

            # Add a "Visit Website" button on the right side
            visit_btn = ttk.Button(
                bottom_frame,
                text="Visit Synthyra.com",
                command=lambda: self._open_url("https://synthyra.com"),
                style="Link.TButton"
            )

            # Create a special style for the link button
            style = ttk.Style()
            style.configure("Link.TButton", font=("Helvetica", 12), foreground="blue")

            visit_btn.pack(side=tk.LEFT, padx=(10, 20), pady=10)

        except Exception as e:
            print_message(f"Error setting up logo and link: {str(e)}")

    def build_model_tab(self):
        ttk.Label(self.model_tab, text="Model Names:").grid(row=0, column=0, padx=10, pady=5, sticky="nw")

        self.model_listbox = tk.Listbox(self.model_tab, selectmode="extended", height=30)
        for model_name in standard_models:
            self.model_listbox.insert(tk.END, model_name)
        self.model_listbox.grid(row=0, column=1, padx=10, pady=5, sticky="nw")
        self.add_help_button(self.model_tab, 0, 2, "Select the language models to use for embedding. Multiple models can be selected.")

        run_button = ttk.Button(self.model_tab, text="Select Models", command=self._select_models)
        run_button.grid(row=99, column=0, columnspan=2, pady=(10, 10))

    def build_data_tab(self):
        # Max length (Spinbox)
        ttk.Label(self.data_tab, text="Max Sequence Length:").grid(row=0, column=0, padx=10, pady=5, sticky="w")
        self.settings_vars["max_length"] = tk.IntVar(value=1024)
        spin_max_length = ttk.Spinbox(
            self.data_tab,
            from_=1,
            to=32768,
            textvariable=self.settings_vars["max_length"]
        )
        spin_max_length.grid(row=0, column=1, padx=10, pady=5, sticky="w")
        self.add_help_button(self.data_tab, 0, 2, "Maximum length of sequences (in tokens) to process.")

        # Trim (Checkbox)
        ttk.Label(self.data_tab, text="Trim Sequences:").grid(row=1, column=0, padx=10, pady=5, sticky="w")
        self.settings_vars["trim"] = tk.BooleanVar(value=False)
        check_trim = ttk.Checkbutton(
            self.data_tab,
            variable=self.settings_vars["trim"]
        )
        check_trim.grid(row=1, column=1, padx=10, pady=5, sticky="w")
        self.add_help_button(self.data_tab, 1, 2, "Whether to trim sequences to the specified max length.")

        # Delimiter for data files
        ttk.Label(self.data_tab, text="Delimiter:").grid(row=2, column=0, padx=10, pady=5, sticky="w")
        self.settings_vars["delimiter"] = tk.StringVar(value=",")
        entry_delimiter = ttk.Entry(self.data_tab, textvariable=self.settings_vars["delimiter"], width=5)
        entry_delimiter.grid(row=2, column=1, padx=10, pady=5, sticky="w")
        self.add_help_button(self.data_tab, 2, 2, "Character used to separate columns in CSV data files.")

        # Column names for data files (comma-separated)
        ttk.Label(self.data_tab, text="Column Names (comma-separated):").grid(row=3, column=0, padx=10, pady=5, sticky="w")
        self.settings_vars["col_names"] = tk.StringVar(value="seqs,labels")
        entry_col_names = ttk.Entry(self.data_tab, textvariable=self.settings_vars["col_names"], width=20)
        entry_col_names.grid(row=3, column=1, padx=10, pady=5, sticky="w")
        self.add_help_button(self.data_tab, 3, 2, "Names of columns in data files, separate with commas.")

        # Label + Listbox for dataset names
        ttk.Label(self.data_tab, text="Dataset Names:").grid(row=4, column=0, padx=10, pady=5, sticky="nw")
        self.data_listbox = tk.Listbox(self.data_tab, selectmode="extended", height=25, width=25)
        for dataset_name in supported_datasets:
            if dataset_name not in internal_datasets:
                self.data_listbox.insert(tk.END, dataset_name)
        self.data_listbox.grid(row=4, column=1, padx=10, pady=5, sticky="nw")
        self.add_help_button(self.data_tab, 4, 2, "Select datasets to use. Multiple datasets can be selected.")

        run_button = ttk.Button(self.data_tab, text="Get Data", command=self._get_data)
        run_button.grid(row=99, column=0, columnspan=2, pady=(10, 10))

    def build_embed_tab(self):
        # batch_size
        ttk.Label(self.embed_tab, text="Batch Size:").grid(row=1, column=0, padx=10, pady=5, sticky="w")
        self.settings_vars["batch_size"] = tk.IntVar(value=4)
        spin_batch_size = ttk.Spinbox(self.embed_tab, from_=1, to=1024, textvariable=self.settings_vars["batch_size"])
        spin_batch_size.grid(row=1, column=1, padx=10, pady=5)
        self.add_help_button(self.embed_tab, 1, 2, "Number of sequences to process at once during embedding.")

        # num_workers
        ttk.Label(self.embed_tab, text="Num Workers:").grid(row=2, column=0, padx=10, pady=5, sticky="w")
        self.settings_vars["num_workers"] = tk.IntVar(value=0)
        spin_num_workers = ttk.Spinbox(self.embed_tab, from_=0, to=64, textvariable=self.settings_vars["num_workers"])
        spin_num_workers.grid(row=2, column=1, padx=10, pady=5)
        self.add_help_button(self.embed_tab, 2, 2, "Number of worker processes for data loading. 0 means main process only.")

        # download_embeddings
        ttk.Label(self.embed_tab, text="Download Embeddings:").grid(row=3, column=0, padx=10, pady=5, sticky="w")
        self.settings_vars["download_embeddings"] = tk.BooleanVar(value=False)
        check_download = ttk.Checkbutton(self.embed_tab, variable=self.settings_vars["download_embeddings"])
        check_download.grid(row=3, column=1, padx=10, pady=5, sticky="w")
        self.add_help_button(self.embed_tab, 3, 2, "Whether to download pre-computed embeddings from HuggingFace instead of computing them.")

        # matrix_embed
        ttk.Label(self.embed_tab, text="Matrix Embedding:").grid(row=4, column=0, padx=10, pady=5, sticky="w")
        self.settings_vars["matrix_embed"] = tk.BooleanVar(value=False)
        check_matrix = ttk.Checkbutton(self.embed_tab, variable=self.settings_vars["matrix_embed"])
        check_matrix.grid(row=4, column=1, padx=10, pady=5, sticky="w")
        self.add_help_button(self.embed_tab, 4, 2, "Whether to use matrix embedding (full embedding matrices) instead of pooled embeddings.")

        # pooling_types
        ttk.Label(self.embed_tab, text="Pooling Types (comma-separated):").grid(row=5, column=0, padx=10, pady=5, sticky="w")
        self.settings_vars["embedding_pooling_types"] = tk.StringVar(value="mean")
        entry_pooling = ttk.Entry(self.embed_tab, textvariable=self.settings_vars["embedding_pooling_types"], width=20)
        entry_pooling.grid(row=5, column=1, padx=10, pady=5)
        self.add_help_button(self.embed_tab, 5, 2, "Types of pooling to apply to embeddings, separate with commas.")

        ttk.Label(self.embed_tab, text="Options: mean, max, min, norm, prod, median, std, var, cls, parti").grid(row=6, column=0, columnspan=2, padx=10, pady=2, sticky="w")

        # embed_dtype
        ttk.Label(self.embed_tab, text="Embedding DType:").grid(row=7, column=0, padx=10, pady=5, sticky="w")
        self.settings_vars["embed_dtype"] = tk.StringVar(value="float32")
        combo_dtype = ttk.Combobox(
            self.embed_tab,
            textvariable=self.settings_vars["embed_dtype"],
            values=["float32", "float16", "bfloat16", "float8_e4m3fn", "float8_e5m2"]
        )
        combo_dtype.grid(row=7, column=1, padx=10, pady=5)
        self.add_help_button(self.embed_tab, 7, 2, "Data type to use for storing embeddings (affects precision and size).")

        # sql
        ttk.Label(self.embed_tab, text="Use SQL:").grid(row=8, column=0, padx=10, pady=5, sticky="w")
        self.settings_vars["sql"] = tk.BooleanVar(value=False)
        check_sql = ttk.Checkbutton(self.embed_tab, variable=self.settings_vars["sql"])
        check_sql.grid(row=8, column=1, padx=10, pady=5, sticky="w")
        self.add_help_button(self.embed_tab, 8, 2, "Whether to use SQL database for storing embeddings instead of files.")

        run_button = ttk.Button(self.embed_tab, text="Embed sequences to disk", command=self._get_embeddings)
        run_button.grid(row=99, column=0, columnspan=2, pady=(10, 10))

    def build_probe_tab(self):
        # Probe Type
        ttk.Label(self.probe_tab, text="Probe Type:").grid(row=0, column=0, padx=10, pady=5, sticky="w")
        self.settings_vars["probe_type"] = tk.StringVar(value="linear")
        combo_probe = ttk.Combobox(
            self.probe_tab,
            textvariable=self.settings_vars["probe_type"],
            values=["linear", "transformer", "retrievalnet"]
        )
        combo_probe.grid(row=0, column=1, padx=10, pady=5)
        self.add_help_button(self.probe_tab, 0, 2, "Type of probe architecture to use (linear, transformer, or retrievalnet).")

        # Tokenwise
        ttk.Label(self.probe_tab, text="Tokenwise:").grid(row=1, column=0, padx=10, pady=5, sticky="w")
        self.settings_vars["tokenwise"] = tk.BooleanVar(value=False)
        check_tokenwise = ttk.Checkbutton(self.probe_tab, variable=self.settings_vars["tokenwise"])
        check_tokenwise.grid(row=1, column=1, padx=10, pady=5, sticky="w")
        self.add_help_button(self.probe_tab, 1, 2, "Whether to use token-wise prediction (operate on each token) instead of sequence-level.")

        # Pre Layer Norm
        ttk.Label(self.probe_tab, text="Pre Layer Norm:").grid(row=2, column=0, padx=10, pady=5, sticky="w")
        self.settings_vars["pre_ln"] = tk.BooleanVar(value=True)
        check_pre_ln = ttk.Checkbutton(self.probe_tab, variable=self.settings_vars["pre_ln"])
        check_pre_ln.grid(row=2, column=1, padx=10, pady=5, sticky="w")
        self.add_help_button(self.probe_tab, 2, 2, "Whether to use pre-layer normalization in transformer architecture.")

        # Number of Layers
        ttk.Label(self.probe_tab, text="Number of Layers:").grid(row=3, column=0, padx=10, pady=5, sticky="w")
        self.settings_vars["n_layers"] = tk.IntVar(value=1)
        spin_n_layers = ttk.Spinbox(self.probe_tab, from_=1, to=100, textvariable=self.settings_vars["n_layers"])
        spin_n_layers.grid(row=3, column=1, padx=10, pady=5)
        self.add_help_button(self.probe_tab, 3, 2, "Number of layers in the probe architecture.")

        # Hidden Dimension
        ttk.Label(self.probe_tab, text="Hidden Dimension:").grid(row=4, column=0, padx=10, pady=5, sticky="w")
        self.settings_vars["hidden_dim"] = tk.IntVar(value=8192)
        spin_hidden_dim = ttk.Spinbox(self.probe_tab, from_=1, to=10000, textvariable=self.settings_vars["hidden_dim"])
        spin_hidden_dim.grid(row=4, column=1, padx=10, pady=5)
        self.add_help_button(self.probe_tab, 4, 2, "Size of hidden dimension in the probe model.")

        # Dropout
        ttk.Label(self.probe_tab, text="Dropout:").grid(row=5, column=0, padx=10, pady=5, sticky="w")
        self.settings_vars["dropout"] = tk.DoubleVar(value=0.2)
        spin_dropout = ttk.Spinbox(self.probe_tab, from_=0.0, to=1.0, increment=0.1, textvariable=self.settings_vars["dropout"])
        spin_dropout.grid(row=5, column=1, padx=10, pady=5)
        self.add_help_button(self.probe_tab, 5, 2, "Dropout probability for regularization (0.0-1.0).")

        # Transformer Probe Settings
        ttk.Label(self.probe_tab, text="=== Transformer Probe Settings ===").grid(row=6, column=0, columnspan=2, pady=10)

        # FF Dimension
        ttk.Label(self.probe_tab, text="Classifier Dimension:").grid(row=7, column=0, padx=10, pady=5, sticky="w")
        self.settings_vars["classifier_dim"] = tk.IntVar(value=4096)
        spin_classifier_dim = ttk.Spinbox(self.probe_tab, from_=1, to=10000, textvariable=self.settings_vars["classifier_dim"])
        spin_classifier_dim.grid(row=7, column=1, padx=10, pady=5)
        self.add_help_button(self.probe_tab, 7, 2, "Dimension of the classifier/feedforward layer in transformer probe.")

        # Classifier Dropout
        ttk.Label(self.probe_tab, text="Classifier Dropout:").grid(row=8, column=0, padx=10, pady=5, sticky="w")
        self.settings_vars["classifier_dropout"] = tk.DoubleVar(value=0.2)
        spin_class_dropout = ttk.Spinbox(self.probe_tab, from_=0.0, to=1.0, increment=0.1, textvariable=self.settings_vars["classifier_dropout"])
        spin_class_dropout.grid(row=8, column=1, padx=10, pady=5)
        self.add_help_button(self.probe_tab, 8, 2, "Dropout probability in the classifier layer (0.0-1.0).")

        # Number of Heads
        ttk.Label(self.probe_tab, text="Number of Heads:").grid(row=9, column=0, padx=10, pady=5, sticky="w")
        self.settings_vars["n_heads"] = tk.IntVar(value=4)
        spin_n_heads = ttk.Spinbox(self.probe_tab, from_=1, to=32, textvariable=self.settings_vars["n_heads"])
        spin_n_heads.grid(row=9, column=1, padx=10, pady=5)
        self.add_help_button(self.probe_tab, 9, 2, "Number of attention heads in transformer probe.")

        # Rotary
        ttk.Label(self.probe_tab, text="Rotary:").grid(row=10, column=0, padx=10, pady=5, sticky="w")
        self.settings_vars["rotary"] = tk.BooleanVar(value=True)
        check_rotary = ttk.Checkbutton(self.probe_tab, variable=self.settings_vars["rotary"])
        check_rotary.grid(row=10, column=1, padx=10, pady=5, sticky="w")
        self.add_help_button(self.probe_tab, 10, 2, "Whether to use rotary position embeddings in transformer.")

        # Pooling Types
        ttk.Label(self.probe_tab, text="Pooling Types (comma-separated):").grid(row=11, column=0, padx=10, pady=5, sticky="w")
        self.settings_vars["probe_pooling_types"] = tk.StringVar(value="mean, cls")
        entry_pooling = ttk.Entry(self.probe_tab, textvariable=self.settings_vars["probe_pooling_types"], width=20)
        entry_pooling.grid(row=11, column=1, padx=10, pady=5)
        self.add_help_button(self.probe_tab, 11, 2, "Types of pooling to use in the probe model, separate with commas.")

        # Transformer Dropout
        ttk.Label(self.probe_tab, text="Transformer Dropout:").grid(row=12, column=0, padx=10, pady=5, sticky="w")
        self.settings_vars["transformer_dropout"] = tk.DoubleVar(value=0.1)
        spin_transformer_dropout = ttk.Spinbox(self.probe_tab, from_=0.0, to=1.0, increment=0.1, textvariable=self.settings_vars["transformer_dropout"])
        spin_transformer_dropout.grid(row=12, column=1, padx=10, pady=5, sticky="w")
        self.add_help_button(self.probe_tab, 12, 2, "Dropout probability in the transformer layers (0.0-1.0).")

        # Save Model
        ttk.Label(self.probe_tab, text="Save Model:").grid(row=13, column=0, padx=10, pady=5, sticky="w")
        self.settings_vars["save_model"] = tk.BooleanVar(value=False)
        check_save_model = ttk.Checkbutton(self.probe_tab, variable=self.settings_vars["save_model"])
        check_save_model.grid(row=13, column=1, padx=10, pady=5, sticky="w")
        self.add_help_button(self.probe_tab, 13, 2, "Whether to save the trained probe model to disk.")

        # Production Model
        ttk.Label(self.probe_tab, text="Production Model:").grid(row=14, column=0, padx=10, pady=5, sticky="w")
        self.settings_vars["production_model"] = tk.BooleanVar(value=False)
        check_prod_model = ttk.Checkbutton(self.probe_tab, variable=self.settings_vars["production_model"])
        check_prod_model.grid(row=14, column=1, padx=10, pady=5, sticky="w")
        self.add_help_button(self.probe_tab, 14, 2, "Whether to prepare the model for production deployment.")

        # LoRA Settings Section
        ttk.Label(self.probe_tab, text="=== LoRA Settings ===").grid(row=15, column=0, columnspan=2, pady=10)

        # LoRA checkbox
        ttk.Label(self.probe_tab, text="Use LoRA:").grid(row=16, column=0, padx=10, pady=5, sticky="w")
        self.settings_vars["use_lora"] = tk.BooleanVar(value=False)
        check_lora = ttk.Checkbutton(self.probe_tab, variable=self.settings_vars["use_lora"])
        check_lora.grid(row=16, column=1, padx=10, pady=5, sticky="w")
        self.add_help_button(self.probe_tab, 16, 2, "Whether to use Low-Rank Adaptation (LoRA) for fine-tuning.")

        # LoRA r
        ttk.Label(self.probe_tab, text="LoRA r:").grid(row=17, column=0, padx=10, pady=5, sticky="w")
        self.settings_vars["lora_r"] = tk.IntVar(value=8)
        spin_lora_r = ttk.Spinbox(self.probe_tab, from_=1, to=128, textvariable=self.settings_vars["lora_r"])
        spin_lora_r.grid(row=17, column=1, padx=10, pady=5)
        self.add_help_button(self.probe_tab, 17, 2, "Rank parameter r for LoRA (lower = more efficient, higher = more expressive).")

        # LoRA alpha
        ttk.Label(self.probe_tab, text="LoRA alpha:").grid(row=18, column=0, padx=10, pady=5, sticky="w")
        self.settings_vars["lora_alpha"] = tk.DoubleVar(value=32.0)
        spin_lora_alpha = ttk.Spinbox(self.probe_tab, from_=1.0, to=128.0, increment=1.0, textvariable=self.settings_vars["lora_alpha"])
        spin_lora_alpha.grid(row=18, column=1, padx=10, pady=5)
        self.add_help_button(self.probe_tab, 18, 2, "Alpha parameter for LoRA, controls update scale.")

        # LoRA dropout
        ttk.Label(self.probe_tab, text="LoRA dropout:").grid(row=19, column=0, padx=10, pady=5, sticky="w")
        self.settings_vars["lora_dropout"] = tk.DoubleVar(value=0.01)
        spin_lora_dropout = ttk.Spinbox(self.probe_tab, from_=0.0, to=0.5, increment=0.01, textvariable=self.settings_vars["lora_dropout"])
        spin_lora_dropout.grid(row=19, column=1, padx=10, pady=5)
        self.add_help_button(self.probe_tab, 19, 2, "Dropout probability for LoRA layers (0.0-0.5).")

        # Add a button to create the probe
        run_button = ttk.Button(self.probe_tab, text="Save Probe Arguments", command=self._create_probe_args)
        run_button.grid(row=99, column=0, columnspan=2, pady=(10, 10))

    def build_trainer_tab(self):
        # Hybrid Probe checkbox
        ttk.Label(self.trainer_tab, text="Hybrid Probe:").grid(row=0, column=0, padx=10, pady=5, sticky="w")
        self.settings_vars["hybrid_probe"] = tk.BooleanVar(value=False)
        check_hybrid_probe = ttk.Checkbutton(self.trainer_tab, variable=self.settings_vars["hybrid_probe"])
        check_hybrid_probe.grid(row=0, column=1, padx=10, pady=5, sticky="w")
        self.add_help_button(self.trainer_tab, 0, 2, "Whether to use hybrid probe (combines neural and linear probes).")

        # Full finetuning checkbox
        ttk.Label(self.trainer_tab, text="Full Finetuning:").grid(row=1, column=0, padx=10, pady=5, sticky="w")
        self.settings_vars["full_finetuning"] = tk.BooleanVar(value=False)
        check_full_ft = ttk.Checkbutton(self.trainer_tab, variable=self.settings_vars["full_finetuning"])
        check_full_ft.grid(row=1, column=1, padx=10, pady=5, sticky="w")
        self.add_help_button(self.trainer_tab, 1, 2, "Whether to perform full finetuning of the entire model.")

        # num_epochs
        ttk.Label(self.trainer_tab, text="Number of Epochs:").grid(row=2, column=0, padx=10, pady=5, sticky="w")
        self.settings_vars["num_epochs"] = tk.IntVar(value=200)
        spin_num_epochs = ttk.Spinbox(self.trainer_tab, from_=1, to=1000, textvariable=self.settings_vars["num_epochs"])
        spin_num_epochs.grid(row=2, column=1, padx=10, pady=5)
        self.add_help_button(self.trainer_tab, 2, 2, "Number of training epochs (complete passes through the dataset).")

        # probe_batch_size
        ttk.Label(self.trainer_tab, text="Probe Batch Size:").grid(row=3, column=0, padx=10, pady=5, sticky="w")
        self.settings_vars["probe_batch_size"] = tk.IntVar(value=64)
        spin_probe_batch_size = ttk.Spinbox(self.trainer_tab, from_=1, to=1000, textvariable=self.settings_vars["probe_batch_size"])
        spin_probe_batch_size.grid(row=3, column=1, padx=10, pady=5)
        self.add_help_button(self.trainer_tab, 3, 2, "Batch size for probe training.")

        # base_batch_size
        ttk.Label(self.trainer_tab, text="Base Batch Size:").grid(row=4, column=0, padx=10, pady=5, sticky="w")
        self.settings_vars["base_batch_size"] = tk.IntVar(value=4)
        spin_base_batch_size = ttk.Spinbox(self.trainer_tab, from_=1, to=1000, textvariable=self.settings_vars["base_batch_size"])
        spin_base_batch_size.grid(row=4, column=1, padx=10, pady=5)
        self.add_help_button(self.trainer_tab, 4, 2, "Batch size for base model training.")

        # probe_grad_accum
        ttk.Label(self.trainer_tab, text="Probe Grad Accum:").grid(row=5, column=0, padx=10, pady=5, sticky="w")
        self.settings_vars["probe_grad_accum"] = tk.IntVar(value=1)
        spin_probe_grad_accum = ttk.Spinbox(self.trainer_tab, from_=1, to=100, textvariable=self.settings_vars["probe_grad_accum"])
        spin_probe_grad_accum.grid(row=5, column=1, padx=10, pady=5)
        self.add_help_button(self.trainer_tab, 5, 2, "Gradient accumulation steps for probe training.")

        # base_grad_accum
        ttk.Label(self.trainer_tab, text="Base Grad Accum:").grid(row=6, column=0, padx=10, pady=5, sticky="w")
        self.settings_vars["base_grad_accum"] = tk.IntVar(value=8)
        spin_base_grad_accum = ttk.Spinbox(self.trainer_tab, from_=1, to=100, textvariable=self.settings_vars["base_grad_accum"])
        spin_base_grad_accum.grid(row=6, column=1, padx=10, pady=5)
        self.add_help_button(self.trainer_tab, 6, 2, "Gradient accumulation steps for base model training.")

        # lr
        ttk.Label(self.trainer_tab, text="Learning Rate:").grid(row=7, column=0, padx=10, pady=5, sticky="w")
        self.settings_vars["lr"] = tk.DoubleVar(value=1e-4)
        spin_lr = ttk.Spinbox(self.trainer_tab, from_=1e-6, to=1e-2, increment=1e-5, textvariable=self.settings_vars["lr"])
        spin_lr.grid(row=7, column=1, padx=10, pady=5)
        self.add_help_button(self.trainer_tab, 7, 2, "Learning rate for optimizer. Controls step size during training.")

        # weight_decay
        ttk.Label(self.trainer_tab, text="Weight Decay:").grid(row=8, column=0, padx=10, pady=5, sticky="w")
        self.settings_vars["weight_decay"] = tk.DoubleVar(value=0.00)
        spin_weight_decay = ttk.Spinbox(self.trainer_tab, from_=0.0, to=1.0, increment=0.01, textvariable=self.settings_vars["weight_decay"])
        spin_weight_decay.grid(row=8, column=1, padx=10, pady=5)
        self.add_help_button(self.trainer_tab, 8, 2, "L2 regularization factor to prevent overfitting (0.0-1.0).")

        # patience
        ttk.Label(self.trainer_tab, text="Patience:").grid(row=9, column=0, padx=10, pady=5, sticky="w")
        self.settings_vars["patience"] = tk.IntVar(value=1)
        spin_patience = ttk.Spinbox(self.trainer_tab, from_=1, to=100, textvariable=self.settings_vars["patience"])
        spin_patience.grid(row=9, column=1, padx=10, pady=5, sticky="w")
        self.add_help_button(self.trainer_tab, 9, 2, "Number of epochs with no improvement after which training will stop.")

        # Random Seed
        ttk.Label(self.trainer_tab, text="Random Seed:").grid(row=10, column=0, padx=10, pady=5, sticky="w")
        self.settings_vars["seed"] = tk.IntVar(value=42)
        spin_seed = ttk.Spinbox(self.trainer_tab, from_=0, to=10000, textvariable=self.settings_vars["seed"])
        spin_seed.grid(row=10, column=1, padx=10, pady=5, sticky="w")
        self.add_help_button(self.trainer_tab, 10, 2, "Random seed for reproducibility of experiments.")

        run_button = ttk.Button(self.trainer_tab, text="Run trainer", command=self._run_trainer)
        run_button.grid(row=99, column=0, columnspan=2, pady=(10, 10))

    def build_scikit_tab(self):
        # Create a frame for scikit settings
        scikit_frame = ttk.LabelFrame(self.scikit_tab, text="Scikit-Learn Settings")
        scikit_frame.pack(fill="x", padx=10, pady=5)

        # Use Scikit
        ttk.Label(scikit_frame, text="Use Scikit:").grid(row=0, column=0, padx=10, pady=5, sticky="w")
        self.settings_vars["use_scikit"] = tk.BooleanVar(value=False)
        check_scikit = ttk.Checkbutton(scikit_frame, variable=self.settings_vars["use_scikit"])
        check_scikit.grid(row=0, column=1, padx=10, pady=5, sticky="w")
        self.add_help_button(scikit_frame, 0, 2, "Whether to use scikit-learn models instead of neural networks.")

        # Scikit Iterations
        ttk.Label(scikit_frame, text="Scikit Iterations:").grid(row=1, column=0, padx=10, pady=5, sticky="w")
        self.settings_vars["scikit_n_iter"] = tk.IntVar(value=10)
        spin_scikit_n_iter = ttk.Spinbox(scikit_frame, from_=1, to=1000, textvariable=self.settings_vars["scikit_n_iter"])
        spin_scikit_n_iter.grid(row=1, column=1, padx=10, pady=5, sticky="w")
        self.add_help_button(scikit_frame, 1, 2, "Number of iterations for iterative scikit-learn models.")

        # Scikit CV Folds
        ttk.Label(scikit_frame, text="Scikit CV Folds:").grid(row=2, column=0, padx=10, pady=5, sticky="w")
        self.settings_vars["scikit_cv"] = tk.IntVar(value=3)
        spin_scikit_cv = ttk.Spinbox(scikit_frame, from_=1, to=10, textvariable=self.settings_vars["scikit_cv"])
        spin_scikit_cv.grid(row=2, column=1, padx=10, pady=5, sticky="w")
        self.add_help_button(scikit_frame, 2, 2, "Number of cross-validation folds for model evaluation.")

        # Scikit Random State
        ttk.Label(scikit_frame, text="Scikit Random State:").grid(row=3, column=0, padx=10, pady=5, sticky="w")
        self.settings_vars["scikit_random_state"] = tk.IntVar(value=42)
        spin_scikit_rand = ttk.Spinbox(scikit_frame, from_=0, to=10000, textvariable=self.settings_vars["scikit_random_state"])
        spin_scikit_rand.grid(row=3, column=1, padx=10, pady=5, sticky="w")
        self.add_help_button(scikit_frame, 3, 2, "Random seed for scikit-learn models to ensure reproducibility.")

        # Scikit Model Name
        ttk.Label(scikit_frame, text="Scikit Model Name (optional):").grid(row=4, column=0, padx=10, pady=5, sticky="w")
        self.settings_vars["scikit_model_name"] = tk.StringVar(value="")
        entry_scikit_name = ttk.Entry(scikit_frame, textvariable=self.settings_vars["scikit_model_name"], width=30)
        entry_scikit_name.grid(row=4, column=1, padx=10, pady=5, sticky="w")
        self.add_help_button(scikit_frame, 4, 2, "Optional name for the scikit-learn model. Leave blank to use default.")

        # Number of Jobs/Processors
        ttk.Label(scikit_frame, text="Number of Jobs:").grid(row=5, column=0, padx=10, pady=5, sticky="w")
        self.settings_vars["n_jobs"] = tk.IntVar(value=1)
        spin_n_jobs = ttk.Spinbox(scikit_frame, from_=1, to=32, textvariable=self.settings_vars["n_jobs"])
        spin_n_jobs.grid(row=5, column=1, padx=10, pady=5, sticky="w")
        self.add_help_button(scikit_frame, 5, 2, "Number of CPU cores to use for parallel processing. Use -1 for all cores.")

        run_button = ttk.Button(self.scikit_tab, text="Run Scikit Models", command=self._run_scikit)
        run_button.pack(pady=(20, 10))

def build_replay_tab(self):
|
649 |
+
# Create a frame for replay settings
|
650 |
+
replay_frame = ttk.LabelFrame(self.replay_tab, text="Log Replay Settings")
|
651 |
+
replay_frame.pack(fill="x", padx=10, pady=5)
|
652 |
+
|
653 |
+
# Replay log path
|
654 |
+
ttk.Label(replay_frame, text="Replay Log Path:").grid(row=0, column=0, padx=10, pady=5, sticky="w")
|
655 |
+
self.settings_vars["replay_path"] = tk.StringVar(value="")
|
656 |
+
entry_replay = ttk.Entry(replay_frame, textvariable=self.settings_vars["replay_path"], width=40)
|
657 |
+
entry_replay.grid(row=0, column=1, padx=10, pady=5)
|
658 |
+
self.add_help_button(replay_frame, 0, 2, "Path to the log file to replay. Use Browse button to select a file.")
|
659 |
+
|
660 |
+
# Browse button for selecting log file
|
661 |
+
browse_button = ttk.Button(replay_frame, text="Browse", command=self._browse_replay_log)
|
662 |
+
browse_button.grid(row=0, column=2, padx=5, pady=5)
|
663 |
+
|
664 |
+
# Start replay button
|
665 |
+
replay_button = ttk.Button(replay_frame, text="Start Replay", command=self._start_replay)
|
666 |
+
replay_button.grid(row=1, column=0, columnspan=3, pady=20)
|
667 |
+
|
668 |
+
def build_viz_tab(self):
|
669 |
+
# Create a frame for visualization settings
|
670 |
+
viz_frame = ttk.LabelFrame(self.viz_tab, text="Visualization Settings")
|
671 |
+
viz_frame.pack(fill="x", padx=10, pady=5)
|
672 |
+
|
673 |
+
# Result ID entry
|
674 |
+
ttk.Label(viz_frame, text="Result ID:").grid(row=0, column=0, padx=10, pady=5, sticky="w")
|
675 |
+
self.settings_vars["result_id"] = tk.StringVar(value="")
|
676 |
+
entry_result_id = ttk.Entry(viz_frame, textvariable=self.settings_vars["result_id"], width=30)
|
677 |
+
entry_result_id.grid(row=0, column=1, padx=10, pady=5)
|
678 |
+
self.add_help_button(viz_frame, 0, 2, "ID of the result to visualize. Will look for results/{result_id}.tsv")
|
679 |
+
|
680 |
+
# Results file path
|
681 |
+
ttk.Label(viz_frame, text="Results File:").grid(row=1, column=0, padx=10, pady=5, sticky="w")
|
682 |
+
self.settings_vars["results_file"] = tk.StringVar(value="")
|
683 |
+
entry_results_file = ttk.Entry(viz_frame, textvariable=self.settings_vars["results_file"], width=30)
|
684 |
+
entry_results_file.grid(row=1, column=1, padx=10, pady=5)
|
685 |
+
|
686 |
+
# Browse button for selecting results file directly
|
687 |
+
browse_button = ttk.Button(viz_frame, text="Browse", command=self._browse_results_file)
|
688 |
+
browse_button.grid(row=1, column=2, padx=5, pady=5)
|
689 |
+
|
690 |
+
# Use current run checkbox
|
691 |
+
ttk.Label(viz_frame, text="Use Current Run:").grid(row=2, column=0, padx=10, pady=5, sticky="w")
|
692 |
+
self.settings_vars["use_current_run"] = tk.BooleanVar(value=True)
|
693 |
+
check_current_run = ttk.Checkbutton(viz_frame, variable=self.settings_vars["use_current_run"])
|
694 |
+
check_current_run.grid(row=2, column=1, padx=10, pady=5, sticky="w")
|
695 |
+
self.add_help_button(viz_frame, 2, 2, "Use results from the current run.")
|
696 |
+
|
697 |
+
# Output directory for plots
|
698 |
+
ttk.Label(viz_frame, text="Output Directory:").grid(row=3, column=0, padx=10, pady=5, sticky="w")
|
699 |
+
self.settings_vars["viz_output_dir"] = tk.StringVar(value="plots")
|
700 |
+
entry_output_dir = ttk.Entry(viz_frame, textvariable=self.settings_vars["viz_output_dir"], width=30)
|
701 |
+
entry_output_dir.grid(row=3, column=1, padx=10, pady=5)
|
702 |
+
self.add_help_button(viz_frame, 3, 2, "Directory where plots will be saved.")
|
703 |
+
|
704 |
+
|
705 |
+
# Generate plots button
|
706 |
+
generate_button = ttk.Button(viz_frame, text="Generate Plots", command=self._generate_plots)
|
707 |
+
generate_button.grid(row=99, column=0, columnspan=3, pady=20)
|
708 |
+
|
709 |
+
def add_help_button(self, parent, row, column, help_text):
|
710 |
+
"""Add a small help button that displays information when clicked"""
|
711 |
+
help_button = ttk.Button(parent, text="?", width=2,
|
712 |
+
command=lambda: messagebox.showinfo("Help", help_text))
|
713 |
+
help_button.grid(row=row, column=column, padx=(0,5), pady=5)
|
714 |
+
return help_button
|
715 |
+
|
716 |
+
def _session_start(self):
|
717 |
+
print_message("Starting Protify session...")
|
718 |
+
# Update session variables
|
719 |
+
hf_token = self.settings_vars["huggingface_token"].get()
|
720 |
+
synthyra_api_key = self.settings_vars["synthyra_api_key"].get()
|
721 |
+
wandb_api_key = self.settings_vars["wandb_api_key"].get()
|
722 |
+
|
723 |
+
def background_login():
|
724 |
+
if hf_token:
|
725 |
+
from huggingface_hub import login
|
726 |
+
login(hf_token)
|
727 |
+
print_message('Logged in to Hugging Face')
|
728 |
+
if wandb_api_key:
|
729 |
+
print_message('Wandb not integrated yet')
|
730 |
+
if synthyra_api_key:
|
731 |
+
print_message('Synthyra API not integrated yet')
|
732 |
+
|
733 |
+
self.full_args.hf_username = self.settings_vars["huggingface_username"].get()
|
734 |
+
self.full_args.hf_token = hf_token
|
735 |
+
self.full_args.synthyra_api_key = synthyra_api_key
|
736 |
+
self.full_args.wandb_api_key = wandb_api_key
|
737 |
+
self.full_args.home_dir = self.settings_vars["home_dir"].get()
|
738 |
+
|
739 |
+
def _make_true_dir(path):
|
740 |
+
true_path = os.path.join(self.full_args.home_dir, path)
|
741 |
+
os.makedirs(true_path, exist_ok=True)
|
742 |
+
return true_path
|
743 |
+
|
744 |
+
self.full_args.log_dir = _make_true_dir(self.settings_vars["log_dir"].get())
|
745 |
+
self.full_args.results_dir = _make_true_dir(self.settings_vars["results_dir"].get())
|
746 |
+
self.full_args.model_save_dir = _make_true_dir(self.settings_vars["model_save_dir"].get())
|
747 |
+
self.full_args.plots_dir = _make_true_dir(self.settings_vars["plots_dir"].get())
|
748 |
+
self.full_args.embedding_save_dir = _make_true_dir(self.settings_vars["embedding_save_dir"].get())
|
749 |
+
self.full_args.download_dir = _make_true_dir(self.settings_vars["download_dir"].get())
|
750 |
+
|
751 |
+
self.full_args.replay_path = None
|
752 |
+
self.logger_args = SimpleNamespace(**self.full_args.__dict__)
|
753 |
+
self.start_log_gui()
|
754 |
+
|
755 |
+
print_message(f"Session and logging started for id {self.random_id}")
|
756 |
+
print_done()
|
757 |
+
|
758 |
+
self.run_in_background(background_login)
|
759 |
+
|
760 |
+
def _select_models(self):
|
761 |
+
print_message("Selecting models...")
|
762 |
+
# Gather selected model names
|
763 |
+
selected_indices = self.model_listbox.curselection()
|
764 |
+
selected_models = [self.model_listbox.get(i) for i in selected_indices]
|
765 |
+
|
766 |
+
# If no selection, default to the entire standard_benchmark
|
767 |
+
if not selected_models:
|
768 |
+
selected_models = standard_models
|
769 |
+
|
770 |
+
# Update full_args with model settings
|
771 |
+
self.full_args.model_names = selected_models
|
772 |
+
print_message(self.full_args.model_names)
|
773 |
+
# Create model args from full args
|
774 |
+
self.model_args = BaseModelArguments(**self.full_args.__dict__)
|
775 |
+
|
776 |
+
print("Model Args:")
|
777 |
+
for k, v in self.model_args.__dict__.items():
|
778 |
+
if k != 'model_names':
|
779 |
+
print(f"{k}:\n{v}")
|
780 |
+
print("=========================\n")
|
781 |
+
args_dict = {k: v for k, v in self.full_args.__dict__.items() if k != 'all_seqs' and 'token' not in k.lower() and 'api' not in k.lower()}
|
782 |
+
self.logger_args = SimpleNamespace(**args_dict)
|
783 |
+
self._write_args()
|
784 |
+
print_done()
|
785 |
+
|
786 |
+
def _get_data(self):
|
787 |
+
print_message("=== Getting Data ===")
|
788 |
+
print_message("Loading and preparing datasets...")
|
789 |
+
|
790 |
+
# Gather settings
|
791 |
+
selected_indices = self.data_listbox.curselection()
|
792 |
+
selected_datasets = [self.data_listbox.get(i) for i in selected_indices]
|
793 |
+
|
794 |
+
if not selected_datasets:
|
795 |
+
selected_datasets = standard_data_benchmark
|
796 |
+
|
797 |
+
def background_get_data():
|
798 |
+
# Update full_args with data settings
|
799 |
+
self.full_args.data_names = selected_datasets
|
800 |
+
self.full_args.data_dirs = []
|
801 |
+
self.full_args.max_length = self.settings_vars["max_length"].get()
|
802 |
+
self.full_args.trim = self.settings_vars["trim"].get()
|
803 |
+
self.full_args.delimiter = self.settings_vars["delimiter"].get()
|
804 |
+
self.full_args.col_names = self.settings_vars["col_names"].get().split(",")
|
805 |
+
|
806 |
+
# Update mixin attributes
|
807 |
+
self._max_length = self.full_args.max_length
|
808 |
+
self._trim = self.full_args.trim
|
809 |
+
self._delimiter = self.full_args.delimiter
|
810 |
+
self._col_names = self.full_args.col_names
|
811 |
+
|
812 |
+
# Create data args and get datasets
|
813 |
+
self.data_args = DataArguments(**self.full_args.__dict__)
|
814 |
+
args_dict = {k: v for k, v in self.full_args.__dict__.items() if k != 'all_seqs' and 'token' not in k.lower() and 'api' not in k.lower()}
|
815 |
+
self.logger_args = SimpleNamespace(**args_dict)
|
816 |
+
|
817 |
+
self._write_args()
|
818 |
+
self.get_datasets()
|
819 |
+
print_message("Data downloaded and stored")
|
820 |
+
print_done()
|
821 |
+
|
822 |
+
self.run_in_background(background_get_data)
|
823 |
+
|
824 |
+
def _get_embeddings(self):
|
825 |
+
if not self.all_seqs:
|
826 |
+
print_message('Sequences are not loaded yet. Please run the data tab first.')
|
827 |
+
return
|
828 |
+
|
829 |
+
# Gather settings
|
830 |
+
print_message("Computing embeddings...")
|
831 |
+
pooling_str = self.settings_vars["embedding_pooling_types"].get().strip()
|
832 |
+
pooling_list = [p.strip() for p in pooling_str.split(",") if p.strip()]
|
833 |
+
dtype_str = self.settings_vars["embed_dtype"].get()
|
834 |
+
dtype_val = self.dtype_map.get(dtype_str, torch.float32)
|
835 |
+
|
836 |
+
def background_get_embeddings():
|
837 |
+
# Update full args
|
838 |
+
self.full_args.all_seqs = self.all_seqs
|
839 |
+
self.full_args.embedding_batch_size = self.settings_vars["batch_size"].get()
|
840 |
+
self.full_args.embedding_num_workers = self.settings_vars["num_workers"].get()
|
841 |
+
self.full_args.download_embeddings = self.settings_vars["download_embeddings"].get()
|
842 |
+
self.full_args.matrix_embed = self.settings_vars["matrix_embed"].get()
|
843 |
+
self.full_args.embedding_pooling_types = pooling_list
|
844 |
+
self.full_args.save_embeddings = True
|
845 |
+
self.full_args.embed_dtype = dtype_val
|
846 |
+
self.full_args.sql = self.settings_vars["sql"].get()
|
847 |
+
self._sql = self.full_args.sql
|
848 |
+
self._full = self.full_args.matrix_embed
|
849 |
+
|
850 |
+
self.embedding_args = EmbeddingArguments(**self.full_args.__dict__)
|
851 |
+
args_dict = {k: v for k, v in self.full_args.__dict__.items() if k != 'all_seqs' and 'token' not in k.lower() and 'api' not in k.lower()}
|
852 |
+
self.logger_args = SimpleNamespace(**args_dict)
|
853 |
+
self._write_args()
|
854 |
+
|
855 |
+
print_message("Saving embeddings to disk")
|
856 |
+
self.save_embeddings_to_disk()
|
857 |
+
print_message("Embeddings saved to disk")
|
858 |
+
print_done()
|
859 |
+
|
860 |
+
self.run_in_background(background_get_embeddings)
|
861 |
+
|
862 |
+
def _create_probe_args(self):
|
863 |
+
print_message("Creating probe arguments...")
|
864 |
+
|
865 |
+
# Convert pooling types string to list
|
866 |
+
probe_pooling_types = [p.strip() for p in self.settings_vars["probe_pooling_types"].get().split(",")]
|
867 |
+
|
868 |
+
# Update full_args with probe settings
|
869 |
+
self.full_args.probe_type = self.settings_vars["probe_type"].get()
|
870 |
+
self.full_args.tokenwise = self.settings_vars["tokenwise"].get()
|
871 |
+
self.full_args.hidden_dim = self.settings_vars["hidden_dim"].get()
|
872 |
+
self.full_args.dropout = self.settings_vars["dropout"].get()
|
873 |
+
self.full_args.n_layers = self.settings_vars["n_layers"].get()
|
874 |
+
self.full_args.pre_ln = self.settings_vars["pre_ln"].get()
|
875 |
+
self.full_args.classifier_dim = self.settings_vars["classifier_dim"].get()
|
876 |
+
self.full_args.transformer_dropout = self.settings_vars["transformer_dropout"].get()
|
877 |
+
self.full_args.classifier_dropout = self.settings_vars["classifier_dropout"].get()
|
878 |
+
self.full_args.n_heads = self.settings_vars["n_heads"].get()
|
879 |
+
self.full_args.rotary = self.settings_vars["rotary"].get()
|
880 |
+
self.full_args.probe_pooling_types = probe_pooling_types
|
881 |
+
self.full_args.save_model = self.settings_vars["save_model"].get()
|
882 |
+
self.full_args.production_model = self.settings_vars["production_model"].get()
|
883 |
+
self.full_args.use_lora = self.settings_vars["use_lora"].get()
|
884 |
+
self.full_args.lora_r = self.settings_vars["lora_r"].get()
|
885 |
+
self.full_args.lora_alpha = self.settings_vars["lora_alpha"].get()
|
886 |
+
self.full_args.lora_dropout = self.settings_vars["lora_dropout"].get()
|
887 |
+
|
888 |
+
# Create probe args from full args
|
889 |
+
self.probe_args = ProbeArguments(**self.full_args.__dict__)
|
890 |
+
|
891 |
+
print_message("Probe Arguments:")
|
892 |
+
for k, v in self.probe_args.__dict__.items():
|
893 |
+
if k != 'model_names':
|
894 |
+
print(f"{k}:\n{v}")
|
895 |
+
print("========================\n")
|
896 |
+
args_dict = {k: v for k, v in self.full_args.__dict__.items() if k != 'all_seqs' and 'token' not in k.lower() and 'api' not in k.lower()}
|
897 |
+
self.logger_args = SimpleNamespace(**args_dict)
|
898 |
+
self._write_args()
|
899 |
+
print_done()
|
900 |
+
|
901 |
+
def _run_trainer(self):
|
902 |
+
print_message("Starting training process...")
|
903 |
+
# Gather settings
|
904 |
+
self.full_args.use_lora = self.settings_vars["use_lora"].get()
|
905 |
+
self.full_args.hybrid_probe = self.settings_vars["hybrid_probe"].get()
|
906 |
+
self.full_args.full_finetuning = self.settings_vars["full_finetuning"].get()
|
907 |
+
self.full_args.lora_r = self.settings_vars["lora_r"].get()
|
908 |
+
self.full_args.lora_alpha = self.settings_vars["lora_alpha"].get()
|
909 |
+
self.full_args.lora_dropout = self.settings_vars["lora_dropout"].get()
|
910 |
+
self.full_args.num_epochs = self.settings_vars["num_epochs"].get()
|
911 |
+
self.full_args.trainer_batch_size = self.settings_vars["probe_batch_size"].get()
|
912 |
+
self.full_args.gradient_accumulation_steps = self.settings_vars["probe_grad_accum"].get()
|
913 |
+
self.full_args.lr = self.settings_vars["lr"].get()
|
914 |
+
self.full_args.weight_decay = self.settings_vars["weight_decay"].get()
|
915 |
+
self.full_args.patience = self.settings_vars["patience"].get()
|
916 |
+
self.full_args.seed = self.settings_vars["seed"].get()
|
917 |
+
|
918 |
+
def background_run_trainer():
|
919 |
+
self.trainer_args = TrainerArguments(**self.full_args.__dict__)
|
920 |
+
args_dict = {k: v for k, v in self.full_args.__dict__.items() if k != 'all_seqs' and 'token' not in k.lower() and 'api' not in k.lower()}
|
921 |
+
self.logger_args = SimpleNamespace(**args_dict)
|
922 |
+
self._write_args()
|
923 |
+
|
924 |
+
if self.full_args.full_finetuning:
|
925 |
+
self.run_full_finetuning()
|
926 |
+
elif self.full_args.hybrid_probe:
|
927 |
+
self.run_hybrid_probes()
|
928 |
+
else:
|
929 |
+
self.run_nn_probes()
|
930 |
+
print_done()
|
931 |
+
|
932 |
+
self.run_in_background(background_run_trainer)
|
933 |
+
|
934 |
+
def _run_scikit(self):
|
935 |
+
print_message("Running scikit-learn models...")
|
936 |
+
# Gather settings for scikit
|
937 |
+
self.full_args.use_scikit = self.settings_vars["use_scikit"].get()
|
938 |
+
self.full_args.scikit_n_iter = self.settings_vars["scikit_n_iter"].get()
|
939 |
+
self.full_args.scikit_cv = self.settings_vars["scikit_cv"].get()
|
940 |
+
self.full_args.scikit_random_state = self.settings_vars["scikit_random_state"].get()
|
941 |
+
self.full_args.scikit_model_name = self.settings_vars["scikit_model_name"].get()
|
942 |
+
self.full_args.n_jobs = self.settings_vars["n_jobs"].get()
|
943 |
+
|
944 |
+
def background_run_scikit():
|
945 |
+
self.scikit_args = ScikitArguments(**self.full_args.__dict__)
|
946 |
+
args_dict = {k: v for k, v in self.full_args.__dict__.items() if k != 'all_seqs' and 'token' not in k.lower() and 'api' not in k.lower()}
|
947 |
+
self.logger_args = SimpleNamespace(**args_dict)
|
948 |
+
self._write_args()
|
949 |
+
|
950 |
+
self.run_scikit_scheme()
|
951 |
+
print_done()
|
952 |
+
|
953 |
+
self.run_in_background(background_run_scikit)
|
954 |
+
|
955 |
+
def _browse_replay_log(self):
|
956 |
+
filename = filedialog.askopenfilename(
|
957 |
+
title="Select Replay Log",
|
958 |
+
filetypes=(("Txt files", "*.txt"), ("All files", "*.*"))
|
959 |
+
)
|
960 |
+
if filename:
|
961 |
+
self.settings_vars["replay_path"].set(filename)
|
962 |
+
|
963 |
+
def _start_replay(self):
|
964 |
+
replay_path = self.settings_vars["replay_path"].get()
|
965 |
+
if not replay_path:
|
966 |
+
print_message("Please select a replay log file first")
|
967 |
+
return
|
968 |
+
|
969 |
+
print_message("Starting replay from log file...")
|
970 |
+
|
971 |
+
def background_replay():
|
972 |
+
from logger import LogReplayer
|
973 |
+
replayer = LogReplayer(replay_path)
|
974 |
+
replay_args = replayer.parse_log()
|
975 |
+
replay_args.replay_path = replay_path
|
976 |
+
|
977 |
+
# Create a new MainProcess instance with replay_args
|
978 |
+
main = MainProcess(replay_args, GUI=False)
|
979 |
+
for k, v in main.full_args.__dict__.items():
|
980 |
+
print(f"{k}:\t{v}")
|
981 |
+
|
982 |
+
# Run the replay on this MainProcess instance
|
983 |
+
replayer.run_replay(main)
|
984 |
+
print_done()
|
985 |
+
|
986 |
+
self.run_in_background(background_replay)
|
987 |
+
|
988 |
+
def _browse_results_file(self):
|
989 |
+
filename = filedialog.askopenfilename(
|
990 |
+
title="Select Results File",
|
991 |
+
filetypes=(("TSV files", "*.tsv"), ("All files", "*.*"))
|
992 |
+
)
|
993 |
+
if filename:
|
994 |
+
self.settings_vars["results_file"].set(filename)
|
995 |
+
# Set use_current_run to False since we're selecting a specific file
|
996 |
+
self.settings_vars["use_current_run"].set(False)
|
997 |
+
|
998 |
+
def _generate_plots(self):
|
999 |
+
print_message("Generating visualization plots...")
|
1000 |
+
|
1001 |
+
# Determine which results file to use
|
1002 |
+
results_file = None
|
1003 |
+
|
1004 |
+
if self.settings_vars["use_current_run"].get() and hasattr(self, 'random_id'):
|
1005 |
+
# Use the current run's random ID
|
1006 |
+
results_file = os.path.join(self.settings_vars["results_dir"].get(), f"{self.random_id}.tsv")
|
1007 |
+
print_message(f"Using current run results: {results_file}")
|
1008 |
+
elif self.settings_vars["results_file"].get():
|
1009 |
+
# Use explicitly selected file
|
1010 |
+
results_file = self.settings_vars["results_file"].get()
|
1011 |
+
print_message(f"Using selected results file: {results_file}")
|
1012 |
+
elif self.settings_vars["result_id"].get():
|
1013 |
+
# Use the specified result ID
|
1014 |
+
result_id = self.settings_vars["result_id"].get()
|
1015 |
+
results_file = os.path.join(self.settings_vars["results_dir"].get(), f"{result_id}.tsv")
|
1016 |
+
print_message(f"Using results file for ID {result_id}: {results_file}")
|
1017 |
+
else:
|
1018 |
+
print_message("No results file specified. Please enter a Result ID, browse for a file, or complete a run first.")
|
1019 |
+
return
|
1020 |
+
|
1021 |
+
# Check if the results file exists
|
1022 |
+
if not os.path.exists(results_file):
|
1023 |
+
print_message(f"Results file not found: {results_file}")
|
1024 |
+
return
|
1025 |
+
|
1026 |
+
# Get output directory
|
1027 |
+
output_dir = self.settings_vars["viz_output_dir"].get()
|
1028 |
+
def background_generate_plots():
|
1029 |
+
# Call the plot generation function
|
1030 |
+
print_message(f"Generating plots in {output_dir}...")
|
1031 |
+
create_plots(results_file, output_dir)
|
1032 |
+
print_message("Plots generated successfully!")
|
1033 |
+
print_done()
|
1034 |
+
|
1035 |
+
self.run_in_background(background_generate_plots)
|
1036 |
+
|
1037 |
+
|
1038 |
+
def main():
|
1039 |
+
root = tk.Tk()
|
1040 |
+
app = GUI(root)
|
1041 |
+
print_title("Protify")
|
1042 |
+
root.mainloop()
|
1043 |
+
|
1044 |
+
|
1045 |
+
if __name__ == "__main__":
|
1046 |
+
main()
|
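Note on a recurring pattern above: every tab callback hands its long-running work to `self.run_in_background(...)`, which belongs to the GUI class but is not shown in this part of the diff. A minimal sketch of what such a helper could look like, assuming a simple daemon-thread approach (hypothetical, not the file's actual implementation):

    import threading

    def run_in_background(self, target):
        # Run `target` on a daemon thread so logins, data downloads, embedding,
        # and training do not freeze the Tk event loop.
        thread = threading.Thread(target=target, daemon=True)
        thread.start()
        return thread

Because Tkinter widgets are only safe to touch from the main thread, the background functions above report progress through print_message rather than updating widgets directly.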
data/src/protify/logger.py
ADDED
@@ -0,0 +1,287 @@
import logging
import functools
import json
import csv
import os
import datetime
import ast
import random
import string
import subprocess
import sys
from pathlib import Path
from types import SimpleNamespace
from utils import print_message


def log_method_calls(func):
    """Decorator to log each call of the decorated method."""
    @functools.wraps(func)
    def wrapper(self, *args, **kwargs):
        self.logger.info(f"Called method: {func.__name__}")
        return func(self, *args, **kwargs)
    return wrapper

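# Illustrative sketch (an annotation, not part of the original module): applying
# @log_method_calls to a pipeline method writes a line such as
# "INFO - Called method: _get_data" to the instance's log file, which is the
# pattern LogReplayer.parse_log() matches on below. The `Pipeline` class here
# is hypothetical.
#
#     class Pipeline(MetricsLogger):
#         @log_method_calls
#         def _get_data(self):
#             ...
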
class MetricsLogger:
    """
    Logs method calls to a text file, and keeps a TSV-based matrix of metrics:
      - Rows = dataset names
      - Columns = model names
      - Cells = JSON-encoded dictionaries of metrics
    """

    def __init__(self, args):
        self.logger_args = args
        self._section_break = '\n' + '=' * 55 + '\n'

    def _start_file(self):
        args = self.logger_args
        self.log_dir = args.log_dir
        self.results_dir = args.results_dir
        os.makedirs(self.log_dir, exist_ok=True)
        os.makedirs(self.results_dir, exist_ok=True)

        # Generate random ID with date and 4-letter code
        random_letters = ''.join(random.choices(string.ascii_uppercase, k=4))
        date_str = datetime.datetime.now().strftime('%Y-%m-%d-%H-%M')
        self.random_id = f"{date_str}_{random_letters}"

        if args.replay_path is not None:
            self.random_id = 'replay_' + args.replay_path.split('/')[-1].split('.')[0]
        self.log_file = os.path.join(self.log_dir, f"{self.random_id}.txt")
        self.results_file = os.path.join(self.results_dir, f"{self.random_id}.tsv")

    def _minimal_logger(self):
        # Set up a minimal logger
        self.logger = logging.getLogger(self.__class__.__name__)
        self.logger.setLevel(logging.INFO)

        # Avoid adding multiple handlers if re-instantiated
        if not self.logger.handlers:
            handler = logging.FileHandler(self.log_file, mode='a')
            handler.setLevel(logging.INFO)
            # Simple formatter without duplicating date/time
            formatter = logging.Formatter('%(levelname)s - %(message)s')
            handler.setFormatter(formatter)
            self.logger.addHandler(handler)

        # TSV tracking
        self.logger_data_tracking = {}  # { dataset_name: { model_name: metrics_dict } }

    def _write_args(self):
        with open(self.log_file, 'a') as f:
            f.write(self._section_break)
            for k, v in self.logger_args.__dict__.items():
                if 'token' not in k.lower() and 'api' not in k.lower():
                    f.write(f"{k}:\t{v}\n")
            f.write(self._section_break)

    def start_log_main(self):
        self._start_file()

        with open(self.log_file, 'w') as f:
            now = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
            if self.logger_args.replay_path is not None:
                message = f'=== REPLAY OF {self.logger_args.replay_path} ===\n'
                f.write(message)
            header = f"=== Logging session started at {now} ===\n"
            f.write(header)
        self._write_args()

        self._minimal_logger()

    def start_log_gui(self):
        self._start_file()
        with open(self.log_file, 'w') as f:
            now = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
            if self.logger_args.replay_path is not None:
                message = f'=== REPLAY OF {self.logger_args.replay_path} ===\n'
                f.write(message)
            header = f"=== Logging session started at {now} ===\n"
            f.write(header)
            f.write(self._section_break)
        self._minimal_logger()

    def load_tsv(self):
        """Load existing TSV data into self.logger_data_tracking (row=dataset, col=model)."""
        with open(self.results_file, 'r', newline='', encoding='utf-8') as f:
            reader = csv.reader(f, delimiter='\t')
            header = next(reader, None)
            if not header:
                return

            model_names = header[1:]
            for row in reader:
                if row:
                    ds = row[0]
                    self.logger_data_tracking[ds] = {}
                    for i, model in enumerate(model_names, start=1):
                        cell_val = row[i].strip()
                        if cell_val:
                            try:
                                self.logger_data_tracking[ds][model] = json.loads(cell_val)
                            except json.JSONDecodeError:
                                self.logger_data_tracking[ds][model] = {"_raw": cell_val}

    def write_results(self):
        # Get all unique datasets and models
        datasets = sorted(self.logger_data_tracking.keys())
        all_models = set()
        for ds_data in self.logger_data_tracking.values():
            all_models.update(ds_data.keys())

        # Calculate average eval_loss for each model
        model_scores = {}
        for model in all_models:
            losses = []
            for ds in datasets:
                if (ds in self.logger_data_tracking and
                        model in self.logger_data_tracking[ds] and
                        'eval_loss' in self.logger_data_tracking[ds][model]):
                    losses.append(self.logger_data_tracking[ds][model]['eval_loss'])
            if losses:
                model_scores[model] = sum(losses) / len(losses)
            else:
                model_scores[model] = float('inf')  # Models without eval_loss go last

        # Sort models by average eval_loss
        model_names = sorted(model_scores.keys(), key=lambda m: model_scores[m])

        with open(self.results_file, 'w', newline='', encoding='utf-8') as f:
            writer = csv.writer(f, delimiter='\t')
            writer.writerow(["dataset"] + model_names)
            for ds in datasets:
                row = [ds]
                for model in model_names:
                    # Get metrics if they exist, otherwise empty dict
                    metrics = self.logger_data_tracking.get(ds, {}).get(model, {})
                    row.append(json.dumps(metrics))
                writer.writerow(row)

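    # For orientation (an annotation with illustrative placeholder values, not
    # real results): the TSV written by write_results() looks like
    #
    #     dataset      model_a                          model_b
    #     dataset_1    {"eval_loss": 0.4, "f1": 0.9}    {"eval_loss": 0.6, "f1": 0.8}
    #
    # with one JSON dict per cell and model columns ordered by ascending average
    # eval_loss across datasets.
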
    def log_metrics(self, dataset, model, metrics_dict, split_name=None):
        try:
            # Remove time-related metrics
            metrics_dict = {k: v for k, v in metrics_dict.items()
                            if 'time' not in k.lower() and 'second' not in k.lower()}

            # Log the metrics
            if split_name is not None:
                self.logger.info(f"Storing metrics for {dataset}/{model} ({split_name}): {metrics_dict}")
            else:
                self.logger.info(f"Storing metrics for {dataset}/{model}: {metrics_dict}")

            # Initialize nested dictionaries if they don't exist
            if dataset not in self.logger_data_tracking:
                self.logger_data_tracking[dataset] = {}

            # Store the metrics
            self.logger_data_tracking[dataset][model] = metrics_dict

            # Write results after each update to ensure nothing is lost
            self.write_results()

        except Exception as e:
            self.logger.error(f"Error logging metrics for {dataset}/{model}: {str(e)}")

    def end_log(self):
        # Try multiple commands to get pip list
        pip_commands = [
            'python -m pip list',
            'py -m pip list',
            'pip list',
            'pip3 list',
            f'{sys.executable} -m pip list'  # Use current Python interpreter
        ]

        pip_list = "Could not retrieve pip list"
        for cmd in pip_commands:
            try:
                process = subprocess.run(cmd, shell=True, capture_output=True, text=True)
                if process.returncode == 0 and process.stdout.strip():
                    pip_list = process.stdout.strip()
                    break
            except Exception:
                continue

        # Try to get nvidia-smi output, handle case where it's not available
        try:
            nvidia_info = os.popen('nvidia-smi').read().strip()
        except Exception:
            nvidia_info = "nvidia-smi not available"

        # Get system info
        import platform
        system_info = {
            'platform': platform.platform(),
            'processor': platform.processor(),
            'machine': platform.machine()
        }

        # Get Python version and executable path
        python_version = platform.python_version()
        python_executable = sys.executable

        # Log all information with proper formatting
        self.logger.info(self._section_break)
        self.logger.info("System Information:")
        self.logger.info(f"Python Version: {python_version}")
        self.logger.info(f"Python Executable: {python_executable}")
        for key, value in system_info.items():
            self.logger.info(f"{key.title()}: {value}")

        self.logger.info("\nInstalled Packages:")
        self.logger.info(pip_list)

        self.logger.info("\nGPU Information:")
        self.logger.info(nvidia_info)
        self.logger.info(self._section_break)


class LogReplayer:
    def __init__(self, log_file_path):
        self.log_file = Path(log_file_path)
        self.arguments = {}
        self.method_calls = []

    def parse_log(self):
        """
        Reads the log file line by line. Extracts:
          1) Global arguments into self.arguments
          2) Method calls into self.method_calls (in order)
        """
        if not self.log_file.exists():
            raise FileNotFoundError(f"Log file not found: {self.log_file}")

        with open(self.log_file, 'r') as file:
            next(file)  # skip the session header line
            for line in file:
                if line.startswith('='):
                    continue
                elif line.startswith('INFO'):
                    method = line.split(': ')[-1].strip()
                    self.method_calls.append(method)
                elif ':\t' in line:
                    key, value = line.split(':\t')
                    key, value = key.strip(), value.strip()
                    try:
                        value = ast.literal_eval(value)
                    except (ValueError, SyntaxError):
                        pass
                    self.arguments[key] = value

        return SimpleNamespace(**self.arguments)

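    # Illustrative sketch of the log fragments parse_log() consumes (values are
    # hypothetical; argument lines are key/value pairs separated by a tab):
    #
    #     === Logging session started at 2025-01-01 12:00:00 ===
    #     lr:<TAB>0.0001
    #     num_epochs:<TAB>10
    #     INFO - Called method: _get_data
    #
    # Argument lines are literal-eval'd into self.arguments when possible, and
    # "INFO" lines are collected in order into self.method_calls.
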
    def run_replay(self, target_obj):
        """
        Replays the collected method calls on `target_obj`.
        `target_obj` is an instance of the class/script that we want to replay.
        """
        for method in self.method_calls:
            print_message(f"Replaying call to: {method}()")
            func = getattr(target_obj, method, None)
            if not func:
                print_message(f"Warning: {method} not found on target object.")
                continue
            func()
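

# Illustrative usage sketch, appended as an annotation (the argument values and
# metrics are hypothetical): wire up a MetricsLogger, record one metrics dict,
# and close out the log. Guarded so it only runs when executed directly.
if __name__ == "__main__":
    demo_args = SimpleNamespace(log_dir='logs', results_dir='results', replay_path=None)
    metrics_logger = MetricsLogger(demo_args)
    metrics_logger.start_log_main()
    metrics_logger.log_metrics('demo_dataset', 'demo_model', {'eval_loss': 0.5, 'accuracy': 0.9})
    metrics_logger.end_log()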