add model
- README.md +45 -0
- adapter_config.json +40 -0
- head_config.json +20 -0
- pytorch_adapter.bin +3 -0
- pytorch_model_head.bin +3 -0
README.md
ADDED
@@ -0,0 +1,45 @@
---
tags:
- roberta
- adapter-transformers
datasets:
- glue
language:
- en
---

# Adapter `SALT-NLP/pfadapter-roberta-base-sst2-combined-value` for roberta-base

An [adapter](https://adapterhub.ml) for the `roberta-base` model that was trained on the [glue](https://huggingface.co/datasets/glue/) dataset and includes a prediction head for classification.

This adapter was created for use with the **[adapter-transformers](https://github.com/Adapter-Hub/adapter-transformers)** library.

## Usage

First, install `adapter-transformers`:

```bash
pip install -U adapter-transformers
```

_Note: adapter-transformers is a fork of transformers that acts as a drop-in replacement with adapter support. [More](https://docs.adapterhub.ml/installation.html)_

Now, the adapter can be loaded and activated like this:

```python
from transformers import AutoAdapterModel

model = AutoAdapterModel.from_pretrained("roberta-base")
adapter_name = model.load_adapter("SALT-NLP/pfadapter-roberta-base-sst2-combined-value", source="hf", set_active=True)
```

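With the adapter and its classification head active, sentiment predictions can be run directly. The snippet below is a minimal sketch (the example sentence is illustrative, not from the repo); it assumes the loading code above has been run, and maps logits to labels using the `label2id` mapping from `head_config.json`:

```python
from transformers import AutoTokenizer
import torch

tokenizer = AutoTokenizer.from_pretrained("roberta-base")

# Tokenize a single sentence and run a forward pass through the adapted model
inputs = tokenizer("a charming and often affecting journey.", return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits

# Label mapping taken from head_config.json ({"negative": 0, "positive": 1})
id2label = {0: "negative", 1: "positive"}
print(id2label[logits.argmax(dim=-1).item()])
```
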
## Architecture & Training

<!-- Add some description here -->

## Evaluation results

<!-- Add some description here -->

## Citation

<!-- Add some description here -->
adapter_config.json
ADDED
@@ -0,0 +1,40 @@
```json
{
  "config": {
    "adapter_residual_before_ln": false,
    "cross_adapter": false,
    "factorized_phm_W": true,
    "factorized_phm_rule": false,
    "hypercomplex_nonlinearity": "glorot-uniform",
    "init_weights": "bert",
    "inv_adapter": null,
    "inv_adapter_reduction_factor": null,
    "is_parallel": false,
    "learn_phm": true,
    "leave_out": [],
    "ln_after": false,
    "ln_before": false,
    "mh_adapter": false,
    "non_linearity": "relu",
    "original_ln_after": true,
    "original_ln_before": true,
    "output_adapter": true,
    "phm_bias": true,
    "phm_c_init": "normal",
    "phm_dim": 4,
    "phm_init_range": 0.0001,
    "phm_layer": false,
    "phm_rank": 1,
    "reduction_factor": 16,
    "residual_before_ln": true,
    "scaling": 1.0,
    "shared_W_phm": false,
    "shared_phm_rule": true,
    "use_gating": false
  },
  "hidden_size": 768,
  "model_class": "RobertaAdapterModel",
  "model_name": "roberta-base",
  "model_type": "roberta",
  "name": "sst2",
  "version": "3.1.0"
}
```
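This matches the Pfeiffer bottleneck configuration (consistent with the `pfadapter` repo name): a single adapter after the output block (`mh_adapter: false`, `output_adapter: true`), and with `reduction_factor: 16` the bottleneck projects the 768-dimensional hidden states down to 768 / 16 = 48. As a sketch of how an equivalent adapter could be set up for training, assuming adapter-transformers 3.x and its built-in `PfeifferConfig`:

```python
from transformers import AutoAdapterModel
from transformers.adapters import PfeifferConfig

model = AutoAdapterModel.from_pretrained("roberta-base")

# Mirrors the key fields of adapter_config.json above: relu non-linearity,
# hidden states reduced 768 -> 48 via reduction_factor=16
config = PfeifferConfig(reduction_factor=16, non_linearity="relu")
model.add_adapter("sst2", config=config)

# Freeze the base model weights; train only the adapter parameters
model.train_adapter("sst2")
```
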
head_config.json
ADDED
@@ -0,0 +1,20 @@
```json
{
  "config": {
    "activation_function": "tanh",
    "bias": true,
    "head_type": "classification",
    "label2id": {
      "negative": 0,
      "positive": 1
    },
    "layers": 2,
    "num_labels": 2,
    "use_pooler": false
  },
  "hidden_size": 768,
  "model_class": "RobertaAdapterModel",
  "model_name": "roberta-base",
  "model_type": "roberta",
  "name": "sst2",
  "version": "3.1.0"
}
```
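The head is a two-layer classification head with tanh activation over the final hidden state, without the pooler. Continuing the training sketch above, an equivalent head could be added with adapter-transformers' `add_classification_head` (the keyword arguments mirror the config fields):

```python
# Mirrors head_config.json: 2 layers, tanh activation, binary sentiment labels
model.add_classification_head(
    "sst2",
    num_labels=2,
    layers=2,
    activation_function="tanh",
    id2label={0: "negative", 1: "positive"},
    use_pooler=False,
)
```
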
pytorch_adapter.bin
ADDED
@@ -0,0 +1,3 @@
```
version https://git-lfs.github.com/spec/v1
oid sha256:8b9fcf39839cbd16140ad7a3d9aee3b957ba2b95bd42eb74a73e78df30dc1d6e
size 3595272
```
pytorch_model_head.bin
ADDED
@@ -0,0 +1,3 @@
```
version https://git-lfs.github.com/spec/v1
oid sha256:05ffaa47941bd27b0bae204eb3563818506372be7eb6462d9e942501e99cbf4b
size 2370171
```
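Both `.bin` entries are Git LFS pointer files (per the `git-lfs` spec line above); the actual tensors live in LFS storage and are fetched automatically by `load_adapter`. To pull a raw weight file directly, one option is `huggingface_hub` (a sketch; assumes the package is installed):

```python
from huggingface_hub import hf_hub_download

# Downloads the resolved adapter weights, not the LFS pointer
path = hf_hub_download(
    repo_id="SALT-NLP/pfadapter-roberta-base-sst2-combined-value",
    filename="pytorch_adapter.bin",
)
print(path)
```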