Upload cfg.yaml
cfg.yaml
ADDED
@@ -0,0 +1,94 @@
+architecture:
+    backbone_dtype: float16
+    force_embedding_gradients: false
+    gradient_checkpointing: true
+    intermediate_dropout: 0.0
+    pretrained: true
+    pretrained_weights: ''
+augmentation:
+    random_parent_probability: 0.0
+    skip_parent_probability: 0.0
+    token_mask_probability: 0.0
+dataset:
+    add_eos_token_to_answer: true
+    add_eos_token_to_prompt: true
+    answer_column: output
+    data_sample: 1.0
+    data_sample_choice:
+    - Train
+    - Validation
+    limit_chained_samples: true
+    mask_prompt_labels: true
+    parent_id_column: parent_id
+    prompt_column:
+    - instruction
+    text_answer_separator: <|answer|>
+    text_prompt_start: <|prompt|>
+    train_dataframe: data/user/oasst/train_full_allrank.pq
+    validation_dataframe: data/user/oasst/gpt4_val_v0.csv
+    validation_size: 0.01
+    validation_strategy: custom
+environment:
+    compile_model: false
+    find_unused_parameters: false
+    gpus:
+    - '0'
+    - '1'
+    - '2'
+    mixed_precision: true
+    number_of_workers: 8
+    seed: -1
+    trust_remote_code: false
+    use_fsdp: false
+experiment_name: h2ogpt-gm-oasst1-en-2048-open-llama-7b
+llm_backbone: openlm-research/open_llama_7b
+logging:
+    logger: Neptune
+    neptune_project: Zoo/h2o-llm
+    number_of_texts: 10
+output_directory: output/user/h2ogpt-gm-oasst1-en-2048-open-llama-7b/
+prediction:
+    batch_size_inference: 0
+    do_sample: false
+    max_length_inference: 1024
+    metric: GPT3.5
+    min_length_inference: 2
+    num_beams: 1
+    num_history: 2
+    repetition_penalty: 1.2
+    stop_tokens: ''
+    temperature: 0.3
+    top_k: 0
+    top_p: 1.0
+problem_type: text_causal_language_modeling
+tokenizer:
+    add_prefix_space: false
+    add_prompt_answer_tokens: false
+    max_length: 2048
+    max_length_answer: 1024
+    max_length_prompt: 2048
+    padding_quantile: 1.0
+    use_fast: false
+training:
+    batch_size: 3
+    differential_learning_rate: 1.0e-05
+    differential_learning_rate_layers: []
+    drop_last_batch: true
+    epochs: 1
+    evaluate_before_training: false
+    evaluation_epochs: 0.5
+    grad_accumulation: 1
+    gradient_clip: 0.0
+    learning_rate: 0.0001
+    lora: true
+    lora_alpha: 32
+    lora_dropout: 0.05
+    lora_r: 16
+    lora_target_modules: q_proj,k_proj,v_proj,o_proj,gate_proj,down_proj,up_proj
+    loss_function: TokenCrossEntropy
+    optimizer: AdamW
+    save_best_checkpoint: false
+    schedule: Cosine
+    train_validation_data: false
+    warmup_epochs: 0.0
+    weight_decay: 0.0
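
A minimal sketch of reading this config, assuming PyYAML is installed and cfg.yaml sits in the working directory. It also works out the effective batch size implied by the settings above: 3 samples per GPU, 3 GPUs, gradient accumulation of 1, so 9 samples per optimizer step.

    # Load cfg.yaml and derive the effective batch size from its values.
    import yaml

    with open("cfg.yaml") as f:
        cfg = yaml.safe_load(f)

    print(cfg["experiment_name"])  # h2ogpt-gm-oasst1-en-2048-open-llama-7b
    print(cfg["llm_backbone"])     # openlm-research/open_llama_7b

    # per-GPU batch_size * number of GPUs * grad_accumulation = 3 * 3 * 1 = 9
    effective_batch = (
        cfg["training"]["batch_size"]
        * len(cfg["environment"]["gpus"])
        * cfg["training"]["grad_accumulation"]
    )
    print(effective_batch)  # 9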
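
The dataset and tokenizer settings together define how each training sample is serialized: the prompt is prefixed with text_prompt_start, the answer follows text_answer_separator, and add_eos_token_to_prompt / add_eos_token_to_answer append the tokenizer's EOS token after each part. The hypothetical helper below illustrates that layout; it is not H2O LLM Studio's internal code, and the "</s>" EOS string is assumed from the LLaMA-family tokenizer of the backbone.

    # Hypothetical illustration of one serialized sample; "</s>" is an assumed EOS.
    EOS = "</s>"

    def format_sample(instruction: str, output: str) -> str:
        text = "<|prompt|>" + instruction  # text_prompt_start
        text += EOS                        # add_eos_token_to_prompt: true
        text += "<|answer|>" + output      # text_answer_separator
        text += EOS                        # add_eos_token_to_answer: true
        return text

    print(format_sample("Why is the sky blue?", "Because of Rayleigh scattering."))
    # <|prompt|>Why is the sky blue?</s><|answer|>Because of Rayleigh scattering.</s>

With mask_prompt_labels set to true, only the answer portion of this string contributes to the loss.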
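
For orientation, the lora_* entries in the training section correspond to the usual LoRA hyperparameters: rank 16, alpha 32 (so an alpha/r scaling factor of 2.0), dropout 0.05, applied to all attention and MLP projection matrices. A rough Hugging Face PEFT equivalent is sketched below; this is an assumed mapping for illustration, not how H2O LLM Studio constructs its adapters internally.

    # Assumed PEFT-style reading of the lora_* settings; illustrative only.
    from peft import LoraConfig, TaskType

    lora_config = LoraConfig(
        task_type=TaskType.CAUSAL_LM,
        r=16,               # lora_r
        lora_alpha=32,      # lora_alpha
        lora_dropout=0.05,  # lora_dropout
        target_modules="q_proj,k_proj,v_proj,o_proj,gate_proj,down_proj,up_proj".split(","),
    )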