{
  "lora_r": 16,
  "lora_alpha": 16,
  "lora_dropout": 0.05,
  "lora_target_modules": "q_proj,v_proj",
  "tuning_strategy": "lora",
  "num_trainable_layers": 2,
  "output_dir": "sft_prefill/prompt_id_3/qwen2-VL-7B-Instruct-syn-count-lora",
  "num_train_epochs": 2,
  "learning_rate": 0.0002,
  "per_device_train_batch_size": 16,
  "per_device_eval_batch_size": 16,
  "gradient_accumulation_steps": 1,
  "logging_steps": 10,
  "eval_steps": 200,
  "save_steps": 200,
  "warmup_ratio": 0.03,
  "weight_decay": 0.0,
  "max_grad_norm": 0.3,
  "lr_scheduler_type": "constant",
  "bf16": true,
  "tf32": true,
  "gradient_checkpointing": true,
  "optim": "adamw_torch_fused",
  "ft_type": "SFT_Prefill",
  "data_type": "small",
  "prompt_id": 3
}
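For reference, a minimal sketch of how these fields could map onto a PEFT LoraConfig and Hugging Face TrainingArguments. The file path, the task_type value, and the added eval_strategy setting are assumptions for illustration, not part of the original config; fields such as tuning_strategy, ft_type, data_type, and prompt_id are presumably consumed by the training script itself rather than by PEFT or Transformers.

import json

from peft import LoraConfig
from transformers import TrainingArguments

# Load the hyperparameter file shown above (path is an assumption).
with open("sft_prefill_config.json") as f:
    cfg = json.load(f)

# LoRA adapter: rank 16, alpha 16, applied to the query/value
# projections only. task_type is an assumption for this sketch.
lora_config = LoraConfig(
    r=cfg["lora_r"],
    lora_alpha=cfg["lora_alpha"],
    lora_dropout=cfg["lora_dropout"],
    target_modules=cfg["lora_target_modules"].split(","),
    task_type="CAUSAL_LM",
)

# Optimization: constant LR schedule with a short warmup, bf16/tf32
# compute, fused AdamW, and gradient checkpointing, as in the config.
training_args = TrainingArguments(
    output_dir=cfg["output_dir"],
    num_train_epochs=cfg["num_train_epochs"],
    learning_rate=cfg["learning_rate"],
    per_device_train_batch_size=cfg["per_device_train_batch_size"],
    per_device_eval_batch_size=cfg["per_device_eval_batch_size"],
    gradient_accumulation_steps=cfg["gradient_accumulation_steps"],
    logging_steps=cfg["logging_steps"],
    eval_steps=cfg["eval_steps"],
    save_steps=cfg["save_steps"],
    warmup_ratio=cfg["warmup_ratio"],
    weight_decay=cfg["weight_decay"],
    max_grad_norm=cfg["max_grad_norm"],
    lr_scheduler_type=cfg["lr_scheduler_type"],
    bf16=cfg["bf16"],
    tf32=cfg["tf32"],
    gradient_checkpointing=cfg["gradient_checkpointing"],
    optim=cfg["optim"],
    # Added so eval_steps takes effect; named evaluation_strategy
    # in older transformers versions.
    eval_strategy="steps",
)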