```bash
./run_clm_flax.py \
    --output_dir="./" \
    --model_type="gpt2" \
    --model_name_or_path="./" \
    --config_name="./" \
    --tokenizer_name="./" \
    --dataset_name="oscar" \
    --dataset_config_name="unshuffled_deduplicated_es" \
    --do_train \
    --do_eval \
    --block_size="512" \
    --per_device_train_batch_size="64" \
    --per_device_eval_batch_size="64" \
    --adafactor \
    --learning_rate="3e-3" --warmup_steps="1000" \
    --adam_beta1="0.9" --adam_beta2="0.98" --weight_decay="0.001" \
    --overwrite_output_dir \
    --num_train_epochs="20" \
    --logging_steps="100" \
    --save_steps="2500" \
    --eval_steps="1000000" \
    --preprocessing_num_workers="64"
```
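A note on the optimizer flags: since `--adafactor` is passed, `run_clm_flax.py` builds an Adafactor optimizer, and `--adam_beta1`, `--adam_beta2`, and `--weight_decay` only take effect on the default AdamW path, so they are effectively inert in this run. The sketch below illustrates that selection logic together with the linear warmup/decay schedule the script uses; the stand-in values (`total_train_steps`, the hard-coded hyperparameters) are assumptions for illustration, not values taken from the script itself.

```python
import optax

# Hypothetical stand-ins for values the script derives from the flags above.
total_train_steps = 100_000   # num_train_epochs * steps_per_epoch (assumption)
use_adafactor = True          # --adafactor is set in the command above
learning_rate, warmup_steps = 3e-3, 1_000

# Linear warmup to the peak LR, then linear decay to zero, as in run_clm_flax.py.
warmup_fn = optax.linear_schedule(0.0, learning_rate, transition_steps=warmup_steps)
decay_fn = optax.linear_schedule(learning_rate, 0.0,
                                 transition_steps=total_train_steps - warmup_steps)
schedule_fn = optax.join_schedules([warmup_fn, decay_fn], boundaries=[warmup_steps])

if use_adafactor:
    # With --adafactor, the Adam betas and weight decay flags are not used.
    optimizer = optax.adafactor(learning_rate=schedule_fn)
else:
    optimizer = optax.adamw(
        learning_rate=schedule_fn,
        b1=0.9,             # --adam_beta1
        b2=0.98,            # --adam_beta2
        weight_decay=0.001, # --weight_decay
    )
```

Similarly, `--eval_steps="1000000"` exceeds the likely total step count, so periodic evaluation during training is effectively skipped even though `--do_eval` is set.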