{
  "best_global_step": 224,
  "best_metric": 0.9227272727272727,
  "best_model_checkpoint": "font-identifier/checkpoint-224",
  "epoch": 22.0,
  "eval_steps": 500,
  "global_step": 308,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.7272727272727273,
      "grad_norm": 9.430075645446777,
      "learning_rate": 6.428571428571429e-06,
      "loss": 3.3213,
      "step": 10
    },
    {
      "epoch": 1.0,
      "eval_accuracy": 0.06818181818181818,
      "eval_loss": 3.2453081607818604,
      "eval_runtime": 2.4333,
      "eval_samples_per_second": 90.411,
      "eval_steps_per_second": 5.753,
      "step": 14
    },
    {
      "epoch": 1.4363636363636363,
      "grad_norm": 9.360711097717285,
      "learning_rate": 1.357142857142857e-05,
      "loss": 3.1711,
      "step": 20
    },
    {
      "epoch": 2.0,
      "eval_accuracy": 0.12727272727272726,
      "eval_loss": 3.0051403045654297,
      "eval_runtime": 2.0612,
      "eval_samples_per_second": 106.735,
      "eval_steps_per_second": 6.792,
      "step": 28
    },
    {
      "epoch": 2.1454545454545455,
      "grad_norm": 8.500679016113281,
      "learning_rate": 2.0714285714285718e-05,
      "loss": 2.983,
      "step": 30
    },
    {
      "epoch": 2.8727272727272726,
      "grad_norm": 9.82868766784668,
      "learning_rate": 2.785714285714286e-05,
      "loss": 2.8729,
      "step": 40
    },
    {
      "epoch": 3.0,
      "eval_accuracy": 0.20909090909090908,
      "eval_loss": 2.6284613609313965,
      "eval_runtime": 2.0503,
      "eval_samples_per_second": 107.302,
      "eval_steps_per_second": 6.828,
      "step": 42
    },
    {
      "epoch": 3.581818181818182,
      "grad_norm": 7.700014114379883,
      "learning_rate": 3.5e-05,
      "loss": 2.562,
      "step": 50
    },
    {
      "epoch": 4.0,
      "eval_accuracy": 0.37727272727272726,
      "eval_loss": 2.160020112991333,
      "eval_runtime": 2.3625,
      "eval_samples_per_second": 93.121,
      "eval_steps_per_second": 5.926,
      "step": 56
    },
    {
      "epoch": 4.290909090909091,
      "grad_norm": 7.527115821838379,
      "learning_rate": 4.214285714285714e-05,
      "loss": 2.2406,
      "step": 60
    },
    {
      "epoch": 5.0,
      "grad_norm": 5.579476833343506,
      "learning_rate": 4.928571428571429e-05,
      "loss": 1.8675,
      "step": 70
    },
    {
      "epoch": 5.0,
      "eval_accuracy": 0.5363636363636364,
      "eval_loss": 1.639161229133606,
      "eval_runtime": 2.0879,
      "eval_samples_per_second": 105.372,
      "eval_steps_per_second": 6.705,
      "step": 70
    },
    {
      "epoch": 5.7272727272727275,
      "grad_norm": 6.313197135925293,
      "learning_rate": 4.928571428571429e-05,
      "loss": 1.6359,
      "step": 80
    },
    {
      "epoch": 6.0,
      "eval_accuracy": 0.6681818181818182,
      "eval_loss": 1.2266921997070312,
      "eval_runtime": 2.1098,
      "eval_samples_per_second": 104.274,
      "eval_steps_per_second": 6.636,
      "step": 84
    },
    {
      "epoch": 6.4363636363636365,
      "grad_norm": 5.8209004402160645,
      "learning_rate": 4.84920634920635e-05,
      "loss": 1.3499,
      "step": 90
    },
    {
      "epoch": 7.0,
      "eval_accuracy": 0.6818181818181818,
      "eval_loss": 1.0587564706802368,
      "eval_runtime": 2.9116,
      "eval_samples_per_second": 75.56,
      "eval_steps_per_second": 4.808,
      "step": 98
    },
    {
      "epoch": 7.1454545454545455,
      "grad_norm": 6.231616973876953,
      "learning_rate": 4.7698412698412706e-05,
      "loss": 1.2501,
      "step": 100
    },
    {
      "epoch": 7.872727272727273,
      "grad_norm": 5.9844584465026855,
      "learning_rate": 4.690476190476191e-05,
      "loss": 1.076,
      "step": 110
    },
    {
      "epoch": 8.0,
      "eval_accuracy": 0.6909090909090909,
      "eval_loss": 0.8790816068649292,
      "eval_runtime": 4.5841,
      "eval_samples_per_second": 47.992,
      "eval_steps_per_second": 3.054,
      "step": 112
    },
    {
      "epoch": 8.581818181818182,
      "grad_norm": 5.29907751083374,
      "learning_rate": 4.6111111111111115e-05,
      "loss": 0.9811,
      "step": 120
    },
    {
      "epoch": 9.0,
      "eval_accuracy": 0.7545454545454545,
      "eval_loss": 0.7573429346084595,
      "eval_runtime": 4.4185,
      "eval_samples_per_second": 49.79,
      "eval_steps_per_second": 3.168,
      "step": 126
    },
    {
      "epoch": 9.290909090909091,
      "grad_norm": 5.92799711227417,
      "learning_rate": 4.531746031746032e-05,
      "loss": 0.8655,
      "step": 130
    },
    {
      "epoch": 10.0,
      "grad_norm": 3.577127695083618,
      "learning_rate": 4.4523809523809525e-05,
      "loss": 0.7309,
      "step": 140
    },
    {
      "epoch": 10.0,
      "eval_accuracy": 0.7818181818181819,
      "eval_loss": 0.6194924712181091,
      "eval_runtime": 4.0981,
      "eval_samples_per_second": 53.683,
      "eval_steps_per_second": 3.416,
      "step": 140
    },
    {
      "epoch": 10.727272727272727,
      "grad_norm": 5.252236843109131,
      "learning_rate": 4.373015873015873e-05,
      "loss": 0.7776,
      "step": 150
    },
    {
      "epoch": 11.0,
      "eval_accuracy": 0.8,
      "eval_loss": 0.5426079034805298,
      "eval_runtime": 4.0014,
      "eval_samples_per_second": 54.981,
      "eval_steps_per_second": 3.499,
      "step": 154
    },
    {
      "epoch": 11.436363636363636,
      "grad_norm": 4.398848533630371,
      "learning_rate": 4.2936507936507935e-05,
      "loss": 0.7365,
      "step": 160
    },
    {
      "epoch": 12.0,
      "eval_accuracy": 0.8772727272727273,
      "eval_loss": 0.40285420417785645,
      "eval_runtime": 4.3238,
      "eval_samples_per_second": 50.882,
      "eval_steps_per_second": 3.238,
      "step": 168
    },
    {
      "epoch": 12.145454545454545,
      "grad_norm": 5.8070549964904785,
      "learning_rate": 4.214285714285714e-05,
      "loss": 0.6116,
      "step": 170
    },
    {
      "epoch": 12.872727272727273,
      "grad_norm": 6.383784770965576,
      "learning_rate": 4.134920634920635e-05,
      "loss": 0.5767,
      "step": 180
    },
    {
      "epoch": 13.0,
      "eval_accuracy": 0.8363636363636363,
      "eval_loss": 0.4417766332626343,
      "eval_runtime": 4.8355,
      "eval_samples_per_second": 45.497,
      "eval_steps_per_second": 2.895,
      "step": 182
    },
    {
      "epoch": 13.581818181818182,
      "grad_norm": 4.959994316101074,
      "learning_rate": 4.055555555555556e-05,
      "loss": 0.5838,
      "step": 190
    },
    {
      "epoch": 14.0,
      "eval_accuracy": 0.8818181818181818,
      "eval_loss": 0.35380780696868896,
      "eval_runtime": 3.8077,
      "eval_samples_per_second": 57.777,
      "eval_steps_per_second": 3.677,
      "step": 196
    },
    {
      "epoch": 14.290909090909091,
      "grad_norm": 5.2857770919799805,
      "learning_rate": 3.976190476190476e-05,
      "loss": 0.4826,
      "step": 200
    },
    {
      "epoch": 15.0,
      "grad_norm": 3.971461296081543,
      "learning_rate": 3.896825396825397e-05,
      "loss": 0.4491,
      "step": 210
    },
    {
      "epoch": 15.0,
      "eval_accuracy": 0.8636363636363636,
      "eval_loss": 0.3833666145801544,
      "eval_runtime": 4.0015,
      "eval_samples_per_second": 54.979,
      "eval_steps_per_second": 3.499,
      "step": 210
    },
    {
      "epoch": 15.727272727272727,
      "grad_norm": 4.3175764083862305,
      "learning_rate": 3.817460317460317e-05,
      "loss": 0.5056,
      "step": 220
    },
    {
      "epoch": 16.0,
      "eval_accuracy": 0.9227272727272727,
      "eval_loss": 0.27014854550361633,
      "eval_runtime": 3.7487,
      "eval_samples_per_second": 58.687,
      "eval_steps_per_second": 3.735,
      "step": 224
    },
    {
      "epoch": 16.436363636363637,
      "grad_norm": 4.672767162322998,
      "learning_rate": 3.7380952380952386e-05,
      "loss": 0.4364,
      "step": 230
    },
    {
      "epoch": 17.0,
      "eval_accuracy": 0.8818181818181818,
      "eval_loss": 0.3142429292201996,
      "eval_runtime": 3.9413,
      "eval_samples_per_second": 55.819,
      "eval_steps_per_second": 3.552,
      "step": 238
    },
    {
      "epoch": 17.145454545454545,
      "grad_norm": 5.194856643676758,
      "learning_rate": 3.658730158730159e-05,
      "loss": 0.4631,
      "step": 240
    },
    {
      "epoch": 17.87272727272727,
      "grad_norm": 5.319342136383057,
      "learning_rate": 3.5793650793650795e-05,
      "loss": 0.364,
      "step": 250
    },
    {
      "epoch": 18.0,
      "eval_accuracy": 0.9136363636363637,
      "eval_loss": 0.2616939842700958,
      "eval_runtime": 3.7398,
      "eval_samples_per_second": 58.827,
      "eval_steps_per_second": 3.744,
      "step": 252
    },
    {
      "epoch": 18.581818181818182,
      "grad_norm": 5.951942443847656,
      "learning_rate": 3.5e-05,
      "loss": 0.3845,
      "step": 260
    },
    {
      "epoch": 19.0,
      "eval_accuracy": 0.8818181818181818,
      "eval_loss": 0.3091982901096344,
      "eval_runtime": 4.1464,
      "eval_samples_per_second": 53.059,
      "eval_steps_per_second": 3.376,
      "step": 266
    },
    {
      "epoch": 19.29090909090909,
      "grad_norm": 4.990575790405273,
      "learning_rate": 3.420634920634921e-05,
      "loss": 0.4096,
      "step": 270
    },
    {
      "epoch": 20.0,
      "grad_norm": 4.905520439147949,
      "learning_rate": 3.3412698412698413e-05,
      "loss": 0.3873,
      "step": 280
    },
    {
      "epoch": 20.0,
      "eval_accuracy": 0.9136363636363637,
      "eval_loss": 0.23085929453372955,
      "eval_runtime": 4.0568,
      "eval_samples_per_second": 54.23,
      "eval_steps_per_second": 3.451,
      "step": 280
    },
    {
      "epoch": 20.727272727272727,
      "grad_norm": 3.991994857788086,
      "learning_rate": 3.261904761904762e-05,
      "loss": 0.3397,
      "step": 290
    },
    {
      "epoch": 21.0,
      "eval_accuracy": 0.9181818181818182,
      "eval_loss": 0.22669094800949097,
      "eval_runtime": 3.7565,
      "eval_samples_per_second": 58.566,
      "eval_steps_per_second": 3.727,
      "step": 294
    },
    {
      "epoch": 21.436363636363637,
      "grad_norm": 3.291044235229492,
      "learning_rate": 3.182539682539682e-05,
      "loss": 0.3731,
      "step": 300
    },
    {
      "epoch": 22.0,
      "eval_accuracy": 0.9136363636363637,
      "eval_loss": 0.22054576873779297,
      "eval_runtime": 3.7364,
      "eval_samples_per_second": 58.88,
      "eval_steps_per_second": 3.747,
      "step": 308
    }
  ],
  "logging_steps": 10,
  "max_steps": 700,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 50,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 1.9562236774907904e+17,
  "train_batch_size": 16,
  "trial_name": null,
  "trial_params": null
}