{
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.999829030603522,
  "eval_steps": 250,
  "global_step": 1462,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.05060694135749701,
      "grad_norm": 2.961400270462036,
      "learning_rate": 4.965986394557823e-07,
      "loss": 4.4344,
      "step": 74
    },
    {
      "epoch": 0.10121388271499401,
      "grad_norm": 3.643557548522949,
      "learning_rate": 1e-06,
      "loss": 4.4596,
      "step": 148
    },
    {
      "epoch": 0.15182082407249103,
      "grad_norm": 3.3004343509674072,
      "learning_rate": 9.437262357414448e-07,
      "loss": 4.3698,
      "step": 222
    },
    {
      "epoch": 0.17096939647803044,
      "eval_loss": 4.173080921173096,
      "eval_runtime": 18.3767,
      "eval_samples_per_second": 27.208,
      "eval_steps_per_second": 13.604,
      "step": 250
    },
    {
      "epoch": 0.20242776542998803,
      "grad_norm": 3.9712746143341064,
      "learning_rate": 8.874524714828897e-07,
      "loss": 4.1264,
      "step": 296
    },
    {
      "epoch": 0.253034706787485,
      "grad_norm": 5.5848774909973145,
      "learning_rate": 8.311787072243346e-07,
      "loss": 3.8829,
      "step": 370
    },
    {
      "epoch": 0.30364164814498207,
      "grad_norm": 4.0708184242248535,
      "learning_rate": 7.749049429657795e-07,
      "loss": 3.5096,
      "step": 444
    },
    {
      "epoch": 0.3419387929560609,
      "eval_loss": 3.149691581726074,
      "eval_runtime": 18.3567,
      "eval_samples_per_second": 27.238,
      "eval_steps_per_second": 13.619,
      "step": 500
    },
    {
      "epoch": 0.35424858950247906,
      "grad_norm": 3.6866307258605957,
      "learning_rate": 7.186311787072243e-07,
      "loss": 3.1506,
      "step": 518
    },
    {
      "epoch": 0.40485553085997605,
      "grad_norm": 4.073938369750977,
      "learning_rate": 6.623574144486692e-07,
      "loss": 2.9908,
      "step": 592
    },
    {
      "epoch": 0.45546247221747305,
      "grad_norm": 3.710505485534668,
      "learning_rate": 6.060836501901141e-07,
      "loss": 2.7593,
      "step": 666
    },
    {
      "epoch": 0.50606941357497,
      "grad_norm": 2.3573338985443115,
      "learning_rate": 5.498098859315589e-07,
      "loss": 2.6213,
      "step": 740
    },
    {
      "epoch": 0.5129081894340913,
      "eval_loss": 2.573621988296509,
      "eval_runtime": 18.3572,
      "eval_samples_per_second": 27.237,
      "eval_steps_per_second": 13.619,
      "step": 750
    },
    {
      "epoch": 0.5566763549324671,
      "grad_norm": 2.2275450229644775,
      "learning_rate": 4.935361216730038e-07,
      "loss": 2.5332,
      "step": 814
    },
    {
      "epoch": 0.6072832962899641,
      "grad_norm": 2.2295360565185547,
      "learning_rate": 4.372623574144487e-07,
      "loss": 2.484,
      "step": 888
    },
    {
      "epoch": 0.6578902376474611,
      "grad_norm": 2.1964755058288574,
      "learning_rate": 3.8098859315589356e-07,
      "loss": 2.4305,
      "step": 962
    },
    {
      "epoch": 0.6838775859121218,
      "eval_loss": 2.3979878425598145,
      "eval_runtime": 18.3621,
      "eval_samples_per_second": 27.23,
      "eval_steps_per_second": 13.615,
      "step": 1000
    },
    {
      "epoch": 0.7084971790049581,
      "grad_norm": 2.5384292602539062,
      "learning_rate": 3.247148288973384e-07,
      "loss": 2.3982,
      "step": 1036
    },
    {
      "epoch": 0.7591041203624551,
      "grad_norm": 1.8956412076950073,
      "learning_rate": 2.6844106463878326e-07,
      "loss": 2.3504,
      "step": 1110
    },
    {
      "epoch": 0.8097110617199521,
      "grad_norm": 1.7790541648864746,
      "learning_rate": 2.1216730038022811e-07,
      "loss": 2.3653,
      "step": 1184
    },
    {
      "epoch": 0.8548469823901521,
      "eval_loss": 2.334463119506836,
      "eval_runtime": 18.3571,
      "eval_samples_per_second": 27.237,
      "eval_steps_per_second": 13.619,
      "step": 1250
    },
    {
      "epoch": 0.8603180030774491,
      "grad_norm": 2.580681562423706,
      "learning_rate": 1.55893536121673e-07,
      "loss": 2.317,
      "step": 1258
    },
    {
      "epoch": 0.9109249444349461,
      "grad_norm": 1.333031415939331,
      "learning_rate": 9.961977186311786e-08,
      "loss": 2.3346,
      "step": 1332
    },
    {
      "epoch": 0.9615318857924432,
      "grad_norm": 1.8250882625579834,
      "learning_rate": 4.3346007604562734e-08,
      "loss": 2.3081,
      "step": 1406
    },
    {
      "epoch": 0.999829030603522,
      "step": 1462,
      "total_flos": 4.82073055174656e+16,
      "train_loss": 3.015217604943731,
      "train_runtime": 1458.2015,
      "train_samples_per_second": 8.022,
      "train_steps_per_second": 1.003
    }
  ],
  "logging_steps": 74,
  "max_steps": 1462,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 4.82073055174656e+16,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}