{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.9780564263322884,
  "eval_steps": 500,
  "global_step": 39,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.025078369905956112,
      "grad_norm": 4.108294486999512,
      "learning_rate": 4.9918932703355256e-05,
      "loss": 1.5881,
      "num_input_tokens_seen": 1572864,
      "step": 1
    },
    {
      "epoch": 0.050156739811912224,
      "grad_norm": 2.3771207332611084,
      "learning_rate": 4.967625656594782e-05,
      "loss": 1.2787,
      "num_input_tokens_seen": 3145728,
      "step": 2
    },
    {
      "epoch": 0.07523510971786834,
      "grad_norm": 1.290763020515442,
      "learning_rate": 4.92735454356513e-05,
      "loss": 1.1866,
      "num_input_tokens_seen": 4718592,
      "step": 3
    },
    {
      "epoch": 0.10031347962382445,
      "grad_norm": 1.9561363458633423,
      "learning_rate": 4.8713411048678635e-05,
      "loss": 1.1526,
      "num_input_tokens_seen": 6291456,
      "step": 4
    },
    {
      "epoch": 0.12539184952978055,
      "grad_norm": 1.645265817642212,
      "learning_rate": 4.799948609147061e-05,
      "loss": 1.1538,
      "num_input_tokens_seen": 7864320,
      "step": 5
    },
    {
      "epoch": 0.15047021943573669,
      "grad_norm": 1.6553821563720703,
      "learning_rate": 4.713640064133025e-05,
      "loss": 1.0872,
      "num_input_tokens_seen": 9437184,
      "step": 6
    },
    {
      "epoch": 0.1755485893416928,
      "grad_norm": 0.831769585609436,
      "learning_rate": 4.6129752138594874e-05,
      "loss": 1.0878,
      "num_input_tokens_seen": 11010048,
      "step": 7
    },
    {
      "epoch": 0.2006269592476489,
      "grad_norm": 1.176440954208374,
      "learning_rate": 4.498606908508754e-05,
      "loss": 1.0683,
      "num_input_tokens_seen": 12582912,
      "step": 8
    },
    {
      "epoch": 0.22570532915360503,
      "grad_norm": 1.0760396718978882,
      "learning_rate": 4.371276870427753e-05,
      "loss": 1.0934,
      "num_input_tokens_seen": 14155776,
      "step": 9
    },
    {
      "epoch": 0.2507836990595611,
      "grad_norm": 1.0180983543395996,
      "learning_rate": 4.231810883773999e-05,
      "loss": 1.0793,
      "num_input_tokens_seen": 15728640,
      "step": 10
    },
    {
      "epoch": 0.27586206896551724,
      "grad_norm": 0.8989325165748596,
      "learning_rate": 4.0811134389884433e-05,
      "loss": 1.049,
      "num_input_tokens_seen": 17301504,
      "step": 11
    },
    {
      "epoch": 0.30094043887147337,
      "grad_norm": 0.8567007780075073,
      "learning_rate": 3.920161866827889e-05,
      "loss": 1.0616,
      "num_input_tokens_seen": 18874368,
      "step": 12
    },
    {
      "epoch": 0.32601880877742945,
      "grad_norm": 0.8186928033828735,
      "learning_rate": 3.7500000000000003e-05,
      "loss": 1.0608,
      "num_input_tokens_seen": 20447232,
      "step": 13
    },
    {
      "epoch": 0.3510971786833856,
      "grad_norm": 0.8171782493591309,
      "learning_rate": 3.5717314035076355e-05,
      "loss": 1.0509,
      "num_input_tokens_seen": 22020096,
      "step": 14
    },
    {
      "epoch": 0.3761755485893417,
      "grad_norm": 0.7357198596000671,
      "learning_rate": 3.386512217606339e-05,
      "loss": 1.0264,
      "num_input_tokens_seen": 23592960,
      "step": 15
    },
    {
      "epoch": 0.4012539184952978,
      "grad_norm": 0.6611493825912476,
      "learning_rate": 3.195543659791132e-05,
      "loss": 1.0232,
      "num_input_tokens_seen": 25165824,
      "step": 16
    },
    {
      "epoch": 0.4263322884012539,
      "grad_norm": 0.7407145500183105,
      "learning_rate": 3.0000642344401113e-05,
      "loss": 1.0238,
      "num_input_tokens_seen": 26738688,
      "step": 17
    },
    {
      "epoch": 0.45141065830721006,
      "grad_norm": 0.608465313911438,
      "learning_rate": 2.8013417006383076e-05,
      "loss": 1.0305,
      "num_input_tokens_seen": 28311552,
      "step": 18
    },
    {
      "epoch": 0.47648902821316613,
      "grad_norm": 0.500586211681366,
      "learning_rate": 2.600664850273538e-05,
      "loss": 1.0035,
      "num_input_tokens_seen": 29884416,
      "step": 19
    },
    {
      "epoch": 0.5015673981191222,
      "grad_norm": 0.4884806275367737,
      "learning_rate": 2.399335149726463e-05,
      "loss": 0.9984,
      "num_input_tokens_seen": 31457280,
      "step": 20
    },
    {
      "epoch": 0.5266457680250783,
      "grad_norm": 0.4910697937011719,
      "learning_rate": 2.1986582993616926e-05,
      "loss": 1.0343,
      "num_input_tokens_seen": 33030144,
      "step": 21
    },
    {
      "epoch": 0.5517241379310345,
      "grad_norm": 0.4629478454589844,
      "learning_rate": 1.9999357655598893e-05,
      "loss": 1.003,
      "num_input_tokens_seen": 34603008,
      "step": 22
    },
    {
      "epoch": 0.5768025078369906,
      "grad_norm": 0.370900422334671,
      "learning_rate": 1.8044563402088684e-05,
      "loss": 0.9702,
      "num_input_tokens_seen": 36175872,
      "step": 23
    },
    {
      "epoch": 0.6018808777429467,
      "grad_norm": 0.34206250309944153,
      "learning_rate": 1.613487782393661e-05,
      "loss": 1.0174,
      "num_input_tokens_seen": 37748736,
      "step": 24
    },
    {
      "epoch": 0.6269592476489029,
      "grad_norm": 0.37745535373687744,
      "learning_rate": 1.4282685964923642e-05,
      "loss": 1.0035,
      "num_input_tokens_seen": 39321600,
      "step": 25
    },
    {
      "epoch": 0.6520376175548589,
      "grad_norm": 0.28456300497055054,
      "learning_rate": 1.2500000000000006e-05,
      "loss": 1.0231,
      "num_input_tokens_seen": 40894464,
      "step": 26
    },
    {
      "epoch": 0.677115987460815,
      "grad_norm": 0.30013516545295715,
      "learning_rate": 1.0798381331721109e-05,
      "loss": 0.9964,
      "num_input_tokens_seen": 42467328,
      "step": 27
    },
    {
      "epoch": 0.7021943573667712,
      "grad_norm": 0.22873762249946594,
      "learning_rate": 9.18886561011557e-06,
      "loss": 0.9719,
      "num_input_tokens_seen": 44040192,
      "step": 28
    },
    {
      "epoch": 0.7272727272727273,
      "grad_norm": 0.2122729867696762,
      "learning_rate": 7.681891162260015e-06,
      "loss": 0.995,
      "num_input_tokens_seen": 45613056,
      "step": 29
    },
    {
      "epoch": 0.7523510971786834,
      "grad_norm": 0.19211077690124512,
      "learning_rate": 6.28723129572247e-06,
      "loss": 1.0011,
      "num_input_tokens_seen": 47185920,
      "step": 30
    },
    {
      "epoch": 0.7774294670846394,
      "grad_norm": 0.236570343375206,
      "learning_rate": 5.013930914912476e-06,
      "loss": 1.0124,
      "num_input_tokens_seen": 48758784,
      "step": 31
    },
    {
      "epoch": 0.8025078369905956,
      "grad_norm": 0.2403160035610199,
      "learning_rate": 3.8702478614051355e-06,
      "loss": 0.9797,
      "num_input_tokens_seen": 50331648,
      "step": 32
    },
    {
      "epoch": 0.8275862068965517,
      "grad_norm": 0.20583193004131317,
      "learning_rate": 2.8635993586697553e-06,
      "loss": 0.9685,
      "num_input_tokens_seen": 51904512,
      "step": 33
    },
    {
      "epoch": 0.8526645768025078,
      "grad_norm": 0.20351257920265198,
      "learning_rate": 2.0005139085293945e-06,
      "loss": 1.0069,
      "num_input_tokens_seen": 53477376,
      "step": 34
    },
    {
      "epoch": 0.877742946708464,
      "grad_norm": 0.20642909407615662,
      "learning_rate": 1.286588951321363e-06,
      "loss": 0.9941,
      "num_input_tokens_seen": 55050240,
      "step": 35
    },
    {
      "epoch": 0.9028213166144201,
      "grad_norm": 0.2136221081018448,
      "learning_rate": 7.264545643486997e-07,
      "loss": 1.023,
      "num_input_tokens_seen": 56623104,
      "step": 36
    },
    {
      "epoch": 0.9278996865203761,
      "grad_norm": 0.2254924178123474,
      "learning_rate": 3.237434340521789e-07,
      "loss": 0.9832,
      "num_input_tokens_seen": 58195968,
      "step": 37
    },
    {
      "epoch": 0.9529780564263323,
      "grad_norm": 0.21043558418750763,
      "learning_rate": 8.106729664475176e-08,
      "loss": 0.9791,
      "num_input_tokens_seen": 59768832,
      "step": 38
    },
    {
      "epoch": 0.9780564263322884,
      "grad_norm": 0.2032707780599594,
      "learning_rate": 0.0,
      "loss": 1.0038,
      "num_input_tokens_seen": 61341696,
      "step": 39
    },
    {
      "epoch": 0.9780564263322884,
      "num_input_tokens_seen": 61341696,
      "step": 39,
      "total_flos": 2.7621888463963423e+18,
      "train_loss": 1.053089643136049,
      "train_runtime": 6598.8894,
      "train_samples_per_second": 2.316,
      "train_steps_per_second": 0.006
    }
  ],
  "logging_steps": 1,
  "max_steps": 39,
  "num_input_tokens_seen": 61341696,
  "num_train_epochs": 1,
  "save_steps": 1000,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 2.7621888463963423e+18,
  "train_batch_size": 16,
  "trial_name": null,
  "trial_params": null
}
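
A minimal Python sketch of how a trainer_state.json like the one above might be summarized after a run. The file path "trainer_state.json" is an assumption (adjust it to your checkpoint directory); the keys used are exactly the fields shown above.

import json

# Load the Trainer state dump.
# NOTE: "trainer_state.json" is an assumed path, not confirmed by the file above.
with open("trainer_state.json") as f:
    state = json.load(f)

# Per-step records carry a "loss" key; the final summary record does not.
steps = [entry for entry in state["log_history"] if "loss" in entry]
first, last = steps[0], steps[-1]

print(f"steps:       {state['global_step']}/{state['max_steps']}")
print(f"loss:        {first['loss']:.4f} -> {last['loss']:.4f}")
print(f"lr:          {first['learning_rate']:.2e} -> {last['learning_rate']:.2e}")
print(f"tokens seen: {state['num_input_tokens_seen']:,}")

For this log, that would report the loss falling from 1.5881 at step 1 to 1.0038 at step 39 while the learning rate decays from 4.99e-05 to zero.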