{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 6.0,
  "eval_steps": 200,
  "global_step": 210,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.02857142857142857,
      "grad_norm": 19.810293087197298,
      "learning_rate": 5e-08,
      "loss": 1.6319,
      "step": 1
    },
    {
      "epoch": 0.05714285714285714,
      "grad_norm": 20.10065322014341,
      "learning_rate": 1e-07,
      "loss": 1.5294,
      "step": 2
    },
    {
      "epoch": 0.08571428571428572,
      "grad_norm": 22.139565739974667,
      "learning_rate": 1.5e-07,
      "loss": 1.568,
      "step": 3
    },
    {
      "epoch": 0.11428571428571428,
      "grad_norm": 17.607582690653754,
      "learning_rate": 2e-07,
      "loss": 1.6628,
      "step": 4
    },
    {
      "epoch": 0.14285714285714285,
      "grad_norm": 24.910794234048186,
      "learning_rate": 2.5e-07,
      "loss": 1.5738,
      "step": 5
    },
    {
      "epoch": 0.17142857142857143,
      "grad_norm": 18.006572442480767,
      "learning_rate": 3e-07,
      "loss": 1.5898,
      "step": 6
    },
    {
      "epoch": 0.2,
      "grad_norm": 22.152647510768777,
      "learning_rate": 3.5e-07,
      "loss": 1.5891,
      "step": 7
    },
    {
      "epoch": 0.22857142857142856,
      "grad_norm": 17.044462125735237,
      "learning_rate": 4e-07,
      "loss": 1.5277,
      "step": 8
    },
    {
      "epoch": 0.2571428571428571,
      "grad_norm": 19.26164281649356,
      "learning_rate": 4.5e-07,
      "loss": 1.5504,
      "step": 9
    },
    {
      "epoch": 0.2857142857142857,
      "grad_norm": 20.409183726640773,
      "learning_rate": 5e-07,
      "loss": 1.5509,
      "step": 10
    },
    {
      "epoch": 0.3142857142857143,
      "grad_norm": 16.865224915691606,
      "learning_rate": 5.5e-07,
      "loss": 1.5752,
      "step": 11
    },
    {
      "epoch": 0.34285714285714286,
      "grad_norm": 15.722428651072727,
      "learning_rate": 6e-07,
      "loss": 1.6208,
      "step": 12
    },
    {
      "epoch": 0.37142857142857144,
      "grad_norm": 13.23419740428603,
      "learning_rate": 6.5e-07,
      "loss": 1.515,
      "step": 13
    },
    {
      "epoch": 0.4,
      "grad_norm": 12.45173447148941,
      "learning_rate": 7e-07,
      "loss": 1.4752,
      "step": 14
    },
    {
      "epoch": 0.42857142857142855,
      "grad_norm": 10.684936055938318,
      "learning_rate": 7.5e-07,
      "loss": 1.5073,
      "step": 15
    },
    {
      "epoch": 0.45714285714285713,
      "grad_norm": 8.612999241840305,
      "learning_rate": 8e-07,
      "loss": 1.4206,
      "step": 16
    },
    {
      "epoch": 0.4857142857142857,
      "grad_norm": 9.64409059662565,
      "learning_rate": 8.499999999999999e-07,
      "loss": 1.3659,
      "step": 17
    },
    {
      "epoch": 0.5142857142857142,
      "grad_norm": 9.53721910636901,
      "learning_rate": 9e-07,
      "loss": 1.4999,
      "step": 18
    },
    {
      "epoch": 0.5428571428571428,
      "grad_norm": 15.44618942298652,
      "learning_rate": 9.499999999999999e-07,
      "loss": 1.452,
      "step": 19
    },
    {
      "epoch": 0.5714285714285714,
      "grad_norm": 7.797878397007685,
      "learning_rate": 1e-06,
      "loss": 1.4059,
      "step": 20
    },
    {
      "epoch": 0.6,
      "grad_norm": 8.611751272514192,
      "learning_rate": 9.999316524962345e-07,
      "loss": 1.3416,
      "step": 21
    },
    {
      "epoch": 0.6285714285714286,
      "grad_norm": 8.60172100284345,
      "learning_rate": 9.99726628670463e-07,
      "loss": 1.3334,
      "step": 22
    },
    {
      "epoch": 0.6571428571428571,
      "grad_norm": 7.707986778608113,
      "learning_rate": 9.993849845741523e-07,
      "loss": 1.3032,
      "step": 23
    },
    {
      "epoch": 0.6857142857142857,
      "grad_norm": 7.692312413668571,
      "learning_rate": 9.989068136093872e-07,
      "loss": 1.3742,
      "step": 24
    },
    {
      "epoch": 0.7142857142857143,
      "grad_norm": 6.364211690724886,
      "learning_rate": 9.982922465033348e-07,
      "loss": 1.2641,
      "step": 25
    },
    {
      "epoch": 0.7428571428571429,
      "grad_norm": 6.85599706491363,
      "learning_rate": 9.975414512725056e-07,
      "loss": 1.2624,
      "step": 26
    },
    {
      "epoch": 0.7714285714285715,
      "grad_norm": 6.401247246252304,
      "learning_rate": 9.966546331768192e-07,
      "loss": 1.258,
      "step": 27
    },
    {
      "epoch": 0.8,
      "grad_norm": 9.111986647021705,
      "learning_rate": 9.956320346634875e-07,
      "loss": 1.3228,
      "step": 28
    },
    {
      "epoch": 0.8285714285714286,
      "grad_norm": 6.326533367398195,
      "learning_rate": 9.944739353007341e-07,
      "loss": 1.2034,
      "step": 29
    },
    {
      "epoch": 0.8571428571428571,
      "grad_norm": 6.561308869602955,
      "learning_rate": 9.931806517013612e-07,
      "loss": 1.1795,
      "step": 30
    },
    {
      "epoch": 0.8857142857142857,
      "grad_norm": 5.9273022974036795,
      "learning_rate": 9.917525374361911e-07,
      "loss": 1.2288,
      "step": 31
    },
    {
      "epoch": 0.9142857142857143,
      "grad_norm": 5.348130355740025,
      "learning_rate": 9.901899829374047e-07,
      "loss": 1.1764,
      "step": 32
    },
    {
      "epoch": 0.9428571428571428,
      "grad_norm": 5.274551340001603,
      "learning_rate": 9.884934153917996e-07,
      "loss": 1.1402,
      "step": 33
    },
    {
      "epoch": 0.9714285714285714,
      "grad_norm": 5.681514700875701,
      "learning_rate": 9.866632986240029e-07,
      "loss": 1.1604,
      "step": 34
    },
    {
      "epoch": 1.0,
      "grad_norm": 5.77664764652199,
      "learning_rate": 9.847001329696652e-07,
      "loss": 1.1567,
      "step": 35
    },
    {
      "epoch": 1.0285714285714285,
      "grad_norm": 5.836094968508217,
      "learning_rate": 9.826044551386742e-07,
      "loss": 1.084,
      "step": 36
    },
    {
      "epoch": 1.0571428571428572,
      "grad_norm": 5.827344070494953,
      "learning_rate": 9.803768380684242e-07,
      "loss": 1.1305,
      "step": 37
    },
    {
      "epoch": 1.0857142857142856,
      "grad_norm": 5.1906563321747115,
      "learning_rate": 9.780178907671788e-07,
      "loss": 1.0574,
      "step": 38
    },
    {
      "epoch": 1.1142857142857143,
      "grad_norm": 5.562551195695788,
      "learning_rate": 9.755282581475767e-07,
      "loss": 1.1391,
      "step": 39
    },
    {
      "epoch": 1.1428571428571428,
      "grad_norm": 5.717325403158778,
      "learning_rate": 9.729086208503173e-07,
      "loss": 1.1267,
      "step": 40
    },
    {
      "epoch": 1.1714285714285715,
      "grad_norm": 5.969025543565558,
      "learning_rate": 9.701596950580807e-07,
      "loss": 1.1055,
      "step": 41
    },
    {
      "epoch": 1.2,
      "grad_norm": 4.274011328484305,
      "learning_rate": 9.672822322997304e-07,
      "loss": 1.0392,
      "step": 42
    },
    {
      "epoch": 1.2285714285714286,
      "grad_norm": 4.4561906748259545,
      "learning_rate": 9.642770192448535e-07,
      "loss": 1.0393,
      "step": 43
    },
    {
      "epoch": 1.2571428571428571,
      "grad_norm": 4.618703002376858,
      "learning_rate": 9.611448774886923e-07,
      "loss": 1.0795,
      "step": 44
    },
    {
      "epoch": 1.2857142857142856,
      "grad_norm": 4.473713036590507,
      "learning_rate": 9.578866633275286e-07,
      "loss": 1.0194,
      "step": 45
    },
    {
      "epoch": 1.3142857142857143,
      "grad_norm": 4.3150195602071415,
      "learning_rate": 9.545032675245813e-07,
      "loss": 1.0409,
      "step": 46
    },
    {
      "epoch": 1.342857142857143,
      "grad_norm": 4.847108292314838,
      "learning_rate": 9.509956150664795e-07,
      "loss": 1.0475,
      "step": 47
    },
    {
      "epoch": 1.3714285714285714,
      "grad_norm": 4.716522743464805,
      "learning_rate": 9.473646649103817e-07,
      "loss": 1.046,
      "step": 48
    },
    {
      "epoch": 1.4,
      "grad_norm": 4.942648131785751,
      "learning_rate": 9.436114097218058e-07,
      "loss": 0.9876,
      "step": 49
    },
    {
      "epoch": 1.4285714285714286,
      "grad_norm": 5.1753554721234565,
      "learning_rate": 9.397368756032444e-07,
      "loss": 1.0394,
      "step": 50
    },
    {
      "epoch": 1.457142857142857,
      "grad_norm": 4.996816148990504,
      "learning_rate": 9.357421218136386e-07,
      "loss": 0.9647,
      "step": 51
    },
    {
      "epoch": 1.4857142857142858,
      "grad_norm": 5.982295572473842,
      "learning_rate": 9.316282404787869e-07,
      "loss": 1.0986,
      "step": 52
    },
    {
      "epoch": 1.5142857142857142,
      "grad_norm": 4.701714221899932,
      "learning_rate": 9.273963562927694e-07,
      "loss": 1.0493,
      "step": 53
    },
    {
      "epoch": 1.5428571428571427,
      "grad_norm": 4.514923820929648,
      "learning_rate": 9.230476262104676e-07,
      "loss": 0.9847,
      "step": 54
    },
    {
      "epoch": 1.5714285714285714,
      "grad_norm": 4.701919910281847,
      "learning_rate": 9.185832391312642e-07,
      "loss": 0.9794,
      "step": 55
    },
    {
      "epoch": 1.6,
      "grad_norm": 5.175054800986126,
      "learning_rate": 9.1400441557401e-07,
      "loss": 1.0607,
      "step": 56
    },
    {
      "epoch": 1.6285714285714286,
      "grad_norm": 4.080487196055978,
      "learning_rate": 9.093124073433462e-07,
      "loss": 0.9856,
      "step": 57
    },
    {
      "epoch": 1.657142857142857,
      "grad_norm": 4.168373937047882,
      "learning_rate": 9.045084971874737e-07,
      "loss": 0.9107,
      "step": 58
    },
    {
      "epoch": 1.6857142857142857,
      "grad_norm": 4.223886586576581,
      "learning_rate": 8.995939984474623e-07,
      "loss": 0.9733,
      "step": 59
    },
    {
      "epoch": 1.7142857142857144,
      "grad_norm": 4.513527637157093,
      "learning_rate": 8.945702546981968e-07,
      "loss": 0.9476,
      "step": 60
    },
    {
      "epoch": 1.7428571428571429,
      "grad_norm": 6.610164639785705,
      "learning_rate": 8.894386393810562e-07,
      "loss": 0.9161,
      "step": 61
    },
    {
      "epoch": 1.7714285714285714,
      "grad_norm": 4.703651247017542,
      "learning_rate": 8.842005554284295e-07,
      "loss": 1.0058,
      "step": 62
    },
    {
      "epoch": 1.8,
      "grad_norm": 4.843849342547474,
      "learning_rate": 8.788574348801674e-07,
      "loss": 0.8815,
      "step": 63
    },
    {
      "epoch": 1.8285714285714287,
      "grad_norm": 4.850275003883815,
      "learning_rate": 8.734107384920769e-07,
      "loss": 0.9945,
      "step": 64
    },
    {
      "epoch": 1.8571428571428572,
      "grad_norm": 5.453604273607254,
      "learning_rate": 8.678619553365658e-07,
      "loss": 0.9822,
      "step": 65
    },
    {
      "epoch": 1.8857142857142857,
      "grad_norm": 5.3760203289286155,
      "learning_rate": 8.622126023955445e-07,
      "loss": 0.9781,
      "step": 66
    },
    {
      "epoch": 1.9142857142857141,
      "grad_norm": 3.7890108184255062,
      "learning_rate": 8.564642241456986e-07,
      "loss": 0.9188,
      "step": 67
    },
    {
      "epoch": 1.9428571428571428,
      "grad_norm": 4.482132592462384,
      "learning_rate": 8.506183921362442e-07,
      "loss": 0.9193,
      "step": 68
    },
    {
      "epoch": 1.9714285714285715,
      "grad_norm": 5.311797532408616,
      "learning_rate": 8.446767045592829e-07,
      "loss": 0.9563,
      "step": 69
    },
    {
      "epoch": 2.0,
      "grad_norm": 4.950363360704343,
      "learning_rate": 8.386407858128706e-07,
      "loss": 0.9172,
      "step": 70
    },
    {
      "epoch": 2.0285714285714285,
      "grad_norm": 4.784370125705732,
      "learning_rate": 8.325122860569241e-07,
      "loss": 0.8843,
      "step": 71
    },
    {
      "epoch": 2.057142857142857,
      "grad_norm": 3.63025112886302,
      "learning_rate": 8.262928807620843e-07,
      "loss": 0.8547,
      "step": 72
    },
    {
      "epoch": 2.085714285714286,
      "grad_norm": 5.218576028144485,
      "learning_rate": 8.199842702516582e-07,
      "loss": 0.9414,
      "step": 73
    },
    {
      "epoch": 2.1142857142857143,
      "grad_norm": 4.677814392084542,
      "learning_rate": 8.135881792367685e-07,
      "loss": 0.8748,
      "step": 74
    },
    {
      "epoch": 2.142857142857143,
      "grad_norm": 4.400225941517684,
      "learning_rate": 8.071063563448339e-07,
      "loss": 0.8646,
      "step": 75
    },
    {
      "epoch": 2.1714285714285713,
      "grad_norm": 4.629375190268374,
      "learning_rate": 8.005405736415125e-07,
      "loss": 0.8744,
      "step": 76
    },
    {
      "epoch": 2.2,
      "grad_norm": 4.1365979610935,
      "learning_rate": 7.938926261462365e-07,
      "loss": 0.8735,
      "step": 77
    },
    {
      "epoch": 2.2285714285714286,
      "grad_norm": 4.796843279900597,
      "learning_rate": 7.871643313414718e-07,
      "loss": 0.9161,
      "step": 78
    },
    {
      "epoch": 2.257142857142857,
      "grad_norm": 4.297808554492733,
      "learning_rate": 7.803575286758363e-07,
      "loss": 0.8119,
      "step": 79
    },
    {
      "epoch": 2.2857142857142856,
      "grad_norm": 4.229241471681201,
      "learning_rate": 7.734740790612136e-07,
      "loss": 0.8698,
      "step": 80
    },
    {
      "epoch": 2.314285714285714,
      "grad_norm": 5.474712766868538,
      "learning_rate": 7.665158643639969e-07,
      "loss": 0.8673,
      "step": 81
    },
    {
      "epoch": 2.342857142857143,
      "grad_norm": 5.8097182446336495,
      "learning_rate": 7.594847868906076e-07,
      "loss": 0.8561,
      "step": 82
    },
    {
      "epoch": 2.3714285714285714,
      "grad_norm": 6.090812187892629,
      "learning_rate": 7.523827688674219e-07,
      "loss": 0.9032,
      "step": 83
    },
    {
      "epoch": 2.4,
      "grad_norm": 4.2456911319822614,
      "learning_rate": 7.452117519152541e-07,
      "loss": 0.8431,
      "step": 84
    },
    {
      "epoch": 2.4285714285714284,
      "grad_norm": 4.562925662849317,
      "learning_rate": 7.379736965185368e-07,
      "loss": 0.844,
      "step": 85
    },
    {
      "epoch": 2.4571428571428573,
      "grad_norm": 4.517660029971984,
      "learning_rate": 7.306705814893439e-07,
      "loss": 0.8071,
      "step": 86
    },
    {
      "epoch": 2.4857142857142858,
      "grad_norm": 4.970820782182107,
      "learning_rate": 7.233044034264033e-07,
      "loss": 0.8726,
      "step": 87
    },
    {
      "epoch": 2.5142857142857142,
      "grad_norm": 4.817397128446438,
      "learning_rate": 7.158771761692464e-07,
      "loss": 0.8651,
      "step": 88
    },
    {
      "epoch": 2.5428571428571427,
      "grad_norm": 4.521311016914525,
      "learning_rate": 7.083909302476452e-07,
      "loss": 0.8023,
      "step": 89
    },
    {
      "epoch": 2.571428571428571,
      "grad_norm": 5.57347106069374,
      "learning_rate": 7.008477123264847e-07,
      "loss": 0.8107,
      "step": 90
    },
    {
      "epoch": 2.6,
      "grad_norm": 4.958424900327067,
      "learning_rate": 6.932495846462261e-07,
      "loss": 0.8321,
      "step": 91
    },
    {
      "epoch": 2.6285714285714286,
      "grad_norm": 4.34206205323412,
      "learning_rate": 6.855986244591103e-07,
      "loss": 0.8003,
      "step": 92
    },
    {
      "epoch": 2.657142857142857,
      "grad_norm": 4.417137471491474,
      "learning_rate": 6.778969234612583e-07,
      "loss": 0.81,
      "step": 93
    },
    {
      "epoch": 2.685714285714286,
      "grad_norm": 4.714391490425693,
      "learning_rate": 6.701465872208216e-07,
      "loss": 0.8362,
      "step": 94
    },
    {
      "epoch": 2.7142857142857144,
      "grad_norm": 3.835031010544762,
      "learning_rate": 6.623497346023417e-07,
      "loss": 0.7408,
      "step": 95
    },
    {
      "epoch": 2.742857142857143,
      "grad_norm": 4.43328902038276,
      "learning_rate": 6.545084971874736e-07,
      "loss": 0.803,
      "step": 96
    },
    {
      "epoch": 2.7714285714285714,
      "grad_norm": 5.573988325671216,
      "learning_rate": 6.466250186922324e-07,
      "loss": 0.8499,
      "step": 97
    },
    {
      "epoch": 2.8,
      "grad_norm": 5.080352906047553,
      "learning_rate": 6.387014543809223e-07,
      "loss": 0.8536,
      "step": 98
    },
    {
      "epoch": 2.8285714285714287,
      "grad_norm": 4.433727646568338,
      "learning_rate": 6.307399704769098e-07,
      "loss": 0.8167,
      "step": 99
    },
    {
      "epoch": 2.857142857142857,
      "grad_norm": 4.72983530688845,
      "learning_rate": 6.227427435703995e-07,
      "loss": 0.8247,
      "step": 100
    },
    {
      "epoch": 2.8857142857142857,
      "grad_norm": 4.031675844503338,
      "learning_rate": 6.147119600233758e-07,
      "loss": 0.7923,
      "step": 101
    },
    {
      "epoch": 2.914285714285714,
      "grad_norm": 4.657562682714027,
      "learning_rate": 6.066498153718734e-07,
      "loss": 0.8259,
      "step": 102
    },
    {
      "epoch": 2.942857142857143,
      "grad_norm": 3.742914618609364,
      "learning_rate": 5.985585137257401e-07,
      "loss": 0.7416,
      "step": 103
    },
    {
      "epoch": 2.9714285714285715,
      "grad_norm": 5.392956655820792,
      "learning_rate": 5.90440267166055e-07,
      "loss": 0.86,
      "step": 104
    },
    {
      "epoch": 3.0,
      "grad_norm": 4.370818695376769,
      "learning_rate": 5.82297295140367e-07,
      "loss": 0.7756,
      "step": 105
    },
    {
      "epoch": 3.0285714285714285,
      "grad_norm": 4.1080059715561426,
      "learning_rate": 5.741318238559209e-07,
      "loss": 0.7417,
      "step": 106
    },
    {
      "epoch": 3.057142857142857,
      "grad_norm": 5.242835158751313,
      "learning_rate": 5.659460856710345e-07,
      "loss": 0.7413,
      "step": 107
    },
    {
      "epoch": 3.085714285714286,
      "grad_norm": 4.213242126183361,
      "learning_rate": 5.577423184847931e-07,
      "loss": 0.774,
      "step": 108
    },
    {
      "epoch": 3.1142857142857143,
      "grad_norm": 5.14212832829309,
      "learning_rate": 5.495227651252315e-07,
      "loss": 0.7596,
      "step": 109
    },
    {
      "epoch": 3.142857142857143,
      "grad_norm": 4.536657005455878,
      "learning_rate": 5.412896727361662e-07,
      "loss": 0.7494,
      "step": 110
    },
    {
      "epoch": 3.1714285714285713,
      "grad_norm": 5.180517603140962,
      "learning_rate": 5.330452921628497e-07,
      "loss": 0.8146,
      "step": 111
    },
    {
      "epoch": 3.2,
      "grad_norm": 4.499166275543811,
      "learning_rate": 5.247918773366111e-07,
      "loss": 0.7437,
      "step": 112
    },
    {
      "epoch": 3.2285714285714286,
      "grad_norm": 4.78892229810655,
      "learning_rate": 5.165316846586541e-07,
      "loss": 0.732,
      "step": 113
    },
    {
      "epoch": 3.257142857142857,
      "grad_norm": 4.643947668361775,
      "learning_rate": 5.082669723831793e-07,
      "loss": 0.769,
      "step": 114
    },
    {
      "epoch": 3.2857142857142856,
      "grad_norm": 4.79036988016491,
      "learning_rate": 5e-07,
      "loss": 0.7586,
      "step": 115
    },
    {
      "epoch": 3.314285714285714,
      "grad_norm": 5.1592671986124845,
      "learning_rate": 4.917330276168208e-07,
      "loss": 0.7513,
      "step": 116
    },
    {
      "epoch": 3.342857142857143,
      "grad_norm": 3.944922328543468,
      "learning_rate": 4.834683153413459e-07,
      "loss": 0.7184,
      "step": 117
    },
    {
      "epoch": 3.3714285714285714,
      "grad_norm": 4.429646542430914,
      "learning_rate": 4.752081226633888e-07,
      "loss": 0.6937,
      "step": 118
    },
    {
      "epoch": 3.4,
      "grad_norm": 5.016602862879482,
      "learning_rate": 4.669547078371503e-07,
      "loss": 0.7638,
      "step": 119
    },
    {
      "epoch": 3.4285714285714284,
      "grad_norm": 4.443230989038584,
      "learning_rate": 4.5871032726383385e-07,
      "loss": 0.7029,
      "step": 120
    },
    {
      "epoch": 3.4571428571428573,
      "grad_norm": 4.244778886390162,
      "learning_rate": 4.5047723487476864e-07,
      "loss": 0.7298,
      "step": 121
    },
    {
      "epoch": 3.4857142857142858,
      "grad_norm": 5.068619137512623,
      "learning_rate": 4.4225768151520694e-07,
      "loss": 0.7716,
      "step": 122
    },
    {
      "epoch": 3.5142857142857142,
      "grad_norm": 4.652036232274711,
      "learning_rate": 4.340539143289655e-07,
      "loss": 0.7276,
      "step": 123
    },
    {
      "epoch": 3.5428571428571427,
      "grad_norm": 4.95389292196556,
      "learning_rate": 4.258681761440789e-07,
      "loss": 0.7345,
      "step": 124
    },
    {
      "epoch": 3.571428571428571,
      "grad_norm": 4.275659541196405,
      "learning_rate": 4.1770270485963294e-07,
      "loss": 0.7323,
      "step": 125
    },
    {
      "epoch": 3.6,
      "grad_norm": 4.607417501587952,
      "learning_rate": 4.095597328339452e-07,
      "loss": 0.7606,
      "step": 126
    },
    {
      "epoch": 3.6285714285714286,
      "grad_norm": 4.794919375541705,
      "learning_rate": 4.0144148627425986e-07,
      "loss": 0.7149,
      "step": 127
    },
    {
      "epoch": 3.657142857142857,
      "grad_norm": 4.102606927748694,
      "learning_rate": 3.9335018462812664e-07,
      "loss": 0.6956,
      "step": 128
    },
    {
      "epoch": 3.685714285714286,
      "grad_norm": 4.243593175311764,
      "learning_rate": 3.8528803997662423e-07,
      "loss": 0.6894,
      "step": 129
    },
    {
      "epoch": 3.7142857142857144,
      "grad_norm": 4.76702832294717,
      "learning_rate": 3.772572564296004e-07,
      "loss": 0.7188,
      "step": 130
    },
    {
      "epoch": 3.742857142857143,
      "grad_norm": 4.651784836564843,
      "learning_rate": 3.692600295230901e-07,
      "loss": 0.7059,
      "step": 131
    },
    {
      "epoch": 3.7714285714285714,
      "grad_norm": 4.977499267065962,
      "learning_rate": 3.612985456190778e-07,
      "loss": 0.7022,
      "step": 132
    },
    {
      "epoch": 3.8,
      "grad_norm": 4.629091116356692,
      "learning_rate": 3.533749813077677e-07,
      "loss": 0.692,
      "step": 133
    },
    {
      "epoch": 3.8285714285714287,
      "grad_norm": 5.180269168283943,
      "learning_rate": 3.454915028125263e-07,
      "loss": 0.7245,
      "step": 134
    },
    {
      "epoch": 3.857142857142857,
      "grad_norm": 4.804587126213361,
      "learning_rate": 3.3765026539765827e-07,
      "loss": 0.6965,
      "step": 135
    },
    {
      "epoch": 3.8857142857142857,
      "grad_norm": 5.24711927553455,
      "learning_rate": 3.2985341277917846e-07,
      "loss": 0.7271,
      "step": 136
    },
    {
      "epoch": 3.914285714285714,
      "grad_norm": 4.306919498942911,
      "learning_rate": 3.221030765387417e-07,
      "loss": 0.7287,
      "step": 137
    },
    {
      "epoch": 3.942857142857143,
      "grad_norm": 5.1155454022030815,
      "learning_rate": 3.1440137554088953e-07,
      "loss": 0.7441,
      "step": 138
    },
    {
      "epoch": 3.9714285714285715,
      "grad_norm": 4.184043691688485,
      "learning_rate": 3.06750415353774e-07,
      "loss": 0.7349,
      "step": 139
    },
    {
      "epoch": 4.0,
      "grad_norm": 4.262350945325998,
      "learning_rate": 2.9915228767351535e-07,
      "loss": 0.7377,
      "step": 140
    },
    {
      "epoch": 4.0285714285714285,
      "grad_norm": 4.103707140569048,
      "learning_rate": 2.916090697523549e-07,
      "loss": 0.6864,
      "step": 141
    },
    {
      "epoch": 4.057142857142857,
      "grad_norm": 4.9358404348712765,
      "learning_rate": 2.841228238307536e-07,
      "loss": 0.7073,
      "step": 142
    },
    {
      "epoch": 4.085714285714285,
      "grad_norm": 4.224580170912438,
      "learning_rate": 2.7669559657359673e-07,
      "loss": 0.6669,
      "step": 143
    },
    {
      "epoch": 4.114285714285714,
      "grad_norm": 4.571605759259632,
      "learning_rate": 2.6932941851065615e-07,
      "loss": 0.6673,
      "step": 144
    },
    {
      "epoch": 4.142857142857143,
      "grad_norm": 4.161244011003443,
      "learning_rate": 2.620263034814632e-07,
      "loss": 0.6644,
      "step": 145
    },
    {
      "epoch": 4.171428571428572,
      "grad_norm": 4.128269627797568,
      "learning_rate": 2.547882480847461e-07,
      "loss": 0.6568,
      "step": 146
    },
    {
      "epoch": 4.2,
      "grad_norm": 4.562112299679241,
      "learning_rate": 2.476172311325783e-07,
      "loss": 0.7048,
      "step": 147
    },
    {
      "epoch": 4.228571428571429,
      "grad_norm": 5.336142167342819,
      "learning_rate": 2.4051521310939254e-07,
      "loss": 0.7082,
      "step": 148
    },
    {
      "epoch": 4.257142857142857,
      "grad_norm": 4.796197499964798,
      "learning_rate": 2.3348413563600323e-07,
      "loss": 0.6497,
      "step": 149
    },
    {
      "epoch": 4.285714285714286,
      "grad_norm": 4.825647232190606,
      "learning_rate": 2.2652592093878665e-07,
      "loss": 0.6851,
      "step": 150
    },
    {
      "epoch": 4.314285714285714,
      "grad_norm": 4.462461397938811,
      "learning_rate": 2.1964247132416368e-07,
      "loss": 0.6974,
      "step": 151
    },
    {
      "epoch": 4.3428571428571425,
      "grad_norm": 4.829646877353178,
      "learning_rate": 2.128356686585282e-07,
      "loss": 0.6439,
      "step": 152
    },
    {
      "epoch": 4.371428571428572,
      "grad_norm": 5.043886293339906,
      "learning_rate": 2.0610737385376348e-07,
      "loss": 0.7215,
      "step": 153
    },
    {
      "epoch": 4.4,
      "grad_norm": 4.598743427591961,
      "learning_rate": 1.9945942635848745e-07,
      "loss": 0.644,
      "step": 154
    },
    {
      "epoch": 4.428571428571429,
      "grad_norm": 3.8253679810994012,
      "learning_rate": 1.9289364365516607e-07,
      "loss": 0.6397,
      "step": 155
    },
    {
      "epoch": 4.457142857142857,
      "grad_norm": 4.807293265176188,
      "learning_rate": 1.8641182076323148e-07,
      "loss": 0.689,
      "step": 156
    },
    {
      "epoch": 4.485714285714286,
      "grad_norm": 3.8062822348294363,
      "learning_rate": 1.8001572974834168e-07,
      "loss": 0.6466,
      "step": 157
    },
    {
      "epoch": 4.514285714285714,
      "grad_norm": 4.883594185819761,
      "learning_rate": 1.7370711923791564e-07,
      "loss": 0.6691,
      "step": 158
    },
    {
      "epoch": 4.542857142857143,
      "grad_norm": 5.143361513426633,
      "learning_rate": 1.674877139430758e-07,
      "loss": 0.6432,
      "step": 159
    },
    {
      "epoch": 4.571428571428571,
      "grad_norm": 4.99547680018691,
      "learning_rate": 1.6135921418712955e-07,
      "loss": 0.6685,
      "step": 160
    },
    {
      "epoch": 4.6,
      "grad_norm": 4.6838462077186795,
      "learning_rate": 1.553232954407171e-07,
      "loss": 0.657,
      "step": 161
    },
    {
      "epoch": 4.628571428571428,
      "grad_norm": 4.841965011015589,
      "learning_rate": 1.493816078637557e-07,
      "loss": 0.6671,
      "step": 162
    },
    {
      "epoch": 4.6571428571428575,
      "grad_norm": 5.2138374952108535,
      "learning_rate": 1.435357758543015e-07,
      "loss": 0.7393,
      "step": 163
    },
    {
      "epoch": 4.685714285714286,
      "grad_norm": 4.7664344007331225,
      "learning_rate": 1.3778739760445552e-07,
      "loss": 0.6904,
      "step": 164
    },
    {
      "epoch": 4.714285714285714,
      "grad_norm": 4.757741131293643,
      "learning_rate": 1.321380446634342e-07,
      "loss": 0.7093,
      "step": 165
    },
    {
      "epoch": 4.742857142857143,
      "grad_norm": 5.064771039141748,
      "learning_rate": 1.2658926150792322e-07,
      "loss": 0.6588,
      "step": 166
    },
    {
      "epoch": 4.771428571428571,
      "grad_norm": 4.745894552114562,
      "learning_rate": 1.2114256511983274e-07,
      "loss": 0.6914,
      "step": 167
    },
    {
      "epoch": 4.8,
      "grad_norm": 4.5842953923274425,
      "learning_rate": 1.1579944457157059e-07,
      "loss": 0.6819,
      "step": 168
    },
    {
      "epoch": 4.828571428571428,
      "grad_norm": 6.02007293004415,
      "learning_rate": 1.1056136061894384e-07,
      "loss": 0.7107,
      "step": 169
    },
    {
      "epoch": 4.857142857142857,
      "grad_norm": 4.04792450108683,
      "learning_rate": 1.0542974530180327e-07,
      "loss": 0.6577,
      "step": 170
    },
    {
      "epoch": 4.885714285714286,
      "grad_norm": 4.334397719277511,
      "learning_rate": 1.0040600155253764e-07,
      "loss": 0.6926,
      "step": 171
    },
    {
      "epoch": 4.914285714285715,
      "grad_norm": 4.763783176385905,
      "learning_rate": 9.549150281252632e-08,
      "loss": 0.6824,
      "step": 172
    },
    {
      "epoch": 4.942857142857143,
      "grad_norm": 3.8987128530052075,
      "learning_rate": 9.068759265665382e-08,
      "loss": 0.6293,
      "step": 173
    },
    {
      "epoch": 4.9714285714285715,
      "grad_norm": 5.260545837340523,
      "learning_rate": 8.599558442598998e-08,
      "loss": 0.7605,
      "step": 174
    },
    {
      "epoch": 5.0,
      "grad_norm": 4.28845521901137,
      "learning_rate": 8.141676086873573e-08,
      "loss": 0.6685,
      "step": 175
    },
    {
      "epoch": 5.0285714285714285,
      "grad_norm": 4.317069591639633,
      "learning_rate": 7.695237378953224e-08,
      "loss": 0.6178,
      "step": 176
    },
    {
      "epoch": 5.057142857142857,
      "grad_norm": 4.673363595181425,
      "learning_rate": 7.260364370723043e-08,
      "loss": 0.6338,
      "step": 177
    },
    {
      "epoch": 5.085714285714285,
      "grad_norm": 4.179297774765176,
      "learning_rate": 6.837175952121304e-08,
      "loss": 0.656,
      "step": 178
    },
    {
      "epoch": 5.114285714285714,
      "grad_norm": 4.2740125604739685,
      "learning_rate": 6.42578781863613e-08,
      "loss": 0.7062,
      "step": 179
    },
    {
      "epoch": 5.142857142857143,
      "grad_norm": 3.7179175942036378,
      "learning_rate": 6.026312439675551e-08,
      "loss": 0.6132,
      "step": 180
    },
    {
      "epoch": 5.171428571428572,
      "grad_norm": 4.586691811463999,
      "learning_rate": 5.638859027819409e-08,
      "loss": 0.6846,
      "step": 181
    },
    {
      "epoch": 5.2,
      "grad_norm": 4.74824620606461,
      "learning_rate": 5.263533508961826e-08,
      "loss": 0.695,
      "step": 182
    },
    {
      "epoch": 5.228571428571429,
      "grad_norm": 4.552865468215117,
      "learning_rate": 4.9004384933520547e-08,
      "loss": 0.6363,
      "step": 183
    },
    {
      "epoch": 5.257142857142857,
      "grad_norm": 3.868803646466012,
      "learning_rate": 4.549673247541874e-08,
      "loss": 0.6197,
      "step": 184
    },
    {
      "epoch": 5.285714285714286,
      "grad_norm": 4.257422013394126,
      "learning_rate": 4.2113336672471245e-08,
      "loss": 0.6325,
      "step": 185
    },
    {
      "epoch": 5.314285714285714,
      "grad_norm": 4.138411901026596,
      "learning_rate": 3.8855122511307626e-08,
      "loss": 0.6074,
      "step": 186
    },
    {
      "epoch": 5.3428571428571425,
      "grad_norm": 4.386100060157392,
      "learning_rate": 3.572298075514652e-08,
      "loss": 0.6298,
      "step": 187
    },
    {
      "epoch": 5.371428571428572,
      "grad_norm": 3.899133464905973,
      "learning_rate": 3.271776770026963e-08,
      "loss": 0.5991,
      "step": 188
    },
    {
      "epoch": 5.4,
      "grad_norm": 5.227219263192165,
      "learning_rate": 2.9840304941919416e-08,
      "loss": 0.6917,
      "step": 189
    },
    {
      "epoch": 5.428571428571429,
      "grad_norm": 4.194540185559828,
      "learning_rate": 2.7091379149682682e-08,
      "loss": 0.6592,
      "step": 190
    },
    {
      "epoch": 5.457142857142857,
      "grad_norm": 4.103130224713111,
      "learning_rate": 2.4471741852423233e-08,
      "loss": 0.6467,
      "step": 191
    },
    {
      "epoch": 5.485714285714286,
      "grad_norm": 4.401362210384649,
      "learning_rate": 2.1982109232821176e-08,
      "loss": 0.6471,
      "step": 192
    },
    {
      "epoch": 5.514285714285714,
      "grad_norm": 4.4025758572699365,
      "learning_rate": 1.9623161931575926e-08,
      "loss": 0.6537,
      "step": 193
    },
    {
      "epoch": 5.542857142857143,
      "grad_norm": 4.940644969360135,
      "learning_rate": 1.7395544861325718e-08,
      "loss": 0.6647,
      "step": 194
    },
    {
      "epoch": 5.571428571428571,
      "grad_norm": 4.562635610658606,
      "learning_rate": 1.5299867030334813e-08,
      "loss": 0.6422,
      "step": 195
    },
    {
      "epoch": 5.6,
      "grad_norm": 4.58494040795585,
      "learning_rate": 1.3336701375997127e-08,
      "loss": 0.7141,
      "step": 196
    },
    {
      "epoch": 5.628571428571428,
      "grad_norm": 4.240912508697285,
      "learning_rate": 1.1506584608200364e-08,
      "loss": 0.6423,
      "step": 197
    },
    {
      "epoch": 5.6571428571428575,
      "grad_norm": 4.474353548561071,
      "learning_rate": 9.810017062595321e-09,
      "loss": 0.6362,
      "step": 198
    },
    {
      "epoch": 5.685714285714286,
      "grad_norm": 5.263243471264244,
      "learning_rate": 8.247462563808816e-09,
      "loss": 0.6436,
      "step": 199
    },
    {
      "epoch": 5.714285714285714,
      "grad_norm": 5.629514926131208,
      "learning_rate": 6.819348298638839e-09,
      "loss": 0.6434,
      "step": 200
    },
    {
      "epoch": 5.714285714285714,
      "eval_loss": 1.0673894882202148,
      "eval_runtime": 1.3433,
      "eval_samples_per_second": 17.122,
      "eval_steps_per_second": 0.744,
      "step": 200
    },
    {
      "epoch": 5.742857142857143,
      "grad_norm": 4.388069390318575,
      "learning_rate": 5.526064699265753e-09,
      "loss": 0.6946,
      "step": 201
    },
    {
      "epoch": 5.771428571428571,
      "grad_norm": 3.85292435393061,
      "learning_rate": 4.367965336512403e-09,
      "loss": 0.6113,
      "step": 202
    },
    {
      "epoch": 5.8,
      "grad_norm": 4.3104727339441755,
      "learning_rate": 3.3453668231809283e-09,
      "loss": 0.6493,
      "step": 203
    },
    {
      "epoch": 5.828571428571428,
      "grad_norm": 4.21000584494849,
      "learning_rate": 2.458548727494292e-09,
      "loss": 0.653,
      "step": 204
    },
    {
      "epoch": 5.857142857142857,
      "grad_norm": 5.114411845255924,
      "learning_rate": 1.7077534966650765e-09,
      "loss": 0.6527,
      "step": 205
    },
    {
      "epoch": 5.885714285714286,
      "grad_norm": 4.988848182549562,
      "learning_rate": 1.0931863906127325e-09,
      "loss": 0.6935,
      "step": 206
    },
    {
      "epoch": 5.914285714285715,
      "grad_norm": 4.005152725886777,
      "learning_rate": 6.150154258476314e-10,
      "loss": 0.6559,
      "step": 207
    },
    {
      "epoch": 5.942857142857143,
      "grad_norm": 6.395756261520298,
      "learning_rate": 2.733713295369755e-10,
      "loss": 0.693,
      "step": 208
    },
    {
      "epoch": 5.9714285714285715,
      "grad_norm": 5.465002272924693,
      "learning_rate": 6.834750376549791e-11,
      "loss": 0.7231,
      "step": 209
    },
    {
      "epoch": 6.0,
      "grad_norm": 4.273940345648053,
      "learning_rate": 0.0,
      "loss": 0.6288,
      "step": 210
    },
    {
      "epoch": 6.0,
      "step": 210,
      "total_flos": 18587454013440.0,
      "train_loss": 0.8877790099098569,
      "train_runtime": 1031.8496,
      "train_samples_per_second": 12.932,
      "train_steps_per_second": 0.204
    }
  ],
  "logging_steps": 1.0,
  "max_steps": 210,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 6,
  "save_steps": 1000,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 18587454013440.0,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}