{
  "best_metric": 0.0650700181722641,
  "best_model_checkpoint": "nb-bert-base-pos-ltgnorne-lr3e-5-e5/checkpoint-3924",
  "epoch": 4.0,
  "eval_steps": 500,
  "global_step": 3924,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0254841997961264,
      "grad_norm": 4.497822284698486,
      "learning_rate": 1.4663951120162932e-06,
      "loss": 2.8613,
      "step": 25
    },
    {
      "epoch": 0.0509683995922528,
      "grad_norm": 4.501702308654785,
      "learning_rate": 2.993890020366599e-06,
      "loss": 2.6378,
      "step": 50
    },
    {
      "epoch": 0.0764525993883792,
      "grad_norm": 3.225400447845459,
      "learning_rate": 4.521384928716905e-06,
      "loss": 2.3048,
      "step": 75
    },
    {
      "epoch": 0.1019367991845056,
      "grad_norm": 2.7836780548095703,
      "learning_rate": 6.048879837067211e-06,
      "loss": 1.9637,
      "step": 100
    },
    {
      "epoch": 0.127420998980632,
      "grad_norm": 2.729814291000366,
      "learning_rate": 7.5763747454175154e-06,
      "loss": 1.5022,
      "step": 125
    },
    {
      "epoch": 0.1529051987767584,
      "grad_norm": 2.058290958404541,
      "learning_rate": 9.103869653767821e-06,
      "loss": 0.9212,
      "step": 150
    },
    {
      "epoch": 0.1783893985728848,
      "grad_norm": 2.431959629058838,
      "learning_rate": 1.0631364562118128e-05,
      "loss": 0.4504,
      "step": 175
    },
    {
      "epoch": 0.2038735983690112,
      "grad_norm": 3.45493221282959,
      "learning_rate": 1.2158859470468432e-05,
      "loss": 0.2543,
      "step": 200
    },
    {
      "epoch": 0.22935779816513763,
      "grad_norm": 5.069894790649414,
      "learning_rate": 1.3686354378818739e-05,
      "loss": 0.1895,
      "step": 225
    },
    {
      "epoch": 0.254841997961264,
      "grad_norm": 2.2221596240997314,
      "learning_rate": 1.5213849287169042e-05,
      "loss": 0.1606,
      "step": 250
    },
    {
      "epoch": 0.2803261977573904,
      "grad_norm": 0.943159282207489,
      "learning_rate": 1.6741344195519347e-05,
      "loss": 0.1191,
      "step": 275
    },
    {
      "epoch": 0.3058103975535168,
      "grad_norm": 1.4674568176269531,
      "learning_rate": 1.8268839103869655e-05,
      "loss": 0.1128,
      "step": 300
    },
    {
      "epoch": 0.3312945973496432,
      "grad_norm": 2.1226422786712646,
      "learning_rate": 1.979633401221996e-05,
      "loss": 0.118,
      "step": 325
    },
    {
      "epoch": 0.3567787971457696,
      "grad_norm": 1.1651442050933838,
      "learning_rate": 2.1323828920570265e-05,
      "loss": 0.1069,
      "step": 350
    },
    {
      "epoch": 0.382262996941896,
      "grad_norm": 2.2320973873138428,
      "learning_rate": 2.285132382892057e-05,
      "loss": 0.1289,
      "step": 375
    },
    {
      "epoch": 0.4077471967380224,
      "grad_norm": 2.247786521911621,
      "learning_rate": 2.4378818737270878e-05,
      "loss": 0.1052,
      "step": 400
    },
    {
      "epoch": 0.4332313965341488,
      "grad_norm": 3.606870651245117,
      "learning_rate": 2.5906313645621183e-05,
      "loss": 0.0896,
      "step": 425
    },
    {
      "epoch": 0.45871559633027525,
      "grad_norm": 0.5746941566467285,
      "learning_rate": 2.7433808553971487e-05,
      "loss": 0.093,
      "step": 450
    },
    {
      "epoch": 0.4841997961264016,
      "grad_norm": 1.509137749671936,
      "learning_rate": 2.8961303462321792e-05,
      "loss": 0.0928,
      "step": 475
    },
    {
      "epoch": 0.509683995922528,
      "grad_norm": 1.3736240863800049,
      "learning_rate": 2.9945627548708655e-05,
      "loss": 0.0933,
      "step": 500
    },
    {
      "epoch": 0.5351681957186545,
      "grad_norm": 2.923942804336548,
      "learning_rate": 2.9775713638423202e-05,
      "loss": 0.0717,
      "step": 525
    },
    {
      "epoch": 0.5606523955147809,
      "grad_norm": 1.3372660875320435,
      "learning_rate": 2.9605799728137742e-05,
      "loss": 0.0757,
      "step": 550
    },
    {
      "epoch": 0.5861365953109072,
      "grad_norm": 2.0181891918182373,
      "learning_rate": 2.943588581785229e-05,
      "loss": 0.0796,
      "step": 575
    },
    {
      "epoch": 0.6116207951070336,
      "grad_norm": 1.258780837059021,
      "learning_rate": 2.9265971907566832e-05,
      "loss": 0.076,
      "step": 600
    },
    {
      "epoch": 0.6371049949031601,
      "grad_norm": 2.01127028465271,
      "learning_rate": 2.909605799728138e-05,
      "loss": 0.0791,
      "step": 625
    },
    {
      "epoch": 0.6625891946992865,
      "grad_norm": 1.2377755641937256,
      "learning_rate": 2.8926144086995923e-05,
      "loss": 0.065,
      "step": 650
    },
    {
      "epoch": 0.6880733944954128,
      "grad_norm": 2.2438857555389404,
      "learning_rate": 2.8756230176710466e-05,
      "loss": 0.0716,
      "step": 675
    },
    {
      "epoch": 0.7135575942915392,
      "grad_norm": 1.5645134449005127,
      "learning_rate": 2.8586316266425013e-05,
      "loss": 0.057,
      "step": 700
    },
    {
      "epoch": 0.7390417940876657,
      "grad_norm": 1.144615650177002,
      "learning_rate": 2.841640235613956e-05,
      "loss": 0.0598,
      "step": 725
    },
    {
      "epoch": 0.764525993883792,
      "grad_norm": 1.8201572895050049,
      "learning_rate": 2.82464884458541e-05,
      "loss": 0.0648,
      "step": 750
    },
    {
      "epoch": 0.7900101936799184,
      "grad_norm": 0.7320142388343811,
      "learning_rate": 2.8076574535568647e-05,
      "loss": 0.0561,
      "step": 775
    },
    {
      "epoch": 0.8154943934760448,
      "grad_norm": 1.0732890367507935,
      "learning_rate": 2.790666062528319e-05,
      "loss": 0.0498,
      "step": 800
    },
    {
      "epoch": 0.8409785932721713,
      "grad_norm": 2.709826707839966,
      "learning_rate": 2.7736746714997733e-05,
      "loss": 0.0701,
      "step": 825
    },
    {
      "epoch": 0.8664627930682977,
      "grad_norm": 1.808638572692871,
      "learning_rate": 2.756683280471228e-05,
      "loss": 0.0676,
      "step": 850
    },
    {
      "epoch": 0.891946992864424,
      "grad_norm": 5.431212425231934,
      "learning_rate": 2.7396918894426824e-05,
      "loss": 0.0504,
      "step": 875
    },
    {
      "epoch": 0.9174311926605505,
      "grad_norm": 2.724369525909424,
      "learning_rate": 2.722700498414137e-05,
      "loss": 0.0539,
      "step": 900
    },
    {
      "epoch": 0.9429153924566769,
      "grad_norm": 0.9031899571418762,
      "learning_rate": 2.7057091073855914e-05,
      "loss": 0.0479,
      "step": 925
    },
    {
      "epoch": 0.9683995922528033,
      "grad_norm": 1.0398321151733398,
      "learning_rate": 2.6887177163570457e-05,
      "loss": 0.0569,
      "step": 950
    },
    {
      "epoch": 0.9938837920489296,
      "grad_norm": 4.719447612762451,
      "learning_rate": 2.6717263253285004e-05,
      "loss": 0.0587,
      "step": 975
    },
    {
      "epoch": 1.0,
      "eval_accuracy": 0.9830243515786693,
      "eval_f1": 0.9817582841221721,
      "eval_loss": 0.0652645155787468,
      "eval_precision": 0.980058076479026,
      "eval_recall": 0.9834644010672658,
      "eval_runtime": 4.374,
      "eval_samples_per_second": 443.301,
      "eval_steps_per_second": 13.946,
      "step": 981
    },
    {
      "epoch": 1.019367991845056,
      "grad_norm": 0.5373720526695251,
      "learning_rate": 2.6547349342999548e-05,
      "loss": 0.0465,
      "step": 1000
    },
    {
      "epoch": 1.0448521916411824,
      "grad_norm": 0.31597161293029785,
      "learning_rate": 2.637743543271409e-05,
      "loss": 0.0347,
      "step": 1025
    },
    {
      "epoch": 1.070336391437309,
      "grad_norm": 0.08823961019515991,
      "learning_rate": 2.6207521522428638e-05,
      "loss": 0.0387,
      "step": 1050
    },
    {
      "epoch": 1.0958205912334353,
      "grad_norm": 1.0735955238342285,
      "learning_rate": 2.603760761214318e-05,
      "loss": 0.0327,
      "step": 1075
    },
    {
      "epoch": 1.1213047910295617,
      "grad_norm": 0.6932256817817688,
      "learning_rate": 2.5867693701857728e-05,
      "loss": 0.0332,
      "step": 1100
    },
    {
      "epoch": 1.146788990825688,
      "grad_norm": 1.1093873977661133,
      "learning_rate": 2.569777979157227e-05,
      "loss": 0.0456,
      "step": 1125
    },
    {
      "epoch": 1.1722731906218145,
      "grad_norm": 3.3177976608276367,
      "learning_rate": 2.5527865881286815e-05,
      "loss": 0.0381,
      "step": 1150
    },
    {
      "epoch": 1.1977573904179408,
      "grad_norm": 5.12001371383667,
      "learning_rate": 2.535795197100136e-05,
      "loss": 0.0351,
      "step": 1175
    },
    {
      "epoch": 1.2232415902140672,
      "grad_norm": 0.8894160389900208,
      "learning_rate": 2.5188038060715902e-05,
      "loss": 0.034,
      "step": 1200
    },
    {
      "epoch": 1.2487257900101936,
      "grad_norm": 0.8692567348480225,
      "learning_rate": 2.501812415043045e-05,
      "loss": 0.0482,
      "step": 1225
    },
    {
      "epoch": 1.2742099898063202,
      "grad_norm": 0.5012984275817871,
      "learning_rate": 2.4848210240144995e-05,
      "loss": 0.0515,
      "step": 1250
    },
    {
      "epoch": 1.2996941896024465,
      "grad_norm": 1.6234453916549683,
      "learning_rate": 2.467829632985954e-05,
      "loss": 0.0303,
      "step": 1275
    },
    {
      "epoch": 1.325178389398573,
      "grad_norm": 0.32807931303977966,
      "learning_rate": 2.4508382419574082e-05,
      "loss": 0.0371,
      "step": 1300
    },
    {
      "epoch": 1.3506625891946993,
      "grad_norm": 0.29069948196411133,
      "learning_rate": 2.4338468509288626e-05,
      "loss": 0.0381,
      "step": 1325
    },
    {
      "epoch": 1.3761467889908257,
      "grad_norm": 0.2267286479473114,
      "learning_rate": 2.4168554599003172e-05,
      "loss": 0.0335,
      "step": 1350
    },
    {
      "epoch": 1.401630988786952,
      "grad_norm": 0.6649707555770874,
      "learning_rate": 2.399864068871772e-05,
      "loss": 0.028,
      "step": 1375
    },
    {
      "epoch": 1.4271151885830786,
      "grad_norm": 0.4773367941379547,
      "learning_rate": 2.382872677843226e-05,
      "loss": 0.0378,
      "step": 1400
    },
    {
      "epoch": 1.452599388379205,
      "grad_norm": 0.7523585557937622,
      "learning_rate": 2.3658812868146806e-05,
      "loss": 0.0276,
      "step": 1425
    },
    {
      "epoch": 1.4780835881753314,
      "grad_norm": 0.694980800151825,
      "learning_rate": 2.3488898957861353e-05,
      "loss": 0.0488,
      "step": 1450
    },
    {
      "epoch": 1.5035677879714577,
      "grad_norm": 1.879299521446228,
      "learning_rate": 2.3318985047575896e-05,
      "loss": 0.0336,
      "step": 1475
    },
    {
      "epoch": 1.529051987767584,
      "grad_norm": 0.8800904750823975,
      "learning_rate": 2.314907113729044e-05,
      "loss": 0.035,
      "step": 1500
    },
    {
      "epoch": 1.5545361875637105,
      "grad_norm": 3.8236799240112305,
      "learning_rate": 2.2979157227004983e-05,
      "loss": 0.035,
      "step": 1525
    },
    {
      "epoch": 1.5800203873598369,
      "grad_norm": 0.3992440402507782,
      "learning_rate": 2.280924331671953e-05,
      "loss": 0.0292,
      "step": 1550
    },
    {
      "epoch": 1.6055045871559632,
      "grad_norm": 0.6917338371276855,
      "learning_rate": 2.2639329406434073e-05,
      "loss": 0.0318,
      "step": 1575
    },
    {
      "epoch": 1.6309887869520896,
      "grad_norm": 2.00313663482666,
      "learning_rate": 2.2469415496148617e-05,
      "loss": 0.0357,
      "step": 1600
    },
    {
      "epoch": 1.656472986748216,
      "grad_norm": 0.45046761631965637,
      "learning_rate": 2.2299501585863164e-05,
      "loss": 0.0408,
      "step": 1625
    },
    {
      "epoch": 1.6819571865443423,
      "grad_norm": 0.23316547274589539,
      "learning_rate": 2.212958767557771e-05,
      "loss": 0.0357,
      "step": 1650
    },
    {
      "epoch": 1.707441386340469,
      "grad_norm": 0.5247312784194946,
      "learning_rate": 2.195967376529225e-05,
      "loss": 0.0386,
      "step": 1675
    },
    {
      "epoch": 1.7329255861365953,
      "grad_norm": 6.273862361907959,
      "learning_rate": 2.1789759855006797e-05,
      "loss": 0.0321,
      "step": 1700
    },
    {
      "epoch": 1.7584097859327217,
      "grad_norm": 0.7023279666900635,
      "learning_rate": 2.161984594472134e-05,
      "loss": 0.0399,
      "step": 1725
    },
    {
      "epoch": 1.7838939857288483,
      "grad_norm": 2.0238797664642334,
      "learning_rate": 2.1449932034435888e-05,
      "loss": 0.038,
      "step": 1750
    },
    {
      "epoch": 1.8093781855249746,
      "grad_norm": 0.5117468237876892,
      "learning_rate": 2.128001812415043e-05,
      "loss": 0.0293,
      "step": 1775
    },
    {
      "epoch": 1.834862385321101,
      "grad_norm": 0.052200186997652054,
      "learning_rate": 2.1110104213864974e-05,
      "loss": 0.0231,
      "step": 1800
    },
    {
      "epoch": 1.8603465851172274,
      "grad_norm": 0.2978757619857788,
      "learning_rate": 2.094019030357952e-05,
      "loss": 0.0253,
      "step": 1825
    },
    {
      "epoch": 1.8858307849133538,
      "grad_norm": 0.7025816440582275,
      "learning_rate": 2.0770276393294068e-05,
      "loss": 0.037,
      "step": 1850
    },
    {
      "epoch": 1.9113149847094801,
      "grad_norm": 2.105027914047241,
      "learning_rate": 2.0600362483008608e-05,
      "loss": 0.0402,
      "step": 1875
    },
    {
      "epoch": 1.9367991845056065,
      "grad_norm": 1.320366621017456,
      "learning_rate": 2.0430448572723155e-05,
      "loss": 0.0299,
      "step": 1900
    },
    {
      "epoch": 1.9622833843017329,
      "grad_norm": 1.0839900970458984,
      "learning_rate": 2.02605346624377e-05,
      "loss": 0.0465,
      "step": 1925
    },
    {
      "epoch": 1.9877675840978593,
      "grad_norm": 1.192915678024292,
      "learning_rate": 2.0090620752152242e-05,
      "loss": 0.0356,
      "step": 1950
    },
    {
      "epoch": 2.0,
      "eval_accuracy": 0.9837729534545775,
      "eval_f1": 0.9838220601896482,
      "eval_loss": 0.06676298379898071,
      "eval_precision": 0.9823585004725401,
      "eval_recall": 0.9852899873613257,
      "eval_runtime": 4.1484,
      "eval_samples_per_second": 467.414,
      "eval_steps_per_second": 14.705,
      "step": 1962
    },
    {
      "epoch": 2.0132517838939856,
      "grad_norm": 0.05750567093491554,
      "learning_rate": 1.992070684186679e-05,
      "loss": 0.0254,
      "step": 1975
    },
    {
      "epoch": 2.038735983690112,
      "grad_norm": 0.5030219554901123,
      "learning_rate": 1.9750792931581332e-05,
      "loss": 0.016,
      "step": 2000
    },
    {
      "epoch": 2.0642201834862384,
      "grad_norm": 0.5581088066101074,
      "learning_rate": 1.958087902129588e-05,
      "loss": 0.0274,
      "step": 2025
    },
    {
      "epoch": 2.0897043832823647,
      "grad_norm": 0.4084581434726715,
      "learning_rate": 1.941096511101042e-05,
      "loss": 0.0185,
      "step": 2050
    },
    {
      "epoch": 2.115188583078491,
      "grad_norm": 0.5078397989273071,
      "learning_rate": 1.9241051200724966e-05,
      "loss": 0.012,
      "step": 2075
    },
    {
      "epoch": 2.140672782874618,
      "grad_norm": 1.139233946800232,
      "learning_rate": 1.9071137290439512e-05,
      "loss": 0.0219,
      "step": 2100
    },
    {
      "epoch": 2.1661569826707443,
      "grad_norm": 0.3767656683921814,
      "learning_rate": 1.8901223380154056e-05,
      "loss": 0.0232,
      "step": 2125
    },
    {
      "epoch": 2.1916411824668707,
      "grad_norm": 0.4100117087364197,
      "learning_rate": 1.87313094698686e-05,
      "loss": 0.0209,
      "step": 2150
    },
    {
      "epoch": 2.217125382262997,
      "grad_norm": 0.3346911370754242,
      "learning_rate": 1.8561395559583146e-05,
      "loss": 0.0142,
      "step": 2175
    },
    {
      "epoch": 2.2426095820591234,
      "grad_norm": 0.44971033930778503,
      "learning_rate": 1.839148164929769e-05,
      "loss": 0.0207,
      "step": 2200
    },
    {
      "epoch": 2.26809378185525,
      "grad_norm": 0.0442005917429924,
      "learning_rate": 1.8228364295423653e-05,
      "loss": 0.0255,
      "step": 2225
    },
    {
      "epoch": 2.293577981651376,
      "grad_norm": 0.018714234232902527,
      "learning_rate": 1.8058450385138197e-05,
      "loss": 0.0149,
      "step": 2250
    },
    {
      "epoch": 2.3190621814475025,
      "grad_norm": 1.526605248451233,
      "learning_rate": 1.788853647485274e-05,
      "loss": 0.0182,
      "step": 2275
    },
    {
      "epoch": 2.344546381243629,
      "grad_norm": 0.1787564605474472,
      "learning_rate": 1.7718622564567287e-05,
      "loss": 0.0153,
      "step": 2300
    },
    {
      "epoch": 2.3700305810397553,
      "grad_norm": 1.280836582183838,
      "learning_rate": 1.755550521069325e-05,
      "loss": 0.0259,
      "step": 2325
    },
    {
      "epoch": 2.3955147808358817,
      "grad_norm": 0.41557249426841736,
      "learning_rate": 1.7385591300407794e-05,
      "loss": 0.0192,
      "step": 2350
    },
    {
      "epoch": 2.420998980632008,
      "grad_norm": 0.4954489469528198,
      "learning_rate": 1.7215677390122338e-05,
      "loss": 0.0244,
      "step": 2375
    },
    {
      "epoch": 2.4464831804281344,
      "grad_norm": 0.25688356161117554,
      "learning_rate": 1.7045763479836884e-05,
      "loss": 0.0178,
      "step": 2400
    },
    {
      "epoch": 2.4719673802242608,
      "grad_norm": 0.1957683563232422,
      "learning_rate": 1.6875849569551428e-05,
      "loss": 0.0193,
      "step": 2425
    },
    {
      "epoch": 2.497451580020387,
      "grad_norm": 0.0999796912074089,
      "learning_rate": 1.670593565926597e-05,
      "loss": 0.0239,
      "step": 2450
    },
    {
      "epoch": 2.522935779816514,
      "grad_norm": 0.20732557773590088,
      "learning_rate": 1.6536021748980518e-05,
      "loss": 0.0213,
      "step": 2475
    },
    {
      "epoch": 2.5484199796126403,
      "grad_norm": 0.1995435357093811,
      "learning_rate": 1.636610783869506e-05,
      "loss": 0.0187,
      "step": 2500
    },
    {
      "epoch": 2.5739041794087667,
      "grad_norm": 0.25311583280563354,
      "learning_rate": 1.6196193928409605e-05,
      "loss": 0.0272,
      "step": 2525
    },
    {
      "epoch": 2.599388379204893,
      "grad_norm": 0.4466765224933624,
      "learning_rate": 1.602628001812415e-05,
      "loss": 0.0212,
      "step": 2550
    },
    {
      "epoch": 2.6248725790010194,
      "grad_norm": 0.12018584460020065,
      "learning_rate": 1.5856366107838695e-05,
      "loss": 0.023,
      "step": 2575
    },
    {
      "epoch": 2.650356778797146,
      "grad_norm": 0.6461846232414246,
      "learning_rate": 1.5686452197553242e-05,
      "loss": 0.023,
      "step": 2600
    },
    {
      "epoch": 2.675840978593272,
      "grad_norm": 0.08898350596427917,
      "learning_rate": 1.5516538287267785e-05,
      "loss": 0.0208,
      "step": 2625
    },
    {
      "epoch": 2.7013251783893986,
      "grad_norm": 0.18236540257930756,
      "learning_rate": 1.534662437698233e-05,
      "loss": 0.0172,
      "step": 2650
    },
    {
      "epoch": 2.726809378185525,
      "grad_norm": 0.09674384444952011,
      "learning_rate": 1.5176710466696876e-05,
      "loss": 0.0176,
      "step": 2675
    },
    {
      "epoch": 2.7522935779816513,
      "grad_norm": 0.5571973919868469,
      "learning_rate": 1.5006796556411417e-05,
      "loss": 0.0219,
      "step": 2700
    },
    {
      "epoch": 2.7777777777777777,
      "grad_norm": 0.7178367376327515,
      "learning_rate": 1.4836882646125962e-05,
      "loss": 0.0175,
      "step": 2725
    },
    {
      "epoch": 2.803261977573904,
      "grad_norm": 1.9719717502593994,
      "learning_rate": 1.4666968735840508e-05,
      "loss": 0.0152,
      "step": 2750
    },
    {
      "epoch": 2.8287461773700304,
      "grad_norm": 0.782957136631012,
      "learning_rate": 1.4497054825555051e-05,
      "loss": 0.0199,
      "step": 2775
    },
    {
      "epoch": 2.8542303771661572,
      "grad_norm": 1.0010164976119995,
      "learning_rate": 1.4327140915269598e-05,
      "loss": 0.0181,
      "step": 2800
    },
    {
      "epoch": 2.879714576962283,
      "grad_norm": 0.12285976111888885,
      "learning_rate": 1.4157227004984141e-05,
      "loss": 0.0167,
      "step": 2825
    },
    {
      "epoch": 2.90519877675841,
      "grad_norm": 0.4028477668762207,
      "learning_rate": 1.3987313094698686e-05,
      "loss": 0.0194,
      "step": 2850
    },
    {
      "epoch": 2.930682976554536,
      "grad_norm": 0.14099347591400146,
      "learning_rate": 1.381739918441323e-05,
      "loss": 0.0137,
      "step": 2875
    },
    {
      "epoch": 2.9561671763506627,
      "grad_norm": 0.23822110891342163,
      "learning_rate": 1.3647485274127777e-05,
      "loss": 0.0142,
      "step": 2900
    },
    {
      "epoch": 2.981651376146789,
      "grad_norm": 0.32884514331817627,
      "learning_rate": 1.347757136384232e-05,
      "loss": 0.0159,
      "step": 2925
    },
    {
      "epoch": 3.0,
      "eval_accuracy": 0.984609626139416,
      "eval_f1": 0.9847512882532338,
      "eval_loss": 0.07444457709789276,
      "eval_precision": 0.983269163458173,
      "eval_recall": 0.9862378879370874,
      "eval_runtime": 4.2552,
      "eval_samples_per_second": 455.676,
      "eval_steps_per_second": 14.335,
      "step": 2943
    },
    {
      "epoch": 3.0071355759429155,
      "grad_norm": 0.492354154586792,
      "learning_rate": 1.3307657453556865e-05,
      "loss": 0.0185,
      "step": 2950
    },
    {
      "epoch": 3.032619775739042,
      "grad_norm": 0.17601044476032257,
      "learning_rate": 1.3137743543271409e-05,
      "loss": 0.0124,
      "step": 2975
    },
    {
      "epoch": 3.058103975535168,
      "grad_norm": 0.3945290446281433,
      "learning_rate": 1.2967829632985955e-05,
      "loss": 0.0092,
      "step": 3000
    },
    {
      "epoch": 3.0835881753312946,
      "grad_norm": 0.4073391258716583,
      "learning_rate": 1.2797915722700499e-05,
      "loss": 0.0073,
      "step": 3025
    },
    {
      "epoch": 3.109072375127421,
      "grad_norm": 0.31910163164138794,
      "learning_rate": 1.2628001812415044e-05,
      "loss": 0.0102,
      "step": 3050
    },
    {
      "epoch": 3.1345565749235473,
      "grad_norm": 0.15057243406772614,
      "learning_rate": 1.2458087902129587e-05,
      "loss": 0.0134,
      "step": 3075
    },
    {
      "epoch": 3.1600407747196737,
      "grad_norm": 0.24187231063842773,
      "learning_rate": 1.2288173991844132e-05,
      "loss": 0.0128,
      "step": 3100
    },
    {
      "epoch": 3.1855249745158,
      "grad_norm": 0.2588340640068054,
      "learning_rate": 1.2118260081558678e-05,
      "loss": 0.0083,
      "step": 3125
    },
    {
      "epoch": 3.2110091743119265,
      "grad_norm": 0.2568691372871399,
      "learning_rate": 1.1948346171273221e-05,
      "loss": 0.0126,
      "step": 3150
    },
    {
      "epoch": 3.236493374108053,
      "grad_norm": 0.17430084943771362,
      "learning_rate": 1.1778432260987766e-05,
      "loss": 0.0092,
      "step": 3175
    },
    {
      "epoch": 3.261977573904179,
      "grad_norm": 0.8101614713668823,
      "learning_rate": 1.1608518350702311e-05,
      "loss": 0.0116,
      "step": 3200
    },
    {
      "epoch": 3.287461773700306,
      "grad_norm": 0.5102636218070984,
      "learning_rate": 1.1438604440416856e-05,
      "loss": 0.0089,
      "step": 3225
    },
    {
      "epoch": 3.3129459734964324,
      "grad_norm": 0.3798113763332367,
      "learning_rate": 1.12686905301314e-05,
      "loss": 0.02,
      "step": 3250
    },
    {
      "epoch": 3.3384301732925588,
      "grad_norm": 0.09234433621168137,
      "learning_rate": 1.1098776619845945e-05,
      "loss": 0.0086,
      "step": 3275
    },
    {
      "epoch": 3.363914373088685,
      "grad_norm": 0.2918802499771118,
      "learning_rate": 1.092886270956049e-05,
      "loss": 0.0114,
      "step": 3300
    },
    {
      "epoch": 3.3893985728848115,
      "grad_norm": 1.4922930002212524,
      "learning_rate": 1.0758948799275035e-05,
      "loss": 0.0082,
      "step": 3325
    },
    {
      "epoch": 3.414882772680938,
      "grad_norm": 0.1827608048915863,
      "learning_rate": 1.0589034888989579e-05,
      "loss": 0.0086,
      "step": 3350
    },
    {
      "epoch": 3.4403669724770642,
      "grad_norm": 1.0763217210769653,
      "learning_rate": 1.0419120978704124e-05,
      "loss": 0.0091,
      "step": 3375
    },
    {
      "epoch": 3.4658511722731906,
      "grad_norm": 0.6879101395606995,
      "learning_rate": 1.0249207068418669e-05,
      "loss": 0.0089,
      "step": 3400
    },
    {
      "epoch": 3.491335372069317,
      "grad_norm": 0.19432714581489563,
      "learning_rate": 1.0079293158133214e-05,
      "loss": 0.0116,
      "step": 3425
    },
    {
      "epoch": 3.5168195718654434,
      "grad_norm": 1.8157362937927246,
      "learning_rate": 9.909379247847757e-06,
      "loss": 0.0098,
      "step": 3450
    },
    {
      "epoch": 3.5423037716615697,
      "grad_norm": 1.372643232345581,
      "learning_rate": 9.739465337562302e-06,
      "loss": 0.0116,
      "step": 3475
    },
    {
      "epoch": 3.567787971457696,
      "grad_norm": 0.549085259437561,
      "learning_rate": 9.569551427276846e-06,
      "loss": 0.0141,
      "step": 3500
    },
    {
      "epoch": 3.5932721712538225,
      "grad_norm": 0.33806735277175903,
      "learning_rate": 9.399637516991391e-06,
      "loss": 0.0135,
      "step": 3525
    },
    {
      "epoch": 3.6187563710499493,
      "grad_norm": 0.3857904374599457,
      "learning_rate": 9.229723606705936e-06,
      "loss": 0.0114,
      "step": 3550
    },
    {
      "epoch": 3.6442405708460752,
      "grad_norm": 0.23262465000152588,
      "learning_rate": 9.05980969642048e-06,
      "loss": 0.0097,
      "step": 3575
    },
    {
      "epoch": 3.669724770642202,
      "grad_norm": 0.26560139656066895,
      "learning_rate": 8.889895786135025e-06,
      "loss": 0.0099,
      "step": 3600
    },
    {
      "epoch": 3.695208970438328,
      "grad_norm": 0.18409286439418793,
      "learning_rate": 8.71998187584957e-06,
      "loss": 0.0108,
      "step": 3625
    },
    {
      "epoch": 3.720693170234455,
      "grad_norm": 0.47193974256515503,
      "learning_rate": 8.550067965564115e-06,
      "loss": 0.0118,
      "step": 3650
    },
    {
      "epoch": 3.746177370030581,
      "grad_norm": 0.3622497022151947,
      "learning_rate": 8.380154055278658e-06,
      "loss": 0.0079,
      "step": 3675
    },
    {
      "epoch": 3.7716615698267075,
      "grad_norm": 0.04132566228508949,
      "learning_rate": 8.210240144993203e-06,
      "loss": 0.0265,
      "step": 3700
    },
    {
      "epoch": 3.797145769622834,
      "grad_norm": 0.27798858284950256,
      "learning_rate": 8.040326234707749e-06,
      "loss": 0.0077,
      "step": 3725
    },
    {
      "epoch": 3.8226299694189603,
      "grad_norm": 0.34569182991981506,
      "learning_rate": 7.870412324422294e-06,
      "loss": 0.0135,
      "step": 3750
    },
    {
      "epoch": 3.8481141692150866,
      "grad_norm": 0.2387148141860962,
      "learning_rate": 7.700498414136837e-06,
      "loss": 0.0086,
      "step": 3775
    },
    {
      "epoch": 3.873598369011213,
      "grad_norm": 0.12353157252073288,
      "learning_rate": 7.530584503851381e-06,
      "loss": 0.0104,
      "step": 3800
    },
    {
      "epoch": 3.8990825688073394,
      "grad_norm": 0.038017626851797104,
      "learning_rate": 7.3606705935659265e-06,
      "loss": 0.0173,
      "step": 3825
    },
    {
      "epoch": 3.9245667686034658,
      "grad_norm": 4.909209251403809,
      "learning_rate": 7.190756683280472e-06,
      "loss": 0.0158,
      "step": 3850
    },
    {
      "epoch": 3.950050968399592,
      "grad_norm": 0.4103800654411316,
      "learning_rate": 7.020842772995016e-06,
      "loss": 0.009,
      "step": 3875
    },
    {
      "epoch": 3.9755351681957185,
      "grad_norm": 0.574148952960968,
      "learning_rate": 6.850928862709561e-06,
      "loss": 0.0122,
      "step": 3900
    },
    {
      "epoch": 4.0,
      "eval_accuracy": 0.9861949007001629,
      "eval_f1": 0.9864320022438031,
      "eval_loss": 0.0650700181722641,
      "eval_precision": 0.985085078075765,
      "eval_recall": 0.9877826148012919,
      "eval_runtime": 4.3604,
      "eval_samples_per_second": 444.679,
      "eval_steps_per_second": 13.989,
      "step": 3924
    }
  ],
  "logging_steps": 25,
  "max_steps": 4905,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 5,
  "save_steps": 500,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 5,
        "early_stopping_threshold": 0.01
      },
      "attributes": {
        "early_stopping_patience_counter": 3
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 1.6407480366006272e+16,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}