{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.9555555555555557,
  "eval_steps": 500,
  "global_step": 66,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.11851851851851852,
      "grad_norm": 3.8725790977478027,
      "learning_rate": 5.714285714285714e-05,
      "logits/chosen": 0.5535547137260437,
      "logits/rejected": 0.6790706515312195,
      "logps/chosen": -154.02122497558594,
      "logps/rejected": -190.33836364746094,
      "loss": 0.6916,
      "rewards/accuracies": 0.28125,
      "rewards/chosen": -0.00438268156722188,
      "rewards/margins": 0.0031897304579615593,
      "rewards/rejected": -0.0075724124908447266,
      "step": 4
    },
    {
      "epoch": 0.23703703703703705,
      "grad_norm": 4.261082649230957,
      "learning_rate": 9.830508474576272e-05,
      "logits/chosen": 0.8175854086875916,
      "logits/rejected": 0.7639511823654175,
      "logps/chosen": -176.618408203125,
      "logps/rejected": -232.55316162109375,
      "loss": 0.6661,
      "rewards/accuracies": 0.71875,
      "rewards/chosen": -0.024643946439027786,
      "rewards/margins": 0.05728544294834137,
      "rewards/rejected": -0.08192938566207886,
      "step": 8
    },
    {
      "epoch": 0.35555555555555557,
      "grad_norm": 3.7274258136749268,
      "learning_rate": 9.152542372881357e-05,
      "logits/chosen": 0.5340245366096497,
      "logits/rejected": 0.748319685459137,
      "logps/chosen": -137.61412048339844,
      "logps/rejected": -184.79991149902344,
      "loss": 0.6328,
      "rewards/accuracies": 0.71875,
      "rewards/chosen": -0.10764569044113159,
      "rewards/margins": 0.14540965855121613,
      "rewards/rejected": -0.2530553638935089,
      "step": 12
    },
    {
      "epoch": 0.4740740740740741,
      "grad_norm": 4.1018290519714355,
      "learning_rate": 8.474576271186441e-05,
      "logits/chosen": 0.43400460481643677,
      "logits/rejected": 0.4534505605697632,
      "logps/chosen": -113.1705551147461,
      "logps/rejected": -201.7666778564453,
      "loss": 0.5399,
      "rewards/accuracies": 0.84375,
      "rewards/chosen": -0.11035704612731934,
      "rewards/margins": 0.3914012014865875,
      "rewards/rejected": -0.5017582178115845,
      "step": 16
    },
    {
      "epoch": 0.5925925925925926,
      "grad_norm": 7.585125923156738,
      "learning_rate": 7.796610169491526e-05,
      "logits/chosen": 0.3885829448699951,
      "logits/rejected": 0.4270927906036377,
      "logps/chosen": -162.44253540039062,
      "logps/rejected": -242.7151336669922,
      "loss": 0.5997,
      "rewards/accuracies": 0.65625,
      "rewards/chosen": -0.5063179731369019,
      "rewards/margins": 0.40314924716949463,
      "rewards/rejected": -0.9094672799110413,
      "step": 20
    },
    {
      "epoch": 0.7111111111111111,
      "grad_norm": 4.802177429199219,
      "learning_rate": 7.11864406779661e-05,
      "logits/chosen": 0.39339229464530945,
      "logits/rejected": 0.1176382452249527,
      "logps/chosen": -157.732177734375,
      "logps/rejected": -208.583251953125,
      "loss": 0.4315,
      "rewards/accuracies": 0.8125,
      "rewards/chosen": -0.5111438632011414,
      "rewards/margins": 0.9935877323150635,
      "rewards/rejected": -1.5047316551208496,
      "step": 24
    },
    {
      "epoch": 0.8296296296296296,
      "grad_norm": 8.156561851501465,
      "learning_rate": 6.440677966101695e-05,
      "logits/chosen": 0.011842329055070877,
      "logits/rejected": 0.35012856125831604,
      "logps/chosen": -158.82598876953125,
      "logps/rejected": -232.102783203125,
      "loss": 0.6494,
      "rewards/accuracies": 0.6875,
      "rewards/chosen": -0.8290945291519165,
      "rewards/margins": 0.6025375723838806,
      "rewards/rejected": -1.4316319227218628,
      "step": 28
    },
    {
      "epoch": 0.9481481481481482,
      "grad_norm": 6.632073402404785,
      "learning_rate": 5.76271186440678e-05,
      "logits/chosen": -0.034538522362709045,
      "logits/rejected": 0.06912083178758621,
      "logps/chosen": -163.13931274414062,
      "logps/rejected": -159.48757934570312,
      "loss": 0.6065,
      "rewards/accuracies": 0.78125,
      "rewards/chosen": -1.0138874053955078,
      "rewards/margins": 0.7364886403083801,
      "rewards/rejected": -1.7503761053085327,
      "step": 32
    },
    {
      "epoch": 1.0666666666666667,
      "grad_norm": 4.534868240356445,
      "learning_rate": 5.0847457627118643e-05,
      "logits/chosen": 0.30354946851730347,
      "logits/rejected": 0.13392731547355652,
      "logps/chosen": -154.13514709472656,
      "logps/rejected": -247.21238708496094,
      "loss": 0.4295,
      "rewards/accuracies": 0.78125,
      "rewards/chosen": -0.7905898094177246,
      "rewards/margins": 1.3606148958206177,
      "rewards/rejected": -2.1512045860290527,
      "step": 36
    },
    {
      "epoch": 1.1851851851851851,
      "grad_norm": 8.053945541381836,
      "learning_rate": 4.4067796610169495e-05,
      "logits/chosen": 0.31644004583358765,
      "logits/rejected": 0.34593597054481506,
      "logps/chosen": -198.6423797607422,
      "logps/rejected": -201.91390991210938,
      "loss": 0.4743,
      "rewards/accuracies": 0.8125,
      "rewards/chosen": -0.8822494149208069,
      "rewards/margins": 0.9695678949356079,
      "rewards/rejected": -1.8518173694610596,
      "step": 40
    },
    {
      "epoch": 1.3037037037037038,
      "grad_norm": 3.4382383823394775,
      "learning_rate": 3.728813559322034e-05,
      "logits/chosen": 0.21577686071395874,
      "logits/rejected": 0.19070810079574585,
      "logps/chosen": -151.60324096679688,
      "logps/rejected": -234.2410888671875,
      "loss": 0.3084,
      "rewards/accuracies": 0.9375,
      "rewards/chosen": -0.48244649171829224,
      "rewards/margins": 1.5388765335083008,
      "rewards/rejected": -2.0213229656219482,
      "step": 44
    },
    {
      "epoch": 1.4222222222222223,
      "grad_norm": 3.3008296489715576,
      "learning_rate": 3.050847457627119e-05,
      "logits/chosen": 0.20948533713817596,
      "logits/rejected": 0.24637725949287415,
      "logps/chosen": -140.76052856445312,
      "logps/rejected": -215.05499267578125,
      "loss": 0.2868,
      "rewards/accuracies": 0.9375,
      "rewards/chosen": -0.4717501699924469,
      "rewards/margins": 1.771106243133545,
      "rewards/rejected": -2.242856502532959,
      "step": 48
    },
    {
      "epoch": 1.5407407407407407,
      "grad_norm": 11.502059936523438,
      "learning_rate": 2.3728813559322036e-05,
      "logits/chosen": 0.08824478089809418,
      "logits/rejected": -0.027387090027332306,
      "logps/chosen": -148.2930450439453,
      "logps/rejected": -225.59078979492188,
      "loss": 0.5723,
      "rewards/accuracies": 0.6875,
      "rewards/chosen": -1.0404824018478394,
      "rewards/margins": 1.086774230003357,
      "rewards/rejected": -2.1272566318511963,
      "step": 52
    },
    {
      "epoch": 1.6592592592592592,
      "grad_norm": 4.403205394744873,
      "learning_rate": 1.694915254237288e-05,
      "logits/chosen": 0.2315160483121872,
      "logits/rejected": 0.18600693345069885,
      "logps/chosen": -134.02783203125,
      "logps/rejected": -226.99459838867188,
      "loss": 0.2737,
      "rewards/accuracies": 0.875,
      "rewards/chosen": -0.5924757719039917,
      "rewards/margins": 1.9642682075500488,
      "rewards/rejected": -2.55674409866333,
      "step": 56
    },
    {
      "epoch": 1.7777777777777777,
      "grad_norm": 4.00092077255249,
      "learning_rate": 1.016949152542373e-05,
      "logits/chosen": 0.10627911239862442,
      "logits/rejected": 0.08208482712507248,
      "logps/chosen": -172.69593811035156,
      "logps/rejected": -211.60107421875,
      "loss": 0.3421,
      "rewards/accuracies": 0.875,
      "rewards/chosen": -0.770930826663971,
      "rewards/margins": 1.9104766845703125,
      "rewards/rejected": -2.6814074516296387,
      "step": 60
    },
    {
      "epoch": 1.8962962962962964,
      "grad_norm": 4.906458854675293,
      "learning_rate": 3.3898305084745763e-06,
      "logits/chosen": -0.028121717274188995,
      "logits/rejected": 0.011860378086566925,
      "logps/chosen": -121.95062255859375,
      "logps/rejected": -200.142578125,
      "loss": 0.3773,
      "rewards/accuracies": 0.84375,
      "rewards/chosen": -0.47138434648513794,
      "rewards/margins": 1.7951738834381104,
      "rewards/rejected": -2.2665581703186035,
      "step": 64
    }
  ],
  "logging_steps": 4,
  "max_steps": 66,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 2,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 0.0,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}