{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.6048780487804878,
  "eval_steps": 500,
  "global_step": 310,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.01951219512195122,
      "grad_norm": 0.6663638353347778,
      "learning_rate": 0.00019934683213585893,
      "loss": 0.5255,
      "step": 10
    },
    {
      "epoch": 0.03902439024390244,
      "grad_norm": 0.7442628741264343,
      "learning_rate": 0.00019804049640757677,
      "loss": 0.3367,
      "step": 20
    },
    {
      "epoch": 0.05853658536585366,
      "grad_norm": 0.504467785358429,
      "learning_rate": 0.0001967341606792946,
      "loss": 0.3368,
      "step": 30
    },
    {
      "epoch": 0.07804878048780488,
      "grad_norm": 0.339693546295166,
      "learning_rate": 0.00019542782495101242,
      "loss": 0.3064,
      "step": 40
    },
    {
      "epoch": 0.0975609756097561,
      "grad_norm": 0.267976313829422,
      "learning_rate": 0.00019412148922273026,
      "loss": 0.3108,
      "step": 50
    },
    {
      "epoch": 0.11707317073170732,
      "grad_norm": 0.48640233278274536,
      "learning_rate": 0.00019281515349444807,
      "loss": 0.3069,
      "step": 60
    },
    {
      "epoch": 0.13658536585365855,
      "grad_norm": 0.7269986867904663,
      "learning_rate": 0.0001915088177661659,
      "loss": 0.3134,
      "step": 70
    },
    {
      "epoch": 0.15609756097560976,
      "grad_norm": 0.31376832723617554,
      "learning_rate": 0.00019020248203788375,
      "loss": 0.2915,
      "step": 80
    },
    {
      "epoch": 0.17560975609756097,
      "grad_norm": 0.6166387796401978,
      "learning_rate": 0.00018889614630960156,
      "loss": 0.2862,
      "step": 90
    },
    {
      "epoch": 0.1951219512195122,
      "grad_norm": 0.6621638536453247,
      "learning_rate": 0.0001875898105813194,
      "loss": 0.263,
      "step": 100
    },
    {
      "epoch": 0.2146341463414634,
      "grad_norm": 0.2815336287021637,
      "learning_rate": 0.00018628347485303724,
      "loss": 0.2747,
      "step": 110
    },
    {
      "epoch": 0.23414634146341465,
      "grad_norm": 0.5862469673156738,
      "learning_rate": 0.00018497713912475508,
      "loss": 0.2833,
      "step": 120
    },
    {
      "epoch": 0.25365853658536586,
      "grad_norm": 0.5362260937690735,
      "learning_rate": 0.00018367080339647292,
      "loss": 0.2613,
      "step": 130
    },
    {
      "epoch": 0.2731707317073171,
      "grad_norm": 0.7799074053764343,
      "learning_rate": 0.00018236446766819073,
      "loss": 0.2535,
      "step": 140
    },
    {
      "epoch": 0.2926829268292683,
      "grad_norm": 0.8866592645645142,
      "learning_rate": 0.00018105813193990857,
      "loss": 0.2603,
      "step": 150
    },
    {
      "epoch": 0.3121951219512195,
      "grad_norm": 0.9003716707229614,
      "learning_rate": 0.00017975179621162638,
      "loss": 0.27,
      "step": 160
    },
    {
      "epoch": 0.33170731707317075,
      "grad_norm": 0.5946381092071533,
      "learning_rate": 0.00017844546048334422,
      "loss": 0.2572,
      "step": 170
    },
    {
      "epoch": 0.35121951219512193,
      "grad_norm": 0.8860711455345154,
      "learning_rate": 0.00017713912475506206,
      "loss": 0.2839,
      "step": 180
    },
    {
      "epoch": 0.37073170731707317,
      "grad_norm": 0.8693526983261108,
      "learning_rate": 0.0001758327890267799,
      "loss": 0.2477,
      "step": 190
    },
    {
      "epoch": 0.3902439024390244,
      "grad_norm": 0.9044304490089417,
      "learning_rate": 0.00017452645329849774,
      "loss": 0.2674,
      "step": 200
    },
    {
      "epoch": 0.4097560975609756,
      "grad_norm": 0.5563161969184875,
      "learning_rate": 0.00017322011757021555,
      "loss": 0.2436,
      "step": 210
    },
    {
      "epoch": 0.4292682926829268,
      "grad_norm": 1.1451846361160278,
      "learning_rate": 0.0001719137818419334,
      "loss": 0.25,
      "step": 220
    },
    {
      "epoch": 0.44878048780487806,
      "grad_norm": 0.8895041942596436,
      "learning_rate": 0.00017060744611365123,
      "loss": 0.2542,
      "step": 230
    },
    {
      "epoch": 0.4682926829268293,
      "grad_norm": 0.8991382718086243,
      "learning_rate": 0.00016930111038536904,
      "loss": 0.2523,
      "step": 240
    },
    {
      "epoch": 0.4878048780487805,
      "grad_norm": 1.0106490850448608,
      "learning_rate": 0.00016799477465708688,
      "loss": 0.2554,
      "step": 250
    },
    {
      "epoch": 0.5073170731707317,
      "grad_norm": 0.5570860505104065,
      "learning_rate": 0.0001666884389288047,
      "loss": 0.2431,
      "step": 260
    },
    {
      "epoch": 0.526829268292683,
      "grad_norm": 1.1715517044067383,
      "learning_rate": 0.00016538210320052253,
      "loss": 0.2383,
      "step": 270
    },
    {
      "epoch": 0.5463414634146342,
      "grad_norm": 0.9527117609977722,
      "learning_rate": 0.00016407576747224037,
      "loss": 0.2222,
      "step": 280
    },
    {
      "epoch": 0.5658536585365853,
      "grad_norm": 1.012949824333191,
      "learning_rate": 0.0001627694317439582,
      "loss": 0.2743,
      "step": 290
    },
    {
      "epoch": 0.5853658536585366,
      "grad_norm": 1.157406210899353,
      "learning_rate": 0.00016146309601567605,
      "loss": 0.261,
      "step": 300
    },
    {
      "epoch": 0.6048780487804878,
      "grad_norm": 1.044209599494934,
      "learning_rate": 0.00016015676028739386,
      "loss": 0.2346,
      "step": 310
    }
  ],
  "logging_steps": 10,
  "max_steps": 1536,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 10,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 5.840431360392192e+16,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}