{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 2.0,
  "eval_steps": 500,
  "global_step": 174,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.011494252873563218,
      "grad_norm": 2.040087588568768,
      "learning_rate": 5.555555555555555e-07,
      "loss": 2.1134,
      "step": 1
    },
    {
      "epoch": 0.05747126436781609,
      "grad_norm": 1.5065184898286288,
      "learning_rate": 2.7777777777777783e-06,
      "loss": 2.108,
      "step": 5
    },
    {
      "epoch": 0.11494252873563218,
      "grad_norm": 0.6052866447590097,
      "learning_rate": 5.555555555555557e-06,
      "loss": 1.4177,
      "step": 10
    },
    {
      "epoch": 0.1724137931034483,
      "grad_norm": 0.21045147584699722,
      "learning_rate": 8.333333333333334e-06,
      "loss": 0.8948,
      "step": 15
    },
    {
      "epoch": 0.22988505747126436,
      "grad_norm": 0.12856209828710086,
      "learning_rate": 9.995944990857848e-06,
      "loss": 0.6954,
      "step": 20
    },
    {
      "epoch": 0.28735632183908044,
      "grad_norm": 0.10871678835639248,
      "learning_rate": 9.950401639805822e-06,
      "loss": 0.5977,
      "step": 25
    },
    {
      "epoch": 0.3448275862068966,
      "grad_norm": 0.10645674532135609,
      "learning_rate": 9.854709087130261e-06,
      "loss": 0.5494,
      "step": 30
    },
    {
      "epoch": 0.40229885057471265,
      "grad_norm": 0.11751091495401653,
      "learning_rate": 9.709836729956326e-06,
      "loss": 0.5337,
      "step": 35
    },
    {
      "epoch": 0.45977011494252873,
      "grad_norm": 0.09060531478913308,
      "learning_rate": 9.517252173051912e-06,
      "loss": 0.4915,
      "step": 40
    },
    {
      "epoch": 0.5172413793103449,
      "grad_norm": 0.09772503765904073,
      "learning_rate": 9.278906361507238e-06,
      "loss": 0.4843,
      "step": 45
    },
    {
      "epoch": 0.5747126436781609,
      "grad_norm": 0.07336423386596751,
      "learning_rate": 8.997213817017508e-06,
      "loss": 0.4568,
      "step": 50
    },
    {
      "epoch": 0.632183908045977,
      "grad_norm": 0.07823020401929619,
      "learning_rate": 8.675028177981643e-06,
      "loss": 0.4348,
      "step": 55
    },
    {
      "epoch": 0.6896551724137931,
      "grad_norm": 0.08701416209592616,
      "learning_rate": 8.315613291203977e-06,
      "loss": 0.4556,
      "step": 60
    },
    {
      "epoch": 0.7471264367816092,
      "grad_norm": 0.07898059942451459,
      "learning_rate": 7.922610148049445e-06,
      "loss": 0.4388,
      "step": 65
    },
    {
      "epoch": 0.8045977011494253,
      "grad_norm": 0.0654201661759577,
      "learning_rate": 7.500000000000001e-06,
      "loss": 0.436,
      "step": 70
    },
    {
      "epoch": 0.8620689655172413,
      "grad_norm": 0.06503764998734388,
      "learning_rate": 7.052064027263785e-06,
      "loss": 0.4382,
      "step": 75
    },
    {
      "epoch": 0.9195402298850575,
      "grad_norm": 0.0728022326080332,
      "learning_rate": 6.583339969007364e-06,
      "loss": 0.4148,
      "step": 80
    },
    {
      "epoch": 0.9770114942528736,
      "grad_norm": 0.06497997558868164,
      "learning_rate": 6.0985761545610865e-06,
      "loss": 0.4193,
      "step": 85
    },
    {
      "epoch": 1.0,
      "eval_loss": 0.40544483065605164,
      "eval_runtime": 19.8771,
      "eval_samples_per_second": 19.218,
      "eval_steps_per_second": 4.83,
      "step": 87
    },
    {
      "epoch": 1.0344827586206897,
      "grad_norm": 0.05245032339850799,
      "learning_rate": 5.6026834012766155e-06,
      "loss": 0.3796,
      "step": 90
    },
    {
      "epoch": 1.0919540229885056,
      "grad_norm": 0.06254588777314657,
      "learning_rate": 5.100685266327202e-06,
      "loss": 0.3968,
      "step": 95
    },
    {
      "epoch": 1.1494252873563218,
      "grad_norm": 0.05443663559664676,
      "learning_rate": 4.597667156416371e-06,
      "loss": 0.382,
      "step": 100
    },
    {
      "epoch": 1.206896551724138,
      "grad_norm": 0.06350044846551632,
      "learning_rate": 4.098724810930472e-06,
      "loss": 0.3872,
      "step": 105
    },
    {
      "epoch": 1.264367816091954,
      "grad_norm": 0.06178958458943511,
      "learning_rate": 3.6089126804177373e-06,
      "loss": 0.3867,
      "step": 110
    },
    {
      "epoch": 1.3218390804597702,
      "grad_norm": 0.0541384265524022,
      "learning_rate": 3.1331927233368954e-06,
      "loss": 0.3698,
      "step": 115
    },
    {
      "epoch": 1.3793103448275863,
      "grad_norm": 0.058026939719336854,
      "learning_rate": 2.6763841397811576e-06,
      "loss": 0.4053,
      "step": 120
    },
    {
      "epoch": 1.4367816091954024,
      "grad_norm": 0.05756867266178145,
      "learning_rate": 2.243114551391542e-06,
      "loss": 0.4088,
      "step": 125
    },
    {
      "epoch": 1.4942528735632183,
      "grad_norm": 0.06769952103102554,
      "learning_rate": 1.8377731220231144e-06,
      "loss": 0.4057,
      "step": 130
    },
    {
      "epoch": 1.5517241379310345,
      "grad_norm": 0.06511504174110457,
      "learning_rate": 1.4644660940672628e-06,
      "loss": 0.4037,
      "step": 135
    },
    {
      "epoch": 1.6091954022988506,
      "grad_norm": 0.057131814434957386,
      "learning_rate": 1.1269751908617277e-06,
      "loss": 0.393,
      "step": 140
    },
    {
      "epoch": 1.6666666666666665,
      "grad_norm": 0.056808678950532855,
      "learning_rate": 8.287193065856936e-07,
      "loss": 0.3644,
      "step": 145
    },
    {
      "epoch": 1.7241379310344827,
      "grad_norm": 0.06184558069266949,
      "learning_rate": 5.727198717339511e-07,
      "loss": 0.3626,
      "step": 150
    },
    {
      "epoch": 1.7816091954022988,
      "grad_norm": 0.06453137970254372,
      "learning_rate": 3.615702450292857e-07,
      "loss": 0.3765,
      "step": 155
    },
    {
      "epoch": 1.839080459770115,
      "grad_norm": 0.059373820193991525,
      "learning_rate": 1.9740944184313882e-07,
      "loss": 0.3729,
      "step": 160
    },
    {
      "epoch": 1.896551724137931,
      "grad_norm": 0.06410532863768878,
      "learning_rate": 8.190046526428241e-08,
      "loss": 0.3807,
      "step": 165
    },
    {
      "epoch": 1.9540229885057472,
      "grad_norm": 0.058183398222304034,
      "learning_rate": 1.6213459328950355e-08,
      "loss": 0.3759,
      "step": 170
    },
    {
      "epoch": 2.0,
      "eval_loss": 0.39200636744499207,
      "eval_runtime": 19.784,
      "eval_samples_per_second": 19.309,
      "eval_steps_per_second": 4.852,
      "step": 174
    },
    {
      "epoch": 2.0,
      "step": 174,
      "total_flos": 4.4261488579313664e+17,
      "train_loss": 0.5207254633136179,
      "train_runtime": 1928.8215,
      "train_samples_per_second": 4.327,
      "train_steps_per_second": 0.09
    }
  ],
  "logging_steps": 5,
  "max_steps": 174,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 2,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 4.4261488579313664e+17,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}