{
"best_global_step": null,
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.1545595054095827,
"eval_steps": 500,
"global_step": 500,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0030911901081916537,
"grad_norm": 3.058769941329956,
"learning_rate": 1.9972179289026277e-05,
"loss": 5.0209,
"mean_token_accuracy": 0.21833103336393833,
"num_tokens": 28941.0,
"step": 10
},
{
"epoch": 0.0061823802163833074,
"grad_norm": 2.485980749130249,
"learning_rate": 1.994126738794436e-05,
"loss": 4.7631,
"mean_token_accuracy": 0.23807235918939113,
"num_tokens": 60990.0,
"step": 20
},
{
"epoch": 0.00927357032457496,
"grad_norm": 5.288544178009033,
"learning_rate": 1.9910355486862444e-05,
"loss": 4.6799,
"mean_token_accuracy": 0.24858475103974342,
"num_tokens": 90602.0,
"step": 30
},
{
"epoch": 0.012364760432766615,
"grad_norm": 3.226719379425049,
"learning_rate": 1.9879443585780528e-05,
"loss": 4.5883,
"mean_token_accuracy": 0.2555678006261587,
"num_tokens": 121132.0,
"step": 40
},
{
"epoch": 0.015455950540958269,
"grad_norm": 2.027597188949585,
"learning_rate": 1.984853168469861e-05,
"loss": 4.5125,
"mean_token_accuracy": 0.26637452803552153,
"num_tokens": 154244.0,
"step": 50
},
{
"epoch": 0.01854714064914992,
"grad_norm": 1.6715962886810303,
"learning_rate": 1.9817619783616695e-05,
"loss": 4.4224,
"mean_token_accuracy": 0.27198897041380404,
"num_tokens": 185618.0,
"step": 60
},
{
"epoch": 0.021638330757341576,
"grad_norm": 3.0472540855407715,
"learning_rate": 1.9786707882534775e-05,
"loss": 4.3464,
"mean_token_accuracy": 0.2801041007041931,
"num_tokens": 220138.0,
"step": 70
},
{
"epoch": 0.02472952086553323,
"grad_norm": 2.74045729637146,
"learning_rate": 1.9755795981452862e-05,
"loss": 4.4569,
"mean_token_accuracy": 0.2733523309230804,
"num_tokens": 252919.0,
"step": 80
},
{
"epoch": 0.027820710973724884,
"grad_norm": 1.9274535179138184,
"learning_rate": 1.9724884080370946e-05,
"loss": 4.2569,
"mean_token_accuracy": 0.2858310595154762,
"num_tokens": 284739.0,
"step": 90
},
{
"epoch": 0.030911901081916538,
"grad_norm": 1.906964898109436,
"learning_rate": 1.9693972179289026e-05,
"loss": 4.2144,
"mean_token_accuracy": 0.2953581381589174,
"num_tokens": 315186.0,
"step": 100
},
{
"epoch": 0.03400309119010819,
"grad_norm": 2.042823314666748,
"learning_rate": 1.966306027820711e-05,
"loss": 4.2595,
"mean_token_accuracy": 0.287172843888402,
"num_tokens": 347507.0,
"step": 110
},
{
"epoch": 0.03709428129829984,
"grad_norm": 1.9889057874679565,
"learning_rate": 1.9632148377125197e-05,
"loss": 4.1643,
"mean_token_accuracy": 0.30306958928704264,
"num_tokens": 380331.0,
"step": 120
},
{
"epoch": 0.0401854714064915,
"grad_norm": 1.7952933311462402,
"learning_rate": 1.9601236476043277e-05,
"loss": 4.1334,
"mean_token_accuracy": 0.3080880597233772,
"num_tokens": 410947.0,
"step": 130
},
{
"epoch": 0.04327666151468315,
"grad_norm": 3.8508658409118652,
"learning_rate": 1.957032457496136e-05,
"loss": 4.0262,
"mean_token_accuracy": 0.3118089348077774,
"num_tokens": 444130.0,
"step": 140
},
{
"epoch": 0.04636785162287481,
"grad_norm": 31.280546188354492,
"learning_rate": 1.9539412673879444e-05,
"loss": 3.9889,
"mean_token_accuracy": 0.32304659858345985,
"num_tokens": 474570.0,
"step": 150
},
{
"epoch": 0.04945904173106646,
"grad_norm": 2.1569416522979736,
"learning_rate": 1.950850077279753e-05,
"loss": 4.1053,
"mean_token_accuracy": 0.31258094161748884,
"num_tokens": 505047.0,
"step": 160
},
{
"epoch": 0.05255023183925812,
"grad_norm": 1.808377981185913,
"learning_rate": 1.947758887171561e-05,
"loss": 3.9711,
"mean_token_accuracy": 0.33312112018465995,
"num_tokens": 533187.0,
"step": 170
},
{
"epoch": 0.05564142194744977,
"grad_norm": 2.8038811683654785,
"learning_rate": 1.9446676970633695e-05,
"loss": 3.961,
"mean_token_accuracy": 0.33416116759181025,
"num_tokens": 565598.0,
"step": 180
},
{
"epoch": 0.05873261205564142,
"grad_norm": 8.930831909179688,
"learning_rate": 1.941576506955178e-05,
"loss": 4.0107,
"mean_token_accuracy": 0.3261258576065302,
"num_tokens": 598505.0,
"step": 190
},
{
"epoch": 0.061823802163833076,
"grad_norm": 4.706038475036621,
"learning_rate": 1.9384853168469862e-05,
"loss": 3.9231,
"mean_token_accuracy": 0.33582728281617164,
"num_tokens": 630284.0,
"step": 200
},
{
"epoch": 0.06491499227202473,
"grad_norm": 2.9916040897369385,
"learning_rate": 1.9353941267387946e-05,
"loss": 3.9407,
"mean_token_accuracy": 0.32844844460487366,
"num_tokens": 660513.0,
"step": 210
},
{
"epoch": 0.06800618238021638,
"grad_norm": 2.763737678527832,
"learning_rate": 1.932302936630603e-05,
"loss": 4.0479,
"mean_token_accuracy": 0.3248734712600708,
"num_tokens": 693154.0,
"step": 220
},
{
"epoch": 0.07109737248840804,
"grad_norm": 3.656487464904785,
"learning_rate": 1.9292117465224113e-05,
"loss": 3.7843,
"mean_token_accuracy": 0.34686593189835546,
"num_tokens": 724686.0,
"step": 230
},
{
"epoch": 0.07418856259659969,
"grad_norm": 2.6140244007110596,
"learning_rate": 1.9261205564142196e-05,
"loss": 3.9261,
"mean_token_accuracy": 0.3310456670820713,
"num_tokens": 755625.0,
"step": 240
},
{
"epoch": 0.07727975270479134,
"grad_norm": 1.595627784729004,
"learning_rate": 1.923029366306028e-05,
"loss": 3.841,
"mean_token_accuracy": 0.34514380544424056,
"num_tokens": 785478.0,
"step": 250
},
{
"epoch": 0.080370942812983,
"grad_norm": 2.0485758781433105,
"learning_rate": 1.9199381761978363e-05,
"loss": 3.7596,
"mean_token_accuracy": 0.35417362824082377,
"num_tokens": 816351.0,
"step": 260
},
{
"epoch": 0.08346213292117466,
"grad_norm": 1.7564281225204468,
"learning_rate": 1.9168469860896447e-05,
"loss": 3.7927,
"mean_token_accuracy": 0.3487237967550755,
"num_tokens": 849774.0,
"step": 270
},
{
"epoch": 0.0865533230293663,
"grad_norm": 1.5662060976028442,
"learning_rate": 1.913755795981453e-05,
"loss": 3.7821,
"mean_token_accuracy": 0.3511029303073883,
"num_tokens": 881474.0,
"step": 280
},
{
"epoch": 0.08964451313755796,
"grad_norm": 2.539433002471924,
"learning_rate": 1.9106646058732614e-05,
"loss": 3.8613,
"mean_token_accuracy": 0.3471171148121357,
"num_tokens": 913459.0,
"step": 290
},
{
"epoch": 0.09273570324574962,
"grad_norm": 4.281046390533447,
"learning_rate": 1.9075734157650694e-05,
"loss": 3.8436,
"mean_token_accuracy": 0.3475985363125801,
"num_tokens": 947091.0,
"step": 300
},
{
"epoch": 0.09582689335394126,
"grad_norm": 2.379791736602783,
"learning_rate": 1.904482225656878e-05,
"loss": 3.8309,
"mean_token_accuracy": 0.34195478409528735,
"num_tokens": 983471.0,
"step": 310
},
{
"epoch": 0.09891808346213292,
"grad_norm": 2.4176697731018066,
"learning_rate": 1.9013910355486865e-05,
"loss": 3.75,
"mean_token_accuracy": 0.3633588753640652,
"num_tokens": 1011575.0,
"step": 320
},
{
"epoch": 0.10200927357032458,
"grad_norm": 3.375523328781128,
"learning_rate": 1.898299845440495e-05,
"loss": 3.7556,
"mean_token_accuracy": 0.36073010191321375,
"num_tokens": 1042031.0,
"step": 330
},
{
"epoch": 0.10510046367851623,
"grad_norm": 5.099122524261475,
"learning_rate": 1.895208655332303e-05,
"loss": 3.6861,
"mean_token_accuracy": 0.36301063373684883,
"num_tokens": 1074689.0,
"step": 340
},
{
"epoch": 0.10819165378670788,
"grad_norm": 1.4775930643081665,
"learning_rate": 1.8921174652241116e-05,
"loss": 3.6744,
"mean_token_accuracy": 0.3681299857795238,
"num_tokens": 1105719.0,
"step": 350
},
{
"epoch": 0.11128284389489954,
"grad_norm": 3.931447744369507,
"learning_rate": 1.88902627511592e-05,
"loss": 3.6248,
"mean_token_accuracy": 0.37062914595007895,
"num_tokens": 1136185.0,
"step": 360
},
{
"epoch": 0.1143740340030912,
"grad_norm": 2.6153130531311035,
"learning_rate": 1.885935085007728e-05,
"loss": 3.6971,
"mean_token_accuracy": 0.35686987787485125,
"num_tokens": 1166461.0,
"step": 370
},
{
"epoch": 0.11746522411128284,
"grad_norm": 3.0943849086761475,
"learning_rate": 1.8828438948995363e-05,
"loss": 3.7057,
"mean_token_accuracy": 0.36670113652944564,
"num_tokens": 1194661.0,
"step": 380
},
{
"epoch": 0.1205564142194745,
"grad_norm": 1.760920524597168,
"learning_rate": 1.879752704791345e-05,
"loss": 3.6806,
"mean_token_accuracy": 0.366318603605032,
"num_tokens": 1224670.0,
"step": 390
},
{
"epoch": 0.12364760432766615,
"grad_norm": 1.8976062536239624,
"learning_rate": 1.8766615146831534e-05,
"loss": 3.7318,
"mean_token_accuracy": 0.36500929966568946,
"num_tokens": 1252830.0,
"step": 400
},
{
"epoch": 0.1267387944358578,
"grad_norm": 1.950358510017395,
"learning_rate": 1.8735703245749614e-05,
"loss": 3.7106,
"mean_token_accuracy": 0.3675060346722603,
"num_tokens": 1285546.0,
"step": 410
},
{
"epoch": 0.12982998454404945,
"grad_norm": 2.707167148590088,
"learning_rate": 1.8704791344667697e-05,
"loss": 3.6688,
"mean_token_accuracy": 0.37156677842140196,
"num_tokens": 1316466.0,
"step": 420
},
{
"epoch": 0.13292117465224113,
"grad_norm": 2.084510564804077,
"learning_rate": 1.8673879443585784e-05,
"loss": 3.6758,
"mean_token_accuracy": 0.3684880450367928,
"num_tokens": 1349121.0,
"step": 430
},
{
"epoch": 0.13601236476043277,
"grad_norm": 2.2626636028289795,
"learning_rate": 1.8642967542503865e-05,
"loss": 3.7214,
"mean_token_accuracy": 0.35720510333776473,
"num_tokens": 1384366.0,
"step": 440
},
{
"epoch": 0.1391035548686244,
"grad_norm": 2.4145290851593018,
"learning_rate": 1.8612055641421948e-05,
"loss": 3.7261,
"mean_token_accuracy": 0.36429562568664553,
"num_tokens": 1415357.0,
"step": 450
},
{
"epoch": 0.14219474497681608,
"grad_norm": 2.7409212589263916,
"learning_rate": 1.8581143740340032e-05,
"loss": 3.7368,
"mean_token_accuracy": 0.3619597226381302,
"num_tokens": 1445641.0,
"step": 460
},
{
"epoch": 0.14528593508500773,
"grad_norm": 4.5937275886535645,
"learning_rate": 1.8550231839258115e-05,
"loss": 3.6911,
"mean_token_accuracy": 0.3683665543794632,
"num_tokens": 1475360.0,
"step": 470
},
{
"epoch": 0.14837712519319937,
"grad_norm": 12.00837516784668,
"learning_rate": 1.85193199381762e-05,
"loss": 3.6899,
"mean_token_accuracy": 0.3648961283266544,
"num_tokens": 1506048.0,
"step": 480
},
{
"epoch": 0.15146831530139104,
"grad_norm": 2.5790181159973145,
"learning_rate": 1.8488408037094283e-05,
"loss": 3.6948,
"mean_token_accuracy": 0.36360194012522695,
"num_tokens": 1539998.0,
"step": 490
},
{
"epoch": 0.1545595054095827,
"grad_norm": 1.8515182733535767,
"learning_rate": 1.8457496136012366e-05,
"loss": 3.65,
"mean_token_accuracy": 0.36655392646789553,
"num_tokens": 1573641.0,
"step": 500
}
],
"logging_steps": 10,
"max_steps": 6470,
"num_input_tokens_seen": 0,
"num_train_epochs": 2,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 906803839715328.0,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}