{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.4097560975609756,
"eval_steps": 500,
"global_step": 210,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.01951219512195122,
"grad_norm": 0.6663638353347778,
"learning_rate": 0.00019934683213585893,
"loss": 0.5255,
"step": 10
},
{
"epoch": 0.03902439024390244,
"grad_norm": 0.7442628741264343,
"learning_rate": 0.00019804049640757677,
"loss": 0.3367,
"step": 20
},
{
"epoch": 0.05853658536585366,
"grad_norm": 0.504467785358429,
"learning_rate": 0.0001967341606792946,
"loss": 0.3368,
"step": 30
},
{
"epoch": 0.07804878048780488,
"grad_norm": 0.339693546295166,
"learning_rate": 0.00019542782495101242,
"loss": 0.3064,
"step": 40
},
{
"epoch": 0.0975609756097561,
"grad_norm": 0.267976313829422,
"learning_rate": 0.00019412148922273026,
"loss": 0.3108,
"step": 50
},
{
"epoch": 0.11707317073170732,
"grad_norm": 0.48640233278274536,
"learning_rate": 0.00019281515349444807,
"loss": 0.3069,
"step": 60
},
{
"epoch": 0.13658536585365855,
"grad_norm": 0.7269986867904663,
"learning_rate": 0.0001915088177661659,
"loss": 0.3134,
"step": 70
},
{
"epoch": 0.15609756097560976,
"grad_norm": 0.31376832723617554,
"learning_rate": 0.00019020248203788375,
"loss": 0.2915,
"step": 80
},
{
"epoch": 0.17560975609756097,
"grad_norm": 0.6166387796401978,
"learning_rate": 0.00018889614630960156,
"loss": 0.2862,
"step": 90
},
{
"epoch": 0.1951219512195122,
"grad_norm": 0.6621638536453247,
"learning_rate": 0.0001875898105813194,
"loss": 0.263,
"step": 100
},
{
"epoch": 0.2146341463414634,
"grad_norm": 0.2815336287021637,
"learning_rate": 0.00018628347485303724,
"loss": 0.2747,
"step": 110
},
{
"epoch": 0.23414634146341465,
"grad_norm": 0.5862469673156738,
"learning_rate": 0.00018497713912475508,
"loss": 0.2833,
"step": 120
},
{
"epoch": 0.25365853658536586,
"grad_norm": 0.5362260937690735,
"learning_rate": 0.00018367080339647292,
"loss": 0.2613,
"step": 130
},
{
"epoch": 0.2731707317073171,
"grad_norm": 0.7799074053764343,
"learning_rate": 0.00018236446766819073,
"loss": 0.2535,
"step": 140
},
{
"epoch": 0.2926829268292683,
"grad_norm": 0.8866592645645142,
"learning_rate": 0.00018105813193990857,
"loss": 0.2603,
"step": 150
},
{
"epoch": 0.3121951219512195,
"grad_norm": 0.9003716707229614,
"learning_rate": 0.00017975179621162638,
"loss": 0.27,
"step": 160
},
{
"epoch": 0.33170731707317075,
"grad_norm": 0.5946381092071533,
"learning_rate": 0.00017844546048334422,
"loss": 0.2572,
"step": 170
},
{
"epoch": 0.35121951219512193,
"grad_norm": 0.8860711455345154,
"learning_rate": 0.00017713912475506206,
"loss": 0.2839,
"step": 180
},
{
"epoch": 0.37073170731707317,
"grad_norm": 0.8693526983261108,
"learning_rate": 0.0001758327890267799,
"loss": 0.2477,
"step": 190
},
{
"epoch": 0.3902439024390244,
"grad_norm": 0.9044304490089417,
"learning_rate": 0.00017452645329849774,
"loss": 0.2674,
"step": 200
},
{
"epoch": 0.4097560975609756,
"grad_norm": 0.5563161969184875,
"learning_rate": 0.00017322011757021555,
"loss": 0.2436,
"step": 210
}
],
"logging_steps": 10,
"max_steps": 1536,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 10,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 3.955413070613299e+16,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}