llama-68m / last-checkpoint / trainer_state.json
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 46.15384615384615,
"eval_steps": 50,
"global_step": 600,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.07692307692307693,
"eval_loss": 3.9168343544006348,
"eval_runtime": 5.6922,
"eval_samples_per_second": 263.695,
"eval_steps_per_second": 4.216,
"step": 1
},
{
"epoch": 0.7692307692307693,
"grad_norm": 1.3671875,
"learning_rate": 6.666666666666667e-05,
"loss": 3.3633,
"step": 10
},
{
"epoch": 1.5384615384615383,
"grad_norm": 1.09375,
"learning_rate": 0.00013333333333333334,
"loss": 3.1311,
"step": 20
},
{
"epoch": 2.3076923076923075,
"grad_norm": 0.8203125,
"learning_rate": 0.0002,
"loss": 2.9153,
"step": 30
},
{
"epoch": 3.076923076923077,
"grad_norm": 0.7265625,
"learning_rate": 0.00019984815164333163,
"loss": 2.7539,
"step": 40
},
{
"epoch": 3.8461538461538463,
"grad_norm": 0.71875,
"learning_rate": 0.00019939306773179497,
"loss": 2.5978,
"step": 50
},
{
"epoch": 3.8461538461538463,
"eval_loss": 2.814912796020508,
"eval_runtime": 5.762,
"eval_samples_per_second": 260.499,
"eval_steps_per_second": 4.165,
"step": 50
},
{
"epoch": 4.615384615384615,
"grad_norm": 0.71484375,
"learning_rate": 0.00019863613034027224,
"loss": 2.4698,
"step": 60
},
{
"epoch": 5.384615384615385,
"grad_norm": 0.7734375,
"learning_rate": 0.00019757963826274357,
"loss": 2.3909,
"step": 70
},
{
"epoch": 6.153846153846154,
"grad_norm": 0.765625,
"learning_rate": 0.00019622680003092503,
"loss": 2.2723,
"step": 80
},
{
"epoch": 6.923076923076923,
"grad_norm": 0.796875,
"learning_rate": 0.00019458172417006347,
"loss": 2.1816,
"step": 90
},
{
"epoch": 7.6923076923076925,
"grad_norm": 0.8203125,
"learning_rate": 0.00019264940672148018,
"loss": 2.0808,
"step": 100
},
{
"epoch": 7.6923076923076925,
"eval_loss": 2.966378927230835,
"eval_runtime": 5.6204,
"eval_samples_per_second": 267.064,
"eval_steps_per_second": 4.27,
"step": 100
},
{
"epoch": 8.461538461538462,
"grad_norm": 0.83984375,
"learning_rate": 0.00019043571606975777,
"loss": 1.9902,
"step": 110
},
{
"epoch": 9.23076923076923,
"grad_norm": 0.99609375,
"learning_rate": 0.0001879473751206489,
"loss": 1.9033,
"step": 120
},
{
"epoch": 10.0,
"grad_norm": 0.83984375,
"learning_rate": 0.00018519194088383273,
"loss": 1.8173,
"step": 130
},
{
"epoch": 10.76923076923077,
"grad_norm": 0.87890625,
"learning_rate": 0.0001821777815225245,
"loss": 1.7058,
"step": 140
},
{
"epoch": 11.538461538461538,
"grad_norm": 0.9296875,
"learning_rate": 0.00017891405093963938,
"loss": 1.6294,
"step": 150
},
{
"epoch": 11.538461538461538,
"eval_loss": 3.233675003051758,
"eval_runtime": 5.5692,
"eval_samples_per_second": 269.517,
"eval_steps_per_second": 4.309,
"step": 150
},
{
"epoch": 12.307692307692308,
"grad_norm": 0.9921875,
"learning_rate": 0.00017541066097768963,
"loss": 1.5563,
"step": 160
},
{
"epoch": 13.076923076923077,
"grad_norm": 1.0546875,
"learning_rate": 0.00017167825131684513,
"loss": 1.4865,
"step": 170
},
{
"epoch": 13.846153846153847,
"grad_norm": 0.94921875,
"learning_rate": 0.00016772815716257412,
"loss": 1.3938,
"step": 180
},
{
"epoch": 14.615384615384615,
"grad_norm": 0.90625,
"learning_rate": 0.00016357237482099684,
"loss": 1.3247,
"step": 190
},
{
"epoch": 15.384615384615385,
"grad_norm": 0.99609375,
"learning_rate": 0.00015922352526649803,
"loss": 1.2699,
"step": 200
},
{
"epoch": 15.384615384615385,
"eval_loss": 3.521737813949585,
"eval_runtime": 5.6468,
"eval_samples_per_second": 265.815,
"eval_steps_per_second": 4.25,
"step": 200
},
{
"epoch": 16.153846153846153,
"grad_norm": 0.95703125,
"learning_rate": 0.00015469481581224272,
"loss": 1.2117,
"step": 210
},
{
"epoch": 16.923076923076923,
"grad_norm": 1.0390625,
"learning_rate": 0.00015000000000000001,
"loss": 1.1498,
"step": 220
},
{
"epoch": 17.692307692307693,
"grad_norm": 1.0078125,
"learning_rate": 0.00014515333583108896,
"loss": 1.0864,
"step": 230
},
{
"epoch": 18.46153846153846,
"grad_norm": 0.81640625,
"learning_rate": 0.00014016954246529696,
"loss": 1.0441,
"step": 240
},
{
"epoch": 19.23076923076923,
"grad_norm": 0.98828125,
"learning_rate": 0.00013506375551927547,
"loss": 1.0092,
"step": 250
},
{
"epoch": 19.23076923076923,
"eval_loss": 3.726165533065796,
"eval_runtime": 5.8207,
"eval_samples_per_second": 257.872,
"eval_steps_per_second": 4.123,
"step": 250
},
{
"epoch": 20.0,
"grad_norm": 0.8359375,
"learning_rate": 0.00012985148110016947,
"loss": 0.9671,
"step": 260
},
{
"epoch": 20.76923076923077,
"grad_norm": 0.8203125,
"learning_rate": 0.00012454854871407994,
"loss": 0.9182,
"step": 270
},
{
"epoch": 21.53846153846154,
"grad_norm": 0.796875,
"learning_rate": 0.00011917106319237386,
"loss": 0.8857,
"step": 280
},
{
"epoch": 22.307692307692307,
"grad_norm": 0.79296875,
"learning_rate": 0.00011373535578184082,
"loss": 0.8623,
"step": 290
},
{
"epoch": 23.076923076923077,
"grad_norm": 0.7421875,
"learning_rate": 0.00010825793454723325,
"loss": 0.8392,
"step": 300
},
{
"epoch": 23.076923076923077,
"eval_loss": 3.868284225463867,
"eval_runtime": 5.7191,
"eval_samples_per_second": 262.455,
"eval_steps_per_second": 4.196,
"step": 300
},
{
"epoch": 23.846153846153847,
"grad_norm": 0.75,
"learning_rate": 0.00010275543423681621,
"loss": 0.8103,
"step": 310
},
{
"epoch": 24.615384615384617,
"grad_norm": 0.75390625,
"learning_rate": 9.724456576318381e-05,
"loss": 0.7828,
"step": 320
},
{
"epoch": 25.384615384615383,
"grad_norm": 0.7265625,
"learning_rate": 9.174206545276677e-05,
"loss": 0.7749,
"step": 330
},
{
"epoch": 26.153846153846153,
"grad_norm": 0.74609375,
"learning_rate": 8.626464421815919e-05,
"loss": 0.7574,
"step": 340
},
{
"epoch": 26.923076923076923,
"grad_norm": 0.6875,
"learning_rate": 8.082893680762619e-05,
"loss": 0.7428,
"step": 350
},
{
"epoch": 26.923076923076923,
"eval_loss": 3.9434773921966553,
"eval_runtime": 5.6325,
"eval_samples_per_second": 266.49,
"eval_steps_per_second": 4.261,
"step": 350
},
{
"epoch": 27.692307692307693,
"grad_norm": 0.6953125,
"learning_rate": 7.54514512859201e-05,
"loss": 0.7298,
"step": 360
},
{
"epoch": 28.46153846153846,
"grad_norm": 0.66015625,
"learning_rate": 7.014851889983057e-05,
"loss": 0.7167,
"step": 370
},
{
"epoch": 29.23076923076923,
"grad_norm": 0.65625,
"learning_rate": 6.493624448072457e-05,
"loss": 0.7147,
"step": 380
},
{
"epoch": 30.0,
"grad_norm": 0.69140625,
"learning_rate": 5.983045753470308e-05,
"loss": 0.7019,
"step": 390
},
{
"epoch": 30.76923076923077,
"grad_norm": 0.66796875,
"learning_rate": 5.484666416891109e-05,
"loss": 0.6952,
"step": 400
},
{
"epoch": 30.76923076923077,
"eval_loss": 3.985978841781616,
"eval_runtime": 5.6438,
"eval_samples_per_second": 265.955,
"eval_steps_per_second": 4.252,
"step": 400
},
{
"epoch": 31.53846153846154,
"grad_norm": 0.6484375,
"learning_rate": 5.000000000000002e-05,
"loss": 0.6901,
"step": 410
},
{
"epoch": 32.30769230769231,
"grad_norm": 0.640625,
"learning_rate": 4.530518418775733e-05,
"loss": 0.685,
"step": 420
},
{
"epoch": 33.07692307692308,
"grad_norm": 0.62890625,
"learning_rate": 4.077647473350201e-05,
"loss": 0.6851,
"step": 430
},
{
"epoch": 33.84615384615385,
"grad_norm": 0.62109375,
"learning_rate": 3.642762517900322e-05,
"loss": 0.6782,
"step": 440
},
{
"epoch": 34.61538461538461,
"grad_norm": 0.6171875,
"learning_rate": 3.227184283742591e-05,
"loss": 0.6762,
"step": 450
},
{
"epoch": 34.61538461538461,
"eval_loss": 3.998966932296753,
"eval_runtime": 5.7156,
"eval_samples_per_second": 262.613,
"eval_steps_per_second": 4.199,
"step": 450
},
{
"epoch": 35.38461538461539,
"grad_norm": 0.62109375,
"learning_rate": 2.8321748683154893e-05,
"loss": 0.6742,
"step": 460
},
{
"epoch": 36.15384615384615,
"grad_norm": 0.6171875,
"learning_rate": 2.4589339022310386e-05,
"loss": 0.674,
"step": 470
},
{
"epoch": 36.92307692307692,
"grad_norm": 0.62109375,
"learning_rate": 2.1085949060360654e-05,
"loss": 0.6728,
"step": 480
},
{
"epoch": 37.69230769230769,
"grad_norm": 0.6171875,
"learning_rate": 1.7822218477475494e-05,
"loss": 0.6681,
"step": 490
},
{
"epoch": 38.46153846153846,
"grad_norm": 0.62109375,
"learning_rate": 1.4808059116167305e-05,
"loss": 0.6739,
"step": 500
},
{
"epoch": 38.46153846153846,
"eval_loss": 4.016704559326172,
"eval_runtime": 5.9697,
"eval_samples_per_second": 251.437,
"eval_steps_per_second": 4.02,
"step": 500
},
{
"epoch": 39.23076923076923,
"grad_norm": 0.61328125,
"learning_rate": 1.2052624879351104e-05,
"loss": 0.6685,
"step": 510
},
{
"epoch": 40.0,
"grad_norm": 0.625,
"learning_rate": 9.564283930242257e-06,
"loss": 0.6697,
"step": 520
},
{
"epoch": 40.76923076923077,
"grad_norm": 0.60546875,
"learning_rate": 7.350593278519824e-06,
"loss": 0.6691,
"step": 530
},
{
"epoch": 41.53846153846154,
"grad_norm": 0.609375,
"learning_rate": 5.418275829936537e-06,
"loss": 0.6709,
"step": 540
},
{
"epoch": 42.30769230769231,
"grad_norm": 0.62109375,
"learning_rate": 3.7731999690749585e-06,
"loss": 0.6691,
"step": 550
},
{
"epoch": 42.30769230769231,
"eval_loss": 4.020811080932617,
"eval_runtime": 5.6558,
"eval_samples_per_second": 265.392,
"eval_steps_per_second": 4.243,
"step": 550
},
{
"epoch": 43.07692307692308,
"grad_norm": 0.61328125,
"learning_rate": 2.420361737256438e-06,
"loss": 0.6671,
"step": 560
},
{
"epoch": 43.84615384615385,
"grad_norm": 0.62890625,
"learning_rate": 1.3638696597277679e-06,
"loss": 0.6683,
"step": 570
},
{
"epoch": 44.61538461538461,
"grad_norm": 0.59375,
"learning_rate": 6.069322682050516e-07,
"loss": 0.6714,
"step": 580
},
{
"epoch": 45.38461538461539,
"grad_norm": 0.6015625,
"learning_rate": 1.518483566683826e-07,
"loss": 0.6695,
"step": 590
},
{
"epoch": 46.15384615384615,
"grad_norm": 0.62109375,
"learning_rate": 0.0,
"loss": 0.6667,
"step": 600
},
{
"epoch": 46.15384615384615,
"eval_loss": 4.010259628295898,
"eval_runtime": 5.6787,
"eval_samples_per_second": 264.32,
"eval_steps_per_second": 4.226,
"step": 600
}
],
"logging_steps": 10,
"max_steps": 600,
"num_input_tokens_seen": 0,
"num_train_epochs": 47,
"save_steps": 50,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 2.05042678235136e+16,
"train_batch_size": 32,
"trial_name": null,
"trial_params": null
}