{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.31512605042016806,
"eval_steps": 50,
"global_step": 300,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.01050420168067227,
"grad_norm": 2.5582146644592285,
"learning_rate": 4.98249299719888e-05,
"loss": 1.6787,
"step": 10
},
{
"epoch": 0.02100840336134454,
"grad_norm": 0.9345070719718933,
"learning_rate": 4.96498599439776e-05,
"loss": 0.518,
"step": 20
},
{
"epoch": 0.031512605042016806,
"grad_norm": 1.6358414888381958,
"learning_rate": 4.947478991596639e-05,
"loss": 0.4604,
"step": 30
},
{
"epoch": 0.04201680672268908,
"grad_norm": 0.7778844237327576,
"learning_rate": 4.9299719887955186e-05,
"loss": 0.3771,
"step": 40
},
{
"epoch": 0.052521008403361345,
"grad_norm": 0.7006077766418457,
"learning_rate": 4.912464985994398e-05,
"loss": 0.3842,
"step": 50
},
{
"epoch": 0.052521008403361345,
"eval_loss": 0.42603224515914917,
"eval_runtime": 13.5673,
"eval_samples_per_second": 35.379,
"eval_steps_per_second": 2.211,
"step": 50
},
{
"epoch": 0.06302521008403361,
"grad_norm": 0.6415153741836548,
"learning_rate": 4.8949579831932775e-05,
"loss": 0.3399,
"step": 60
},
{
"epoch": 0.07352941176470588,
"grad_norm": 0.6030780076980591,
"learning_rate": 4.877450980392157e-05,
"loss": 0.3447,
"step": 70
},
{
"epoch": 0.08403361344537816,
"grad_norm": 0.688852071762085,
"learning_rate": 4.859943977591036e-05,
"loss": 0.3219,
"step": 80
},
{
"epoch": 0.09453781512605042,
"grad_norm": 0.6371557712554932,
"learning_rate": 4.8424369747899164e-05,
"loss": 0.3379,
"step": 90
},
{
"epoch": 0.10504201680672269,
"grad_norm": 0.7739270329475403,
"learning_rate": 4.824929971988796e-05,
"loss": 0.3177,
"step": 100
},
{
"epoch": 0.10504201680672269,
"eval_loss": 0.3801896274089813,
"eval_runtime": 13.6107,
"eval_samples_per_second": 35.266,
"eval_steps_per_second": 2.204,
"step": 100
},
{
"epoch": 0.11554621848739496,
"grad_norm": 0.649507462978363,
"learning_rate": 4.807422969187675e-05,
"loss": 0.3415,
"step": 110
},
{
"epoch": 0.12605042016806722,
"grad_norm": 0.594717264175415,
"learning_rate": 4.7899159663865554e-05,
"loss": 0.3325,
"step": 120
},
{
"epoch": 0.13655462184873948,
"grad_norm": 0.627918541431427,
"learning_rate": 4.772408963585435e-05,
"loss": 0.3222,
"step": 130
},
{
"epoch": 0.14705882352941177,
"grad_norm": 0.5384674668312073,
"learning_rate": 4.7549019607843135e-05,
"loss": 0.3426,
"step": 140
},
{
"epoch": 0.15756302521008403,
"grad_norm": 0.5673420429229736,
"learning_rate": 4.7373949579831936e-05,
"loss": 0.3061,
"step": 150
},
{
"epoch": 0.15756302521008403,
"eval_loss": 0.3653399348258972,
"eval_runtime": 13.5947,
"eval_samples_per_second": 35.308,
"eval_steps_per_second": 2.207,
"step": 150
},
{
"epoch": 0.16806722689075632,
"grad_norm": 0.6111018657684326,
"learning_rate": 4.719887955182073e-05,
"loss": 0.3271,
"step": 160
},
{
"epoch": 0.17857142857142858,
"grad_norm": 0.7422594428062439,
"learning_rate": 4.7023809523809525e-05,
"loss": 0.315,
"step": 170
},
{
"epoch": 0.18907563025210083,
"grad_norm": 0.7226534485816956,
"learning_rate": 4.684873949579832e-05,
"loss": 0.3031,
"step": 180
},
{
"epoch": 0.19957983193277312,
"grad_norm": 0.6302976012229919,
"learning_rate": 4.667366946778712e-05,
"loss": 0.3161,
"step": 190
},
{
"epoch": 0.21008403361344538,
"grad_norm": 0.6225076913833618,
"learning_rate": 4.6498599439775914e-05,
"loss": 0.3038,
"step": 200
},
{
"epoch": 0.21008403361344538,
"eval_loss": 0.35061606764793396,
"eval_runtime": 13.5616,
"eval_samples_per_second": 35.394,
"eval_steps_per_second": 2.212,
"step": 200
},
{
"epoch": 0.22058823529411764,
"grad_norm": 0.6001319885253906,
"learning_rate": 4.632352941176471e-05,
"loss": 0.3129,
"step": 210
},
{
"epoch": 0.23109243697478993,
"grad_norm": 0.5385990142822266,
"learning_rate": 4.61484593837535e-05,
"loss": 0.2991,
"step": 220
},
{
"epoch": 0.2415966386554622,
"grad_norm": 0.4513624906539917,
"learning_rate": 4.59733893557423e-05,
"loss": 0.2896,
"step": 230
},
{
"epoch": 0.25210084033613445,
"grad_norm": 0.6142160892486572,
"learning_rate": 4.579831932773109e-05,
"loss": 0.3059,
"step": 240
},
{
"epoch": 0.26260504201680673,
"grad_norm": 0.6714802384376526,
"learning_rate": 4.562324929971989e-05,
"loss": 0.2897,
"step": 250
},
{
"epoch": 0.26260504201680673,
"eval_loss": 0.3456435203552246,
"eval_runtime": 13.5552,
"eval_samples_per_second": 35.411,
"eval_steps_per_second": 2.213,
"step": 250
},
{
"epoch": 0.27310924369747897,
"grad_norm": 0.6518235206604004,
"learning_rate": 4.5448179271708687e-05,
"loss": 0.312,
"step": 260
},
{
"epoch": 0.28361344537815125,
"grad_norm": 0.6250632405281067,
"learning_rate": 4.527310924369748e-05,
"loss": 0.2959,
"step": 270
},
{
"epoch": 0.29411764705882354,
"grad_norm": 0.5683826804161072,
"learning_rate": 4.5098039215686275e-05,
"loss": 0.3027,
"step": 280
},
{
"epoch": 0.30462184873949577,
"grad_norm": 0.560312807559967,
"learning_rate": 4.4922969187675076e-05,
"loss": 0.3002,
"step": 290
},
{
"epoch": 0.31512605042016806,
"grad_norm": 0.66291743516922,
"learning_rate": 4.474789915966387e-05,
"loss": 0.2925,
"step": 300
},
{
"epoch": 0.31512605042016806,
"eval_loss": 0.3431606888771057,
"eval_runtime": 13.5629,
"eval_samples_per_second": 35.391,
"eval_steps_per_second": 2.212,
"step": 300
}
],
"logging_steps": 10,
"max_steps": 2856,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 100,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 3.591343031844864e+16,
"train_batch_size": 16,
"trial_name": null,
"trial_params": null
}