{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.5268703898840885,
"eval_steps": 50,
"global_step": 500,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.01053740779768177,
"grad_norm": 3.070249080657959,
"learning_rate": 4.982437653670531e-05,
"loss": 1.7879,
"step": 10
},
{
"epoch": 0.02107481559536354,
"grad_norm": 1.702326774597168,
"learning_rate": 4.964875307341061e-05,
"loss": 0.5567,
"step": 20
},
{
"epoch": 0.03161222339304531,
"grad_norm": 1.1947294473648071,
"learning_rate": 4.947312961011591e-05,
"loss": 0.4493,
"step": 30
},
{
"epoch": 0.04214963119072708,
"grad_norm": 0.9556658267974854,
"learning_rate": 4.929750614682122e-05,
"loss": 0.3728,
"step": 40
},
{
"epoch": 0.05268703898840885,
"grad_norm": 0.7952510714530945,
"learning_rate": 4.9121882683526524e-05,
"loss": 0.3535,
"step": 50
},
{
"epoch": 0.05268703898840885,
"eval_loss": 0.4311191439628601,
"eval_runtime": 13.6539,
"eval_samples_per_second": 35.155,
"eval_steps_per_second": 2.197,
"step": 50
},
{
"epoch": 0.06322444678609063,
"grad_norm": 0.6962826251983643,
"learning_rate": 4.894625922023183e-05,
"loss": 0.3507,
"step": 60
},
{
"epoch": 0.0737618545837724,
"grad_norm": 0.6941961646080017,
"learning_rate": 4.877063575693713e-05,
"loss": 0.3585,
"step": 70
},
{
"epoch": 0.08429926238145416,
"grad_norm": 0.6864392757415771,
"learning_rate": 4.8595012293642434e-05,
"loss": 0.3496,
"step": 80
},
{
"epoch": 0.09483667017913593,
"grad_norm": 0.7322937846183777,
"learning_rate": 4.841938883034774e-05,
"loss": 0.3295,
"step": 90
},
{
"epoch": 0.1053740779768177,
"grad_norm": 0.6921488046646118,
"learning_rate": 4.824376536705304e-05,
"loss": 0.3357,
"step": 100
},
{
"epoch": 0.1053740779768177,
"eval_loss": 0.39120009541511536,
"eval_runtime": 13.7031,
"eval_samples_per_second": 35.029,
"eval_steps_per_second": 2.189,
"step": 100
},
{
"epoch": 0.11591148577449947,
"grad_norm": 0.6553240418434143,
"learning_rate": 4.8068141903758344e-05,
"loss": 0.3105,
"step": 110
},
{
"epoch": 0.12644889357218125,
"grad_norm": 0.5637819170951843,
"learning_rate": 4.789251844046364e-05,
"loss": 0.3164,
"step": 120
},
{
"epoch": 0.136986301369863,
"grad_norm": 0.6341928839683533,
"learning_rate": 4.7716894977168955e-05,
"loss": 0.304,
"step": 130
},
{
"epoch": 0.1475237091675448,
"grad_norm": 0.5917785167694092,
"learning_rate": 4.754127151387426e-05,
"loss": 0.3234,
"step": 140
},
{
"epoch": 0.15806111696522657,
"grad_norm": 0.5884453654289246,
"learning_rate": 4.736564805057956e-05,
"loss": 0.317,
"step": 150
},
{
"epoch": 0.15806111696522657,
"eval_loss": 0.37688738107681274,
"eval_runtime": 13.6535,
"eval_samples_per_second": 35.156,
"eval_steps_per_second": 2.197,
"step": 150
},
{
"epoch": 0.16859852476290832,
"grad_norm": 0.5819964408874512,
"learning_rate": 4.7190024587284866e-05,
"loss": 0.2992,
"step": 160
},
{
"epoch": 0.1791359325605901,
"grad_norm": 0.689468264579773,
"learning_rate": 4.7014401123990165e-05,
"loss": 0.3168,
"step": 170
},
{
"epoch": 0.18967334035827185,
"grad_norm": 0.6950872540473938,
"learning_rate": 4.683877766069547e-05,
"loss": 0.3041,
"step": 180
},
{
"epoch": 0.20021074815595363,
"grad_norm": 0.8322122097015381,
"learning_rate": 4.6663154197400776e-05,
"loss": 0.3028,
"step": 190
},
{
"epoch": 0.2107481559536354,
"grad_norm": 0.5850774645805359,
"learning_rate": 4.6487530734106075e-05,
"loss": 0.2992,
"step": 200
},
{
"epoch": 0.2107481559536354,
"eval_loss": 0.36230018734931946,
"eval_runtime": 13.6165,
"eval_samples_per_second": 35.251,
"eval_steps_per_second": 2.203,
"step": 200
},
{
"epoch": 0.22128556375131717,
"grad_norm": 0.6668715476989746,
"learning_rate": 4.631190727081138e-05,
"loss": 0.2924,
"step": 210
},
{
"epoch": 0.23182297154899895,
"grad_norm": 0.4749641418457031,
"learning_rate": 4.6136283807516686e-05,
"loss": 0.3017,
"step": 220
},
{
"epoch": 0.24236037934668073,
"grad_norm": 0.6381515860557556,
"learning_rate": 4.596066034422199e-05,
"loss": 0.2887,
"step": 230
},
{
"epoch": 0.2528977871443625,
"grad_norm": 0.49952977895736694,
"learning_rate": 4.57850368809273e-05,
"loss": 0.2833,
"step": 240
},
{
"epoch": 0.26343519494204426,
"grad_norm": 0.699518084526062,
"learning_rate": 4.5609413417632596e-05,
"loss": 0.2859,
"step": 250
},
{
"epoch": 0.26343519494204426,
"eval_loss": 0.3563433885574341,
"eval_runtime": 13.6275,
"eval_samples_per_second": 35.223,
"eval_steps_per_second": 2.201,
"step": 250
},
{
"epoch": 0.273972602739726,
"grad_norm": 0.5912085175514221,
"learning_rate": 4.54337899543379e-05,
"loss": 0.302,
"step": 260
},
{
"epoch": 0.2845100105374078,
"grad_norm": 0.6353363990783691,
"learning_rate": 4.525816649104321e-05,
"loss": 0.3112,
"step": 270
},
{
"epoch": 0.2950474183350896,
"grad_norm": 0.5483567118644714,
"learning_rate": 4.5082543027748506e-05,
"loss": 0.2808,
"step": 280
},
{
"epoch": 0.3055848261327713,
"grad_norm": 0.5003193616867065,
"learning_rate": 4.490691956445381e-05,
"loss": 0.3185,
"step": 290
},
{
"epoch": 0.31612223393045313,
"grad_norm": 0.4919240176677704,
"learning_rate": 4.473129610115912e-05,
"loss": 0.2883,
"step": 300
},
{
"epoch": 0.31612223393045313,
"eval_loss": 0.35315924882888794,
"eval_runtime": 13.6133,
"eval_samples_per_second": 35.26,
"eval_steps_per_second": 2.204,
"step": 300
},
{
"epoch": 0.3266596417281349,
"grad_norm": 0.5145038366317749,
"learning_rate": 4.455567263786442e-05,
"loss": 0.3084,
"step": 310
},
{
"epoch": 0.33719704952581664,
"grad_norm": 0.6343855261802673,
"learning_rate": 4.438004917456973e-05,
"loss": 0.295,
"step": 320
},
{
"epoch": 0.34773445732349845,
"grad_norm": 0.5336400270462036,
"learning_rate": 4.420442571127503e-05,
"loss": 0.2882,
"step": 330
},
{
"epoch": 0.3582718651211802,
"grad_norm": 0.49547308683395386,
"learning_rate": 4.4028802247980333e-05,
"loss": 0.3084,
"step": 340
},
{
"epoch": 0.36880927291886195,
"grad_norm": 0.5037292242050171,
"learning_rate": 4.385317878468563e-05,
"loss": 0.3084,
"step": 350
},
{
"epoch": 0.36880927291886195,
"eval_loss": 0.34608179330825806,
"eval_runtime": 13.6537,
"eval_samples_per_second": 35.155,
"eval_steps_per_second": 2.197,
"step": 350
},
{
"epoch": 0.3793466807165437,
"grad_norm": 0.5956543684005737,
"learning_rate": 4.367755532139094e-05,
"loss": 0.2763,
"step": 360
},
{
"epoch": 0.3898840885142255,
"grad_norm": 0.6263634562492371,
"learning_rate": 4.3501931858096244e-05,
"loss": 0.3025,
"step": 370
},
{
"epoch": 0.40042149631190727,
"grad_norm": 0.4832920730113983,
"learning_rate": 4.332630839480154e-05,
"loss": 0.2792,
"step": 380
},
{
"epoch": 0.410958904109589,
"grad_norm": 0.4969714879989624,
"learning_rate": 4.3150684931506855e-05,
"loss": 0.2704,
"step": 390
},
{
"epoch": 0.4214963119072708,
"grad_norm": 0.568900465965271,
"learning_rate": 4.297506146821216e-05,
"loss": 0.2734,
"step": 400
},
{
"epoch": 0.4214963119072708,
"eval_loss": 0.34151414036750793,
"eval_runtime": 13.664,
"eval_samples_per_second": 35.129,
"eval_steps_per_second": 2.196,
"step": 400
},
{
"epoch": 0.4320337197049526,
"grad_norm": 0.5023282170295715,
"learning_rate": 4.279943800491746e-05,
"loss": 0.2845,
"step": 410
},
{
"epoch": 0.44257112750263433,
"grad_norm": 0.531538188457489,
"learning_rate": 4.2623814541622765e-05,
"loss": 0.276,
"step": 420
},
{
"epoch": 0.45310853530031614,
"grad_norm": 0.6686979532241821,
"learning_rate": 4.2448191078328064e-05,
"loss": 0.283,
"step": 430
},
{
"epoch": 0.4636459430979979,
"grad_norm": 0.5194190144538879,
"learning_rate": 4.227256761503337e-05,
"loss": 0.2636,
"step": 440
},
{
"epoch": 0.47418335089567965,
"grad_norm": 0.6105541586875916,
"learning_rate": 4.2096944151738675e-05,
"loss": 0.2713,
"step": 450
},
{
"epoch": 0.47418335089567965,
"eval_loss": 0.33879777789115906,
"eval_runtime": 13.6666,
"eval_samples_per_second": 35.122,
"eval_steps_per_second": 2.195,
"step": 450
},
{
"epoch": 0.48472075869336145,
"grad_norm": 0.4929138123989105,
"learning_rate": 4.1921320688443974e-05,
"loss": 0.261,
"step": 460
},
{
"epoch": 0.4952581664910432,
"grad_norm": 0.584095299243927,
"learning_rate": 4.174569722514928e-05,
"loss": 0.2714,
"step": 470
},
{
"epoch": 0.505795574288725,
"grad_norm": 0.5386167764663696,
"learning_rate": 4.1570073761854585e-05,
"loss": 0.2855,
"step": 480
},
{
"epoch": 0.5163329820864068,
"grad_norm": 0.5819774270057678,
"learning_rate": 4.139445029855989e-05,
"loss": 0.2911,
"step": 490
},
{
"epoch": 0.5268703898840885,
"grad_norm": 0.5875944495201111,
"learning_rate": 4.12188268352652e-05,
"loss": 0.2619,
"step": 500
},
{
"epoch": 0.5268703898840885,
"eval_loss": 0.3364439010620117,
"eval_runtime": 13.6731,
"eval_samples_per_second": 35.105,
"eval_steps_per_second": 2.194,
"step": 500
}
],
"logging_steps": 10,
"max_steps": 2847,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 100,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 5.975090629863014e+16,
"train_batch_size": 16,
"trial_name": null,
"trial_params": null
}
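
The state above keeps per-step training records (loss, grad_norm, learning_rate every logging_steps=10) and periodic evaluation records (eval_loss plus throughput every eval_steps=50) side by side in log_history. A minimal sketch of how such a file could be inspected, assuming it is saved as trainer_state.json inside the checkpoint directory (the path is an assumption, not part of this file):

    import json

    # Path is illustrative; point it at the actual checkpoint directory,
    # e.g. checkpoint-500/trainer_state.json.
    with open("trainer_state.json") as f:
        state = json.load(f)

    # Training records carry "loss"; evaluation records carry "eval_loss".
    train_log = [e for e in state["log_history"] if "loss" in e]
    eval_log = [e for e in state["log_history"] if "eval_loss" in e]

    print(f"global_step={state['global_step']}  max_steps={state['max_steps']}")
    for e in eval_log:
        print(f"step {e['step']:>4}  eval_loss {e['eval_loss']:.4f}")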