{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.1591148577449948,
"eval_steps": 50,
"global_step": 1100,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.01053740779768177,
"grad_norm": 3.070249080657959,
"learning_rate": 4.982437653670531e-05,
"loss": 1.7879,
"step": 10
},
{
"epoch": 0.02107481559536354,
"grad_norm": 1.702326774597168,
"learning_rate": 4.964875307341061e-05,
"loss": 0.5567,
"step": 20
},
{
"epoch": 0.03161222339304531,
"grad_norm": 1.1947294473648071,
"learning_rate": 4.947312961011591e-05,
"loss": 0.4493,
"step": 30
},
{
"epoch": 0.04214963119072708,
"grad_norm": 0.9556658267974854,
"learning_rate": 4.929750614682122e-05,
"loss": 0.3728,
"step": 40
},
{
"epoch": 0.05268703898840885,
"grad_norm": 0.7952510714530945,
"learning_rate": 4.9121882683526524e-05,
"loss": 0.3535,
"step": 50
},
{
"epoch": 0.05268703898840885,
"eval_loss": 0.4311191439628601,
"eval_runtime": 13.6539,
"eval_samples_per_second": 35.155,
"eval_steps_per_second": 2.197,
"step": 50
},
{
"epoch": 0.06322444678609063,
"grad_norm": 0.6962826251983643,
"learning_rate": 4.894625922023183e-05,
"loss": 0.3507,
"step": 60
},
{
"epoch": 0.0737618545837724,
"grad_norm": 0.6941961646080017,
"learning_rate": 4.877063575693713e-05,
"loss": 0.3585,
"step": 70
},
{
"epoch": 0.08429926238145416,
"grad_norm": 0.6864392757415771,
"learning_rate": 4.8595012293642434e-05,
"loss": 0.3496,
"step": 80
},
{
"epoch": 0.09483667017913593,
"grad_norm": 0.7322937846183777,
"learning_rate": 4.841938883034774e-05,
"loss": 0.3295,
"step": 90
},
{
"epoch": 0.1053740779768177,
"grad_norm": 0.6921488046646118,
"learning_rate": 4.824376536705304e-05,
"loss": 0.3357,
"step": 100
},
{
"epoch": 0.1053740779768177,
"eval_loss": 0.39120009541511536,
"eval_runtime": 13.7031,
"eval_samples_per_second": 35.029,
"eval_steps_per_second": 2.189,
"step": 100
},
{
"epoch": 0.11591148577449947,
"grad_norm": 0.6553240418434143,
"learning_rate": 4.8068141903758344e-05,
"loss": 0.3105,
"step": 110
},
{
"epoch": 0.12644889357218125,
"grad_norm": 0.5637819170951843,
"learning_rate": 4.789251844046364e-05,
"loss": 0.3164,
"step": 120
},
{
"epoch": 0.136986301369863,
"grad_norm": 0.6341928839683533,
"learning_rate": 4.7716894977168955e-05,
"loss": 0.304,
"step": 130
},
{
"epoch": 0.1475237091675448,
"grad_norm": 0.5917785167694092,
"learning_rate": 4.754127151387426e-05,
"loss": 0.3234,
"step": 140
},
{
"epoch": 0.15806111696522657,
"grad_norm": 0.5884453654289246,
"learning_rate": 4.736564805057956e-05,
"loss": 0.317,
"step": 150
},
{
"epoch": 0.15806111696522657,
"eval_loss": 0.37688738107681274,
"eval_runtime": 13.6535,
"eval_samples_per_second": 35.156,
"eval_steps_per_second": 2.197,
"step": 150
},
{
"epoch": 0.16859852476290832,
"grad_norm": 0.5819964408874512,
"learning_rate": 4.7190024587284866e-05,
"loss": 0.2992,
"step": 160
},
{
"epoch": 0.1791359325605901,
"grad_norm": 0.689468264579773,
"learning_rate": 4.7014401123990165e-05,
"loss": 0.3168,
"step": 170
},
{
"epoch": 0.18967334035827185,
"grad_norm": 0.6950872540473938,
"learning_rate": 4.683877766069547e-05,
"loss": 0.3041,
"step": 180
},
{
"epoch": 0.20021074815595363,
"grad_norm": 0.8322122097015381,
"learning_rate": 4.6663154197400776e-05,
"loss": 0.3028,
"step": 190
},
{
"epoch": 0.2107481559536354,
"grad_norm": 0.5850774645805359,
"learning_rate": 4.6487530734106075e-05,
"loss": 0.2992,
"step": 200
},
{
"epoch": 0.2107481559536354,
"eval_loss": 0.36230018734931946,
"eval_runtime": 13.6165,
"eval_samples_per_second": 35.251,
"eval_steps_per_second": 2.203,
"step": 200
},
{
"epoch": 0.22128556375131717,
"grad_norm": 0.6668715476989746,
"learning_rate": 4.631190727081138e-05,
"loss": 0.2924,
"step": 210
},
{
"epoch": 0.23182297154899895,
"grad_norm": 0.4749641418457031,
"learning_rate": 4.6136283807516686e-05,
"loss": 0.3017,
"step": 220
},
{
"epoch": 0.24236037934668073,
"grad_norm": 0.6381515860557556,
"learning_rate": 4.596066034422199e-05,
"loss": 0.2887,
"step": 230
},
{
"epoch": 0.2528977871443625,
"grad_norm": 0.49952977895736694,
"learning_rate": 4.57850368809273e-05,
"loss": 0.2833,
"step": 240
},
{
"epoch": 0.26343519494204426,
"grad_norm": 0.699518084526062,
"learning_rate": 4.5609413417632596e-05,
"loss": 0.2859,
"step": 250
},
{
"epoch": 0.26343519494204426,
"eval_loss": 0.3563433885574341,
"eval_runtime": 13.6275,
"eval_samples_per_second": 35.223,
"eval_steps_per_second": 2.201,
"step": 250
},
{
"epoch": 0.273972602739726,
"grad_norm": 0.5912085175514221,
"learning_rate": 4.54337899543379e-05,
"loss": 0.302,
"step": 260
},
{
"epoch": 0.2845100105374078,
"grad_norm": 0.6353363990783691,
"learning_rate": 4.525816649104321e-05,
"loss": 0.3112,
"step": 270
},
{
"epoch": 0.2950474183350896,
"grad_norm": 0.5483567118644714,
"learning_rate": 4.5082543027748506e-05,
"loss": 0.2808,
"step": 280
},
{
"epoch": 0.3055848261327713,
"grad_norm": 0.5003193616867065,
"learning_rate": 4.490691956445381e-05,
"loss": 0.3185,
"step": 290
},
{
"epoch": 0.31612223393045313,
"grad_norm": 0.4919240176677704,
"learning_rate": 4.473129610115912e-05,
"loss": 0.2883,
"step": 300
},
{
"epoch": 0.31612223393045313,
"eval_loss": 0.35315924882888794,
"eval_runtime": 13.6133,
"eval_samples_per_second": 35.26,
"eval_steps_per_second": 2.204,
"step": 300
},
{
"epoch": 0.3266596417281349,
"grad_norm": 0.5145038366317749,
"learning_rate": 4.455567263786442e-05,
"loss": 0.3084,
"step": 310
},
{
"epoch": 0.33719704952581664,
"grad_norm": 0.6343855261802673,
"learning_rate": 4.438004917456973e-05,
"loss": 0.295,
"step": 320
},
{
"epoch": 0.34773445732349845,
"grad_norm": 0.5336400270462036,
"learning_rate": 4.420442571127503e-05,
"loss": 0.2882,
"step": 330
},
{
"epoch": 0.3582718651211802,
"grad_norm": 0.49547308683395386,
"learning_rate": 4.4028802247980333e-05,
"loss": 0.3084,
"step": 340
},
{
"epoch": 0.36880927291886195,
"grad_norm": 0.5037292242050171,
"learning_rate": 4.385317878468563e-05,
"loss": 0.3084,
"step": 350
},
{
"epoch": 0.36880927291886195,
"eval_loss": 0.34608179330825806,
"eval_runtime": 13.6537,
"eval_samples_per_second": 35.155,
"eval_steps_per_second": 2.197,
"step": 350
},
{
"epoch": 0.3793466807165437,
"grad_norm": 0.5956543684005737,
"learning_rate": 4.367755532139094e-05,
"loss": 0.2763,
"step": 360
},
{
"epoch": 0.3898840885142255,
"grad_norm": 0.6263634562492371,
"learning_rate": 4.3501931858096244e-05,
"loss": 0.3025,
"step": 370
},
{
"epoch": 0.40042149631190727,
"grad_norm": 0.4832920730113983,
"learning_rate": 4.332630839480154e-05,
"loss": 0.2792,
"step": 380
},
{
"epoch": 0.410958904109589,
"grad_norm": 0.4969714879989624,
"learning_rate": 4.3150684931506855e-05,
"loss": 0.2704,
"step": 390
},
{
"epoch": 0.4214963119072708,
"grad_norm": 0.568900465965271,
"learning_rate": 4.297506146821216e-05,
"loss": 0.2734,
"step": 400
},
{
"epoch": 0.4214963119072708,
"eval_loss": 0.34151414036750793,
"eval_runtime": 13.664,
"eval_samples_per_second": 35.129,
"eval_steps_per_second": 2.196,
"step": 400
},
{
"epoch": 0.4320337197049526,
"grad_norm": 0.5023282170295715,
"learning_rate": 4.279943800491746e-05,
"loss": 0.2845,
"step": 410
},
{
"epoch": 0.44257112750263433,
"grad_norm": 0.531538188457489,
"learning_rate": 4.2623814541622765e-05,
"loss": 0.276,
"step": 420
},
{
"epoch": 0.45310853530031614,
"grad_norm": 0.6686979532241821,
"learning_rate": 4.2448191078328064e-05,
"loss": 0.283,
"step": 430
},
{
"epoch": 0.4636459430979979,
"grad_norm": 0.5194190144538879,
"learning_rate": 4.227256761503337e-05,
"loss": 0.2636,
"step": 440
},
{
"epoch": 0.47418335089567965,
"grad_norm": 0.6105541586875916,
"learning_rate": 4.2096944151738675e-05,
"loss": 0.2713,
"step": 450
},
{
"epoch": 0.47418335089567965,
"eval_loss": 0.33879777789115906,
"eval_runtime": 13.6666,
"eval_samples_per_second": 35.122,
"eval_steps_per_second": 2.195,
"step": 450
},
{
"epoch": 0.48472075869336145,
"grad_norm": 0.4929138123989105,
"learning_rate": 4.1921320688443974e-05,
"loss": 0.261,
"step": 460
},
{
"epoch": 0.4952581664910432,
"grad_norm": 0.584095299243927,
"learning_rate": 4.174569722514928e-05,
"loss": 0.2714,
"step": 470
},
{
"epoch": 0.505795574288725,
"grad_norm": 0.5386167764663696,
"learning_rate": 4.1570073761854585e-05,
"loss": 0.2855,
"step": 480
},
{
"epoch": 0.5163329820864068,
"grad_norm": 0.5819774270057678,
"learning_rate": 4.139445029855989e-05,
"loss": 0.2911,
"step": 490
},
{
"epoch": 0.5268703898840885,
"grad_norm": 0.5875944495201111,
"learning_rate": 4.12188268352652e-05,
"loss": 0.2619,
"step": 500
},
{
"epoch": 0.5268703898840885,
"eval_loss": 0.3364439010620117,
"eval_runtime": 13.6731,
"eval_samples_per_second": 35.105,
"eval_steps_per_second": 2.194,
"step": 500
},
{
"epoch": 0.5374077976817703,
"grad_norm": 0.6705812811851501,
"learning_rate": 4.1043203371970496e-05,
"loss": 0.2792,
"step": 510
},
{
"epoch": 0.547945205479452,
"grad_norm": 0.5975854396820068,
"learning_rate": 4.08675799086758e-05,
"loss": 0.2611,
"step": 520
},
{
"epoch": 0.5584826132771338,
"grad_norm": 0.5905255675315857,
"learning_rate": 4.069195644538111e-05,
"loss": 0.2688,
"step": 530
},
{
"epoch": 0.5690200210748156,
"grad_norm": 0.6222755312919617,
"learning_rate": 4.0516332982086406e-05,
"loss": 0.2968,
"step": 540
},
{
"epoch": 0.5795574288724974,
"grad_norm": 0.5604814291000366,
"learning_rate": 4.034070951879171e-05,
"loss": 0.2623,
"step": 550
},
{
"epoch": 0.5795574288724974,
"eval_loss": 0.3329848647117615,
"eval_runtime": 13.6573,
"eval_samples_per_second": 35.146,
"eval_steps_per_second": 2.197,
"step": 550
},
{
"epoch": 0.5900948366701791,
"grad_norm": 0.552557647228241,
"learning_rate": 4.016508605549702e-05,
"loss": 0.2579,
"step": 560
},
{
"epoch": 0.6006322444678609,
"grad_norm": 0.7151882648468018,
"learning_rate": 3.998946259220232e-05,
"loss": 0.263,
"step": 570
},
{
"epoch": 0.6111696522655427,
"grad_norm": 0.7065399289131165,
"learning_rate": 3.981383912890763e-05,
"loss": 0.2626,
"step": 580
},
{
"epoch": 0.6217070600632244,
"grad_norm": 0.5590985417366028,
"learning_rate": 3.963821566561293e-05,
"loss": 0.2614,
"step": 590
},
{
"epoch": 0.6322444678609063,
"grad_norm": 0.6018164157867432,
"learning_rate": 3.946259220231823e-05,
"loss": 0.2547,
"step": 600
},
{
"epoch": 0.6322444678609063,
"eval_loss": 0.3322373926639557,
"eval_runtime": 13.6386,
"eval_samples_per_second": 35.194,
"eval_steps_per_second": 2.2,
"step": 600
},
{
"epoch": 0.642781875658588,
"grad_norm": 0.6659051179885864,
"learning_rate": 3.928696873902353e-05,
"loss": 0.2768,
"step": 610
},
{
"epoch": 0.6533192834562698,
"grad_norm": 0.6509780883789062,
"learning_rate": 3.911134527572884e-05,
"loss": 0.272,
"step": 620
},
{
"epoch": 0.6638566912539515,
"grad_norm": 0.5752527713775635,
"learning_rate": 3.893572181243414e-05,
"loss": 0.2625,
"step": 630
},
{
"epoch": 0.6743940990516333,
"grad_norm": 0.4737485349178314,
"learning_rate": 3.876009834913945e-05,
"loss": 0.2668,
"step": 640
},
{
"epoch": 0.684931506849315,
"grad_norm": 0.548322319984436,
"learning_rate": 3.8584474885844754e-05,
"loss": 0.2597,
"step": 650
},
{
"epoch": 0.684931506849315,
"eval_loss": 0.3307412266731262,
"eval_runtime": 13.6407,
"eval_samples_per_second": 35.189,
"eval_steps_per_second": 2.199,
"step": 650
},
{
"epoch": 0.6954689146469969,
"grad_norm": 0.5635099411010742,
"learning_rate": 3.840885142255005e-05,
"loss": 0.2453,
"step": 660
},
{
"epoch": 0.7060063224446786,
"grad_norm": 0.6012313365936279,
"learning_rate": 3.823322795925536e-05,
"loss": 0.2695,
"step": 670
},
{
"epoch": 0.7165437302423604,
"grad_norm": 0.6025511026382446,
"learning_rate": 3.8057604495960664e-05,
"loss": 0.2599,
"step": 680
},
{
"epoch": 0.7270811380400422,
"grad_norm": 0.5996225476264954,
"learning_rate": 3.788198103266596e-05,
"loss": 0.2624,
"step": 690
},
{
"epoch": 0.7376185458377239,
"grad_norm": 0.5269689559936523,
"learning_rate": 3.770635756937127e-05,
"loss": 0.2498,
"step": 700
},
{
"epoch": 0.7376185458377239,
"eval_loss": 0.3312172591686249,
"eval_runtime": 13.6169,
"eval_samples_per_second": 35.25,
"eval_steps_per_second": 2.203,
"step": 700
},
{
"epoch": 0.7481559536354057,
"grad_norm": 0.6115732192993164,
"learning_rate": 3.7530734106076575e-05,
"loss": 0.2566,
"step": 710
},
{
"epoch": 0.7586933614330874,
"grad_norm": 0.4686366319656372,
"learning_rate": 3.7355110642781874e-05,
"loss": 0.2325,
"step": 720
},
{
"epoch": 0.7692307692307693,
"grad_norm": 0.5317673087120056,
"learning_rate": 3.717948717948718e-05,
"loss": 0.2429,
"step": 730
},
{
"epoch": 0.779768177028451,
"grad_norm": 0.5183236598968506,
"learning_rate": 3.7003863716192485e-05,
"loss": 0.2562,
"step": 740
},
{
"epoch": 0.7903055848261328,
"grad_norm": 0.5319241881370544,
"learning_rate": 3.682824025289779e-05,
"loss": 0.2356,
"step": 750
},
{
"epoch": 0.7903055848261328,
"eval_loss": 0.3285733163356781,
"eval_runtime": 13.6213,
"eval_samples_per_second": 35.239,
"eval_steps_per_second": 2.202,
"step": 750
},
{
"epoch": 0.8008429926238145,
"grad_norm": 0.5814469456672668,
"learning_rate": 3.6652616789603096e-05,
"loss": 0.2428,
"step": 760
},
{
"epoch": 0.8113804004214963,
"grad_norm": 0.5138055086135864,
"learning_rate": 3.6476993326308395e-05,
"loss": 0.2639,
"step": 770
},
{
"epoch": 0.821917808219178,
"grad_norm": 0.5066888332366943,
"learning_rate": 3.63013698630137e-05,
"loss": 0.2373,
"step": 780
},
{
"epoch": 0.8324552160168599,
"grad_norm": 0.513317883014679,
"learning_rate": 3.6125746399719e-05,
"loss": 0.2462,
"step": 790
},
{
"epoch": 0.8429926238145417,
"grad_norm": 0.5878031253814697,
"learning_rate": 3.5950122936424305e-05,
"loss": 0.2397,
"step": 800
},
{
"epoch": 0.8429926238145417,
"eval_loss": 0.3288480043411255,
"eval_runtime": 13.6316,
"eval_samples_per_second": 35.212,
"eval_steps_per_second": 2.201,
"step": 800
},
{
"epoch": 0.8535300316122234,
"grad_norm": 0.6269211769104004,
"learning_rate": 3.577449947312961e-05,
"loss": 0.2418,
"step": 810
},
{
"epoch": 0.8640674394099052,
"grad_norm": 0.5279048085212708,
"learning_rate": 3.5598876009834916e-05,
"loss": 0.246,
"step": 820
},
{
"epoch": 0.8746048472075869,
"grad_norm": 0.5837969183921814,
"learning_rate": 3.542325254654022e-05,
"loss": 0.256,
"step": 830
},
{
"epoch": 0.8851422550052687,
"grad_norm": 0.5384166240692139,
"learning_rate": 3.524762908324553e-05,
"loss": 0.2412,
"step": 840
},
{
"epoch": 0.8956796628029505,
"grad_norm": 0.5514136552810669,
"learning_rate": 3.507200561995083e-05,
"loss": 0.251,
"step": 850
},
{
"epoch": 0.8956796628029505,
"eval_loss": 0.3275190591812134,
"eval_runtime": 13.654,
"eval_samples_per_second": 35.155,
"eval_steps_per_second": 2.197,
"step": 850
},
{
"epoch": 0.9062170706006323,
"grad_norm": 0.5456790328025818,
"learning_rate": 3.489638215665613e-05,
"loss": 0.2337,
"step": 860
},
{
"epoch": 0.916754478398314,
"grad_norm": 0.5794851779937744,
"learning_rate": 3.472075869336143e-05,
"loss": 0.2462,
"step": 870
},
{
"epoch": 0.9272918861959958,
"grad_norm": 0.5296761393547058,
"learning_rate": 3.454513523006674e-05,
"loss": 0.2357,
"step": 880
},
{
"epoch": 0.9378292939936775,
"grad_norm": 0.5668390989303589,
"learning_rate": 3.436951176677204e-05,
"loss": 0.2439,
"step": 890
},
{
"epoch": 0.9483667017913593,
"grad_norm": 0.5026710629463196,
"learning_rate": 3.419388830347735e-05,
"loss": 0.2301,
"step": 900
},
{
"epoch": 0.9483667017913593,
"eval_loss": 0.32623717188835144,
"eval_runtime": 13.6644,
"eval_samples_per_second": 35.128,
"eval_steps_per_second": 2.195,
"step": 900
},
{
"epoch": 0.958904109589041,
"grad_norm": 0.5280706286430359,
"learning_rate": 3.4018264840182654e-05,
"loss": 0.238,
"step": 910
},
{
"epoch": 0.9694415173867229,
"grad_norm": 0.5578323006629944,
"learning_rate": 3.384264137688795e-05,
"loss": 0.2484,
"step": 920
},
{
"epoch": 0.9799789251844047,
"grad_norm": 0.5756859183311462,
"learning_rate": 3.366701791359326e-05,
"loss": 0.2539,
"step": 930
},
{
"epoch": 0.9905163329820864,
"grad_norm": 0.6118686199188232,
"learning_rate": 3.3491394450298564e-05,
"loss": 0.2401,
"step": 940
},
{
"epoch": 1.0010537407797682,
"grad_norm": 0.43512389063835144,
"learning_rate": 3.331577098700386e-05,
"loss": 0.2334,
"step": 950
},
{
"epoch": 1.0010537407797682,
"eval_loss": 0.32700982689857483,
"eval_runtime": 13.6561,
"eval_samples_per_second": 35.149,
"eval_steps_per_second": 2.197,
"step": 950
},
{
"epoch": 1.01159114857745,
"grad_norm": 0.5436535477638245,
"learning_rate": 3.314014752370917e-05,
"loss": 0.1933,
"step": 960
},
{
"epoch": 1.0221285563751317,
"grad_norm": 0.5048521161079407,
"learning_rate": 3.2964524060414474e-05,
"loss": 0.2088,
"step": 970
},
{
"epoch": 1.0326659641728135,
"grad_norm": 0.6231564283370972,
"learning_rate": 3.278890059711977e-05,
"loss": 0.2146,
"step": 980
},
{
"epoch": 1.0432033719704952,
"grad_norm": 0.6846581101417542,
"learning_rate": 3.261327713382508e-05,
"loss": 0.2149,
"step": 990
},
{
"epoch": 1.053740779768177,
"grad_norm": 0.5640810132026672,
"learning_rate": 3.2437653670530384e-05,
"loss": 0.2078,
"step": 1000
},
{
"epoch": 1.053740779768177,
"eval_loss": 0.3363134264945984,
"eval_runtime": 13.6484,
"eval_samples_per_second": 35.169,
"eval_steps_per_second": 2.198,
"step": 1000
},
{
"epoch": 1.064278187565859,
"grad_norm": 0.6279656887054443,
"learning_rate": 3.226203020723569e-05,
"loss": 0.2004,
"step": 1010
},
{
"epoch": 1.0748155953635405,
"grad_norm": 0.5957475304603577,
"learning_rate": 3.2086406743940996e-05,
"loss": 0.1952,
"step": 1020
},
{
"epoch": 1.0853530031612224,
"grad_norm": 0.6246820092201233,
"learning_rate": 3.1910783280646294e-05,
"loss": 0.2071,
"step": 1030
},
{
"epoch": 1.095890410958904,
"grad_norm": 0.5296807885169983,
"learning_rate": 3.17351598173516e-05,
"loss": 0.2077,
"step": 1040
},
{
"epoch": 1.106427818756586,
"grad_norm": 0.6367089152336121,
"learning_rate": 3.15595363540569e-05,
"loss": 0.2087,
"step": 1050
},
{
"epoch": 1.106427818756586,
"eval_loss": 0.3410273492336273,
"eval_runtime": 13.6191,
"eval_samples_per_second": 35.245,
"eval_steps_per_second": 2.203,
"step": 1050
},
{
"epoch": 1.1169652265542676,
"grad_norm": 0.5743587017059326,
"learning_rate": 3.1383912890762205e-05,
"loss": 0.2063,
"step": 1060
},
{
"epoch": 1.1275026343519494,
"grad_norm": 0.5830729603767395,
"learning_rate": 3.120828942746751e-05,
"loss": 0.2021,
"step": 1070
},
{
"epoch": 1.1380400421496313,
"grad_norm": 0.585697591304779,
"learning_rate": 3.1032665964172816e-05,
"loss": 0.2003,
"step": 1080
},
{
"epoch": 1.148577449947313,
"grad_norm": 0.6112616062164307,
"learning_rate": 3.085704250087812e-05,
"loss": 0.2035,
"step": 1090
},
{
"epoch": 1.1591148577449948,
"grad_norm": 0.6068351864814758,
"learning_rate": 3.068141903758342e-05,
"loss": 0.198,
"step": 1100
},
{
"epoch": 1.1591148577449948,
"eval_loss": 0.33999618887901306,
"eval_runtime": 13.5917,
"eval_samples_per_second": 35.316,
"eval_steps_per_second": 2.207,
"step": 1100
}
],
"logging_steps": 10,
"max_steps": 2847,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 100,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 1.3194816720283238e+17,
"train_batch_size": 16,
"trial_name": null,
"trial_params": null
}