{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.7913593256059008,
"eval_steps": 50,
"global_step": 1700,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.01053740779768177,
"grad_norm": 3.070249080657959,
"learning_rate": 4.982437653670531e-05,
"loss": 1.7879,
"step": 10
},
{
"epoch": 0.02107481559536354,
"grad_norm": 1.702326774597168,
"learning_rate": 4.964875307341061e-05,
"loss": 0.5567,
"step": 20
},
{
"epoch": 0.03161222339304531,
"grad_norm": 1.1947294473648071,
"learning_rate": 4.947312961011591e-05,
"loss": 0.4493,
"step": 30
},
{
"epoch": 0.04214963119072708,
"grad_norm": 0.9556658267974854,
"learning_rate": 4.929750614682122e-05,
"loss": 0.3728,
"step": 40
},
{
"epoch": 0.05268703898840885,
"grad_norm": 0.7952510714530945,
"learning_rate": 4.9121882683526524e-05,
"loss": 0.3535,
"step": 50
},
{
"epoch": 0.05268703898840885,
"eval_loss": 0.4311191439628601,
"eval_runtime": 13.6539,
"eval_samples_per_second": 35.155,
"eval_steps_per_second": 2.197,
"step": 50
},
{
"epoch": 0.06322444678609063,
"grad_norm": 0.6962826251983643,
"learning_rate": 4.894625922023183e-05,
"loss": 0.3507,
"step": 60
},
{
"epoch": 0.0737618545837724,
"grad_norm": 0.6941961646080017,
"learning_rate": 4.877063575693713e-05,
"loss": 0.3585,
"step": 70
},
{
"epoch": 0.08429926238145416,
"grad_norm": 0.6864392757415771,
"learning_rate": 4.8595012293642434e-05,
"loss": 0.3496,
"step": 80
},
{
"epoch": 0.09483667017913593,
"grad_norm": 0.7322937846183777,
"learning_rate": 4.841938883034774e-05,
"loss": 0.3295,
"step": 90
},
{
"epoch": 0.1053740779768177,
"grad_norm": 0.6921488046646118,
"learning_rate": 4.824376536705304e-05,
"loss": 0.3357,
"step": 100
},
{
"epoch": 0.1053740779768177,
"eval_loss": 0.39120009541511536,
"eval_runtime": 13.7031,
"eval_samples_per_second": 35.029,
"eval_steps_per_second": 2.189,
"step": 100
},
{
"epoch": 0.11591148577449947,
"grad_norm": 0.6553240418434143,
"learning_rate": 4.8068141903758344e-05,
"loss": 0.3105,
"step": 110
},
{
"epoch": 0.12644889357218125,
"grad_norm": 0.5637819170951843,
"learning_rate": 4.789251844046364e-05,
"loss": 0.3164,
"step": 120
},
{
"epoch": 0.136986301369863,
"grad_norm": 0.6341928839683533,
"learning_rate": 4.7716894977168955e-05,
"loss": 0.304,
"step": 130
},
{
"epoch": 0.1475237091675448,
"grad_norm": 0.5917785167694092,
"learning_rate": 4.754127151387426e-05,
"loss": 0.3234,
"step": 140
},
{
"epoch": 0.15806111696522657,
"grad_norm": 0.5884453654289246,
"learning_rate": 4.736564805057956e-05,
"loss": 0.317,
"step": 150
},
{
"epoch": 0.15806111696522657,
"eval_loss": 0.37688738107681274,
"eval_runtime": 13.6535,
"eval_samples_per_second": 35.156,
"eval_steps_per_second": 2.197,
"step": 150
},
{
"epoch": 0.16859852476290832,
"grad_norm": 0.5819964408874512,
"learning_rate": 4.7190024587284866e-05,
"loss": 0.2992,
"step": 160
},
{
"epoch": 0.1791359325605901,
"grad_norm": 0.689468264579773,
"learning_rate": 4.7014401123990165e-05,
"loss": 0.3168,
"step": 170
},
{
"epoch": 0.18967334035827185,
"grad_norm": 0.6950872540473938,
"learning_rate": 4.683877766069547e-05,
"loss": 0.3041,
"step": 180
},
{
"epoch": 0.20021074815595363,
"grad_norm": 0.8322122097015381,
"learning_rate": 4.6663154197400776e-05,
"loss": 0.3028,
"step": 190
},
{
"epoch": 0.2107481559536354,
"grad_norm": 0.5850774645805359,
"learning_rate": 4.6487530734106075e-05,
"loss": 0.2992,
"step": 200
},
{
"epoch": 0.2107481559536354,
"eval_loss": 0.36230018734931946,
"eval_runtime": 13.6165,
"eval_samples_per_second": 35.251,
"eval_steps_per_second": 2.203,
"step": 200
},
{
"epoch": 0.22128556375131717,
"grad_norm": 0.6668715476989746,
"learning_rate": 4.631190727081138e-05,
"loss": 0.2924,
"step": 210
},
{
"epoch": 0.23182297154899895,
"grad_norm": 0.4749641418457031,
"learning_rate": 4.6136283807516686e-05,
"loss": 0.3017,
"step": 220
},
{
"epoch": 0.24236037934668073,
"grad_norm": 0.6381515860557556,
"learning_rate": 4.596066034422199e-05,
"loss": 0.2887,
"step": 230
},
{
"epoch": 0.2528977871443625,
"grad_norm": 0.49952977895736694,
"learning_rate": 4.57850368809273e-05,
"loss": 0.2833,
"step": 240
},
{
"epoch": 0.26343519494204426,
"grad_norm": 0.699518084526062,
"learning_rate": 4.5609413417632596e-05,
"loss": 0.2859,
"step": 250
},
{
"epoch": 0.26343519494204426,
"eval_loss": 0.3563433885574341,
"eval_runtime": 13.6275,
"eval_samples_per_second": 35.223,
"eval_steps_per_second": 2.201,
"step": 250
},
{
"epoch": 0.273972602739726,
"grad_norm": 0.5912085175514221,
"learning_rate": 4.54337899543379e-05,
"loss": 0.302,
"step": 260
},
{
"epoch": 0.2845100105374078,
"grad_norm": 0.6353363990783691,
"learning_rate": 4.525816649104321e-05,
"loss": 0.3112,
"step": 270
},
{
"epoch": 0.2950474183350896,
"grad_norm": 0.5483567118644714,
"learning_rate": 4.5082543027748506e-05,
"loss": 0.2808,
"step": 280
},
{
"epoch": 0.3055848261327713,
"grad_norm": 0.5003193616867065,
"learning_rate": 4.490691956445381e-05,
"loss": 0.3185,
"step": 290
},
{
"epoch": 0.31612223393045313,
"grad_norm": 0.4919240176677704,
"learning_rate": 4.473129610115912e-05,
"loss": 0.2883,
"step": 300
},
{
"epoch": 0.31612223393045313,
"eval_loss": 0.35315924882888794,
"eval_runtime": 13.6133,
"eval_samples_per_second": 35.26,
"eval_steps_per_second": 2.204,
"step": 300
},
{
"epoch": 0.3266596417281349,
"grad_norm": 0.5145038366317749,
"learning_rate": 4.455567263786442e-05,
"loss": 0.3084,
"step": 310
},
{
"epoch": 0.33719704952581664,
"grad_norm": 0.6343855261802673,
"learning_rate": 4.438004917456973e-05,
"loss": 0.295,
"step": 320
},
{
"epoch": 0.34773445732349845,
"grad_norm": 0.5336400270462036,
"learning_rate": 4.420442571127503e-05,
"loss": 0.2882,
"step": 330
},
{
"epoch": 0.3582718651211802,
"grad_norm": 0.49547308683395386,
"learning_rate": 4.4028802247980333e-05,
"loss": 0.3084,
"step": 340
},
{
"epoch": 0.36880927291886195,
"grad_norm": 0.5037292242050171,
"learning_rate": 4.385317878468563e-05,
"loss": 0.3084,
"step": 350
},
{
"epoch": 0.36880927291886195,
"eval_loss": 0.34608179330825806,
"eval_runtime": 13.6537,
"eval_samples_per_second": 35.155,
"eval_steps_per_second": 2.197,
"step": 350
},
{
"epoch": 0.3793466807165437,
"grad_norm": 0.5956543684005737,
"learning_rate": 4.367755532139094e-05,
"loss": 0.2763,
"step": 360
},
{
"epoch": 0.3898840885142255,
"grad_norm": 0.6263634562492371,
"learning_rate": 4.3501931858096244e-05,
"loss": 0.3025,
"step": 370
},
{
"epoch": 0.40042149631190727,
"grad_norm": 0.4832920730113983,
"learning_rate": 4.332630839480154e-05,
"loss": 0.2792,
"step": 380
},
{
"epoch": 0.410958904109589,
"grad_norm": 0.4969714879989624,
"learning_rate": 4.3150684931506855e-05,
"loss": 0.2704,
"step": 390
},
{
"epoch": 0.4214963119072708,
"grad_norm": 0.568900465965271,
"learning_rate": 4.297506146821216e-05,
"loss": 0.2734,
"step": 400
},
{
"epoch": 0.4214963119072708,
"eval_loss": 0.34151414036750793,
"eval_runtime": 13.664,
"eval_samples_per_second": 35.129,
"eval_steps_per_second": 2.196,
"step": 400
},
{
"epoch": 0.4320337197049526,
"grad_norm": 0.5023282170295715,
"learning_rate": 4.279943800491746e-05,
"loss": 0.2845,
"step": 410
},
{
"epoch": 0.44257112750263433,
"grad_norm": 0.531538188457489,
"learning_rate": 4.2623814541622765e-05,
"loss": 0.276,
"step": 420
},
{
"epoch": 0.45310853530031614,
"grad_norm": 0.6686979532241821,
"learning_rate": 4.2448191078328064e-05,
"loss": 0.283,
"step": 430
},
{
"epoch": 0.4636459430979979,
"grad_norm": 0.5194190144538879,
"learning_rate": 4.227256761503337e-05,
"loss": 0.2636,
"step": 440
},
{
"epoch": 0.47418335089567965,
"grad_norm": 0.6105541586875916,
"learning_rate": 4.2096944151738675e-05,
"loss": 0.2713,
"step": 450
},
{
"epoch": 0.47418335089567965,
"eval_loss": 0.33879777789115906,
"eval_runtime": 13.6666,
"eval_samples_per_second": 35.122,
"eval_steps_per_second": 2.195,
"step": 450
},
{
"epoch": 0.48472075869336145,
"grad_norm": 0.4929138123989105,
"learning_rate": 4.1921320688443974e-05,
"loss": 0.261,
"step": 460
},
{
"epoch": 0.4952581664910432,
"grad_norm": 0.584095299243927,
"learning_rate": 4.174569722514928e-05,
"loss": 0.2714,
"step": 470
},
{
"epoch": 0.505795574288725,
"grad_norm": 0.5386167764663696,
"learning_rate": 4.1570073761854585e-05,
"loss": 0.2855,
"step": 480
},
{
"epoch": 0.5163329820864068,
"grad_norm": 0.5819774270057678,
"learning_rate": 4.139445029855989e-05,
"loss": 0.2911,
"step": 490
},
{
"epoch": 0.5268703898840885,
"grad_norm": 0.5875944495201111,
"learning_rate": 4.12188268352652e-05,
"loss": 0.2619,
"step": 500
},
{
"epoch": 0.5268703898840885,
"eval_loss": 0.3364439010620117,
"eval_runtime": 13.6731,
"eval_samples_per_second": 35.105,
"eval_steps_per_second": 2.194,
"step": 500
},
{
"epoch": 0.5374077976817703,
"grad_norm": 0.6705812811851501,
"learning_rate": 4.1043203371970496e-05,
"loss": 0.2792,
"step": 510
},
{
"epoch": 0.547945205479452,
"grad_norm": 0.5975854396820068,
"learning_rate": 4.08675799086758e-05,
"loss": 0.2611,
"step": 520
},
{
"epoch": 0.5584826132771338,
"grad_norm": 0.5905255675315857,
"learning_rate": 4.069195644538111e-05,
"loss": 0.2688,
"step": 530
},
{
"epoch": 0.5690200210748156,
"grad_norm": 0.6222755312919617,
"learning_rate": 4.0516332982086406e-05,
"loss": 0.2968,
"step": 540
},
{
"epoch": 0.5795574288724974,
"grad_norm": 0.5604814291000366,
"learning_rate": 4.034070951879171e-05,
"loss": 0.2623,
"step": 550
},
{
"epoch": 0.5795574288724974,
"eval_loss": 0.3329848647117615,
"eval_runtime": 13.6573,
"eval_samples_per_second": 35.146,
"eval_steps_per_second": 2.197,
"step": 550
},
{
"epoch": 0.5900948366701791,
"grad_norm": 0.552557647228241,
"learning_rate": 4.016508605549702e-05,
"loss": 0.2579,
"step": 560
},
{
"epoch": 0.6006322444678609,
"grad_norm": 0.7151882648468018,
"learning_rate": 3.998946259220232e-05,
"loss": 0.263,
"step": 570
},
{
"epoch": 0.6111696522655427,
"grad_norm": 0.7065399289131165,
"learning_rate": 3.981383912890763e-05,
"loss": 0.2626,
"step": 580
},
{
"epoch": 0.6217070600632244,
"grad_norm": 0.5590985417366028,
"learning_rate": 3.963821566561293e-05,
"loss": 0.2614,
"step": 590
},
{
"epoch": 0.6322444678609063,
"grad_norm": 0.6018164157867432,
"learning_rate": 3.946259220231823e-05,
"loss": 0.2547,
"step": 600
},
{
"epoch": 0.6322444678609063,
"eval_loss": 0.3322373926639557,
"eval_runtime": 13.6386,
"eval_samples_per_second": 35.194,
"eval_steps_per_second": 2.2,
"step": 600
},
{
"epoch": 0.642781875658588,
"grad_norm": 0.6659051179885864,
"learning_rate": 3.928696873902353e-05,
"loss": 0.2768,
"step": 610
},
{
"epoch": 0.6533192834562698,
"grad_norm": 0.6509780883789062,
"learning_rate": 3.911134527572884e-05,
"loss": 0.272,
"step": 620
},
{
"epoch": 0.6638566912539515,
"grad_norm": 0.5752527713775635,
"learning_rate": 3.893572181243414e-05,
"loss": 0.2625,
"step": 630
},
{
"epoch": 0.6743940990516333,
"grad_norm": 0.4737485349178314,
"learning_rate": 3.876009834913945e-05,
"loss": 0.2668,
"step": 640
},
{
"epoch": 0.684931506849315,
"grad_norm": 0.548322319984436,
"learning_rate": 3.8584474885844754e-05,
"loss": 0.2597,
"step": 650
},
{
"epoch": 0.684931506849315,
"eval_loss": 0.3307412266731262,
"eval_runtime": 13.6407,
"eval_samples_per_second": 35.189,
"eval_steps_per_second": 2.199,
"step": 650
},
{
"epoch": 0.6954689146469969,
"grad_norm": 0.5635099411010742,
"learning_rate": 3.840885142255005e-05,
"loss": 0.2453,
"step": 660
},
{
"epoch": 0.7060063224446786,
"grad_norm": 0.6012313365936279,
"learning_rate": 3.823322795925536e-05,
"loss": 0.2695,
"step": 670
},
{
"epoch": 0.7165437302423604,
"grad_norm": 0.6025511026382446,
"learning_rate": 3.8057604495960664e-05,
"loss": 0.2599,
"step": 680
},
{
"epoch": 0.7270811380400422,
"grad_norm": 0.5996225476264954,
"learning_rate": 3.788198103266596e-05,
"loss": 0.2624,
"step": 690
},
{
"epoch": 0.7376185458377239,
"grad_norm": 0.5269689559936523,
"learning_rate": 3.770635756937127e-05,
"loss": 0.2498,
"step": 700
},
{
"epoch": 0.7376185458377239,
"eval_loss": 0.3312172591686249,
"eval_runtime": 13.6169,
"eval_samples_per_second": 35.25,
"eval_steps_per_second": 2.203,
"step": 700
},
{
"epoch": 0.7481559536354057,
"grad_norm": 0.6115732192993164,
"learning_rate": 3.7530734106076575e-05,
"loss": 0.2566,
"step": 710
},
{
"epoch": 0.7586933614330874,
"grad_norm": 0.4686366319656372,
"learning_rate": 3.7355110642781874e-05,
"loss": 0.2325,
"step": 720
},
{
"epoch": 0.7692307692307693,
"grad_norm": 0.5317673087120056,
"learning_rate": 3.717948717948718e-05,
"loss": 0.2429,
"step": 730
},
{
"epoch": 0.779768177028451,
"grad_norm": 0.5183236598968506,
"learning_rate": 3.7003863716192485e-05,
"loss": 0.2562,
"step": 740
},
{
"epoch": 0.7903055848261328,
"grad_norm": 0.5319241881370544,
"learning_rate": 3.682824025289779e-05,
"loss": 0.2356,
"step": 750
},
{
"epoch": 0.7903055848261328,
"eval_loss": 0.3285733163356781,
"eval_runtime": 13.6213,
"eval_samples_per_second": 35.239,
"eval_steps_per_second": 2.202,
"step": 750
},
{
"epoch": 0.8008429926238145,
"grad_norm": 0.5814469456672668,
"learning_rate": 3.6652616789603096e-05,
"loss": 0.2428,
"step": 760
},
{
"epoch": 0.8113804004214963,
"grad_norm": 0.5138055086135864,
"learning_rate": 3.6476993326308395e-05,
"loss": 0.2639,
"step": 770
},
{
"epoch": 0.821917808219178,
"grad_norm": 0.5066888332366943,
"learning_rate": 3.63013698630137e-05,
"loss": 0.2373,
"step": 780
},
{
"epoch": 0.8324552160168599,
"grad_norm": 0.513317883014679,
"learning_rate": 3.6125746399719e-05,
"loss": 0.2462,
"step": 790
},
{
"epoch": 0.8429926238145417,
"grad_norm": 0.5878031253814697,
"learning_rate": 3.5950122936424305e-05,
"loss": 0.2397,
"step": 800
},
{
"epoch": 0.8429926238145417,
"eval_loss": 0.3288480043411255,
"eval_runtime": 13.6316,
"eval_samples_per_second": 35.212,
"eval_steps_per_second": 2.201,
"step": 800
},
{
"epoch": 0.8535300316122234,
"grad_norm": 0.6269211769104004,
"learning_rate": 3.577449947312961e-05,
"loss": 0.2418,
"step": 810
},
{
"epoch": 0.8640674394099052,
"grad_norm": 0.5279048085212708,
"learning_rate": 3.5598876009834916e-05,
"loss": 0.246,
"step": 820
},
{
"epoch": 0.8746048472075869,
"grad_norm": 0.5837969183921814,
"learning_rate": 3.542325254654022e-05,
"loss": 0.256,
"step": 830
},
{
"epoch": 0.8851422550052687,
"grad_norm": 0.5384166240692139,
"learning_rate": 3.524762908324553e-05,
"loss": 0.2412,
"step": 840
},
{
"epoch": 0.8956796628029505,
"grad_norm": 0.5514136552810669,
"learning_rate": 3.507200561995083e-05,
"loss": 0.251,
"step": 850
},
{
"epoch": 0.8956796628029505,
"eval_loss": 0.3275190591812134,
"eval_runtime": 13.654,
"eval_samples_per_second": 35.155,
"eval_steps_per_second": 2.197,
"step": 850
},
{
"epoch": 0.9062170706006323,
"grad_norm": 0.5456790328025818,
"learning_rate": 3.489638215665613e-05,
"loss": 0.2337,
"step": 860
},
{
"epoch": 0.916754478398314,
"grad_norm": 0.5794851779937744,
"learning_rate": 3.472075869336143e-05,
"loss": 0.2462,
"step": 870
},
{
"epoch": 0.9272918861959958,
"grad_norm": 0.5296761393547058,
"learning_rate": 3.454513523006674e-05,
"loss": 0.2357,
"step": 880
},
{
"epoch": 0.9378292939936775,
"grad_norm": 0.5668390989303589,
"learning_rate": 3.436951176677204e-05,
"loss": 0.2439,
"step": 890
},
{
"epoch": 0.9483667017913593,
"grad_norm": 0.5026710629463196,
"learning_rate": 3.419388830347735e-05,
"loss": 0.2301,
"step": 900
},
{
"epoch": 0.9483667017913593,
"eval_loss": 0.32623717188835144,
"eval_runtime": 13.6644,
"eval_samples_per_second": 35.128,
"eval_steps_per_second": 2.195,
"step": 900
},
{
"epoch": 0.958904109589041,
"grad_norm": 0.5280706286430359,
"learning_rate": 3.4018264840182654e-05,
"loss": 0.238,
"step": 910
},
{
"epoch": 0.9694415173867229,
"grad_norm": 0.5578323006629944,
"learning_rate": 3.384264137688795e-05,
"loss": 0.2484,
"step": 920
},
{
"epoch": 0.9799789251844047,
"grad_norm": 0.5756859183311462,
"learning_rate": 3.366701791359326e-05,
"loss": 0.2539,
"step": 930
},
{
"epoch": 0.9905163329820864,
"grad_norm": 0.6118686199188232,
"learning_rate": 3.3491394450298564e-05,
"loss": 0.2401,
"step": 940
},
{
"epoch": 1.0010537407797682,
"grad_norm": 0.43512389063835144,
"learning_rate": 3.331577098700386e-05,
"loss": 0.2334,
"step": 950
},
{
"epoch": 1.0010537407797682,
"eval_loss": 0.32700982689857483,
"eval_runtime": 13.6561,
"eval_samples_per_second": 35.149,
"eval_steps_per_second": 2.197,
"step": 950
},
{
"epoch": 1.01159114857745,
"grad_norm": 0.5436535477638245,
"learning_rate": 3.314014752370917e-05,
"loss": 0.1933,
"step": 960
},
{
"epoch": 1.0221285563751317,
"grad_norm": 0.5048521161079407,
"learning_rate": 3.2964524060414474e-05,
"loss": 0.2088,
"step": 970
},
{
"epoch": 1.0326659641728135,
"grad_norm": 0.6231564283370972,
"learning_rate": 3.278890059711977e-05,
"loss": 0.2146,
"step": 980
},
{
"epoch": 1.0432033719704952,
"grad_norm": 0.6846581101417542,
"learning_rate": 3.261327713382508e-05,
"loss": 0.2149,
"step": 990
},
{
"epoch": 1.053740779768177,
"grad_norm": 0.5640810132026672,
"learning_rate": 3.2437653670530384e-05,
"loss": 0.2078,
"step": 1000
},
{
"epoch": 1.053740779768177,
"eval_loss": 0.3363134264945984,
"eval_runtime": 13.6484,
"eval_samples_per_second": 35.169,
"eval_steps_per_second": 2.198,
"step": 1000
},
{
"epoch": 1.064278187565859,
"grad_norm": 0.6279656887054443,
"learning_rate": 3.226203020723569e-05,
"loss": 0.2004,
"step": 1010
},
{
"epoch": 1.0748155953635405,
"grad_norm": 0.5957475304603577,
"learning_rate": 3.2086406743940996e-05,
"loss": 0.1952,
"step": 1020
},
{
"epoch": 1.0853530031612224,
"grad_norm": 0.6246820092201233,
"learning_rate": 3.1910783280646294e-05,
"loss": 0.2071,
"step": 1030
},
{
"epoch": 1.095890410958904,
"grad_norm": 0.5296807885169983,
"learning_rate": 3.17351598173516e-05,
"loss": 0.2077,
"step": 1040
},
{
"epoch": 1.106427818756586,
"grad_norm": 0.6367089152336121,
"learning_rate": 3.15595363540569e-05,
"loss": 0.2087,
"step": 1050
},
{
"epoch": 1.106427818756586,
"eval_loss": 0.3410273492336273,
"eval_runtime": 13.6191,
"eval_samples_per_second": 35.245,
"eval_steps_per_second": 2.203,
"step": 1050
},
{
"epoch": 1.1169652265542676,
"grad_norm": 0.5743587017059326,
"learning_rate": 3.1383912890762205e-05,
"loss": 0.2063,
"step": 1060
},
{
"epoch": 1.1275026343519494,
"grad_norm": 0.5830729603767395,
"learning_rate": 3.120828942746751e-05,
"loss": 0.2021,
"step": 1070
},
{
"epoch": 1.1380400421496313,
"grad_norm": 0.585697591304779,
"learning_rate": 3.1032665964172816e-05,
"loss": 0.2003,
"step": 1080
},
{
"epoch": 1.148577449947313,
"grad_norm": 0.6112616062164307,
"learning_rate": 3.085704250087812e-05,
"loss": 0.2035,
"step": 1090
},
{
"epoch": 1.1591148577449948,
"grad_norm": 0.6068351864814758,
"learning_rate": 3.068141903758342e-05,
"loss": 0.198,
"step": 1100
},
{
"epoch": 1.1591148577449948,
"eval_loss": 0.33999618887901306,
"eval_runtime": 13.5917,
"eval_samples_per_second": 35.316,
"eval_steps_per_second": 2.207,
"step": 1100
},
{
"epoch": 1.1696522655426764,
"grad_norm": 0.6323215961456299,
"learning_rate": 3.0505795574288726e-05,
"loss": 0.204,
"step": 1110
},
{
"epoch": 1.1801896733403583,
"grad_norm": 0.6767095923423767,
"learning_rate": 3.0330172110994032e-05,
"loss": 0.2,
"step": 1120
},
{
"epoch": 1.1907270811380402,
"grad_norm": 0.6808314919471741,
"learning_rate": 3.015454864769933e-05,
"loss": 0.2052,
"step": 1130
},
{
"epoch": 1.2012644889357218,
"grad_norm": 0.7271966934204102,
"learning_rate": 2.997892518440464e-05,
"loss": 0.2035,
"step": 1140
},
{
"epoch": 1.2118018967334037,
"grad_norm": 0.5914655327796936,
"learning_rate": 2.9803301721109945e-05,
"loss": 0.1999,
"step": 1150
},
{
"epoch": 1.2118018967334037,
"eval_loss": 0.3386387825012207,
"eval_runtime": 13.6001,
"eval_samples_per_second": 35.294,
"eval_steps_per_second": 2.206,
"step": 1150
},
{
"epoch": 1.2223393045310853,
"grad_norm": 0.6497891545295715,
"learning_rate": 2.9627678257815244e-05,
"loss": 0.2059,
"step": 1160
},
{
"epoch": 1.2328767123287672,
"grad_norm": 0.6975721120834351,
"learning_rate": 2.945205479452055e-05,
"loss": 0.1968,
"step": 1170
},
{
"epoch": 1.2434141201264488,
"grad_norm": 0.6115730404853821,
"learning_rate": 2.9276431331225852e-05,
"loss": 0.1983,
"step": 1180
},
{
"epoch": 1.2539515279241307,
"grad_norm": 0.6213293075561523,
"learning_rate": 2.9100807867931158e-05,
"loss": 0.2077,
"step": 1190
},
{
"epoch": 1.2644889357218125,
"grad_norm": 0.6509750485420227,
"learning_rate": 2.8925184404636463e-05,
"loss": 0.194,
"step": 1200
},
{
"epoch": 1.2644889357218125,
"eval_loss": 0.34055641293525696,
"eval_runtime": 13.997,
"eval_samples_per_second": 34.293,
"eval_steps_per_second": 2.143,
"step": 1200
},
{
"epoch": 1.2750263435194942,
"grad_norm": 0.4847603142261505,
"learning_rate": 2.8749560941341762e-05,
"loss": 0.1976,
"step": 1210
},
{
"epoch": 1.285563751317176,
"grad_norm": 0.6689492464065552,
"learning_rate": 2.8573937478047068e-05,
"loss": 0.19,
"step": 1220
},
{
"epoch": 1.2961011591148577,
"grad_norm": 0.6299355626106262,
"learning_rate": 2.839831401475237e-05,
"loss": 0.2025,
"step": 1230
},
{
"epoch": 1.3066385669125395,
"grad_norm": 0.5940089821815491,
"learning_rate": 2.8222690551457676e-05,
"loss": 0.1917,
"step": 1240
},
{
"epoch": 1.3171759747102212,
"grad_norm": 0.8054115772247314,
"learning_rate": 2.804706708816298e-05,
"loss": 0.1928,
"step": 1250
},
{
"epoch": 1.3171759747102212,
"eval_loss": 0.34280383586883545,
"eval_runtime": 13.6317,
"eval_samples_per_second": 35.212,
"eval_steps_per_second": 2.201,
"step": 1250
},
{
"epoch": 1.327713382507903,
"grad_norm": 0.5501131415367126,
"learning_rate": 2.7871443624868284e-05,
"loss": 0.1933,
"step": 1260
},
{
"epoch": 1.338250790305585,
"grad_norm": 0.6450643539428711,
"learning_rate": 2.769582016157359e-05,
"loss": 0.1994,
"step": 1270
},
{
"epoch": 1.3487881981032666,
"grad_norm": 0.6347033381462097,
"learning_rate": 2.7520196698278895e-05,
"loss": 0.2004,
"step": 1280
},
{
"epoch": 1.3593256059009484,
"grad_norm": 0.6968681216239929,
"learning_rate": 2.7344573234984194e-05,
"loss": 0.214,
"step": 1290
},
{
"epoch": 1.36986301369863,
"grad_norm": 0.5916845202445984,
"learning_rate": 2.71689497716895e-05,
"loss": 0.1849,
"step": 1300
},
{
"epoch": 1.36986301369863,
"eval_loss": 0.34068360924720764,
"eval_runtime": 13.6631,
"eval_samples_per_second": 35.131,
"eval_steps_per_second": 2.196,
"step": 1300
},
{
"epoch": 1.380400421496312,
"grad_norm": 0.7653020024299622,
"learning_rate": 2.6993326308394802e-05,
"loss": 0.1917,
"step": 1310
},
{
"epoch": 1.3909378292939936,
"grad_norm": 0.6557948589324951,
"learning_rate": 2.6817702845100107e-05,
"loss": 0.2008,
"step": 1320
},
{
"epoch": 1.4014752370916754,
"grad_norm": 0.57426518201828,
"learning_rate": 2.6642079381805413e-05,
"loss": 0.1889,
"step": 1330
},
{
"epoch": 1.4120126448893573,
"grad_norm": 0.6126993298530579,
"learning_rate": 2.6466455918510712e-05,
"loss": 0.1995,
"step": 1340
},
{
"epoch": 1.422550052687039,
"grad_norm": 0.6700727343559265,
"learning_rate": 2.6290832455216018e-05,
"loss": 0.1898,
"step": 1350
},
{
"epoch": 1.422550052687039,
"eval_loss": 0.3482060432434082,
"eval_runtime": 13.6643,
"eval_samples_per_second": 35.128,
"eval_steps_per_second": 2.195,
"step": 1350
},
{
"epoch": 1.4330874604847208,
"grad_norm": 0.6523577570915222,
"learning_rate": 2.611520899192132e-05,
"loss": 0.1977,
"step": 1360
},
{
"epoch": 1.4436248682824027,
"grad_norm": 0.6040759682655334,
"learning_rate": 2.5939585528626625e-05,
"loss": 0.1833,
"step": 1370
},
{
"epoch": 1.4541622760800843,
"grad_norm": 0.6998751163482666,
"learning_rate": 2.576396206533193e-05,
"loss": 0.1917,
"step": 1380
},
{
"epoch": 1.464699683877766,
"grad_norm": 0.7758492231369019,
"learning_rate": 2.5588338602037233e-05,
"loss": 0.192,
"step": 1390
},
{
"epoch": 1.4752370916754478,
"grad_norm": 0.708914577960968,
"learning_rate": 2.541271513874254e-05,
"loss": 0.1853,
"step": 1400
},
{
"epoch": 1.4752370916754478,
"eval_loss": 0.3444618582725525,
"eval_runtime": 13.653,
"eval_samples_per_second": 35.157,
"eval_steps_per_second": 2.197,
"step": 1400
},
{
"epoch": 1.4857744994731297,
"grad_norm": 0.6209197640419006,
"learning_rate": 2.5237091675447845e-05,
"loss": 0.1826,
"step": 1410
},
{
"epoch": 1.4963119072708113,
"grad_norm": 0.714142918586731,
"learning_rate": 2.5061468212153144e-05,
"loss": 0.1921,
"step": 1420
},
{
"epoch": 1.5068493150684932,
"grad_norm": 0.6723175644874573,
"learning_rate": 2.4885844748858446e-05,
"loss": 0.2006,
"step": 1430
},
{
"epoch": 1.517386722866175,
"grad_norm": 0.8485958576202393,
"learning_rate": 2.4710221285563755e-05,
"loss": 0.2032,
"step": 1440
},
{
"epoch": 1.5279241306638567,
"grad_norm": 0.6563605666160583,
"learning_rate": 2.4534597822269057e-05,
"loss": 0.1951,
"step": 1450
},
{
"epoch": 1.5279241306638567,
"eval_loss": 0.34677523374557495,
"eval_runtime": 13.6421,
"eval_samples_per_second": 35.185,
"eval_steps_per_second": 2.199,
"step": 1450
},
{
"epoch": 1.5384615384615383,
"grad_norm": 0.6512174010276794,
"learning_rate": 2.435897435897436e-05,
"loss": 0.1946,
"step": 1460
},
{
"epoch": 1.5489989462592202,
"grad_norm": 0.8586357831954956,
"learning_rate": 2.418335089567966e-05,
"loss": 0.1818,
"step": 1470
},
{
"epoch": 1.559536354056902,
"grad_norm": 0.6348748803138733,
"learning_rate": 2.4007727432384967e-05,
"loss": 0.19,
"step": 1480
},
{
"epoch": 1.5700737618545837,
"grad_norm": 0.6497516632080078,
"learning_rate": 2.3832103969090273e-05,
"loss": 0.1901,
"step": 1490
},
{
"epoch": 1.5806111696522656,
"grad_norm": 0.5382278561592102,
"learning_rate": 2.3656480505795575e-05,
"loss": 0.1877,
"step": 1500
},
{
"epoch": 1.5806111696522656,
"eval_loss": 0.35011863708496094,
"eval_runtime": 13.6377,
"eval_samples_per_second": 35.196,
"eval_steps_per_second": 2.2,
"step": 1500
},
{
"epoch": 1.5911485774499474,
"grad_norm": 0.5429180860519409,
"learning_rate": 2.3480857042500877e-05,
"loss": 0.1961,
"step": 1510
},
{
"epoch": 1.601685985247629,
"grad_norm": 0.6648889183998108,
"learning_rate": 2.3305233579206183e-05,
"loss": 0.1934,
"step": 1520
},
{
"epoch": 1.6122233930453107,
"grad_norm": 0.6552326679229736,
"learning_rate": 2.312961011591149e-05,
"loss": 0.1928,
"step": 1530
},
{
"epoch": 1.6227608008429928,
"grad_norm": 0.6863034963607788,
"learning_rate": 2.295398665261679e-05,
"loss": 0.1956,
"step": 1540
},
{
"epoch": 1.6332982086406744,
"grad_norm": 0.8960068225860596,
"learning_rate": 2.2778363189322093e-05,
"loss": 0.203,
"step": 1550
},
{
"epoch": 1.6332982086406744,
"eval_loss": 0.3496212661266327,
"eval_runtime": 13.629,
"eval_samples_per_second": 35.219,
"eval_steps_per_second": 2.201,
"step": 1550
},
{
"epoch": 1.643835616438356,
"grad_norm": 0.7601160407066345,
"learning_rate": 2.2602739726027396e-05,
"loss": 0.1934,
"step": 1560
},
{
"epoch": 1.654373024236038,
"grad_norm": 0.9118824601173401,
"learning_rate": 2.2427116262732705e-05,
"loss": 0.1861,
"step": 1570
},
{
"epoch": 1.6649104320337198,
"grad_norm": 0.8418806195259094,
"learning_rate": 2.2251492799438007e-05,
"loss": 0.1794,
"step": 1580
},
{
"epoch": 1.6754478398314014,
"grad_norm": 0.7187584042549133,
"learning_rate": 2.207586933614331e-05,
"loss": 0.1878,
"step": 1590
},
{
"epoch": 1.685985247629083,
"grad_norm": 0.7605792284011841,
"learning_rate": 2.190024587284861e-05,
"loss": 0.1868,
"step": 1600
},
{
"epoch": 1.685985247629083,
"eval_loss": 0.3536493182182312,
"eval_runtime": 13.6104,
"eval_samples_per_second": 35.267,
"eval_steps_per_second": 2.204,
"step": 1600
},
{
"epoch": 1.6965226554267652,
"grad_norm": 0.6814613342285156,
"learning_rate": 2.1724622409553917e-05,
"loss": 0.1851,
"step": 1610
},
{
"epoch": 1.7070600632244468,
"grad_norm": 0.6218210458755493,
"learning_rate": 2.1548998946259223e-05,
"loss": 0.1921,
"step": 1620
},
{
"epoch": 1.7175974710221285,
"grad_norm": 0.9718311429023743,
"learning_rate": 2.1373375482964525e-05,
"loss": 0.1844,
"step": 1630
},
{
"epoch": 1.7281348788198103,
"grad_norm": 0.6959328055381775,
"learning_rate": 2.1197752019669827e-05,
"loss": 0.1802,
"step": 1640
},
{
"epoch": 1.7386722866174922,
"grad_norm": 0.6459183096885681,
"learning_rate": 2.1022128556375133e-05,
"loss": 0.1801,
"step": 1650
},
{
"epoch": 1.7386722866174922,
"eval_loss": 0.3579024374485016,
"eval_runtime": 13.6133,
"eval_samples_per_second": 35.26,
"eval_steps_per_second": 2.204,
"step": 1650
},
{
"epoch": 1.7492096944151738,
"grad_norm": 0.8063459396362305,
"learning_rate": 2.084650509308044e-05,
"loss": 0.1793,
"step": 1660
},
{
"epoch": 1.7597471022128557,
"grad_norm": 0.7973386645317078,
"learning_rate": 2.067088162978574e-05,
"loss": 0.1851,
"step": 1670
},
{
"epoch": 1.7702845100105375,
"grad_norm": 0.6851320862770081,
"learning_rate": 2.0495258166491043e-05,
"loss": 0.1745,
"step": 1680
},
{
"epoch": 1.7808219178082192,
"grad_norm": 0.6314299702644348,
"learning_rate": 2.0319634703196345e-05,
"loss": 0.1891,
"step": 1690
},
{
"epoch": 1.7913593256059008,
"grad_norm": 0.7445196509361267,
"learning_rate": 2.0144011239901654e-05,
"loss": 0.173,
"step": 1700
},
{
"epoch": 1.7913593256059008,
"eval_loss": 0.3595808744430542,
"eval_runtime": 13.6356,
"eval_samples_per_second": 35.202,
"eval_steps_per_second": 2.2,
"step": 1700
}
],
"logging_steps": 10,
"max_steps": 2847,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 100,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 2.0400034712213914e+17,
"train_batch_size": 16,
"trial_name": null,
"trial_params": null
}