{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 2.73972602739726,
"eval_steps": 50,
"global_step": 2600,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.01053740779768177,
"grad_norm": 3.070249080657959,
"learning_rate": 4.982437653670531e-05,
"loss": 1.7879,
"step": 10
},
{
"epoch": 0.02107481559536354,
"grad_norm": 1.702326774597168,
"learning_rate": 4.964875307341061e-05,
"loss": 0.5567,
"step": 20
},
{
"epoch": 0.03161222339304531,
"grad_norm": 1.1947294473648071,
"learning_rate": 4.947312961011591e-05,
"loss": 0.4493,
"step": 30
},
{
"epoch": 0.04214963119072708,
"grad_norm": 0.9556658267974854,
"learning_rate": 4.929750614682122e-05,
"loss": 0.3728,
"step": 40
},
{
"epoch": 0.05268703898840885,
"grad_norm": 0.7952510714530945,
"learning_rate": 4.9121882683526524e-05,
"loss": 0.3535,
"step": 50
},
{
"epoch": 0.05268703898840885,
"eval_loss": 0.4311191439628601,
"eval_runtime": 13.6539,
"eval_samples_per_second": 35.155,
"eval_steps_per_second": 2.197,
"step": 50
},
{
"epoch": 0.06322444678609063,
"grad_norm": 0.6962826251983643,
"learning_rate": 4.894625922023183e-05,
"loss": 0.3507,
"step": 60
},
{
"epoch": 0.0737618545837724,
"grad_norm": 0.6941961646080017,
"learning_rate": 4.877063575693713e-05,
"loss": 0.3585,
"step": 70
},
{
"epoch": 0.08429926238145416,
"grad_norm": 0.6864392757415771,
"learning_rate": 4.8595012293642434e-05,
"loss": 0.3496,
"step": 80
},
{
"epoch": 0.09483667017913593,
"grad_norm": 0.7322937846183777,
"learning_rate": 4.841938883034774e-05,
"loss": 0.3295,
"step": 90
},
{
"epoch": 0.1053740779768177,
"grad_norm": 0.6921488046646118,
"learning_rate": 4.824376536705304e-05,
"loss": 0.3357,
"step": 100
},
{
"epoch": 0.1053740779768177,
"eval_loss": 0.39120009541511536,
"eval_runtime": 13.7031,
"eval_samples_per_second": 35.029,
"eval_steps_per_second": 2.189,
"step": 100
},
{
"epoch": 0.11591148577449947,
"grad_norm": 0.6553240418434143,
"learning_rate": 4.8068141903758344e-05,
"loss": 0.3105,
"step": 110
},
{
"epoch": 0.12644889357218125,
"grad_norm": 0.5637819170951843,
"learning_rate": 4.789251844046364e-05,
"loss": 0.3164,
"step": 120
},
{
"epoch": 0.136986301369863,
"grad_norm": 0.6341928839683533,
"learning_rate": 4.7716894977168955e-05,
"loss": 0.304,
"step": 130
},
{
"epoch": 0.1475237091675448,
"grad_norm": 0.5917785167694092,
"learning_rate": 4.754127151387426e-05,
"loss": 0.3234,
"step": 140
},
{
"epoch": 0.15806111696522657,
"grad_norm": 0.5884453654289246,
"learning_rate": 4.736564805057956e-05,
"loss": 0.317,
"step": 150
},
{
"epoch": 0.15806111696522657,
"eval_loss": 0.37688738107681274,
"eval_runtime": 13.6535,
"eval_samples_per_second": 35.156,
"eval_steps_per_second": 2.197,
"step": 150
},
{
"epoch": 0.16859852476290832,
"grad_norm": 0.5819964408874512,
"learning_rate": 4.7190024587284866e-05,
"loss": 0.2992,
"step": 160
},
{
"epoch": 0.1791359325605901,
"grad_norm": 0.689468264579773,
"learning_rate": 4.7014401123990165e-05,
"loss": 0.3168,
"step": 170
},
{
"epoch": 0.18967334035827185,
"grad_norm": 0.6950872540473938,
"learning_rate": 4.683877766069547e-05,
"loss": 0.3041,
"step": 180
},
{
"epoch": 0.20021074815595363,
"grad_norm": 0.8322122097015381,
"learning_rate": 4.6663154197400776e-05,
"loss": 0.3028,
"step": 190
},
{
"epoch": 0.2107481559536354,
"grad_norm": 0.5850774645805359,
"learning_rate": 4.6487530734106075e-05,
"loss": 0.2992,
"step": 200
},
{
"epoch": 0.2107481559536354,
"eval_loss": 0.36230018734931946,
"eval_runtime": 13.6165,
"eval_samples_per_second": 35.251,
"eval_steps_per_second": 2.203,
"step": 200
},
{
"epoch": 0.22128556375131717,
"grad_norm": 0.6668715476989746,
"learning_rate": 4.631190727081138e-05,
"loss": 0.2924,
"step": 210
},
{
"epoch": 0.23182297154899895,
"grad_norm": 0.4749641418457031,
"learning_rate": 4.6136283807516686e-05,
"loss": 0.3017,
"step": 220
},
{
"epoch": 0.24236037934668073,
"grad_norm": 0.6381515860557556,
"learning_rate": 4.596066034422199e-05,
"loss": 0.2887,
"step": 230
},
{
"epoch": 0.2528977871443625,
"grad_norm": 0.49952977895736694,
"learning_rate": 4.57850368809273e-05,
"loss": 0.2833,
"step": 240
},
{
"epoch": 0.26343519494204426,
"grad_norm": 0.699518084526062,
"learning_rate": 4.5609413417632596e-05,
"loss": 0.2859,
"step": 250
},
{
"epoch": 0.26343519494204426,
"eval_loss": 0.3563433885574341,
"eval_runtime": 13.6275,
"eval_samples_per_second": 35.223,
"eval_steps_per_second": 2.201,
"step": 250
},
{
"epoch": 0.273972602739726,
"grad_norm": 0.5912085175514221,
"learning_rate": 4.54337899543379e-05,
"loss": 0.302,
"step": 260
},
{
"epoch": 0.2845100105374078,
"grad_norm": 0.6353363990783691,
"learning_rate": 4.525816649104321e-05,
"loss": 0.3112,
"step": 270
},
{
"epoch": 0.2950474183350896,
"grad_norm": 0.5483567118644714,
"learning_rate": 4.5082543027748506e-05,
"loss": 0.2808,
"step": 280
},
{
"epoch": 0.3055848261327713,
"grad_norm": 0.5003193616867065,
"learning_rate": 4.490691956445381e-05,
"loss": 0.3185,
"step": 290
},
{
"epoch": 0.31612223393045313,
"grad_norm": 0.4919240176677704,
"learning_rate": 4.473129610115912e-05,
"loss": 0.2883,
"step": 300
},
{
"epoch": 0.31612223393045313,
"eval_loss": 0.35315924882888794,
"eval_runtime": 13.6133,
"eval_samples_per_second": 35.26,
"eval_steps_per_second": 2.204,
"step": 300
},
{
"epoch": 0.3266596417281349,
"grad_norm": 0.5145038366317749,
"learning_rate": 4.455567263786442e-05,
"loss": 0.3084,
"step": 310
},
{
"epoch": 0.33719704952581664,
"grad_norm": 0.6343855261802673,
"learning_rate": 4.438004917456973e-05,
"loss": 0.295,
"step": 320
},
{
"epoch": 0.34773445732349845,
"grad_norm": 0.5336400270462036,
"learning_rate": 4.420442571127503e-05,
"loss": 0.2882,
"step": 330
},
{
"epoch": 0.3582718651211802,
"grad_norm": 0.49547308683395386,
"learning_rate": 4.4028802247980333e-05,
"loss": 0.3084,
"step": 340
},
{
"epoch": 0.36880927291886195,
"grad_norm": 0.5037292242050171,
"learning_rate": 4.385317878468563e-05,
"loss": 0.3084,
"step": 350
},
{
"epoch": 0.36880927291886195,
"eval_loss": 0.34608179330825806,
"eval_runtime": 13.6537,
"eval_samples_per_second": 35.155,
"eval_steps_per_second": 2.197,
"step": 350
},
{
"epoch": 0.3793466807165437,
"grad_norm": 0.5956543684005737,
"learning_rate": 4.367755532139094e-05,
"loss": 0.2763,
"step": 360
},
{
"epoch": 0.3898840885142255,
"grad_norm": 0.6263634562492371,
"learning_rate": 4.3501931858096244e-05,
"loss": 0.3025,
"step": 370
},
{
"epoch": 0.40042149631190727,
"grad_norm": 0.4832920730113983,
"learning_rate": 4.332630839480154e-05,
"loss": 0.2792,
"step": 380
},
{
"epoch": 0.410958904109589,
"grad_norm": 0.4969714879989624,
"learning_rate": 4.3150684931506855e-05,
"loss": 0.2704,
"step": 390
},
{
"epoch": 0.4214963119072708,
"grad_norm": 0.568900465965271,
"learning_rate": 4.297506146821216e-05,
"loss": 0.2734,
"step": 400
},
{
"epoch": 0.4214963119072708,
"eval_loss": 0.34151414036750793,
"eval_runtime": 13.664,
"eval_samples_per_second": 35.129,
"eval_steps_per_second": 2.196,
"step": 400
},
{
"epoch": 0.4320337197049526,
"grad_norm": 0.5023282170295715,
"learning_rate": 4.279943800491746e-05,
"loss": 0.2845,
"step": 410
},
{
"epoch": 0.44257112750263433,
"grad_norm": 0.531538188457489,
"learning_rate": 4.2623814541622765e-05,
"loss": 0.276,
"step": 420
},
{
"epoch": 0.45310853530031614,
"grad_norm": 0.6686979532241821,
"learning_rate": 4.2448191078328064e-05,
"loss": 0.283,
"step": 430
},
{
"epoch": 0.4636459430979979,
"grad_norm": 0.5194190144538879,
"learning_rate": 4.227256761503337e-05,
"loss": 0.2636,
"step": 440
},
{
"epoch": 0.47418335089567965,
"grad_norm": 0.6105541586875916,
"learning_rate": 4.2096944151738675e-05,
"loss": 0.2713,
"step": 450
},
{
"epoch": 0.47418335089567965,
"eval_loss": 0.33879777789115906,
"eval_runtime": 13.6666,
"eval_samples_per_second": 35.122,
"eval_steps_per_second": 2.195,
"step": 450
},
{
"epoch": 0.48472075869336145,
"grad_norm": 0.4929138123989105,
"learning_rate": 4.1921320688443974e-05,
"loss": 0.261,
"step": 460
},
{
"epoch": 0.4952581664910432,
"grad_norm": 0.584095299243927,
"learning_rate": 4.174569722514928e-05,
"loss": 0.2714,
"step": 470
},
{
"epoch": 0.505795574288725,
"grad_norm": 0.5386167764663696,
"learning_rate": 4.1570073761854585e-05,
"loss": 0.2855,
"step": 480
},
{
"epoch": 0.5163329820864068,
"grad_norm": 0.5819774270057678,
"learning_rate": 4.139445029855989e-05,
"loss": 0.2911,
"step": 490
},
{
"epoch": 0.5268703898840885,
"grad_norm": 0.5875944495201111,
"learning_rate": 4.12188268352652e-05,
"loss": 0.2619,
"step": 500
},
{
"epoch": 0.5268703898840885,
"eval_loss": 0.3364439010620117,
"eval_runtime": 13.6731,
"eval_samples_per_second": 35.105,
"eval_steps_per_second": 2.194,
"step": 500
},
{
"epoch": 0.5374077976817703,
"grad_norm": 0.6705812811851501,
"learning_rate": 4.1043203371970496e-05,
"loss": 0.2792,
"step": 510
},
{
"epoch": 0.547945205479452,
"grad_norm": 0.5975854396820068,
"learning_rate": 4.08675799086758e-05,
"loss": 0.2611,
"step": 520
},
{
"epoch": 0.5584826132771338,
"grad_norm": 0.5905255675315857,
"learning_rate": 4.069195644538111e-05,
"loss": 0.2688,
"step": 530
},
{
"epoch": 0.5690200210748156,
"grad_norm": 0.6222755312919617,
"learning_rate": 4.0516332982086406e-05,
"loss": 0.2968,
"step": 540
},
{
"epoch": 0.5795574288724974,
"grad_norm": 0.5604814291000366,
"learning_rate": 4.034070951879171e-05,
"loss": 0.2623,
"step": 550
},
{
"epoch": 0.5795574288724974,
"eval_loss": 0.3329848647117615,
"eval_runtime": 13.6573,
"eval_samples_per_second": 35.146,
"eval_steps_per_second": 2.197,
"step": 550
},
{
"epoch": 0.5900948366701791,
"grad_norm": 0.552557647228241,
"learning_rate": 4.016508605549702e-05,
"loss": 0.2579,
"step": 560
},
{
"epoch": 0.6006322444678609,
"grad_norm": 0.7151882648468018,
"learning_rate": 3.998946259220232e-05,
"loss": 0.263,
"step": 570
},
{
"epoch": 0.6111696522655427,
"grad_norm": 0.7065399289131165,
"learning_rate": 3.981383912890763e-05,
"loss": 0.2626,
"step": 580
},
{
"epoch": 0.6217070600632244,
"grad_norm": 0.5590985417366028,
"learning_rate": 3.963821566561293e-05,
"loss": 0.2614,
"step": 590
},
{
"epoch": 0.6322444678609063,
"grad_norm": 0.6018164157867432,
"learning_rate": 3.946259220231823e-05,
"loss": 0.2547,
"step": 600
},
{
"epoch": 0.6322444678609063,
"eval_loss": 0.3322373926639557,
"eval_runtime": 13.6386,
"eval_samples_per_second": 35.194,
"eval_steps_per_second": 2.2,
"step": 600
},
{
"epoch": 0.642781875658588,
"grad_norm": 0.6659051179885864,
"learning_rate": 3.928696873902353e-05,
"loss": 0.2768,
"step": 610
},
{
"epoch": 0.6533192834562698,
"grad_norm": 0.6509780883789062,
"learning_rate": 3.911134527572884e-05,
"loss": 0.272,
"step": 620
},
{
"epoch": 0.6638566912539515,
"grad_norm": 0.5752527713775635,
"learning_rate": 3.893572181243414e-05,
"loss": 0.2625,
"step": 630
},
{
"epoch": 0.6743940990516333,
"grad_norm": 0.4737485349178314,
"learning_rate": 3.876009834913945e-05,
"loss": 0.2668,
"step": 640
},
{
"epoch": 0.684931506849315,
"grad_norm": 0.548322319984436,
"learning_rate": 3.8584474885844754e-05,
"loss": 0.2597,
"step": 650
},
{
"epoch": 0.684931506849315,
"eval_loss": 0.3307412266731262,
"eval_runtime": 13.6407,
"eval_samples_per_second": 35.189,
"eval_steps_per_second": 2.199,
"step": 650
},
{
"epoch": 0.6954689146469969,
"grad_norm": 0.5635099411010742,
"learning_rate": 3.840885142255005e-05,
"loss": 0.2453,
"step": 660
},
{
"epoch": 0.7060063224446786,
"grad_norm": 0.6012313365936279,
"learning_rate": 3.823322795925536e-05,
"loss": 0.2695,
"step": 670
},
{
"epoch": 0.7165437302423604,
"grad_norm": 0.6025511026382446,
"learning_rate": 3.8057604495960664e-05,
"loss": 0.2599,
"step": 680
},
{
"epoch": 0.7270811380400422,
"grad_norm": 0.5996225476264954,
"learning_rate": 3.788198103266596e-05,
"loss": 0.2624,
"step": 690
},
{
"epoch": 0.7376185458377239,
"grad_norm": 0.5269689559936523,
"learning_rate": 3.770635756937127e-05,
"loss": 0.2498,
"step": 700
},
{
"epoch": 0.7376185458377239,
"eval_loss": 0.3312172591686249,
"eval_runtime": 13.6169,
"eval_samples_per_second": 35.25,
"eval_steps_per_second": 2.203,
"step": 700
},
{
"epoch": 0.7481559536354057,
"grad_norm": 0.6115732192993164,
"learning_rate": 3.7530734106076575e-05,
"loss": 0.2566,
"step": 710
},
{
"epoch": 0.7586933614330874,
"grad_norm": 0.4686366319656372,
"learning_rate": 3.7355110642781874e-05,
"loss": 0.2325,
"step": 720
},
{
"epoch": 0.7692307692307693,
"grad_norm": 0.5317673087120056,
"learning_rate": 3.717948717948718e-05,
"loss": 0.2429,
"step": 730
},
{
"epoch": 0.779768177028451,
"grad_norm": 0.5183236598968506,
"learning_rate": 3.7003863716192485e-05,
"loss": 0.2562,
"step": 740
},
{
"epoch": 0.7903055848261328,
"grad_norm": 0.5319241881370544,
"learning_rate": 3.682824025289779e-05,
"loss": 0.2356,
"step": 750
},
{
"epoch": 0.7903055848261328,
"eval_loss": 0.3285733163356781,
"eval_runtime": 13.6213,
"eval_samples_per_second": 35.239,
"eval_steps_per_second": 2.202,
"step": 750
},
{
"epoch": 0.8008429926238145,
"grad_norm": 0.5814469456672668,
"learning_rate": 3.6652616789603096e-05,
"loss": 0.2428,
"step": 760
},
{
"epoch": 0.8113804004214963,
"grad_norm": 0.5138055086135864,
"learning_rate": 3.6476993326308395e-05,
"loss": 0.2639,
"step": 770
},
{
"epoch": 0.821917808219178,
"grad_norm": 0.5066888332366943,
"learning_rate": 3.63013698630137e-05,
"loss": 0.2373,
"step": 780
},
{
"epoch": 0.8324552160168599,
"grad_norm": 0.513317883014679,
"learning_rate": 3.6125746399719e-05,
"loss": 0.2462,
"step": 790
},
{
"epoch": 0.8429926238145417,
"grad_norm": 0.5878031253814697,
"learning_rate": 3.5950122936424305e-05,
"loss": 0.2397,
"step": 800
},
{
"epoch": 0.8429926238145417,
"eval_loss": 0.3288480043411255,
"eval_runtime": 13.6316,
"eval_samples_per_second": 35.212,
"eval_steps_per_second": 2.201,
"step": 800
},
{
"epoch": 0.8535300316122234,
"grad_norm": 0.6269211769104004,
"learning_rate": 3.577449947312961e-05,
"loss": 0.2418,
"step": 810
},
{
"epoch": 0.8640674394099052,
"grad_norm": 0.5279048085212708,
"learning_rate": 3.5598876009834916e-05,
"loss": 0.246,
"step": 820
},
{
"epoch": 0.8746048472075869,
"grad_norm": 0.5837969183921814,
"learning_rate": 3.542325254654022e-05,
"loss": 0.256,
"step": 830
},
{
"epoch": 0.8851422550052687,
"grad_norm": 0.5384166240692139,
"learning_rate": 3.524762908324553e-05,
"loss": 0.2412,
"step": 840
},
{
"epoch": 0.8956796628029505,
"grad_norm": 0.5514136552810669,
"learning_rate": 3.507200561995083e-05,
"loss": 0.251,
"step": 850
},
{
"epoch": 0.8956796628029505,
"eval_loss": 0.3275190591812134,
"eval_runtime": 13.654,
"eval_samples_per_second": 35.155,
"eval_steps_per_second": 2.197,
"step": 850
},
{
"epoch": 0.9062170706006323,
"grad_norm": 0.5456790328025818,
"learning_rate": 3.489638215665613e-05,
"loss": 0.2337,
"step": 860
},
{
"epoch": 0.916754478398314,
"grad_norm": 0.5794851779937744,
"learning_rate": 3.472075869336143e-05,
"loss": 0.2462,
"step": 870
},
{
"epoch": 0.9272918861959958,
"grad_norm": 0.5296761393547058,
"learning_rate": 3.454513523006674e-05,
"loss": 0.2357,
"step": 880
},
{
"epoch": 0.9378292939936775,
"grad_norm": 0.5668390989303589,
"learning_rate": 3.436951176677204e-05,
"loss": 0.2439,
"step": 890
},
{
"epoch": 0.9483667017913593,
"grad_norm": 0.5026710629463196,
"learning_rate": 3.419388830347735e-05,
"loss": 0.2301,
"step": 900
},
{
"epoch": 0.9483667017913593,
"eval_loss": 0.32623717188835144,
"eval_runtime": 13.6644,
"eval_samples_per_second": 35.128,
"eval_steps_per_second": 2.195,
"step": 900
},
{
"epoch": 0.958904109589041,
"grad_norm": 0.5280706286430359,
"learning_rate": 3.4018264840182654e-05,
"loss": 0.238,
"step": 910
},
{
"epoch": 0.9694415173867229,
"grad_norm": 0.5578323006629944,
"learning_rate": 3.384264137688795e-05,
"loss": 0.2484,
"step": 920
},
{
"epoch": 0.9799789251844047,
"grad_norm": 0.5756859183311462,
"learning_rate": 3.366701791359326e-05,
"loss": 0.2539,
"step": 930
},
{
"epoch": 0.9905163329820864,
"grad_norm": 0.6118686199188232,
"learning_rate": 3.3491394450298564e-05,
"loss": 0.2401,
"step": 940
},
{
"epoch": 1.0010537407797682,
"grad_norm": 0.43512389063835144,
"learning_rate": 3.331577098700386e-05,
"loss": 0.2334,
"step": 950
},
{
"epoch": 1.0010537407797682,
"eval_loss": 0.32700982689857483,
"eval_runtime": 13.6561,
"eval_samples_per_second": 35.149,
"eval_steps_per_second": 2.197,
"step": 950
},
{
"epoch": 1.01159114857745,
"grad_norm": 0.5436535477638245,
"learning_rate": 3.314014752370917e-05,
"loss": 0.1933,
"step": 960
},
{
"epoch": 1.0221285563751317,
"grad_norm": 0.5048521161079407,
"learning_rate": 3.2964524060414474e-05,
"loss": 0.2088,
"step": 970
},
{
"epoch": 1.0326659641728135,
"grad_norm": 0.6231564283370972,
"learning_rate": 3.278890059711977e-05,
"loss": 0.2146,
"step": 980
},
{
"epoch": 1.0432033719704952,
"grad_norm": 0.6846581101417542,
"learning_rate": 3.261327713382508e-05,
"loss": 0.2149,
"step": 990
},
{
"epoch": 1.053740779768177,
"grad_norm": 0.5640810132026672,
"learning_rate": 3.2437653670530384e-05,
"loss": 0.2078,
"step": 1000
},
{
"epoch": 1.053740779768177,
"eval_loss": 0.3363134264945984,
"eval_runtime": 13.6484,
"eval_samples_per_second": 35.169,
"eval_steps_per_second": 2.198,
"step": 1000
},
{
"epoch": 1.064278187565859,
"grad_norm": 0.6279656887054443,
"learning_rate": 3.226203020723569e-05,
"loss": 0.2004,
"step": 1010
},
{
"epoch": 1.0748155953635405,
"grad_norm": 0.5957475304603577,
"learning_rate": 3.2086406743940996e-05,
"loss": 0.1952,
"step": 1020
},
{
"epoch": 1.0853530031612224,
"grad_norm": 0.6246820092201233,
"learning_rate": 3.1910783280646294e-05,
"loss": 0.2071,
"step": 1030
},
{
"epoch": 1.095890410958904,
"grad_norm": 0.5296807885169983,
"learning_rate": 3.17351598173516e-05,
"loss": 0.2077,
"step": 1040
},
{
"epoch": 1.106427818756586,
"grad_norm": 0.6367089152336121,
"learning_rate": 3.15595363540569e-05,
"loss": 0.2087,
"step": 1050
},
{
"epoch": 1.106427818756586,
"eval_loss": 0.3410273492336273,
"eval_runtime": 13.6191,
"eval_samples_per_second": 35.245,
"eval_steps_per_second": 2.203,
"step": 1050
},
{
"epoch": 1.1169652265542676,
"grad_norm": 0.5743587017059326,
"learning_rate": 3.1383912890762205e-05,
"loss": 0.2063,
"step": 1060
},
{
"epoch": 1.1275026343519494,
"grad_norm": 0.5830729603767395,
"learning_rate": 3.120828942746751e-05,
"loss": 0.2021,
"step": 1070
},
{
"epoch": 1.1380400421496313,
"grad_norm": 0.585697591304779,
"learning_rate": 3.1032665964172816e-05,
"loss": 0.2003,
"step": 1080
},
{
"epoch": 1.148577449947313,
"grad_norm": 0.6112616062164307,
"learning_rate": 3.085704250087812e-05,
"loss": 0.2035,
"step": 1090
},
{
"epoch": 1.1591148577449948,
"grad_norm": 0.6068351864814758,
"learning_rate": 3.068141903758342e-05,
"loss": 0.198,
"step": 1100
},
{
"epoch": 1.1591148577449948,
"eval_loss": 0.33999618887901306,
"eval_runtime": 13.5917,
"eval_samples_per_second": 35.316,
"eval_steps_per_second": 2.207,
"step": 1100
},
{
"epoch": 1.1696522655426764,
"grad_norm": 0.6323215961456299,
"learning_rate": 3.0505795574288726e-05,
"loss": 0.204,
"step": 1110
},
{
"epoch": 1.1801896733403583,
"grad_norm": 0.6767095923423767,
"learning_rate": 3.0330172110994032e-05,
"loss": 0.2,
"step": 1120
},
{
"epoch": 1.1907270811380402,
"grad_norm": 0.6808314919471741,
"learning_rate": 3.015454864769933e-05,
"loss": 0.2052,
"step": 1130
},
{
"epoch": 1.2012644889357218,
"grad_norm": 0.7271966934204102,
"learning_rate": 2.997892518440464e-05,
"loss": 0.2035,
"step": 1140
},
{
"epoch": 1.2118018967334037,
"grad_norm": 0.5914655327796936,
"learning_rate": 2.9803301721109945e-05,
"loss": 0.1999,
"step": 1150
},
{
"epoch": 1.2118018967334037,
"eval_loss": 0.3386387825012207,
"eval_runtime": 13.6001,
"eval_samples_per_second": 35.294,
"eval_steps_per_second": 2.206,
"step": 1150
},
{
"epoch": 1.2223393045310853,
"grad_norm": 0.6497891545295715,
"learning_rate": 2.9627678257815244e-05,
"loss": 0.2059,
"step": 1160
},
{
"epoch": 1.2328767123287672,
"grad_norm": 0.6975721120834351,
"learning_rate": 2.945205479452055e-05,
"loss": 0.1968,
"step": 1170
},
{
"epoch": 1.2434141201264488,
"grad_norm": 0.6115730404853821,
"learning_rate": 2.9276431331225852e-05,
"loss": 0.1983,
"step": 1180
},
{
"epoch": 1.2539515279241307,
"grad_norm": 0.6213293075561523,
"learning_rate": 2.9100807867931158e-05,
"loss": 0.2077,
"step": 1190
},
{
"epoch": 1.2644889357218125,
"grad_norm": 0.6509750485420227,
"learning_rate": 2.8925184404636463e-05,
"loss": 0.194,
"step": 1200
},
{
"epoch": 1.2644889357218125,
"eval_loss": 0.34055641293525696,
"eval_runtime": 13.997,
"eval_samples_per_second": 34.293,
"eval_steps_per_second": 2.143,
"step": 1200
},
{
"epoch": 1.2750263435194942,
"grad_norm": 0.4847603142261505,
"learning_rate": 2.8749560941341762e-05,
"loss": 0.1976,
"step": 1210
},
{
"epoch": 1.285563751317176,
"grad_norm": 0.6689492464065552,
"learning_rate": 2.8573937478047068e-05,
"loss": 0.19,
"step": 1220
},
{
"epoch": 1.2961011591148577,
"grad_norm": 0.6299355626106262,
"learning_rate": 2.839831401475237e-05,
"loss": 0.2025,
"step": 1230
},
{
"epoch": 1.3066385669125395,
"grad_norm": 0.5940089821815491,
"learning_rate": 2.8222690551457676e-05,
"loss": 0.1917,
"step": 1240
},
{
"epoch": 1.3171759747102212,
"grad_norm": 0.8054115772247314,
"learning_rate": 2.804706708816298e-05,
"loss": 0.1928,
"step": 1250
},
{
"epoch": 1.3171759747102212,
"eval_loss": 0.34280383586883545,
"eval_runtime": 13.6317,
"eval_samples_per_second": 35.212,
"eval_steps_per_second": 2.201,
"step": 1250
},
{
"epoch": 1.327713382507903,
"grad_norm": 0.5501131415367126,
"learning_rate": 2.7871443624868284e-05,
"loss": 0.1933,
"step": 1260
},
{
"epoch": 1.338250790305585,
"grad_norm": 0.6450643539428711,
"learning_rate": 2.769582016157359e-05,
"loss": 0.1994,
"step": 1270
},
{
"epoch": 1.3487881981032666,
"grad_norm": 0.6347033381462097,
"learning_rate": 2.7520196698278895e-05,
"loss": 0.2004,
"step": 1280
},
{
"epoch": 1.3593256059009484,
"grad_norm": 0.6968681216239929,
"learning_rate": 2.7344573234984194e-05,
"loss": 0.214,
"step": 1290
},
{
"epoch": 1.36986301369863,
"grad_norm": 0.5916845202445984,
"learning_rate": 2.71689497716895e-05,
"loss": 0.1849,
"step": 1300
},
{
"epoch": 1.36986301369863,
"eval_loss": 0.34068360924720764,
"eval_runtime": 13.6631,
"eval_samples_per_second": 35.131,
"eval_steps_per_second": 2.196,
"step": 1300
},
{
"epoch": 1.380400421496312,
"grad_norm": 0.7653020024299622,
"learning_rate": 2.6993326308394802e-05,
"loss": 0.1917,
"step": 1310
},
{
"epoch": 1.3909378292939936,
"grad_norm": 0.6557948589324951,
"learning_rate": 2.6817702845100107e-05,
"loss": 0.2008,
"step": 1320
},
{
"epoch": 1.4014752370916754,
"grad_norm": 0.57426518201828,
"learning_rate": 2.6642079381805413e-05,
"loss": 0.1889,
"step": 1330
},
{
"epoch": 1.4120126448893573,
"grad_norm": 0.6126993298530579,
"learning_rate": 2.6466455918510712e-05,
"loss": 0.1995,
"step": 1340
},
{
"epoch": 1.422550052687039,
"grad_norm": 0.6700727343559265,
"learning_rate": 2.6290832455216018e-05,
"loss": 0.1898,
"step": 1350
},
{
"epoch": 1.422550052687039,
"eval_loss": 0.3482060432434082,
"eval_runtime": 13.6643,
"eval_samples_per_second": 35.128,
"eval_steps_per_second": 2.195,
"step": 1350
},
{
"epoch": 1.4330874604847208,
"grad_norm": 0.6523577570915222,
"learning_rate": 2.611520899192132e-05,
"loss": 0.1977,
"step": 1360
},
{
"epoch": 1.4436248682824027,
"grad_norm": 0.6040759682655334,
"learning_rate": 2.5939585528626625e-05,
"loss": 0.1833,
"step": 1370
},
{
"epoch": 1.4541622760800843,
"grad_norm": 0.6998751163482666,
"learning_rate": 2.576396206533193e-05,
"loss": 0.1917,
"step": 1380
},
{
"epoch": 1.464699683877766,
"grad_norm": 0.7758492231369019,
"learning_rate": 2.5588338602037233e-05,
"loss": 0.192,
"step": 1390
},
{
"epoch": 1.4752370916754478,
"grad_norm": 0.708914577960968,
"learning_rate": 2.541271513874254e-05,
"loss": 0.1853,
"step": 1400
},
{
"epoch": 1.4752370916754478,
"eval_loss": 0.3444618582725525,
"eval_runtime": 13.653,
"eval_samples_per_second": 35.157,
"eval_steps_per_second": 2.197,
"step": 1400
},
{
"epoch": 1.4857744994731297,
"grad_norm": 0.6209197640419006,
"learning_rate": 2.5237091675447845e-05,
"loss": 0.1826,
"step": 1410
},
{
"epoch": 1.4963119072708113,
"grad_norm": 0.714142918586731,
"learning_rate": 2.5061468212153144e-05,
"loss": 0.1921,
"step": 1420
},
{
"epoch": 1.5068493150684932,
"grad_norm": 0.6723175644874573,
"learning_rate": 2.4885844748858446e-05,
"loss": 0.2006,
"step": 1430
},
{
"epoch": 1.517386722866175,
"grad_norm": 0.8485958576202393,
"learning_rate": 2.4710221285563755e-05,
"loss": 0.2032,
"step": 1440
},
{
"epoch": 1.5279241306638567,
"grad_norm": 0.6563605666160583,
"learning_rate": 2.4534597822269057e-05,
"loss": 0.1951,
"step": 1450
},
{
"epoch": 1.5279241306638567,
"eval_loss": 0.34677523374557495,
"eval_runtime": 13.6421,
"eval_samples_per_second": 35.185,
"eval_steps_per_second": 2.199,
"step": 1450
},
{
"epoch": 1.5384615384615383,
"grad_norm": 0.6512174010276794,
"learning_rate": 2.435897435897436e-05,
"loss": 0.1946,
"step": 1460
},
{
"epoch": 1.5489989462592202,
"grad_norm": 0.8586357831954956,
"learning_rate": 2.418335089567966e-05,
"loss": 0.1818,
"step": 1470
},
{
"epoch": 1.559536354056902,
"grad_norm": 0.6348748803138733,
"learning_rate": 2.4007727432384967e-05,
"loss": 0.19,
"step": 1480
},
{
"epoch": 1.5700737618545837,
"grad_norm": 0.6497516632080078,
"learning_rate": 2.3832103969090273e-05,
"loss": 0.1901,
"step": 1490
},
{
"epoch": 1.5806111696522656,
"grad_norm": 0.5382278561592102,
"learning_rate": 2.3656480505795575e-05,
"loss": 0.1877,
"step": 1500
},
{
"epoch": 1.5806111696522656,
"eval_loss": 0.35011863708496094,
"eval_runtime": 13.6377,
"eval_samples_per_second": 35.196,
"eval_steps_per_second": 2.2,
"step": 1500
},
{
"epoch": 1.5911485774499474,
"grad_norm": 0.5429180860519409,
"learning_rate": 2.3480857042500877e-05,
"loss": 0.1961,
"step": 1510
},
{
"epoch": 1.601685985247629,
"grad_norm": 0.6648889183998108,
"learning_rate": 2.3305233579206183e-05,
"loss": 0.1934,
"step": 1520
},
{
"epoch": 1.6122233930453107,
"grad_norm": 0.6552326679229736,
"learning_rate": 2.312961011591149e-05,
"loss": 0.1928,
"step": 1530
},
{
"epoch": 1.6227608008429928,
"grad_norm": 0.6863034963607788,
"learning_rate": 2.295398665261679e-05,
"loss": 0.1956,
"step": 1540
},
{
"epoch": 1.6332982086406744,
"grad_norm": 0.8960068225860596,
"learning_rate": 2.2778363189322093e-05,
"loss": 0.203,
"step": 1550
},
{
"epoch": 1.6332982086406744,
"eval_loss": 0.3496212661266327,
"eval_runtime": 13.629,
"eval_samples_per_second": 35.219,
"eval_steps_per_second": 2.201,
"step": 1550
},
{
"epoch": 1.643835616438356,
"grad_norm": 0.7601160407066345,
"learning_rate": 2.2602739726027396e-05,
"loss": 0.1934,
"step": 1560
},
{
"epoch": 1.654373024236038,
"grad_norm": 0.9118824601173401,
"learning_rate": 2.2427116262732705e-05,
"loss": 0.1861,
"step": 1570
},
{
"epoch": 1.6649104320337198,
"grad_norm": 0.8418806195259094,
"learning_rate": 2.2251492799438007e-05,
"loss": 0.1794,
"step": 1580
},
{
"epoch": 1.6754478398314014,
"grad_norm": 0.7187584042549133,
"learning_rate": 2.207586933614331e-05,
"loss": 0.1878,
"step": 1590
},
{
"epoch": 1.685985247629083,
"grad_norm": 0.7605792284011841,
"learning_rate": 2.190024587284861e-05,
"loss": 0.1868,
"step": 1600
},
{
"epoch": 1.685985247629083,
"eval_loss": 0.3536493182182312,
"eval_runtime": 13.6104,
"eval_samples_per_second": 35.267,
"eval_steps_per_second": 2.204,
"step": 1600
},
{
"epoch": 1.6965226554267652,
"grad_norm": 0.6814613342285156,
"learning_rate": 2.1724622409553917e-05,
"loss": 0.1851,
"step": 1610
},
{
"epoch": 1.7070600632244468,
"grad_norm": 0.6218210458755493,
"learning_rate": 2.1548998946259223e-05,
"loss": 0.1921,
"step": 1620
},
{
"epoch": 1.7175974710221285,
"grad_norm": 0.9718311429023743,
"learning_rate": 2.1373375482964525e-05,
"loss": 0.1844,
"step": 1630
},
{
"epoch": 1.7281348788198103,
"grad_norm": 0.6959328055381775,
"learning_rate": 2.1197752019669827e-05,
"loss": 0.1802,
"step": 1640
},
{
"epoch": 1.7386722866174922,
"grad_norm": 0.6459183096885681,
"learning_rate": 2.1022128556375133e-05,
"loss": 0.1801,
"step": 1650
},
{
"epoch": 1.7386722866174922,
"eval_loss": 0.3579024374485016,
"eval_runtime": 13.6133,
"eval_samples_per_second": 35.26,
"eval_steps_per_second": 2.204,
"step": 1650
},
{
"epoch": 1.7492096944151738,
"grad_norm": 0.8063459396362305,
"learning_rate": 2.084650509308044e-05,
"loss": 0.1793,
"step": 1660
},
{
"epoch": 1.7597471022128557,
"grad_norm": 0.7973386645317078,
"learning_rate": 2.067088162978574e-05,
"loss": 0.1851,
"step": 1670
},
{
"epoch": 1.7702845100105375,
"grad_norm": 0.6851320862770081,
"learning_rate": 2.0495258166491043e-05,
"loss": 0.1745,
"step": 1680
},
{
"epoch": 1.7808219178082192,
"grad_norm": 0.6314299702644348,
"learning_rate": 2.0319634703196345e-05,
"loss": 0.1891,
"step": 1690
},
{
"epoch": 1.7913593256059008,
"grad_norm": 0.7445196509361267,
"learning_rate": 2.0144011239901654e-05,
"loss": 0.173,
"step": 1700
},
{
"epoch": 1.7913593256059008,
"eval_loss": 0.3595808744430542,
"eval_runtime": 13.6356,
"eval_samples_per_second": 35.202,
"eval_steps_per_second": 2.2,
"step": 1700
},
{
"epoch": 1.8018967334035827,
"grad_norm": 0.6725503206253052,
"learning_rate": 1.9968387776606957e-05,
"loss": 0.1729,
"step": 1710
},
{
"epoch": 1.8124341412012646,
"grad_norm": 0.7874764204025269,
"learning_rate": 1.979276431331226e-05,
"loss": 0.1789,
"step": 1720
},
{
"epoch": 1.8229715489989462,
"grad_norm": 1.164414882659912,
"learning_rate": 1.961714085001756e-05,
"loss": 0.1799,
"step": 1730
},
{
"epoch": 1.833508956796628,
"grad_norm": 0.6965774297714233,
"learning_rate": 1.9441517386722867e-05,
"loss": 0.1806,
"step": 1740
},
{
"epoch": 1.84404636459431,
"grad_norm": 0.7329718470573425,
"learning_rate": 1.9265893923428172e-05,
"loss": 0.1877,
"step": 1750
},
{
"epoch": 1.84404636459431,
"eval_loss": 0.3642064929008484,
"eval_runtime": 13.6522,
"eval_samples_per_second": 35.159,
"eval_steps_per_second": 2.197,
"step": 1750
},
{
"epoch": 1.8545837723919916,
"grad_norm": 1.6226530075073242,
"learning_rate": 1.9090270460133475e-05,
"loss": 0.1843,
"step": 1760
},
{
"epoch": 1.8651211801896732,
"grad_norm": 0.6283461451530457,
"learning_rate": 1.8914646996838777e-05,
"loss": 0.1733,
"step": 1770
},
{
"epoch": 1.875658587987355,
"grad_norm": 0.5947582125663757,
"learning_rate": 1.8739023533544083e-05,
"loss": 0.1788,
"step": 1780
},
{
"epoch": 1.886195995785037,
"grad_norm": 0.7731133699417114,
"learning_rate": 1.8563400070249388e-05,
"loss": 0.1749,
"step": 1790
},
{
"epoch": 1.8967334035827186,
"grad_norm": 0.869389533996582,
"learning_rate": 1.838777660695469e-05,
"loss": 0.1787,
"step": 1800
},
{
"epoch": 1.8967334035827186,
"eval_loss": 0.36554011702537537,
"eval_runtime": 13.6633,
"eval_samples_per_second": 35.131,
"eval_steps_per_second": 2.196,
"step": 1800
},
{
"epoch": 1.9072708113804004,
"grad_norm": 0.6407529711723328,
"learning_rate": 1.8212153143659993e-05,
"loss": 0.1693,
"step": 1810
},
{
"epoch": 1.9178082191780823,
"grad_norm": 0.9166205525398254,
"learning_rate": 1.80365296803653e-05,
"loss": 0.1716,
"step": 1820
},
{
"epoch": 1.928345626975764,
"grad_norm": 0.7278560400009155,
"learning_rate": 1.7860906217070604e-05,
"loss": 0.1782,
"step": 1830
},
{
"epoch": 1.9388830347734456,
"grad_norm": 0.5721282362937927,
"learning_rate": 1.7685282753775906e-05,
"loss": 0.1735,
"step": 1840
},
{
"epoch": 1.9494204425711275,
"grad_norm": 0.707082986831665,
"learning_rate": 1.750965929048121e-05,
"loss": 0.1767,
"step": 1850
},
{
"epoch": 1.9494204425711275,
"eval_loss": 0.3640550374984741,
"eval_runtime": 13.667,
"eval_samples_per_second": 35.121,
"eval_steps_per_second": 2.195,
"step": 1850
},
{
"epoch": 1.9599578503688093,
"grad_norm": 0.6257717609405518,
"learning_rate": 1.733403582718651e-05,
"loss": 0.1706,
"step": 1860
},
{
"epoch": 1.970495258166491,
"grad_norm": 0.8650119304656982,
"learning_rate": 1.7158412363891816e-05,
"loss": 0.171,
"step": 1870
},
{
"epoch": 1.9810326659641728,
"grad_norm": 0.6042978167533875,
"learning_rate": 1.6982788900597122e-05,
"loss": 0.1687,
"step": 1880
},
{
"epoch": 1.9915700737618547,
"grad_norm": 0.8354888558387756,
"learning_rate": 1.6807165437302424e-05,
"loss": 0.1676,
"step": 1890
},
{
"epoch": 2.0021074815595363,
"grad_norm": 0.5127582550048828,
"learning_rate": 1.6631541974007727e-05,
"loss": 0.1615,
"step": 1900
},
{
"epoch": 2.0021074815595363,
"eval_loss": 0.3677693009376526,
"eval_runtime": 13.6584,
"eval_samples_per_second": 35.143,
"eval_steps_per_second": 2.196,
"step": 1900
},
{
"epoch": 2.012644889357218,
"grad_norm": 0.6080672740936279,
"learning_rate": 1.6455918510713032e-05,
"loss": 0.1499,
"step": 1910
},
{
"epoch": 2.0231822971549,
"grad_norm": 0.6206851601600647,
"learning_rate": 1.6280295047418338e-05,
"loss": 0.1412,
"step": 1920
},
{
"epoch": 2.0337197049525817,
"grad_norm": 0.8675506711006165,
"learning_rate": 1.610467158412364e-05,
"loss": 0.1468,
"step": 1930
},
{
"epoch": 2.0442571127502633,
"grad_norm": 0.7144894003868103,
"learning_rate": 1.5929048120828942e-05,
"loss": 0.1505,
"step": 1940
},
{
"epoch": 2.0547945205479454,
"grad_norm": 0.6287038326263428,
"learning_rate": 1.5753424657534248e-05,
"loss": 0.1481,
"step": 1950
},
{
"epoch": 2.0547945205479454,
"eval_loss": 0.3874117434024811,
"eval_runtime": 13.6221,
"eval_samples_per_second": 35.237,
"eval_steps_per_second": 2.202,
"step": 1950
},
{
"epoch": 2.065331928345627,
"grad_norm": 0.8757415413856506,
"learning_rate": 1.5577801194239554e-05,
"loss": 0.1476,
"step": 1960
},
{
"epoch": 2.0758693361433087,
"grad_norm": 0.9166101813316345,
"learning_rate": 1.5402177730944856e-05,
"loss": 0.15,
"step": 1970
},
{
"epoch": 2.0864067439409903,
"grad_norm": 0.7331491112709045,
"learning_rate": 1.5226554267650158e-05,
"loss": 0.1581,
"step": 1980
},
{
"epoch": 2.0969441517386724,
"grad_norm": 0.7679803967475891,
"learning_rate": 1.5050930804355462e-05,
"loss": 0.1435,
"step": 1990
},
{
"epoch": 2.107481559536354,
"grad_norm": 0.6296097636222839,
"learning_rate": 1.4875307341060764e-05,
"loss": 0.1586,
"step": 2000
},
{
"epoch": 2.107481559536354,
"eval_loss": 0.399673730134964,
"eval_runtime": 13.5877,
"eval_samples_per_second": 35.326,
"eval_steps_per_second": 2.208,
"step": 2000
},
{
"epoch": 2.1180189673340357,
"grad_norm": 0.7950565814971924,
"learning_rate": 1.4699683877766072e-05,
"loss": 0.147,
"step": 2010
},
{
"epoch": 2.128556375131718,
"grad_norm": 0.7187503576278687,
"learning_rate": 1.4524060414471374e-05,
"loss": 0.1505,
"step": 2020
},
{
"epoch": 2.1390937829293994,
"grad_norm": 0.6770270466804504,
"learning_rate": 1.4348436951176678e-05,
"loss": 0.1509,
"step": 2030
},
{
"epoch": 2.149631190727081,
"grad_norm": 0.7002925276756287,
"learning_rate": 1.417281348788198e-05,
"loss": 0.149,
"step": 2040
},
{
"epoch": 2.1601685985247627,
"grad_norm": 0.8196738362312317,
"learning_rate": 1.3997190024587286e-05,
"loss": 0.1496,
"step": 2050
},
{
"epoch": 2.1601685985247627,
"eval_loss": 0.4035220742225647,
"eval_runtime": 13.6079,
"eval_samples_per_second": 35.274,
"eval_steps_per_second": 2.205,
"step": 2050
},
{
"epoch": 2.170706006322445,
"grad_norm": 0.972318708896637,
"learning_rate": 1.382156656129259e-05,
"loss": 0.1507,
"step": 2060
},
{
"epoch": 2.1812434141201265,
"grad_norm": 0.8114174604415894,
"learning_rate": 1.3645943097997894e-05,
"loss": 0.1498,
"step": 2070
},
{
"epoch": 2.191780821917808,
"grad_norm": 0.6477043032646179,
"learning_rate": 1.3470319634703196e-05,
"loss": 0.1523,
"step": 2080
},
{
"epoch": 2.20231822971549,
"grad_norm": 0.8281266093254089,
"learning_rate": 1.32946961714085e-05,
"loss": 0.1498,
"step": 2090
},
{
"epoch": 2.212855637513172,
"grad_norm": 0.7811893224716187,
"learning_rate": 1.3119072708113806e-05,
"loss": 0.1486,
"step": 2100
},
{
"epoch": 2.212855637513172,
"eval_loss": 0.3995499908924103,
"eval_runtime": 13.6182,
"eval_samples_per_second": 35.247,
"eval_steps_per_second": 2.203,
"step": 2100
},
{
"epoch": 2.2233930453108535,
"grad_norm": 0.8453881144523621,
"learning_rate": 1.2943449244819108e-05,
"loss": 0.1473,
"step": 2110
},
{
"epoch": 2.233930453108535,
"grad_norm": 0.8025777339935303,
"learning_rate": 1.2767825781524412e-05,
"loss": 0.1502,
"step": 2120
},
{
"epoch": 2.244467860906217,
"grad_norm": 0.775323748588562,
"learning_rate": 1.2592202318229716e-05,
"loss": 0.1402,
"step": 2130
},
{
"epoch": 2.255005268703899,
"grad_norm": 1.1507049798965454,
"learning_rate": 1.241657885493502e-05,
"loss": 0.1447,
"step": 2140
},
{
"epoch": 2.2655426765015805,
"grad_norm": 0.7232986688613892,
"learning_rate": 1.2240955391640324e-05,
"loss": 0.1477,
"step": 2150
},
{
"epoch": 2.2655426765015805,
"eval_loss": 0.409657746553421,
"eval_runtime": 13.6393,
"eval_samples_per_second": 35.192,
"eval_steps_per_second": 2.2,
"step": 2150
},
{
"epoch": 2.2760800842992626,
"grad_norm": 0.957969069480896,
"learning_rate": 1.2065331928345628e-05,
"loss": 0.1477,
"step": 2160
},
{
"epoch": 2.286617492096944,
"grad_norm": 0.7197175621986389,
"learning_rate": 1.1889708465050932e-05,
"loss": 0.1399,
"step": 2170
},
{
"epoch": 2.297154899894626,
"grad_norm": 0.7239243984222412,
"learning_rate": 1.1714085001756236e-05,
"loss": 0.1508,
"step": 2180
},
{
"epoch": 2.3076923076923075,
"grad_norm": 0.9576734900474548,
"learning_rate": 1.153846153846154e-05,
"loss": 0.1424,
"step": 2190
},
{
"epoch": 2.3182297154899896,
"grad_norm": 1.0368537902832031,
"learning_rate": 1.1362838075166843e-05,
"loss": 0.1499,
"step": 2200
},
{
"epoch": 2.3182297154899896,
"eval_loss": 0.40890181064605713,
"eval_runtime": 13.6591,
"eval_samples_per_second": 35.141,
"eval_steps_per_second": 2.196,
"step": 2200
},
{
"epoch": 2.328767123287671,
"grad_norm": 0.6726427674293518,
"learning_rate": 1.1187214611872146e-05,
"loss": 0.15,
"step": 2210
},
{
"epoch": 2.339304531085353,
"grad_norm": 0.6462188363075256,
"learning_rate": 1.1011591148577451e-05,
"loss": 0.1452,
"step": 2220
},
{
"epoch": 2.349841938883035,
"grad_norm": 0.7393044829368591,
"learning_rate": 1.0835967685282754e-05,
"loss": 0.1477,
"step": 2230
},
{
"epoch": 2.3603793466807166,
"grad_norm": 0.8233257532119751,
"learning_rate": 1.0660344221988058e-05,
"loss": 0.154,
"step": 2240
},
{
"epoch": 2.3709167544783982,
"grad_norm": 0.8854420781135559,
"learning_rate": 1.0484720758693362e-05,
"loss": 0.1443,
"step": 2250
},
{
"epoch": 2.3709167544783982,
"eval_loss": 0.40763771533966064,
"eval_runtime": 13.6523,
"eval_samples_per_second": 35.159,
"eval_steps_per_second": 2.197,
"step": 2250
},
{
"epoch": 2.3814541622760803,
"grad_norm": 0.7281904220581055,
"learning_rate": 1.0309097295398666e-05,
"loss": 0.1504,
"step": 2260
},
{
"epoch": 2.391991570073762,
"grad_norm": 0.7077420353889465,
"learning_rate": 1.013347383210397e-05,
"loss": 0.1453,
"step": 2270
},
{
"epoch": 2.4025289778714436,
"grad_norm": 0.6380478739738464,
"learning_rate": 9.957850368809273e-06,
"loss": 0.1468,
"step": 2280
},
{
"epoch": 2.4130663856691252,
"grad_norm": 0.5880696773529053,
"learning_rate": 9.782226905514577e-06,
"loss": 0.1402,
"step": 2290
},
{
"epoch": 2.4236037934668073,
"grad_norm": 0.7334967851638794,
"learning_rate": 9.606603442219881e-06,
"loss": 0.1449,
"step": 2300
},
{
"epoch": 2.4236037934668073,
"eval_loss": 0.41403505206108093,
"eval_runtime": 13.6561,
"eval_samples_per_second": 35.149,
"eval_steps_per_second": 2.197,
"step": 2300
},
{
"epoch": 2.434141201264489,
"grad_norm": 0.7387108206748962,
"learning_rate": 9.430979978925185e-06,
"loss": 0.1392,
"step": 2310
},
{
"epoch": 2.4446786090621706,
"grad_norm": 0.6824939846992493,
"learning_rate": 9.255356515630488e-06,
"loss": 0.1446,
"step": 2320
},
{
"epoch": 2.4552160168598522,
"grad_norm": 0.7604888677597046,
"learning_rate": 9.079733052335793e-06,
"loss": 0.1403,
"step": 2330
},
{
"epoch": 2.4657534246575343,
"grad_norm": 0.851366400718689,
"learning_rate": 8.904109589041095e-06,
"loss": 0.1463,
"step": 2340
},
{
"epoch": 2.476290832455216,
"grad_norm": 0.6607230305671692,
"learning_rate": 8.728486125746401e-06,
"loss": 0.1433,
"step": 2350
},
{
"epoch": 2.476290832455216,
"eval_loss": 0.40974321961402893,
"eval_runtime": 13.648,
"eval_samples_per_second": 35.17,
"eval_steps_per_second": 2.198,
"step": 2350
},
{
"epoch": 2.4868282402528976,
"grad_norm": 0.785926342010498,
"learning_rate": 8.552862662451703e-06,
"loss": 0.1403,
"step": 2360
},
{
"epoch": 2.4973656480505797,
"grad_norm": 0.7373778223991394,
"learning_rate": 8.377239199157007e-06,
"loss": 0.1399,
"step": 2370
},
{
"epoch": 2.5079030558482613,
"grad_norm": 0.9675643444061279,
"learning_rate": 8.201615735862311e-06,
"loss": 0.1481,
"step": 2380
},
{
"epoch": 2.518440463645943,
"grad_norm": 0.7026488780975342,
"learning_rate": 8.025992272567615e-06,
"loss": 0.1385,
"step": 2390
},
{
"epoch": 2.528977871443625,
"grad_norm": 0.6957457065582275,
"learning_rate": 7.85036880927292e-06,
"loss": 0.1444,
"step": 2400
},
{
"epoch": 2.528977871443625,
"eval_loss": 0.4191787838935852,
"eval_runtime": 13.6468,
"eval_samples_per_second": 35.173,
"eval_steps_per_second": 2.198,
"step": 2400
},
{
"epoch": 2.5395152792413067,
"grad_norm": 0.6703396439552307,
"learning_rate": 7.674745345978223e-06,
"loss": 0.1426,
"step": 2410
},
{
"epoch": 2.5500526870389884,
"grad_norm": 0.8772585391998291,
"learning_rate": 7.499121882683527e-06,
"loss": 0.1415,
"step": 2420
},
{
"epoch": 2.5605900948366704,
"grad_norm": 0.8206095099449158,
"learning_rate": 7.32349841938883e-06,
"loss": 0.1366,
"step": 2430
},
{
"epoch": 2.571127502634352,
"grad_norm": 0.6491121053695679,
"learning_rate": 7.147874956094135e-06,
"loss": 0.1432,
"step": 2440
},
{
"epoch": 2.5816649104320337,
"grad_norm": 0.7407194972038269,
"learning_rate": 6.972251492799438e-06,
"loss": 0.1415,
"step": 2450
},
{
"epoch": 2.5816649104320337,
"eval_loss": 0.42183056473731995,
"eval_runtime": 13.6108,
"eval_samples_per_second": 35.266,
"eval_steps_per_second": 2.204,
"step": 2450
},
{
"epoch": 2.5922023182297154,
"grad_norm": 0.6888071298599243,
"learning_rate": 6.796628029504742e-06,
"loss": 0.1355,
"step": 2460
},
{
"epoch": 2.602739726027397,
"grad_norm": 0.9180698394775391,
"learning_rate": 6.621004566210046e-06,
"loss": 0.1446,
"step": 2470
},
{
"epoch": 2.613277133825079,
"grad_norm": 0.7826014757156372,
"learning_rate": 6.44538110291535e-06,
"loss": 0.1431,
"step": 2480
},
{
"epoch": 2.6238145416227607,
"grad_norm": 0.6720303297042847,
"learning_rate": 6.269757639620653e-06,
"loss": 0.14,
"step": 2490
},
{
"epoch": 2.6343519494204424,
"grad_norm": 0.932731032371521,
"learning_rate": 6.094134176325957e-06,
"loss": 0.1391,
"step": 2500
},
{
"epoch": 2.6343519494204424,
"eval_loss": 0.42194727063179016,
"eval_runtime": 13.6121,
"eval_samples_per_second": 35.263,
"eval_steps_per_second": 2.204,
"step": 2500
},
{
"epoch": 2.6448893572181245,
"grad_norm": 0.6245344281196594,
"learning_rate": 5.918510713031261e-06,
"loss": 0.1355,
"step": 2510
},
{
"epoch": 2.655426765015806,
"grad_norm": 0.705546498298645,
"learning_rate": 5.742887249736565e-06,
"loss": 0.1372,
"step": 2520
},
{
"epoch": 2.6659641728134877,
"grad_norm": 0.7347897291183472,
"learning_rate": 5.567263786441869e-06,
"loss": 0.1479,
"step": 2530
},
{
"epoch": 2.67650158061117,
"grad_norm": 0.7643938660621643,
"learning_rate": 5.391640323147173e-06,
"loss": 0.1394,
"step": 2540
},
{
"epoch": 2.6870389884088515,
"grad_norm": 0.6740264892578125,
"learning_rate": 5.216016859852477e-06,
"loss": 0.1391,
"step": 2550
},
{
"epoch": 2.6870389884088515,
"eval_loss": 0.42758312821388245,
"eval_runtime": 13.6383,
"eval_samples_per_second": 35.195,
"eval_steps_per_second": 2.2,
"step": 2550
},
{
"epoch": 2.697576396206533,
"grad_norm": 0.6369270086288452,
"learning_rate": 5.040393396557781e-06,
"loss": 0.1364,
"step": 2560
},
{
"epoch": 2.708113804004215,
"grad_norm": 0.7021162509918213,
"learning_rate": 4.864769933263085e-06,
"loss": 0.1386,
"step": 2570
},
{
"epoch": 2.718651211801897,
"grad_norm": 0.6202614307403564,
"learning_rate": 4.689146469968389e-06,
"loss": 0.1399,
"step": 2580
},
{
"epoch": 2.7291886195995785,
"grad_norm": 0.89564448595047,
"learning_rate": 4.513523006673692e-06,
"loss": 0.142,
"step": 2590
},
{
"epoch": 2.73972602739726,
"grad_norm": 0.7164297103881836,
"learning_rate": 4.337899543378996e-06,
"loss": 0.1387,
"step": 2600
},
{
"epoch": 2.73972602739726,
"eval_loss": 0.43157538771629333,
"eval_runtime": 13.631,
"eval_samples_per_second": 35.214,
"eval_steps_per_second": 2.201,
"step": 2600
}
],
"logging_steps": 10,
"max_steps": 2847,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 100,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 3.117575732486308e+17,
"train_batch_size": 16,
"trial_name": null,
"trial_params": null
}
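
The log_history above interleaves training entries (keyed by "loss" and "grad_norm") with evaluation entries (keyed by "eval_loss"), both indexed by "step". Below is a minimal, illustrative sketch (not part of the checkpoint itself) of how such a file could be inspected offline; it assumes the JSON is saved locally as trainer_state.json and uses only the keys visible above.

    # Sketch: read a trainer_state.json and split train/eval loss curves.
    # Assumes the file above is stored locally as "trainer_state.json".
    import json

    with open("trainer_state.json") as f:
        state = json.load(f)

    train_points = []  # (step, loss) from training log entries
    eval_points = []   # (step, eval_loss) from evaluation entries

    for entry in state["log_history"]:
        if "loss" in entry:
            train_points.append((entry["step"], entry["loss"]))
        if "eval_loss" in entry:
            eval_points.append((entry["step"], entry["eval_loss"]))

    print(f"global_step: {state['global_step']}, epoch: {state['epoch']:.2f}")
    print(f"last train point: {train_points[-1]}")
    print(f"best eval point:  {min(eval_points, key=lambda p: p[1])}")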