{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 2.9956108266276518,
"eval_steps": 500,
"global_step": 1023,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.029261155815654718,
"grad_norm": 14.668970176640238,
"learning_rate": 3.2467532467532465e-07,
"loss": 0.8895,
"step": 10
},
{
"epoch": 0.058522311631309436,
"grad_norm": 8.496797294755972,
"learning_rate": 6.493506493506493e-07,
"loss": 0.8811,
"step": 20
},
{
"epoch": 0.08778346744696415,
"grad_norm": 5.273308597570782,
"learning_rate": 9.740259740259742e-07,
"loss": 0.8154,
"step": 30
},
{
"epoch": 0.11704462326261887,
"grad_norm": 1.433583789015455,
"learning_rate": 1.2987012987012986e-06,
"loss": 0.7741,
"step": 40
},
{
"epoch": 0.14630577907827358,
"grad_norm": 1.3261000645447931,
"learning_rate": 1.6233766233766235e-06,
"loss": 0.75,
"step": 50
},
{
"epoch": 0.1755669348939283,
"grad_norm": 1.2156691515832625,
"learning_rate": 1.9480519480519483e-06,
"loss": 0.7318,
"step": 60
},
{
"epoch": 0.20482809070958302,
"grad_norm": 1.1401188906701956,
"learning_rate": 2.2727272727272728e-06,
"loss": 0.7198,
"step": 70
},
{
"epoch": 0.23408924652523774,
"grad_norm": 1.1329393438430202,
"learning_rate": 2.597402597402597e-06,
"loss": 0.7105,
"step": 80
},
{
"epoch": 0.26335040234089246,
"grad_norm": 1.1907330376838692,
"learning_rate": 2.922077922077922e-06,
"loss": 0.7019,
"step": 90
},
{
"epoch": 0.29261155815654716,
"grad_norm": 1.1364555929890368,
"learning_rate": 3.246753246753247e-06,
"loss": 0.6877,
"step": 100
},
{
"epoch": 0.3218727139722019,
"grad_norm": 1.1288685178187858,
"learning_rate": 3.5714285714285718e-06,
"loss": 0.6901,
"step": 110
},
{
"epoch": 0.3511338697878566,
"grad_norm": 0.8110371303333744,
"learning_rate": 3.896103896103897e-06,
"loss": 0.689,
"step": 120
},
{
"epoch": 0.38039502560351135,
"grad_norm": 0.5818728259370487,
"learning_rate": 4.220779220779221e-06,
"loss": 0.6671,
"step": 130
},
{
"epoch": 0.40965618141916604,
"grad_norm": 0.5304327111513184,
"learning_rate": 4.5454545454545455e-06,
"loss": 0.6646,
"step": 140
},
{
"epoch": 0.4389173372348208,
"grad_norm": 0.36685425891028794,
"learning_rate": 4.870129870129871e-06,
"loss": 0.658,
"step": 150
},
{
"epoch": 0.4681784930504755,
"grad_norm": 0.41183157231085515,
"learning_rate": 4.9994118939080335e-06,
"loss": 0.6589,
"step": 160
},
{
"epoch": 0.49743964886613024,
"grad_norm": 0.4031968246741201,
"learning_rate": 4.995818914209129e-06,
"loss": 0.6632,
"step": 170
},
{
"epoch": 0.5267008046817849,
"grad_norm": 0.3548947303493851,
"learning_rate": 4.988964370070926e-06,
"loss": 0.6621,
"step": 180
},
{
"epoch": 0.5559619604974396,
"grad_norm": 0.3686485941331636,
"learning_rate": 4.978857219089375e-06,
"loss": 0.6499,
"step": 190
},
{
"epoch": 0.5852231163130943,
"grad_norm": 0.35209709708441134,
"learning_rate": 4.965510669403914e-06,
"loss": 0.6505,
"step": 200
},
{
"epoch": 0.6144842721287491,
"grad_norm": 0.37518333436501056,
"learning_rate": 4.948942162436912e-06,
"loss": 0.6568,
"step": 210
},
{
"epoch": 0.6437454279444038,
"grad_norm": 0.36805268655746204,
"learning_rate": 4.929173350101025e-06,
"loss": 0.6443,
"step": 220
},
{
"epoch": 0.6730065837600585,
"grad_norm": 0.3786724277264378,
"learning_rate": 4.906230066504232e-06,
"loss": 0.644,
"step": 230
},
{
"epoch": 0.7022677395757132,
"grad_norm": 0.45256944433012264,
"learning_rate": 4.880142294189524e-06,
"loss": 0.6509,
"step": 240
},
{
"epoch": 0.731528895391368,
"grad_norm": 0.3534837053618171,
"learning_rate": 4.850944124953386e-06,
"loss": 0.6489,
"step": 250
},
{
"epoch": 0.7607900512070227,
"grad_norm": 0.3462376731623333,
"learning_rate": 4.818673715294245e-06,
"loss": 0.6512,
"step": 260
},
{
"epoch": 0.7900512070226774,
"grad_norm": 0.3659651498084311,
"learning_rate": 4.783373236549124e-06,
"loss": 0.6533,
"step": 270
},
{
"epoch": 0.8193123628383321,
"grad_norm": 0.3464863446071885,
"learning_rate": 4.7450888197836705e-06,
"loss": 0.6519,
"step": 280
},
{
"epoch": 0.8485735186539868,
"grad_norm": 0.3446817147136836,
"learning_rate": 4.703870495507544e-06,
"loss": 0.6439,
"step": 290
},
{
"epoch": 0.8778346744696416,
"grad_norm": 0.3663029532843638,
"learning_rate": 4.659772128293987e-06,
"loss": 0.6357,
"step": 300
},
{
"epoch": 0.9070958302852963,
"grad_norm": 0.3478317684389932,
"learning_rate": 4.612851346388991e-06,
"loss": 0.6487,
"step": 310
},
{
"epoch": 0.936356986100951,
"grad_norm": 0.3335766728870111,
"learning_rate": 4.563169466402049e-06,
"loss": 0.6405,
"step": 320
},
{
"epoch": 0.9656181419166057,
"grad_norm": 0.3761185295308495,
"learning_rate": 4.510791413176912e-06,
"loss": 0.6436,
"step": 330
},
{
"epoch": 0.9948792977322605,
"grad_norm": 0.35211941336512004,
"learning_rate": 4.455785634947077e-06,
"loss": 0.6395,
"step": 340
},
{
"epoch": 0.9978054133138259,
"eval_loss": 0.6440162658691406,
"eval_runtime": 342.2489,
"eval_samples_per_second": 26.907,
"eval_steps_per_second": 0.421,
"step": 341
},
{
"epoch": 1.025237746891002,
"grad_norm": 0.36200167522916055,
"learning_rate": 4.39822401388685e-06,
"loss": 0.6599,
"step": 350
},
{
"epoch": 1.054498902706657,
"grad_norm": 0.368658897798918,
"learning_rate": 4.338181772174911e-06,
"loss": 0.6147,
"step": 360
},
{
"epoch": 1.0837600585223117,
"grad_norm": 0.39675018773491727,
"learning_rate": 4.275737373693118e-06,
"loss": 0.6179,
"step": 370
},
{
"epoch": 1.1130212143379663,
"grad_norm": 0.31910150452705416,
"learning_rate": 4.210972421489018e-06,
"loss": 0.6151,
"step": 380
},
{
"epoch": 1.142282370153621,
"grad_norm": 0.4146599554888977,
"learning_rate": 4.143971551136056e-06,
"loss": 0.6203,
"step": 390
},
{
"epoch": 1.1715435259692757,
"grad_norm": 0.37145656101773866,
"learning_rate": 4.074822320130852e-06,
"loss": 0.6219,
"step": 400
},
{
"epoch": 1.2008046817849305,
"grad_norm": 0.3369796774780415,
"learning_rate": 4.003615093472073e-06,
"loss": 0.6216,
"step": 410
},
{
"epoch": 1.2300658376005853,
"grad_norm": 0.3498594570629417,
"learning_rate": 3.930442925570424e-06,
"loss": 0.6093,
"step": 420
},
{
"epoch": 1.2593269934162399,
"grad_norm": 0.32836497624245026,
"learning_rate": 3.855401438644094e-06,
"loss": 0.6195,
"step": 430
},
{
"epoch": 1.2885881492318947,
"grad_norm": 0.3837720666133545,
"learning_rate": 3.7785886977585562e-06,
"loss": 0.6116,
"step": 440
},
{
"epoch": 1.3178493050475493,
"grad_norm": 0.34346193540291065,
"learning_rate": 3.7001050826740294e-06,
"loss": 0.6157,
"step": 450
},
{
"epoch": 1.347110460863204,
"grad_norm": 0.33632056202086974,
"learning_rate": 3.6200531566680695e-06,
"loss": 0.6128,
"step": 460
},
{
"epoch": 1.3763716166788589,
"grad_norm": 0.33016686130410977,
"learning_rate": 3.5385375325047167e-06,
"loss": 0.6101,
"step": 470
},
{
"epoch": 1.4056327724945135,
"grad_norm": 0.3322405360416849,
"learning_rate": 3.4556647357253485e-06,
"loss": 0.6107,
"step": 480
},
{
"epoch": 1.4348939283101683,
"grad_norm": 0.3766853715733656,
"learning_rate": 3.3715430654398994e-06,
"loss": 0.6172,
"step": 490
},
{
"epoch": 1.464155084125823,
"grad_norm": 0.3420044761963859,
"learning_rate": 3.286282452800349e-06,
"loss": 0.6054,
"step": 500
},
{
"epoch": 1.4934162399414777,
"grad_norm": 0.3312255181380896,
"learning_rate": 3.1999943173414545e-06,
"loss": 0.6135,
"step": 510
},
{
"epoch": 1.5226773957571325,
"grad_norm": 0.34026173676689114,
"learning_rate": 3.1127914213764365e-06,
"loss": 0.6158,
"step": 520
},
{
"epoch": 1.5519385515727873,
"grad_norm": 0.33625103408517465,
"learning_rate": 3.0247877226379123e-06,
"loss": 0.6145,
"step": 530
},
{
"epoch": 1.5811997073884418,
"grad_norm": 0.3367264230640958,
"learning_rate": 2.9360982253566372e-06,
"loss": 0.621,
"step": 540
},
{
"epoch": 1.6104608632040964,
"grad_norm": 0.3508021187615357,
"learning_rate": 2.8468388299726714e-06,
"loss": 0.6116,
"step": 550
},
{
"epoch": 1.6397220190197512,
"grad_norm": 0.3329430390973009,
"learning_rate": 2.7571261816753756e-06,
"loss": 0.6135,
"step": 560
},
{
"epoch": 1.668983174835406,
"grad_norm": 0.32493005225042776,
"learning_rate": 2.667077517970155e-06,
"loss": 0.6039,
"step": 570
},
{
"epoch": 1.6982443306510606,
"grad_norm": 0.3416247340300911,
"learning_rate": 2.5768105154711563e-06,
"loss": 0.6134,
"step": 580
},
{
"epoch": 1.7275054864667154,
"grad_norm": 0.32203254574334095,
"learning_rate": 2.4864431361201358e-06,
"loss": 0.6131,
"step": 590
},
{
"epoch": 1.7567666422823702,
"grad_norm": 0.34441300170978906,
"learning_rate": 2.396093473032457e-06,
"loss": 0.61,
"step": 600
},
{
"epoch": 1.7860277980980248,
"grad_norm": 0.34502988222958175,
"learning_rate": 2.305879596171672e-06,
"loss": 0.6126,
"step": 610
},
{
"epoch": 1.8152889539136796,
"grad_norm": 0.31129433406639495,
"learning_rate": 2.2159193980543504e-06,
"loss": 0.6129,
"step": 620
},
{
"epoch": 1.8445501097293344,
"grad_norm": 0.3234044124788837,
"learning_rate": 2.126330439686806e-06,
"loss": 0.6121,
"step": 630
},
{
"epoch": 1.873811265544989,
"grad_norm": 0.32298142157786064,
"learning_rate": 2.037229796935041e-06,
"loss": 0.6065,
"step": 640
},
{
"epoch": 1.9030724213606436,
"grad_norm": 0.33703026308837425,
"learning_rate": 1.948733907528678e-06,
"loss": 0.6124,
"step": 650
},
{
"epoch": 1.9323335771762986,
"grad_norm": 0.31918477425731495,
"learning_rate": 1.8609584188988135e-06,
"loss": 0.6128,
"step": 660
},
{
"epoch": 1.9615947329919532,
"grad_norm": 0.3240942780939225,
"learning_rate": 1.774018037048647e-06,
"loss": 0.6016,
"step": 670
},
{
"epoch": 1.9908558888076078,
"grad_norm": 0.3172940503810845,
"learning_rate": 1.6880263766543742e-06,
"loss": 0.6162,
"step": 680
},
{
"epoch": 1.9967081199707388,
"eval_loss": 0.6352197527885437,
"eval_runtime": 341.3485,
"eval_samples_per_second": 26.978,
"eval_steps_per_second": 0.422,
"step": 682
},
{
"epoch": 2.0212143379663496,
"grad_norm": 0.3175314791094243,
"learning_rate": 1.6030958125922416e-06,
"loss": 0.6344,
"step": 690
},
{
"epoch": 2.050475493782004,
"grad_norm": 0.31427421868845,
"learning_rate": 1.5193373330857852e-06,
"loss": 0.5848,
"step": 700
},
{
"epoch": 2.0797366495976592,
"grad_norm": 0.3050138399442785,
"learning_rate": 1.436860394665161e-06,
"loss": 0.5869,
"step": 710
},
{
"epoch": 2.108997805413314,
"grad_norm": 0.3287719989093099,
"learning_rate": 1.3557727791281144e-06,
"loss": 0.588,
"step": 720
},
{
"epoch": 2.1382589612289684,
"grad_norm": 0.3211488131597453,
"learning_rate": 1.2761804526894973e-06,
"loss": 0.5897,
"step": 730
},
{
"epoch": 2.1675201170446234,
"grad_norm": 0.3354985605470603,
"learning_rate": 1.1981874275034292e-06,
"loss": 0.5945,
"step": 740
},
{
"epoch": 2.196781272860278,
"grad_norm": 0.330245659117232,
"learning_rate": 1.121895625739026e-06,
"loss": 0.5931,
"step": 750
},
{
"epoch": 2.2260424286759326,
"grad_norm": 0.34341233350847544,
"learning_rate": 1.0474047463873615e-06,
"loss": 0.5903,
"step": 760
},
{
"epoch": 2.255303584491587,
"grad_norm": 0.30870638315996124,
"learning_rate": 9.74812134973689e-07,
"loss": 0.5884,
"step": 770
},
{
"epoch": 2.284564740307242,
"grad_norm": 0.31977910069700466,
"learning_rate": 9.042126563452127e-07,
"loss": 0.5878,
"step": 780
},
{
"epoch": 2.313825896122897,
"grad_norm": 0.3031431539019493,
"learning_rate": 8.356985707006268e-07,
"loss": 0.5858,
"step": 790
},
{
"epoch": 2.3430870519385514,
"grad_norm": 0.3128129431092359,
"learning_rate": 7.6935941302344e-07,
"loss": 0.5935,
"step": 800
},
{
"epoch": 2.3723482077542064,
"grad_norm": 0.3086236399140173,
"learning_rate": 7.052818760766511e-07,
"loss": 0.5884,
"step": 810
},
{
"epoch": 2.401609363569861,
"grad_norm": 0.3209593796492324,
"learning_rate": 6.435496971116578e-07,
"loss": 0.5945,
"step": 820
},
{
"epoch": 2.4308705193855156,
"grad_norm": 0.2958964862772339,
"learning_rate": 5.842435484394701e-07,
"loss": 0.5868,
"step": 830
},
{
"epoch": 2.4601316752011706,
"grad_norm": 0.3051666167851246,
"learning_rate": 5.274409320072202e-07,
"loss": 0.5872,
"step": 840
},
{
"epoch": 2.489392831016825,
"grad_norm": 0.3116559897212902,
"learning_rate": 4.7321607811774056e-07,
"loss": 0.5889,
"step": 850
},
{
"epoch": 2.5186539868324798,
"grad_norm": 0.3162392315755417,
"learning_rate": 4.2163984842456043e-07,
"loss": 0.5821,
"step": 860
},
{
"epoch": 2.547915142648135,
"grad_norm": 0.3108046500670305,
"learning_rate": 3.7277964332909645e-07,
"loss": 0.5844,
"step": 870
},
{
"epoch": 2.5771762984637894,
"grad_norm": 0.2946758208140416,
"learning_rate": 3.266993139010438e-07,
"loss": 0.5885,
"step": 880
},
{
"epoch": 2.606437454279444,
"grad_norm": 0.30055832996790277,
"learning_rate": 2.8345907843707304e-07,
"loss": 0.5881,
"step": 890
},
{
"epoch": 2.6356986100950985,
"grad_norm": 0.30952781379566374,
"learning_rate": 2.4311544376688035e-07,
"loss": 0.5925,
"step": 900
},
{
"epoch": 2.6649597659107536,
"grad_norm": 0.3047050770609814,
"learning_rate": 2.0572113140941985e-07,
"loss": 0.5929,
"step": 910
},
{
"epoch": 2.694220921726408,
"grad_norm": 0.31153516958982186,
"learning_rate": 1.7132500867582695e-07,
"loss": 0.5868,
"step": 920
},
{
"epoch": 2.723482077542063,
"grad_norm": 0.3185842923901254,
"learning_rate": 1.399720248090608e-07,
"loss": 0.5901,
"step": 930
},
{
"epoch": 2.7527432333577178,
"grad_norm": 0.3109345062558724,
"learning_rate": 1.1170315224372507e-07,
"loss": 0.589,
"step": 940
},
{
"epoch": 2.7820043891733723,
"grad_norm": 0.29604620944870486,
"learning_rate": 8.655533306282687e-08,
"loss": 0.5876,
"step": 950
},
{
"epoch": 2.811265544989027,
"grad_norm": 0.36359227990850435,
"learning_rate": 6.456143072144261e-08,
"loss": 0.5915,
"step": 960
},
{
"epoch": 2.840526700804682,
"grad_norm": 0.2960058344228556,
"learning_rate": 4.5750187100383204e-08,
"loss": 0.5872,
"step": 970
},
{
"epoch": 2.8697878566203365,
"grad_norm": 0.30121823974621786,
"learning_rate": 3.0146184945978595e-08,
"loss": 0.5873,
"step": 980
},
{
"epoch": 2.899049012435991,
"grad_norm": 0.2997640067590943,
"learning_rate": 1.7769815745066476e-08,
"loss": 0.5799,
"step": 990
},
{
"epoch": 2.928310168251646,
"grad_norm": 0.3145839088496146,
"learning_rate": 8.637253077165287e-09,
"loss": 0.5878,
"step": 1000
},
{
"epoch": 2.9575713240673007,
"grad_norm": 0.30040451461060846,
"learning_rate": 2.760431478658343e-09,
"loss": 0.5791,
"step": 1010
},
{
"epoch": 2.9868324798829553,
"grad_norm": 0.30168448002353826,
"learning_rate": 1.470308466056114e-10,
"loss": 0.591,
"step": 1020
},
{
"epoch": 2.9956108266276518,
"eval_loss": 0.6357085704803467,
"eval_runtime": 340.5768,
"eval_samples_per_second": 27.039,
"eval_steps_per_second": 0.423,
"step": 1023
},
{
"epoch": 2.9956108266276518,
"step": 1023,
"total_flos": 2144987064041472.0,
"train_loss": 0.6305035024094792,
"train_runtime": 54804.0253,
"train_samples_per_second": 9.577,
"train_steps_per_second": 0.019
}
],
"logging_steps": 10,
"max_steps": 1023,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 2144987064041472.0,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}