{
"best_metric": 0.38091349601745605,
"best_model_checkpoint": "miner_id_24/checkpoint-150",
"epoch": 0.12317799219872716,
"eval_steps": 50,
"global_step": 150,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.000821186614658181,
"grad_norm": 2.0305542945861816,
"learning_rate": 1e-05,
"loss": 0.7261,
"step": 1
},
{
"epoch": 0.000821186614658181,
"eval_loss": 1.465772271156311,
"eval_runtime": 288.2262,
"eval_samples_per_second": 7.116,
"eval_steps_per_second": 1.78,
"step": 1
},
{
"epoch": 0.001642373229316362,
"grad_norm": 2.114654541015625,
"learning_rate": 2e-05,
"loss": 0.7726,
"step": 2
},
{
"epoch": 0.002463559843974543,
"grad_norm": 2.2381458282470703,
"learning_rate": 3e-05,
"loss": 0.8371,
"step": 3
},
{
"epoch": 0.003284746458632724,
"grad_norm": 1.9415267705917358,
"learning_rate": 4e-05,
"loss": 0.7885,
"step": 4
},
{
"epoch": 0.0041059330732909054,
"grad_norm": 1.484900712966919,
"learning_rate": 5e-05,
"loss": 0.7522,
"step": 5
},
{
"epoch": 0.004927119687949086,
"grad_norm": 1.259929895401001,
"learning_rate": 6e-05,
"loss": 0.7248,
"step": 6
},
{
"epoch": 0.005748306302607267,
"grad_norm": 0.9762012958526611,
"learning_rate": 7e-05,
"loss": 0.5922,
"step": 7
},
{
"epoch": 0.006569492917265448,
"grad_norm": 1.0204635858535767,
"learning_rate": 8e-05,
"loss": 0.5452,
"step": 8
},
{
"epoch": 0.00739067953192363,
"grad_norm": 0.8520223498344421,
"learning_rate": 9e-05,
"loss": 0.4675,
"step": 9
},
{
"epoch": 0.008211866146581811,
"grad_norm": 1.470017433166504,
"learning_rate": 0.0001,
"loss": 0.4164,
"step": 10
},
{
"epoch": 0.009033052761239991,
"grad_norm": 0.8858131766319275,
"learning_rate": 9.99983777858264e-05,
"loss": 0.3847,
"step": 11
},
{
"epoch": 0.009854239375898173,
"grad_norm": 1.0039604902267456,
"learning_rate": 9.999351124856874e-05,
"loss": 0.3836,
"step": 12
},
{
"epoch": 0.010675425990556354,
"grad_norm": 0.6317289471626282,
"learning_rate": 9.998540070400966e-05,
"loss": 0.3325,
"step": 13
},
{
"epoch": 0.011496612605214535,
"grad_norm": 0.680807888507843,
"learning_rate": 9.997404667843075e-05,
"loss": 0.3493,
"step": 14
},
{
"epoch": 0.012317799219872716,
"grad_norm": 0.6124119758605957,
"learning_rate": 9.995944990857849e-05,
"loss": 0.3204,
"step": 15
},
{
"epoch": 0.013138985834530896,
"grad_norm": 0.6300580501556396,
"learning_rate": 9.994161134161634e-05,
"loss": 0.3341,
"step": 16
},
{
"epoch": 0.013960172449189078,
"grad_norm": 0.608344554901123,
"learning_rate": 9.992053213506334e-05,
"loss": 0.3386,
"step": 17
},
{
"epoch": 0.01478135906384726,
"grad_norm": 0.5785073041915894,
"learning_rate": 9.989621365671902e-05,
"loss": 0.3035,
"step": 18
},
{
"epoch": 0.01560254567850544,
"grad_norm": 0.6188136339187622,
"learning_rate": 9.986865748457457e-05,
"loss": 0.3162,
"step": 19
},
{
"epoch": 0.016423732293163622,
"grad_norm": 0.5587109923362732,
"learning_rate": 9.983786540671051e-05,
"loss": 0.273,
"step": 20
},
{
"epoch": 0.017244918907821802,
"grad_norm": 0.5472216606140137,
"learning_rate": 9.980383942118066e-05,
"loss": 0.3215,
"step": 21
},
{
"epoch": 0.018066105522479982,
"grad_norm": 0.4810258150100708,
"learning_rate": 9.976658173588244e-05,
"loss": 0.307,
"step": 22
},
{
"epoch": 0.018887292137138165,
"grad_norm": 0.5424895882606506,
"learning_rate": 9.972609476841367e-05,
"loss": 0.3065,
"step": 23
},
{
"epoch": 0.019708478751796345,
"grad_norm": 0.5074129700660706,
"learning_rate": 9.968238114591566e-05,
"loss": 0.2774,
"step": 24
},
{
"epoch": 0.020529665366454525,
"grad_norm": 0.5599948167800903,
"learning_rate": 9.96354437049027e-05,
"loss": 0.2691,
"step": 25
},
{
"epoch": 0.02135085198111271,
"grad_norm": 0.6742061972618103,
"learning_rate": 9.95852854910781e-05,
"loss": 0.2898,
"step": 26
},
{
"epoch": 0.02217203859577089,
"grad_norm": 0.5526089072227478,
"learning_rate": 9.953190975913647e-05,
"loss": 0.3071,
"step": 27
},
{
"epoch": 0.02299322521042907,
"grad_norm": 0.5390534996986389,
"learning_rate": 9.947531997255256e-05,
"loss": 0.2906,
"step": 28
},
{
"epoch": 0.023814411825087253,
"grad_norm": 0.504539430141449,
"learning_rate": 9.941551980335652e-05,
"loss": 0.2688,
"step": 29
},
{
"epoch": 0.024635598439745433,
"grad_norm": 0.4898300766944885,
"learning_rate": 9.935251313189564e-05,
"loss": 0.2845,
"step": 30
},
{
"epoch": 0.025456785054403613,
"grad_norm": 0.49974775314331055,
"learning_rate": 9.928630404658255e-05,
"loss": 0.2702,
"step": 31
},
{
"epoch": 0.026277971669061793,
"grad_norm": 0.5819835662841797,
"learning_rate": 9.921689684362989e-05,
"loss": 0.2699,
"step": 32
},
{
"epoch": 0.027099158283719976,
"grad_norm": 0.6171815395355225,
"learning_rate": 9.914429602677162e-05,
"loss": 0.3477,
"step": 33
},
{
"epoch": 0.027920344898378156,
"grad_norm": 0.6020647883415222,
"learning_rate": 9.906850630697068e-05,
"loss": 0.2901,
"step": 34
},
{
"epoch": 0.028741531513036336,
"grad_norm": 0.7458943724632263,
"learning_rate": 9.898953260211338e-05,
"loss": 0.2498,
"step": 35
},
{
"epoch": 0.02956271812769452,
"grad_norm": 0.6588975787162781,
"learning_rate": 9.890738003669029e-05,
"loss": 0.2599,
"step": 36
},
{
"epoch": 0.0303839047423527,
"grad_norm": 0.6839740872383118,
"learning_rate": 9.882205394146361e-05,
"loss": 0.337,
"step": 37
},
{
"epoch": 0.03120509135701088,
"grad_norm": 0.6753020882606506,
"learning_rate": 9.87335598531214e-05,
"loss": 0.3069,
"step": 38
},
{
"epoch": 0.032026277971669063,
"grad_norm": 0.8709951043128967,
"learning_rate": 9.864190351391822e-05,
"loss": 0.3396,
"step": 39
},
{
"epoch": 0.032847464586327244,
"grad_norm": 0.6819374561309814,
"learning_rate": 9.85470908713026e-05,
"loss": 0.2963,
"step": 40
},
{
"epoch": 0.033668651200985424,
"grad_norm": 0.856820821762085,
"learning_rate": 9.844912807753104e-05,
"loss": 0.289,
"step": 41
},
{
"epoch": 0.034489837815643604,
"grad_norm": 0.7196516990661621,
"learning_rate": 9.834802148926882e-05,
"loss": 0.2858,
"step": 42
},
{
"epoch": 0.035311024430301784,
"grad_norm": 0.7757695913314819,
"learning_rate": 9.824377766717759e-05,
"loss": 0.2882,
"step": 43
},
{
"epoch": 0.036132211044959964,
"grad_norm": 0.7751405835151672,
"learning_rate": 9.813640337548954e-05,
"loss": 0.3174,
"step": 44
},
{
"epoch": 0.03695339765961815,
"grad_norm": 0.8659068942070007,
"learning_rate": 9.802590558156862e-05,
"loss": 0.2818,
"step": 45
},
{
"epoch": 0.03777458427427633,
"grad_norm": 1.336848258972168,
"learning_rate": 9.791229145545831e-05,
"loss": 0.3209,
"step": 46
},
{
"epoch": 0.03859577088893451,
"grad_norm": 0.9144354462623596,
"learning_rate": 9.779556836941645e-05,
"loss": 0.28,
"step": 47
},
{
"epoch": 0.03941695750359269,
"grad_norm": 0.8315229415893555,
"learning_rate": 9.767574389743682e-05,
"loss": 0.276,
"step": 48
},
{
"epoch": 0.04023814411825087,
"grad_norm": 0.967755913734436,
"learning_rate": 9.755282581475769e-05,
"loss": 0.3298,
"step": 49
},
{
"epoch": 0.04105933073290905,
"grad_norm": 1.0207139253616333,
"learning_rate": 9.742682209735727e-05,
"loss": 0.3058,
"step": 50
},
{
"epoch": 0.04105933073290905,
"eval_loss": 0.44632911682128906,
"eval_runtime": 291.2748,
"eval_samples_per_second": 7.041,
"eval_steps_per_second": 1.761,
"step": 50
},
{
"epoch": 0.04188051734756724,
"grad_norm": 1.5864020586013794,
"learning_rate": 9.729774092143627e-05,
"loss": 0.4838,
"step": 51
},
{
"epoch": 0.04270170396222542,
"grad_norm": 1.2497475147247314,
"learning_rate": 9.716559066288715e-05,
"loss": 0.4411,
"step": 52
},
{
"epoch": 0.0435228905768836,
"grad_norm": 0.9487363696098328,
"learning_rate": 9.703037989675087e-05,
"loss": 0.3657,
"step": 53
},
{
"epoch": 0.04434407719154178,
"grad_norm": 0.33357444405555725,
"learning_rate": 9.689211739666023e-05,
"loss": 0.2727,
"step": 54
},
{
"epoch": 0.04516526380619996,
"grad_norm": 0.4681488573551178,
"learning_rate": 9.675081213427076e-05,
"loss": 0.315,
"step": 55
},
{
"epoch": 0.04598645042085814,
"grad_norm": 0.4412896931171417,
"learning_rate": 9.66064732786784e-05,
"loss": 0.3051,
"step": 56
},
{
"epoch": 0.04680763703551632,
"grad_norm": 0.42806002497673035,
"learning_rate": 9.645911019582467e-05,
"loss": 0.296,
"step": 57
},
{
"epoch": 0.047628823650174505,
"grad_norm": 0.3795586824417114,
"learning_rate": 9.630873244788883e-05,
"loss": 0.3184,
"step": 58
},
{
"epoch": 0.048450010264832685,
"grad_norm": 0.34351328015327454,
"learning_rate": 9.615534979266745e-05,
"loss": 0.2832,
"step": 59
},
{
"epoch": 0.049271196879490865,
"grad_norm": 0.35215750336647034,
"learning_rate": 9.599897218294122e-05,
"loss": 0.3083,
"step": 60
},
{
"epoch": 0.050092383494149045,
"grad_norm": 0.3156924843788147,
"learning_rate": 9.583960976582913e-05,
"loss": 0.2577,
"step": 61
},
{
"epoch": 0.050913570108807225,
"grad_norm": 0.4058922231197357,
"learning_rate": 9.567727288213005e-05,
"loss": 0.3156,
"step": 62
},
{
"epoch": 0.051734756723465405,
"grad_norm": 0.3673064410686493,
"learning_rate": 9.551197206565173e-05,
"loss": 0.2932,
"step": 63
},
{
"epoch": 0.052555943338123585,
"grad_norm": 0.3226417601108551,
"learning_rate": 9.534371804252728e-05,
"loss": 0.2404,
"step": 64
},
{
"epoch": 0.05337712995278177,
"grad_norm": 0.3365015685558319,
"learning_rate": 9.517252173051911e-05,
"loss": 0.263,
"step": 65
},
{
"epoch": 0.05419831656743995,
"grad_norm": 0.38001781702041626,
"learning_rate": 9.49983942383106e-05,
"loss": 0.2973,
"step": 66
},
{
"epoch": 0.05501950318209813,
"grad_norm": 0.5203686356544495,
"learning_rate": 9.482134686478519e-05,
"loss": 0.2693,
"step": 67
},
{
"epoch": 0.05584068979675631,
"grad_norm": 0.32921281456947327,
"learning_rate": 9.464139109829321e-05,
"loss": 0.2742,
"step": 68
},
{
"epoch": 0.05666187641141449,
"grad_norm": 0.43800121545791626,
"learning_rate": 9.445853861590647e-05,
"loss": 0.2915,
"step": 69
},
{
"epoch": 0.05748306302607267,
"grad_norm": 0.32359007000923157,
"learning_rate": 9.42728012826605e-05,
"loss": 0.218,
"step": 70
},
{
"epoch": 0.05830424964073085,
"grad_norm": 0.39324089884757996,
"learning_rate": 9.408419115078471e-05,
"loss": 0.2635,
"step": 71
},
{
"epoch": 0.05912543625538904,
"grad_norm": 0.45099887251853943,
"learning_rate": 9.389272045892024e-05,
"loss": 0.2402,
"step": 72
},
{
"epoch": 0.05994662287004722,
"grad_norm": 0.4052051603794098,
"learning_rate": 9.36984016313259e-05,
"loss": 0.2775,
"step": 73
},
{
"epoch": 0.0607678094847054,
"grad_norm": 0.41129499673843384,
"learning_rate": 9.350124727707197e-05,
"loss": 0.2564,
"step": 74
},
{
"epoch": 0.06158899609936358,
"grad_norm": 0.3723108470439911,
"learning_rate": 9.330127018922194e-05,
"loss": 0.2614,
"step": 75
},
{
"epoch": 0.06241018271402176,
"grad_norm": 0.39635589718818665,
"learning_rate": 9.309848334400246e-05,
"loss": 0.2825,
"step": 76
},
{
"epoch": 0.06323136932867994,
"grad_norm": 0.4818005859851837,
"learning_rate": 9.289289989996133e-05,
"loss": 0.2539,
"step": 77
},
{
"epoch": 0.06405255594333813,
"grad_norm": 0.4484040439128876,
"learning_rate": 9.268453319711363e-05,
"loss": 0.2857,
"step": 78
},
{
"epoch": 0.0648737425579963,
"grad_norm": 0.46060118079185486,
"learning_rate": 9.247339675607605e-05,
"loss": 0.2745,
"step": 79
},
{
"epoch": 0.06569492917265449,
"grad_norm": 0.3952708840370178,
"learning_rate": 9.225950427718975e-05,
"loss": 0.2954,
"step": 80
},
{
"epoch": 0.06651611578731266,
"grad_norm": 0.4594082534313202,
"learning_rate": 9.204286963963111e-05,
"loss": 0.2647,
"step": 81
},
{
"epoch": 0.06733730240197085,
"grad_norm": 0.498677134513855,
"learning_rate": 9.182350690051133e-05,
"loss": 0.2741,
"step": 82
},
{
"epoch": 0.06815848901662903,
"grad_norm": 0.48790332674980164,
"learning_rate": 9.160143029396422e-05,
"loss": 0.217,
"step": 83
},
{
"epoch": 0.06897967563128721,
"grad_norm": 0.4500594437122345,
"learning_rate": 9.13766542302225e-05,
"loss": 0.2478,
"step": 84
},
{
"epoch": 0.0698008622459454,
"grad_norm": 0.477200984954834,
"learning_rate": 9.114919329468282e-05,
"loss": 0.276,
"step": 85
},
{
"epoch": 0.07062204886060357,
"grad_norm": 0.5421808362007141,
"learning_rate": 9.091906224695935e-05,
"loss": 0.3018,
"step": 86
},
{
"epoch": 0.07144323547526175,
"grad_norm": 0.473712295293808,
"learning_rate": 9.068627601992598e-05,
"loss": 0.2687,
"step": 87
},
{
"epoch": 0.07226442208991993,
"grad_norm": 0.5594393610954285,
"learning_rate": 9.045084971874738e-05,
"loss": 0.2515,
"step": 88
},
{
"epoch": 0.07308560870457811,
"grad_norm": 0.6386433839797974,
"learning_rate": 9.021279861989885e-05,
"loss": 0.2529,
"step": 89
},
{
"epoch": 0.0739067953192363,
"grad_norm": 0.5319457650184631,
"learning_rate": 8.997213817017507e-05,
"loss": 0.2646,
"step": 90
},
{
"epoch": 0.07472798193389447,
"grad_norm": 0.595516562461853,
"learning_rate": 8.972888398568772e-05,
"loss": 0.2756,
"step": 91
},
{
"epoch": 0.07554916854855266,
"grad_norm": 0.6601677536964417,
"learning_rate": 8.948305185085225e-05,
"loss": 0.2805,
"step": 92
},
{
"epoch": 0.07637035516321083,
"grad_norm": 0.6162546873092651,
"learning_rate": 8.92346577173636e-05,
"loss": 0.2435,
"step": 93
},
{
"epoch": 0.07719154177786902,
"grad_norm": 0.6918389797210693,
"learning_rate": 8.898371770316111e-05,
"loss": 0.3159,
"step": 94
},
{
"epoch": 0.07801272839252721,
"grad_norm": 0.7181240916252136,
"learning_rate": 8.873024809138272e-05,
"loss": 0.2413,
"step": 95
},
{
"epoch": 0.07883391500718538,
"grad_norm": 1.1472549438476562,
"learning_rate": 8.847426532930831e-05,
"loss": 0.2949,
"step": 96
},
{
"epoch": 0.07965510162184357,
"grad_norm": 0.9359822273254395,
"learning_rate": 8.821578602729242e-05,
"loss": 0.316,
"step": 97
},
{
"epoch": 0.08047628823650174,
"grad_norm": 0.7444966435432434,
"learning_rate": 8.795482695768658e-05,
"loss": 0.3119,
"step": 98
},
{
"epoch": 0.08129747485115993,
"grad_norm": 1.2565944194793701,
"learning_rate": 8.769140505375085e-05,
"loss": 0.2877,
"step": 99
},
{
"epoch": 0.0821186614658181,
"grad_norm": 0.9129545092582703,
"learning_rate": 8.742553740855506e-05,
"loss": 0.3209,
"step": 100
},
{
"epoch": 0.0821186614658181,
"eval_loss": 0.4635893702507019,
"eval_runtime": 290.5387,
"eval_samples_per_second": 7.059,
"eval_steps_per_second": 1.766,
"step": 100
},
{
"epoch": 0.08293984808047629,
"grad_norm": 1.5546329021453857,
"learning_rate": 8.715724127386972e-05,
"loss": 0.5163,
"step": 101
},
{
"epoch": 0.08376103469513448,
"grad_norm": 1.350873589515686,
"learning_rate": 8.688653405904652e-05,
"loss": 0.4791,
"step": 102
},
{
"epoch": 0.08458222130979265,
"grad_norm": 0.984677255153656,
"learning_rate": 8.661343332988869e-05,
"loss": 0.3377,
"step": 103
},
{
"epoch": 0.08540340792445084,
"grad_norm": 0.3391437530517578,
"learning_rate": 8.633795680751116e-05,
"loss": 0.2633,
"step": 104
},
{
"epoch": 0.08622459453910901,
"grad_norm": 0.2502461373806,
"learning_rate": 8.606012236719073e-05,
"loss": 0.2567,
"step": 105
},
{
"epoch": 0.0870457811537672,
"grad_norm": 0.28781577944755554,
"learning_rate": 8.577994803720606e-05,
"loss": 0.2769,
"step": 106
},
{
"epoch": 0.08786696776842537,
"grad_norm": 0.30542901158332825,
"learning_rate": 8.549745199766792e-05,
"loss": 0.2965,
"step": 107
},
{
"epoch": 0.08868815438308356,
"grad_norm": 0.3351757526397705,
"learning_rate": 8.521265257933948e-05,
"loss": 0.2774,
"step": 108
},
{
"epoch": 0.08950934099774174,
"grad_norm": 0.2978907525539398,
"learning_rate": 8.492556826244687e-05,
"loss": 0.2744,
"step": 109
},
{
"epoch": 0.09033052761239992,
"grad_norm": 0.2558261752128601,
"learning_rate": 8.463621767547998e-05,
"loss": 0.2312,
"step": 110
},
{
"epoch": 0.0911517142270581,
"grad_norm": 0.30879148840904236,
"learning_rate": 8.434461959398376e-05,
"loss": 0.2845,
"step": 111
},
{
"epoch": 0.09197290084171628,
"grad_norm": 0.309334397315979,
"learning_rate": 8.405079293933986e-05,
"loss": 0.266,
"step": 112
},
{
"epoch": 0.09279408745637446,
"grad_norm": 0.3099028170108795,
"learning_rate": 8.375475677753881e-05,
"loss": 0.2692,
"step": 113
},
{
"epoch": 0.09361527407103264,
"grad_norm": 0.30877748131752014,
"learning_rate": 8.345653031794292e-05,
"loss": 0.284,
"step": 114
},
{
"epoch": 0.09443646068569082,
"grad_norm": 0.2932473123073578,
"learning_rate": 8.315613291203976e-05,
"loss": 0.268,
"step": 115
},
{
"epoch": 0.09525764730034901,
"grad_norm": 0.337277889251709,
"learning_rate": 8.285358405218655e-05,
"loss": 0.2448,
"step": 116
},
{
"epoch": 0.09607883391500718,
"grad_norm": 0.3811333477497101,
"learning_rate": 8.25489033703452e-05,
"loss": 0.293,
"step": 117
},
{
"epoch": 0.09690002052966537,
"grad_norm": 0.34305429458618164,
"learning_rate": 8.224211063680853e-05,
"loss": 0.2596,
"step": 118
},
{
"epoch": 0.09772120714432354,
"grad_norm": 0.3743656277656555,
"learning_rate": 8.19332257589174e-05,
"loss": 0.277,
"step": 119
},
{
"epoch": 0.09854239375898173,
"grad_norm": 0.2987310290336609,
"learning_rate": 8.162226877976887e-05,
"loss": 0.2484,
"step": 120
},
{
"epoch": 0.0993635803736399,
"grad_norm": 0.2911909222602844,
"learning_rate": 8.130925987691569e-05,
"loss": 0.2297,
"step": 121
},
{
"epoch": 0.10018476698829809,
"grad_norm": 0.29610538482666016,
"learning_rate": 8.099421936105702e-05,
"loss": 0.2224,
"step": 122
},
{
"epoch": 0.10100595360295628,
"grad_norm": 0.31938186287879944,
"learning_rate": 8.067716767472045e-05,
"loss": 0.2158,
"step": 123
},
{
"epoch": 0.10182714021761445,
"grad_norm": 0.3307250440120697,
"learning_rate": 8.035812539093557e-05,
"loss": 0.2443,
"step": 124
},
{
"epoch": 0.10264832683227264,
"grad_norm": 0.33900371193885803,
"learning_rate": 8.003711321189895e-05,
"loss": 0.2455,
"step": 125
},
{
"epoch": 0.10346951344693081,
"grad_norm": 0.33954647183418274,
"learning_rate": 7.971415196763088e-05,
"loss": 0.2413,
"step": 126
},
{
"epoch": 0.104290700061589,
"grad_norm": 0.3721504211425781,
"learning_rate": 7.938926261462366e-05,
"loss": 0.2724,
"step": 127
},
{
"epoch": 0.10511188667624717,
"grad_norm": 0.3788248300552368,
"learning_rate": 7.906246623448183e-05,
"loss": 0.2727,
"step": 128
},
{
"epoch": 0.10593307329090536,
"grad_norm": 0.3605785667896271,
"learning_rate": 7.873378403255419e-05,
"loss": 0.2781,
"step": 129
},
{
"epoch": 0.10675425990556354,
"grad_norm": 0.33415114879608154,
"learning_rate": 7.840323733655778e-05,
"loss": 0.2226,
"step": 130
},
{
"epoch": 0.10757544652022172,
"grad_norm": 0.43883877992630005,
"learning_rate": 7.807084759519405e-05,
"loss": 0.2474,
"step": 131
},
{
"epoch": 0.1083966331348799,
"grad_norm": 0.3597790598869324,
"learning_rate": 7.773663637675694e-05,
"loss": 0.2375,
"step": 132
},
{
"epoch": 0.10921781974953808,
"grad_norm": 0.41835817694664,
"learning_rate": 7.740062536773352e-05,
"loss": 0.2869,
"step": 133
},
{
"epoch": 0.11003900636419627,
"grad_norm": 0.40062186121940613,
"learning_rate": 7.706283637139658e-05,
"loss": 0.2451,
"step": 134
},
{
"epoch": 0.11086019297885444,
"grad_norm": 0.46534448862075806,
"learning_rate": 7.672329130639005e-05,
"loss": 0.2834,
"step": 135
},
{
"epoch": 0.11168137959351263,
"grad_norm": 0.4524616599082947,
"learning_rate": 7.638201220530665e-05,
"loss": 0.2579,
"step": 136
},
{
"epoch": 0.11250256620817081,
"grad_norm": 0.42796769738197327,
"learning_rate": 7.603902121325813e-05,
"loss": 0.2507,
"step": 137
},
{
"epoch": 0.11332375282282899,
"grad_norm": 0.6147335767745972,
"learning_rate": 7.569434058643844e-05,
"loss": 0.2547,
"step": 138
},
{
"epoch": 0.11414493943748717,
"grad_norm": 0.5195381045341492,
"learning_rate": 7.534799269067953e-05,
"loss": 0.2429,
"step": 139
},
{
"epoch": 0.11496612605214535,
"grad_norm": 0.549555242061615,
"learning_rate": 7.500000000000001e-05,
"loss": 0.2826,
"step": 140
},
{
"epoch": 0.11578731266680353,
"grad_norm": 0.5704975128173828,
"learning_rate": 7.465038509514688e-05,
"loss": 0.2789,
"step": 141
},
{
"epoch": 0.1166084992814617,
"grad_norm": 0.6339702606201172,
"learning_rate": 7.42991706621303e-05,
"loss": 0.2289,
"step": 142
},
{
"epoch": 0.11742968589611989,
"grad_norm": 0.728171706199646,
"learning_rate": 7.394637949075154e-05,
"loss": 0.3104,
"step": 143
},
{
"epoch": 0.11825087251077808,
"grad_norm": 0.5731471180915833,
"learning_rate": 7.35920344731241e-05,
"loss": 0.2612,
"step": 144
},
{
"epoch": 0.11907205912543625,
"grad_norm": 0.7613599896430969,
"learning_rate": 7.323615860218843e-05,
"loss": 0.3024,
"step": 145
},
{
"epoch": 0.11989324574009444,
"grad_norm": 0.5790632963180542,
"learning_rate": 7.287877497021978e-05,
"loss": 0.2493,
"step": 146
},
{
"epoch": 0.12071443235475261,
"grad_norm": 0.746548593044281,
"learning_rate": 7.251990676732984e-05,
"loss": 0.2999,
"step": 147
},
{
"epoch": 0.1215356189694108,
"grad_norm": 0.7720208168029785,
"learning_rate": 7.215957727996207e-05,
"loss": 0.2836,
"step": 148
},
{
"epoch": 0.12235680558406897,
"grad_norm": 0.6100975275039673,
"learning_rate": 7.179780988938051e-05,
"loss": 0.2187,
"step": 149
},
{
"epoch": 0.12317799219872716,
"grad_norm": 0.7999358177185059,
"learning_rate": 7.143462807015271e-05,
"loss": 0.2709,
"step": 150
},
{
"epoch": 0.12317799219872716,
"eval_loss": 0.38091349601745605,
"eval_runtime": 291.0342,
"eval_samples_per_second": 7.047,
"eval_steps_per_second": 1.763,
"step": 150
}
],
"logging_steps": 1,
"max_steps": 400,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 50,
"stateful_callbacks": {
"EarlyStoppingCallback": {
"args": {
"early_stopping_patience": 5,
"early_stopping_threshold": 0.0
},
"attributes": {
"early_stopping_patience_counter": 0
}
},
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 2.6081050741230797e+17,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}