{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.0243116756822464,
"eval_steps": 50,
"global_step": 200,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.000121558378411232,
"grad_norm": 1.3522109985351562,
"learning_rate": 2e-05,
"loss": 9.0768,
"step": 1
},
{
"epoch": 0.000121558378411232,
"eval_loss": 2.5203659534454346,
"eval_runtime": 130.544,
"eval_samples_per_second": 26.535,
"eval_steps_per_second": 13.268,
"step": 1
},
{
"epoch": 0.000243116756822464,
"grad_norm": 0.9176049828529358,
"learning_rate": 4e-05,
"loss": 8.9894,
"step": 2
},
{
"epoch": 0.000364675135233696,
"grad_norm": 1.0146596431732178,
"learning_rate": 6e-05,
"loss": 9.935,
"step": 3
},
{
"epoch": 0.000486233513644928,
"grad_norm": 1.0469788312911987,
"learning_rate": 8e-05,
"loss": 9.3424,
"step": 4
},
{
"epoch": 0.00060779189205616,
"grad_norm": 0.8254321813583374,
"learning_rate": 0.0001,
"loss": 8.339,
"step": 5
},
{
"epoch": 0.000729350270467392,
"grad_norm": 1.032138705253601,
"learning_rate": 0.00012,
"loss": 9.6692,
"step": 6
},
{
"epoch": 0.000850908648878624,
"grad_norm": 1.692064881324768,
"learning_rate": 0.00014,
"loss": 9.8725,
"step": 7
},
{
"epoch": 0.000972467027289856,
"grad_norm": 1.5163041353225708,
"learning_rate": 0.00016,
"loss": 9.3607,
"step": 8
},
{
"epoch": 0.001094025405701088,
"grad_norm": 3.152874231338501,
"learning_rate": 0.00018,
"loss": 9.9624,
"step": 9
},
{
"epoch": 0.00121558378411232,
"grad_norm": 1.7264574766159058,
"learning_rate": 0.0002,
"loss": 10.4001,
"step": 10
},
{
"epoch": 0.0013371421625235518,
"grad_norm": 4.396702766418457,
"learning_rate": 0.0001999863304992469,
"loss": 8.8946,
"step": 11
},
{
"epoch": 0.001458700540934784,
"grad_norm": 1.9022140502929688,
"learning_rate": 0.00019994532573409262,
"loss": 10.3819,
"step": 12
},
{
"epoch": 0.001580258919346016,
"grad_norm": 2.1845006942749023,
"learning_rate": 0.00019987699691483048,
"loss": 9.6356,
"step": 13
},
{
"epoch": 0.001701817297757248,
"grad_norm": 2.10140061378479,
"learning_rate": 0.00019978136272187747,
"loss": 9.6127,
"step": 14
},
{
"epoch": 0.0018233756761684798,
"grad_norm": 2.068610668182373,
"learning_rate": 0.000199658449300667,
"loss": 11.08,
"step": 15
},
{
"epoch": 0.001944934054579712,
"grad_norm": 3.655803680419922,
"learning_rate": 0.00019950829025450114,
"loss": 10.0545,
"step": 16
},
{
"epoch": 0.002066492432990944,
"grad_norm": 1.8618426322937012,
"learning_rate": 0.00019933092663536382,
"loss": 9.6545,
"step": 17
},
{
"epoch": 0.002188050811402176,
"grad_norm": 1.8773517608642578,
"learning_rate": 0.00019912640693269752,
"loss": 7.7819,
"step": 18
},
{
"epoch": 0.002309609189813408,
"grad_norm": 2.198024272918701,
"learning_rate": 0.00019889478706014687,
"loss": 9.8384,
"step": 19
},
{
"epoch": 0.00243116756822464,
"grad_norm": 2.1132476329803467,
"learning_rate": 0.00019863613034027224,
"loss": 8.5882,
"step": 20
},
{
"epoch": 0.0025527259466358717,
"grad_norm": 2.646947145462036,
"learning_rate": 0.00019835050748723824,
"loss": 9.4244,
"step": 21
},
{
"epoch": 0.0026742843250471037,
"grad_norm": 2.169708728790283,
"learning_rate": 0.00019803799658748094,
"loss": 8.9,
"step": 22
},
{
"epoch": 0.002795842703458336,
"grad_norm": 4.217128276824951,
"learning_rate": 0.00019769868307835994,
"loss": 8.5476,
"step": 23
},
{
"epoch": 0.002917401081869568,
"grad_norm": 1.9587572813034058,
"learning_rate": 0.0001973326597248006,
"loss": 9.5256,
"step": 24
},
{
"epoch": 0.0030389594602808,
"grad_norm": 4.422936916351318,
"learning_rate": 0.00019694002659393305,
"loss": 8.403,
"step": 25
},
{
"epoch": 0.003160517838692032,
"grad_norm": 1.6125566959381104,
"learning_rate": 0.00019652089102773488,
"loss": 9.2696,
"step": 26
},
{
"epoch": 0.003282076217103264,
"grad_norm": 1.9092199802398682,
"learning_rate": 0.00019607536761368484,
"loss": 9.0772,
"step": 27
},
{
"epoch": 0.003403634595514496,
"grad_norm": 3.2192418575286865,
"learning_rate": 0.00019560357815343577,
"loss": 9.7968,
"step": 28
},
{
"epoch": 0.0035251929739257277,
"grad_norm": 2.5830280780792236,
"learning_rate": 0.00019510565162951537,
"loss": 9.7017,
"step": 29
},
{
"epoch": 0.0036467513523369597,
"grad_norm": 2.7067151069641113,
"learning_rate": 0.00019458172417006347,
"loss": 10.3412,
"step": 30
},
{
"epoch": 0.0037683097307481916,
"grad_norm": 3.9291067123413086,
"learning_rate": 0.00019403193901161613,
"loss": 9.8539,
"step": 31
},
{
"epoch": 0.003889868109159424,
"grad_norm": 1.7529088258743286,
"learning_rate": 0.0001934564464599461,
"loss": 8.7478,
"step": 32
},
{
"epoch": 0.004011426487570656,
"grad_norm": 1.7117114067077637,
"learning_rate": 0.00019285540384897073,
"loss": 8.765,
"step": 33
},
{
"epoch": 0.004132984865981888,
"grad_norm": 1.856972575187683,
"learning_rate": 0.00019222897549773848,
"loss": 9.777,
"step": 34
},
{
"epoch": 0.00425454324439312,
"grad_norm": 2.3596813678741455,
"learning_rate": 0.00019157733266550575,
"loss": 9.5862,
"step": 35
},
{
"epoch": 0.004376101622804352,
"grad_norm": 2.5083954334259033,
"learning_rate": 0.00019090065350491626,
"loss": 9.3247,
"step": 36
},
{
"epoch": 0.004497660001215584,
"grad_norm": 1.3706068992614746,
"learning_rate": 0.00019019912301329592,
"loss": 9.3214,
"step": 37
},
{
"epoch": 0.004619218379626816,
"grad_norm": 1.7020275592803955,
"learning_rate": 0.00018947293298207635,
"loss": 8.5611,
"step": 38
},
{
"epoch": 0.004740776758038048,
"grad_norm": 1.8647181987762451,
"learning_rate": 0.0001887222819443612,
"loss": 9.9951,
"step": 39
},
{
"epoch": 0.00486233513644928,
"grad_norm": 2.506580352783203,
"learning_rate": 0.0001879473751206489,
"loss": 9.6237,
"step": 40
},
{
"epoch": 0.0049838935148605115,
"grad_norm": 1.9629899263381958,
"learning_rate": 0.00018714842436272773,
"loss": 9.371,
"step": 41
},
{
"epoch": 0.0051054518932717435,
"grad_norm": 2.3143656253814697,
"learning_rate": 0.00018632564809575742,
"loss": 9.235,
"step": 42
},
{
"epoch": 0.005227010271682975,
"grad_norm": 2.5331368446350098,
"learning_rate": 0.0001854792712585539,
"loss": 10.1562,
"step": 43
},
{
"epoch": 0.005348568650094207,
"grad_norm": 4.555860996246338,
"learning_rate": 0.00018460952524209355,
"loss": 8.2419,
"step": 44
},
{
"epoch": 0.005470127028505439,
"grad_norm": 1.6661875247955322,
"learning_rate": 0.00018371664782625287,
"loss": 8.2566,
"step": 45
},
{
"epoch": 0.005591685406916672,
"grad_norm": 1.7546186447143555,
"learning_rate": 0.00018280088311480201,
"loss": 9.1446,
"step": 46
},
{
"epoch": 0.005713243785327904,
"grad_norm": 1.751132845878601,
"learning_rate": 0.00018186248146866927,
"loss": 8.6259,
"step": 47
},
{
"epoch": 0.005834802163739136,
"grad_norm": 2.203023672103882,
"learning_rate": 0.00018090169943749476,
"loss": 9.3622,
"step": 48
},
{
"epoch": 0.005956360542150368,
"grad_norm": 2.7363836765289307,
"learning_rate": 0.0001799187996894925,
"loss": 8.0726,
"step": 49
},
{
"epoch": 0.0060779189205616,
"grad_norm": 2.010042905807495,
"learning_rate": 0.00017891405093963938,
"loss": 9.8848,
"step": 50
},
{
"epoch": 0.0060779189205616,
"eval_loss": 2.22426700592041,
"eval_runtime": 130.4637,
"eval_samples_per_second": 26.551,
"eval_steps_per_second": 13.276,
"step": 50
},
{
"epoch": 0.006199477298972832,
"grad_norm": 2.2631587982177734,
"learning_rate": 0.00017788772787621126,
"loss": 9.446,
"step": 51
},
{
"epoch": 0.006321035677384064,
"grad_norm": 4.675655364990234,
"learning_rate": 0.00017684011108568592,
"loss": 9.3449,
"step": 52
},
{
"epoch": 0.006442594055795296,
"grad_norm": 1.7152106761932373,
"learning_rate": 0.0001757714869760335,
"loss": 9.475,
"step": 53
},
{
"epoch": 0.006564152434206528,
"grad_norm": 1.5290554761886597,
"learning_rate": 0.0001746821476984154,
"loss": 9.117,
"step": 54
},
{
"epoch": 0.00668571081261776,
"grad_norm": 2.0921902656555176,
"learning_rate": 0.00017357239106731317,
"loss": 9.5546,
"step": 55
},
{
"epoch": 0.006807269191028992,
"grad_norm": 2.1920764446258545,
"learning_rate": 0.00017244252047910892,
"loss": 8.953,
"step": 56
},
{
"epoch": 0.0069288275694402235,
"grad_norm": 1.7566354274749756,
"learning_rate": 0.00017129284482913972,
"loss": 9.6787,
"step": 57
},
{
"epoch": 0.0070503859478514555,
"grad_norm": 1.4819419384002686,
"learning_rate": 0.00017012367842724887,
"loss": 7.3843,
"step": 58
},
{
"epoch": 0.007171944326262687,
"grad_norm": 1.6789450645446777,
"learning_rate": 0.0001689353409118566,
"loss": 9.802,
"step": 59
},
{
"epoch": 0.007293502704673919,
"grad_norm": 1.282571792602539,
"learning_rate": 0.00016772815716257412,
"loss": 7.7288,
"step": 60
},
{
"epoch": 0.007415061083085151,
"grad_norm": 2.066798448562622,
"learning_rate": 0.0001665024572113848,
"loss": 9.3806,
"step": 61
},
{
"epoch": 0.007536619461496383,
"grad_norm": 1.7693638801574707,
"learning_rate": 0.00016525857615241687,
"loss": 9.6393,
"step": 62
},
{
"epoch": 0.007658177839907615,
"grad_norm": 1.7502071857452393,
"learning_rate": 0.00016399685405033167,
"loss": 8.3708,
"step": 63
},
{
"epoch": 0.007779736218318848,
"grad_norm": 2.8502800464630127,
"learning_rate": 0.0001627176358473537,
"loss": 8.8738,
"step": 64
},
{
"epoch": 0.00790129459673008,
"grad_norm": 2.4103214740753174,
"learning_rate": 0.0001614212712689668,
"loss": 9.5765,
"step": 65
},
{
"epoch": 0.008022852975141312,
"grad_norm": 1.8917535543441772,
"learning_rate": 0.00016010811472830252,
"loss": 9.4486,
"step": 66
},
{
"epoch": 0.008144411353552544,
"grad_norm": 1.5890737771987915,
"learning_rate": 0.00015877852522924732,
"loss": 8.5677,
"step": 67
},
{
"epoch": 0.008265969731963776,
"grad_norm": 2.1370444297790527,
"learning_rate": 0.00015743286626829437,
"loss": 9.4762,
"step": 68
},
{
"epoch": 0.008387528110375008,
"grad_norm": 1.7275190353393555,
"learning_rate": 0.0001560715057351673,
"loss": 8.829,
"step": 69
},
{
"epoch": 0.00850908648878624,
"grad_norm": 1.7048512697219849,
"learning_rate": 0.00015469481581224272,
"loss": 8.8259,
"step": 70
},
{
"epoch": 0.008630644867197472,
"grad_norm": 1.903066635131836,
"learning_rate": 0.0001533031728727994,
"loss": 9.6929,
"step": 71
},
{
"epoch": 0.008752203245608704,
"grad_norm": 1.5013515949249268,
"learning_rate": 0.00015189695737812152,
"loss": 8.1273,
"step": 72
},
{
"epoch": 0.008873761624019936,
"grad_norm": 1.6483365297317505,
"learning_rate": 0.0001504765537734844,
"loss": 8.8742,
"step": 73
},
{
"epoch": 0.008995320002431167,
"grad_norm": 1.8312402963638306,
"learning_rate": 0.00014904235038305083,
"loss": 8.7848,
"step": 74
},
{
"epoch": 0.0091168783808424,
"grad_norm": 1.3816986083984375,
"learning_rate": 0.00014759473930370736,
"loss": 8.2036,
"step": 75
},
{
"epoch": 0.009238436759253631,
"grad_norm": 2.2364020347595215,
"learning_rate": 0.0001461341162978688,
"loss": 10.4628,
"step": 76
},
{
"epoch": 0.009359995137664863,
"grad_norm": 1.573431134223938,
"learning_rate": 0.00014466088068528068,
"loss": 8.8974,
"step": 77
},
{
"epoch": 0.009481553516076095,
"grad_norm": 2.187255859375,
"learning_rate": 0.00014317543523384928,
"loss": 8.5312,
"step": 78
},
{
"epoch": 0.009603111894487327,
"grad_norm": 2.0174520015716553,
"learning_rate": 0.00014167818604952906,
"loss": 8.829,
"step": 79
},
{
"epoch": 0.00972467027289856,
"grad_norm": 1.8616173267364502,
"learning_rate": 0.00014016954246529696,
"loss": 8.3692,
"step": 80
},
{
"epoch": 0.009846228651309791,
"grad_norm": 1.8460533618927002,
"learning_rate": 0.00013864991692924523,
"loss": 8.6283,
"step": 81
},
{
"epoch": 0.009967787029721023,
"grad_norm": 2.5459718704223633,
"learning_rate": 0.00013711972489182208,
"loss": 8.4909,
"step": 82
},
{
"epoch": 0.010089345408132255,
"grad_norm": 1.9989314079284668,
"learning_rate": 0.00013557938469225167,
"loss": 8.9659,
"step": 83
},
{
"epoch": 0.010210903786543487,
"grad_norm": 1.3886724710464478,
"learning_rate": 0.00013402931744416433,
"loss": 9.1106,
"step": 84
},
{
"epoch": 0.010332462164954719,
"grad_norm": 2.354243040084839,
"learning_rate": 0.00013246994692046836,
"loss": 9.2569,
"step": 85
},
{
"epoch": 0.01045402054336595,
"grad_norm": 2.085102081298828,
"learning_rate": 0.00013090169943749476,
"loss": 8.9907,
"step": 86
},
{
"epoch": 0.010575578921777183,
"grad_norm": 1.2773852348327637,
"learning_rate": 0.0001293250037384465,
"loss": 8.7438,
"step": 87
},
{
"epoch": 0.010697137300188415,
"grad_norm": 1.6545124053955078,
"learning_rate": 0.00012774029087618446,
"loss": 8.7085,
"step": 88
},
{
"epoch": 0.010818695678599647,
"grad_norm": 2.1138100624084473,
"learning_rate": 0.00012614799409538198,
"loss": 8.8577,
"step": 89
},
{
"epoch": 0.010940254057010879,
"grad_norm": 1.8220653533935547,
"learning_rate": 0.00012454854871407994,
"loss": 9.198,
"step": 90
},
{
"epoch": 0.011061812435422112,
"grad_norm": 1.6138783693313599,
"learning_rate": 0.00012294239200467516,
"loss": 8.9333,
"step": 91
},
{
"epoch": 0.011183370813833344,
"grad_norm": 1.6465097665786743,
"learning_rate": 0.0001213299630743747,
"loss": 9.123,
"step": 92
},
{
"epoch": 0.011304929192244576,
"grad_norm": 1.7768160104751587,
"learning_rate": 0.00011971170274514802,
"loss": 8.6597,
"step": 93
},
{
"epoch": 0.011426487570655808,
"grad_norm": 1.6465460062026978,
"learning_rate": 0.000118088053433211,
"loss": 9.841,
"step": 94
},
{
"epoch": 0.01154804594906704,
"grad_norm": 1.3596290349960327,
"learning_rate": 0.00011645945902807341,
"loss": 8.5273,
"step": 95
},
{
"epoch": 0.011669604327478272,
"grad_norm": 1.612764596939087,
"learning_rate": 0.0001148263647711842,
"loss": 8.4275,
"step": 96
},
{
"epoch": 0.011791162705889504,
"grad_norm": 2.3744266033172607,
"learning_rate": 0.00011318921713420691,
"loss": 9.6372,
"step": 97
},
{
"epoch": 0.011912721084300736,
"grad_norm": 1.4123613834381104,
"learning_rate": 0.00011154846369695863,
"loss": 8.4314,
"step": 98
},
{
"epoch": 0.012034279462711968,
"grad_norm": 1.9357587099075317,
"learning_rate": 0.0001099045530250463,
"loss": 8.7244,
"step": 99
},
{
"epoch": 0.0121558378411232,
"grad_norm": 1.6507443189620972,
"learning_rate": 0.00010825793454723325,
"loss": 10.0314,
"step": 100
},
{
"epoch": 0.0121558378411232,
"eval_loss": 2.20096755027771,
"eval_runtime": 130.4185,
"eval_samples_per_second": 26.561,
"eval_steps_per_second": 13.28,
"step": 100
},
{
"epoch": 0.012277396219534432,
"grad_norm": 2.1093552112579346,
"learning_rate": 0.00010660905843256994,
"loss": 10.0093,
"step": 101
},
{
"epoch": 0.012398954597945664,
"grad_norm": 2.1423592567443848,
"learning_rate": 0.00010495837546732224,
"loss": 9.6464,
"step": 102
},
{
"epoch": 0.012520512976356896,
"grad_norm": 1.413726568222046,
"learning_rate": 0.00010330633693173082,
"loss": 9.0782,
"step": 103
},
{
"epoch": 0.012642071354768128,
"grad_norm": 2.7693593502044678,
"learning_rate": 0.00010165339447663587,
"loss": 9.1446,
"step": 104
},
{
"epoch": 0.01276362973317936,
"grad_norm": 1.9296963214874268,
"learning_rate": 0.0001,
"loss": 8.2252,
"step": 105
},
{
"epoch": 0.012885188111590591,
"grad_norm": 2.1879448890686035,
"learning_rate": 9.834660552336415e-05,
"loss": 10.2142,
"step": 106
},
{
"epoch": 0.013006746490001823,
"grad_norm": 1.3834993839263916,
"learning_rate": 9.669366306826919e-05,
"loss": 9.5412,
"step": 107
},
{
"epoch": 0.013128304868413055,
"grad_norm": 1.5268234014511108,
"learning_rate": 9.504162453267777e-05,
"loss": 8.0744,
"step": 108
},
{
"epoch": 0.013249863246824287,
"grad_norm": 1.9845620393753052,
"learning_rate": 9.339094156743007e-05,
"loss": 8.4186,
"step": 109
},
{
"epoch": 0.01337142162523552,
"grad_norm": 1.6854311227798462,
"learning_rate": 9.174206545276677e-05,
"loss": 8.4435,
"step": 110
},
{
"epoch": 0.013492980003646751,
"grad_norm": 1.5681287050247192,
"learning_rate": 9.009544697495374e-05,
"loss": 8.6079,
"step": 111
},
{
"epoch": 0.013614538382057983,
"grad_norm": 1.6393003463745117,
"learning_rate": 8.845153630304139e-05,
"loss": 9.3246,
"step": 112
},
{
"epoch": 0.013736096760469215,
"grad_norm": 1.6968841552734375,
"learning_rate": 8.681078286579311e-05,
"loss": 7.5143,
"step": 113
},
{
"epoch": 0.013857655138880447,
"grad_norm": 1.4313429594039917,
"learning_rate": 8.517363522881579e-05,
"loss": 9.078,
"step": 114
},
{
"epoch": 0.013979213517291679,
"grad_norm": 1.592039942741394,
"learning_rate": 8.35405409719266e-05,
"loss": 8.1703,
"step": 115
},
{
"epoch": 0.014100771895702911,
"grad_norm": 1.4118988513946533,
"learning_rate": 8.191194656678904e-05,
"loss": 8.941,
"step": 116
},
{
"epoch": 0.014222330274114143,
"grad_norm": 1.3782672882080078,
"learning_rate": 8.028829725485199e-05,
"loss": 8.4541,
"step": 117
},
{
"epoch": 0.014343888652525375,
"grad_norm": 2.4915947914123535,
"learning_rate": 7.867003692562534e-05,
"loss": 7.8308,
"step": 118
},
{
"epoch": 0.014465447030936607,
"grad_norm": 1.6528788805007935,
"learning_rate": 7.705760799532485e-05,
"loss": 8.732,
"step": 119
},
{
"epoch": 0.014587005409347839,
"grad_norm": 2.2975354194641113,
"learning_rate": 7.54514512859201e-05,
"loss": 9.3207,
"step": 120
},
{
"epoch": 0.01470856378775907,
"grad_norm": 2.4572110176086426,
"learning_rate": 7.385200590461803e-05,
"loss": 9.4007,
"step": 121
},
{
"epoch": 0.014830122166170303,
"grad_norm": 1.338627576828003,
"learning_rate": 7.225970912381556e-05,
"loss": 8.8468,
"step": 122
},
{
"epoch": 0.014951680544581535,
"grad_norm": 1.862164855003357,
"learning_rate": 7.067499626155354e-05,
"loss": 8.9436,
"step": 123
},
{
"epoch": 0.015073238922992767,
"grad_norm": 2.2516064643859863,
"learning_rate": 6.909830056250527e-05,
"loss": 8.7996,
"step": 124
},
{
"epoch": 0.015194797301403998,
"grad_norm": 1.4722728729248047,
"learning_rate": 6.753005307953167e-05,
"loss": 7.7244,
"step": 125
},
{
"epoch": 0.01531635567981523,
"grad_norm": 2.090157985687256,
"learning_rate": 6.59706825558357e-05,
"loss": 8.5753,
"step": 126
},
{
"epoch": 0.015437914058226464,
"grad_norm": 1.8846075534820557,
"learning_rate": 6.442061530774834e-05,
"loss": 8.9389,
"step": 127
},
{
"epoch": 0.015559472436637696,
"grad_norm": 1.606086254119873,
"learning_rate": 6.28802751081779e-05,
"loss": 7.7107,
"step": 128
},
{
"epoch": 0.015681030815048926,
"grad_norm": 1.6152966022491455,
"learning_rate": 6.135008307075481e-05,
"loss": 9.4444,
"step": 129
},
{
"epoch": 0.01580258919346016,
"grad_norm": 2.8925600051879883,
"learning_rate": 5.983045753470308e-05,
"loss": 8.2291,
"step": 130
},
{
"epoch": 0.01592414757187139,
"grad_norm": 1.3763501644134521,
"learning_rate": 5.832181395047098e-05,
"loss": 8.9548,
"step": 131
},
{
"epoch": 0.016045705950282624,
"grad_norm": 1.7891381978988647,
"learning_rate": 5.6824564766150726e-05,
"loss": 8.7198,
"step": 132
},
{
"epoch": 0.016167264328693854,
"grad_norm": 1.6122995615005493,
"learning_rate": 5.533911931471936e-05,
"loss": 9.8015,
"step": 133
},
{
"epoch": 0.016288822707105088,
"grad_norm": 2.285371780395508,
"learning_rate": 5.386588370213124e-05,
"loss": 8.8946,
"step": 134
},
{
"epoch": 0.016410381085516318,
"grad_norm": 2.1016831398010254,
"learning_rate": 5.240526069629265e-05,
"loss": 8.5699,
"step": 135
},
{
"epoch": 0.01653193946392755,
"grad_norm": 1.6653213500976562,
"learning_rate": 5.095764961694922e-05,
"loss": 8.6184,
"step": 136
},
{
"epoch": 0.016653497842338782,
"grad_norm": 1.4281337261199951,
"learning_rate": 4.952344622651566e-05,
"loss": 8.0501,
"step": 137
},
{
"epoch": 0.016775056220750015,
"grad_norm": 1.9814000129699707,
"learning_rate": 4.810304262187852e-05,
"loss": 7.7907,
"step": 138
},
{
"epoch": 0.016896614599161246,
"grad_norm": 1.6379835605621338,
"learning_rate": 4.669682712720065e-05,
"loss": 8.7783,
"step": 139
},
{
"epoch": 0.01701817297757248,
"grad_norm": 2.8867030143737793,
"learning_rate": 4.530518418775733e-05,
"loss": 8.2023,
"step": 140
},
{
"epoch": 0.01713973135598371,
"grad_norm": 1.8214524984359741,
"learning_rate": 4.392849426483274e-05,
"loss": 9.0595,
"step": 141
},
{
"epoch": 0.017261289734394943,
"grad_norm": 1.807656168937683,
"learning_rate": 4.256713373170564e-05,
"loss": 8.4609,
"step": 142
},
{
"epoch": 0.017382848112806173,
"grad_norm": 1.7904338836669922,
"learning_rate": 4.12214747707527e-05,
"loss": 8.0106,
"step": 143
},
{
"epoch": 0.017504406491217407,
"grad_norm": 2.073862314224243,
"learning_rate": 3.9891885271697496e-05,
"loss": 8.6199,
"step": 144
},
{
"epoch": 0.01762596486962864,
"grad_norm": 1.665391445159912,
"learning_rate": 3.857872873103322e-05,
"loss": 9.1573,
"step": 145
},
{
"epoch": 0.01774752324803987,
"grad_norm": 2.4493348598480225,
"learning_rate": 3.7282364152646297e-05,
"loss": 7.3018,
"step": 146
},
{
"epoch": 0.017869081626451105,
"grad_norm": 1.4470022916793823,
"learning_rate": 3.600314594966834e-05,
"loss": 8.5612,
"step": 147
},
{
"epoch": 0.017990640004862335,
"grad_norm": 1.7825874090194702,
"learning_rate": 3.4741423847583134e-05,
"loss": 9.3814,
"step": 148
},
{
"epoch": 0.01811219838327357,
"grad_norm": 1.5861843824386597,
"learning_rate": 3.349754278861517e-05,
"loss": 9.1074,
"step": 149
},
{
"epoch": 0.0182337567616848,
"grad_norm": 1.8981313705444336,
"learning_rate": 3.227184283742591e-05,
"loss": 7.7665,
"step": 150
},
{
"epoch": 0.0182337567616848,
"eval_loss": 2.19260835647583,
"eval_runtime": 130.7594,
"eval_samples_per_second": 26.491,
"eval_steps_per_second": 13.246,
"step": 150
},
{
"epoch": 0.018355315140096033,
"grad_norm": 1.6255931854248047,
"learning_rate": 3.106465908814342e-05,
"loss": 9.076,
"step": 151
},
{
"epoch": 0.018476873518507263,
"grad_norm": 1.4169337749481201,
"learning_rate": 2.9876321572751144e-05,
"loss": 8.5266,
"step": 152
},
{
"epoch": 0.018598431896918496,
"grad_norm": 1.3011150360107422,
"learning_rate": 2.87071551708603e-05,
"loss": 8.4848,
"step": 153
},
{
"epoch": 0.018719990275329727,
"grad_norm": 2.2448577880859375,
"learning_rate": 2.7557479520891104e-05,
"loss": 8.4936,
"step": 154
},
{
"epoch": 0.01884154865374096,
"grad_norm": 2.336907386779785,
"learning_rate": 2.6427608932686843e-05,
"loss": 10.1081,
"step": 155
},
{
"epoch": 0.01896310703215219,
"grad_norm": 2.3736586570739746,
"learning_rate": 2.5317852301584643e-05,
"loss": 8.715,
"step": 156
},
{
"epoch": 0.019084665410563424,
"grad_norm": 2.2278640270233154,
"learning_rate": 2.422851302396655e-05,
"loss": 7.6252,
"step": 157
},
{
"epoch": 0.019206223788974654,
"grad_norm": 2.075263261795044,
"learning_rate": 2.315988891431412e-05,
"loss": 8.8813,
"step": 158
},
{
"epoch": 0.019327782167385888,
"grad_norm": 1.630843162536621,
"learning_rate": 2.2112272123788768e-05,
"loss": 9.5883,
"step": 159
},
{
"epoch": 0.01944934054579712,
"grad_norm": 1.8123506307601929,
"learning_rate": 2.1085949060360654e-05,
"loss": 8.3388,
"step": 160
},
{
"epoch": 0.019570898924208352,
"grad_norm": 1.678495168685913,
"learning_rate": 2.008120031050753e-05,
"loss": 7.75,
"step": 161
},
{
"epoch": 0.019692457302619582,
"grad_norm": 1.6239267587661743,
"learning_rate": 1.9098300562505266e-05,
"loss": 9.6735,
"step": 162
},
{
"epoch": 0.019814015681030816,
"grad_norm": 1.7330944538116455,
"learning_rate": 1.8137518531330767e-05,
"loss": 8.2531,
"step": 163
},
{
"epoch": 0.019935574059442046,
"grad_norm": 1.772774338722229,
"learning_rate": 1.7199116885197995e-05,
"loss": 9.5867,
"step": 164
},
{
"epoch": 0.02005713243785328,
"grad_norm": 2.354120969772339,
"learning_rate": 1.6283352173747145e-05,
"loss": 9.5565,
"step": 165
},
{
"epoch": 0.02017869081626451,
"grad_norm": 1.7209793329238892,
"learning_rate": 1.5390474757906446e-05,
"loss": 9.5981,
"step": 166
},
{
"epoch": 0.020300249194675744,
"grad_norm": 1.2247873544692993,
"learning_rate": 1.4520728741446089e-05,
"loss": 8.0355,
"step": 167
},
{
"epoch": 0.020421807573086974,
"grad_norm": 1.4264851808547974,
"learning_rate": 1.3674351904242611e-05,
"loss": 8.0481,
"step": 168
},
{
"epoch": 0.020543365951498208,
"grad_norm": 1.3829172849655151,
"learning_rate": 1.2851575637272262e-05,
"loss": 9.0446,
"step": 169
},
{
"epoch": 0.020664924329909438,
"grad_norm": 1.413722038269043,
"learning_rate": 1.2052624879351104e-05,
"loss": 7.2469,
"step": 170
},
{
"epoch": 0.02078648270832067,
"grad_norm": 1.6910505294799805,
"learning_rate": 1.1277718055638819e-05,
"loss": 8.2903,
"step": 171
},
{
"epoch": 0.0209080410867319,
"grad_norm": 1.9138679504394531,
"learning_rate": 1.0527067017923654e-05,
"loss": 9.0652,
"step": 172
},
{
"epoch": 0.021029599465143135,
"grad_norm": 1.5398927927017212,
"learning_rate": 9.80087698670411e-06,
"loss": 9.4462,
"step": 173
},
{
"epoch": 0.021151157843554366,
"grad_norm": 2.0731565952301025,
"learning_rate": 9.09934649508375e-06,
"loss": 9.2584,
"step": 174
},
{
"epoch": 0.0212727162219656,
"grad_norm": 1.8292239904403687,
"learning_rate": 8.422667334494249e-06,
"loss": 8.2206,
"step": 175
},
{
"epoch": 0.02139427460037683,
"grad_norm": 1.8700852394104004,
"learning_rate": 7.771024502261526e-06,
"loss": 9.4161,
"step": 176
},
{
"epoch": 0.021515832978788063,
"grad_norm": 2.166424512863159,
"learning_rate": 7.144596151029303e-06,
"loss": 8.0841,
"step": 177
},
{
"epoch": 0.021637391357199293,
"grad_norm": 1.5074312686920166,
"learning_rate": 6.543553540053926e-06,
"loss": 9.621,
"step": 178
},
{
"epoch": 0.021758949735610527,
"grad_norm": 1.4082826375961304,
"learning_rate": 5.968060988383883e-06,
"loss": 9.3076,
"step": 179
},
{
"epoch": 0.021880508114021757,
"grad_norm": 1.45763099193573,
"learning_rate": 5.418275829936537e-06,
"loss": 8.8478,
"step": 180
},
{
"epoch": 0.02200206649243299,
"grad_norm": 2.5203921794891357,
"learning_rate": 4.8943483704846475e-06,
"loss": 8.3001,
"step": 181
},
{
"epoch": 0.022123624870844225,
"grad_norm": 1.9692637920379639,
"learning_rate": 4.3964218465642355e-06,
"loss": 9.1614,
"step": 182
},
{
"epoch": 0.022245183249255455,
"grad_norm": 1.598758339881897,
"learning_rate": 3.924632386315186e-06,
"loss": 9.3152,
"step": 183
},
{
"epoch": 0.02236674162766669,
"grad_norm": 1.3611668348312378,
"learning_rate": 3.4791089722651436e-06,
"loss": 8.5055,
"step": 184
},
{
"epoch": 0.02248830000607792,
"grad_norm": 1.7095437049865723,
"learning_rate": 3.059973406066963e-06,
"loss": 8.6328,
"step": 185
},
{
"epoch": 0.022609858384489152,
"grad_norm": 1.3015220165252686,
"learning_rate": 2.667340275199426e-06,
"loss": 7.9608,
"step": 186
},
{
"epoch": 0.022731416762900383,
"grad_norm": 1.7117013931274414,
"learning_rate": 2.3013169216400733e-06,
"loss": 8.2766,
"step": 187
},
{
"epoch": 0.022852975141311616,
"grad_norm": 1.5060421228408813,
"learning_rate": 1.9620034125190644e-06,
"loss": 10.397,
"step": 188
},
{
"epoch": 0.022974533519722846,
"grad_norm": 1.8349546194076538,
"learning_rate": 1.6494925127617634e-06,
"loss": 9.6316,
"step": 189
},
{
"epoch": 0.02309609189813408,
"grad_norm": 2.0943706035614014,
"learning_rate": 1.3638696597277679e-06,
"loss": 7.7704,
"step": 190
},
{
"epoch": 0.02321765027654531,
"grad_norm": 1.7935388088226318,
"learning_rate": 1.1052129398531507e-06,
"loss": 10.2541,
"step": 191
},
{
"epoch": 0.023339208654956544,
"grad_norm": 1.8074511289596558,
"learning_rate": 8.735930673024806e-07,
"loss": 9.2515,
"step": 192
},
{
"epoch": 0.023460767033367774,
"grad_norm": 1.4618196487426758,
"learning_rate": 6.690733646361857e-07,
"loss": 8.3641,
"step": 193
},
{
"epoch": 0.023582325411779008,
"grad_norm": 2.332951307296753,
"learning_rate": 4.917097454988584e-07,
"loss": 9.805,
"step": 194
},
{
"epoch": 0.023703883790190238,
"grad_norm": 1.3441449403762817,
"learning_rate": 3.415506993330153e-07,
"loss": 8.7603,
"step": 195
},
{
"epoch": 0.023825442168601472,
"grad_norm": 1.8775608539581299,
"learning_rate": 2.1863727812254653e-07,
"loss": 9.3356,
"step": 196
},
{
"epoch": 0.023947000547012702,
"grad_norm": 1.3865495920181274,
"learning_rate": 1.230030851695263e-07,
"loss": 8.5184,
"step": 197
},
{
"epoch": 0.024068558925423936,
"grad_norm": 2.735950469970703,
"learning_rate": 5.467426590739511e-08,
"loss": 8.6827,
"step": 198
},
{
"epoch": 0.024190117303835166,
"grad_norm": 1.6667054891586304,
"learning_rate": 1.3669500753099585e-08,
"loss": 9.2634,
"step": 199
},
{
"epoch": 0.0243116756822464,
"grad_norm": 1.8722801208496094,
"learning_rate": 0.0,
"loss": 8.9515,
"step": 200
},
{
"epoch": 0.0243116756822464,
"eval_loss": 2.1910746097564697,
"eval_runtime": 130.64,
"eval_samples_per_second": 26.516,
"eval_steps_per_second": 13.258,
"step": 200
}
],
"logging_steps": 1,
"max_steps": 200,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 50,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 3.762410749113139e+16,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}