{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.19962570180910794,
"eval_steps": 50,
"global_step": 150,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0013308380120607194,
"grad_norm": 3.5958077907562256,
"learning_rate": 7.499999999999999e-06,
"loss": 53.6584,
"step": 1
},
{
"epoch": 0.0013308380120607194,
"eval_loss": 1.6596457958221436,
"eval_runtime": 576.7284,
"eval_samples_per_second": 4.389,
"eval_steps_per_second": 2.195,
"step": 1
},
{
"epoch": 0.0026616760241214388,
"grad_norm": 4.593629360198975,
"learning_rate": 1.4999999999999999e-05,
"loss": 53.83,
"step": 2
},
{
"epoch": 0.003992514036182159,
"grad_norm": 3.8698318004608154,
"learning_rate": 2.2499999999999998e-05,
"loss": 53.7949,
"step": 3
},
{
"epoch": 0.0053233520482428775,
"grad_norm": 3.574679136276245,
"learning_rate": 2.9999999999999997e-05,
"loss": 51.8953,
"step": 4
},
{
"epoch": 0.006654190060303597,
"grad_norm": 3.828587532043457,
"learning_rate": 3.75e-05,
"loss": 54.6933,
"step": 5
},
{
"epoch": 0.007985028072364318,
"grad_norm": 4.688767433166504,
"learning_rate": 4.4999999999999996e-05,
"loss": 53.4681,
"step": 6
},
{
"epoch": 0.009315866084425037,
"grad_norm": 3.8683738708496094,
"learning_rate": 5.2499999999999995e-05,
"loss": 52.2187,
"step": 7
},
{
"epoch": 0.010646704096485755,
"grad_norm": 4.84136962890625,
"learning_rate": 5.9999999999999995e-05,
"loss": 51.3524,
"step": 8
},
{
"epoch": 0.011977542108546475,
"grad_norm": 4.16347599029541,
"learning_rate": 6.75e-05,
"loss": 52.5617,
"step": 9
},
{
"epoch": 0.013308380120607194,
"grad_norm": 4.299065589904785,
"learning_rate": 7.5e-05,
"loss": 51.664,
"step": 10
},
{
"epoch": 0.014639218132667914,
"grad_norm": 4.201577663421631,
"learning_rate": 8.25e-05,
"loss": 48.1549,
"step": 11
},
{
"epoch": 0.015970056144728635,
"grad_norm": 4.490873336791992,
"learning_rate": 8.999999999999999e-05,
"loss": 51.543,
"step": 12
},
{
"epoch": 0.017300894156789353,
"grad_norm": 5.399848461151123,
"learning_rate": 9.75e-05,
"loss": 50.0357,
"step": 13
},
{
"epoch": 0.018631732168850074,
"grad_norm": 5.014857769012451,
"learning_rate": 0.00010499999999999999,
"loss": 52.9329,
"step": 14
},
{
"epoch": 0.019962570180910792,
"grad_norm": 4.966302871704102,
"learning_rate": 0.0001125,
"loss": 49.8081,
"step": 15
},
{
"epoch": 0.02129340819297151,
"grad_norm": 5.2359299659729,
"learning_rate": 0.00011999999999999999,
"loss": 50.2341,
"step": 16
},
{
"epoch": 0.02262424620503223,
"grad_norm": 5.209284782409668,
"learning_rate": 0.00012749999999999998,
"loss": 47.965,
"step": 17
},
{
"epoch": 0.02395508421709295,
"grad_norm": 5.249284267425537,
"learning_rate": 0.000135,
"loss": 51.0081,
"step": 18
},
{
"epoch": 0.02528592222915367,
"grad_norm": 6.256328582763672,
"learning_rate": 0.0001425,
"loss": 51.1662,
"step": 19
},
{
"epoch": 0.02661676024121439,
"grad_norm": 5.501735687255859,
"learning_rate": 0.00015,
"loss": 47.9934,
"step": 20
},
{
"epoch": 0.02794759825327511,
"grad_norm": 5.087883472442627,
"learning_rate": 0.00014997810105601446,
"loss": 49.7915,
"step": 21
},
{
"epoch": 0.029278436265335828,
"grad_norm": 7.2340874671936035,
"learning_rate": 0.0001499124170124245,
"loss": 49.9978,
"step": 22
},
{
"epoch": 0.03060927427739655,
"grad_norm": 5.028345584869385,
"learning_rate": 0.00014980298622686183,
"loss": 50.2118,
"step": 23
},
{
"epoch": 0.03194011228945727,
"grad_norm": 4.296029090881348,
"learning_rate": 0.00014964987260382363,
"loss": 46.4395,
"step": 24
},
{
"epoch": 0.033270950301517985,
"grad_norm": 4.090815544128418,
"learning_rate": 0.00014945316555735403,
"loss": 50.296,
"step": 25
},
{
"epoch": 0.034601788313578706,
"grad_norm": 4.5317063331604,
"learning_rate": 0.0001492129799588288,
"loss": 49.3161,
"step": 26
},
{
"epoch": 0.03593262632563943,
"grad_norm": 4.290125846862793,
"learning_rate": 0.0001489294560698738,
"loss": 48.7192,
"step": 27
},
{
"epoch": 0.03726346433770015,
"grad_norm": 4.7845635414123535,
"learning_rate": 0.000148602759460456,
"loss": 47.9046,
"step": 28
},
{
"epoch": 0.03859430234976086,
"grad_norm": 9.48447036743164,
"learning_rate": 0.0001482330809121954,
"loss": 47.9862,
"step": 29
},
{
"epoch": 0.039925140361821584,
"grad_norm": 5.269348621368408,
"learning_rate": 0.00014782063630695388,
"loss": 47.6904,
"step": 30
},
{
"epoch": 0.041255978373882306,
"grad_norm": 4.928490161895752,
"learning_rate": 0.00014736566650076636,
"loss": 48.8313,
"step": 31
},
{
"epoch": 0.04258681638594302,
"grad_norm": 5.5268449783325195,
"learning_rate": 0.00014686843718318744,
"loss": 47.7213,
"step": 32
},
{
"epoch": 0.04391765439800374,
"grad_norm": 4.2176079750061035,
"learning_rate": 0.00014632923872213652,
"loss": 47.0136,
"step": 33
},
{
"epoch": 0.04524849241006446,
"grad_norm": 4.410348892211914,
"learning_rate": 0.0001457483859943307,
"loss": 45.6157,
"step": 34
},
{
"epoch": 0.046579330422125184,
"grad_norm": 4.219701766967773,
"learning_rate": 0.00014512621820140611,
"loss": 47.701,
"step": 35
},
{
"epoch": 0.0479101684341859,
"grad_norm": 4.424015522003174,
"learning_rate": 0.0001444630986718332,
"loss": 45.6344,
"step": 36
},
{
"epoch": 0.04924100644624662,
"grad_norm": 4.436514854431152,
"learning_rate": 0.00014375941464874368,
"loss": 46.8412,
"step": 37
},
{
"epoch": 0.05057184445830734,
"grad_norm": 4.463353157043457,
"learning_rate": 0.0001430155770637909,
"loss": 45.4691,
"step": 38
},
{
"epoch": 0.05190268247036806,
"grad_norm": 5.982223033905029,
"learning_rate": 0.00014223202029717776,
"loss": 46.5923,
"step": 39
},
{
"epoch": 0.05323352048242878,
"grad_norm": 4.8244147300720215,
"learning_rate": 0.0001414092019239907,
"loss": 46.1269,
"step": 40
},
{
"epoch": 0.0545643584944895,
"grad_norm": 4.208717346191406,
"learning_rate": 0.00014054760244698884,
"loss": 49.8436,
"step": 41
},
{
"epoch": 0.05589519650655022,
"grad_norm": 3.8489153385162354,
"learning_rate": 0.00013964772501600368,
"loss": 45.9188,
"step": 42
},
{
"epoch": 0.05722603451861094,
"grad_norm": 3.9226901531219482,
"learning_rate": 0.00013871009513411408,
"loss": 45.8486,
"step": 43
},
{
"epoch": 0.058556872530671655,
"grad_norm": 4.560520172119141,
"learning_rate": 0.00013773526035076698,
"loss": 46.0863,
"step": 44
},
{
"epoch": 0.05988771054273238,
"grad_norm": 5.09638786315918,
"learning_rate": 0.00013672378994202423,
"loss": 45.7102,
"step": 45
},
{
"epoch": 0.0612185485547931,
"grad_norm": 4.719268321990967,
"learning_rate": 0.00013567627457812106,
"loss": 45.6349,
"step": 46
},
{
"epoch": 0.06254938656685381,
"grad_norm": 4.546507835388184,
"learning_rate": 0.00013459332597853157,
"loss": 47.0023,
"step": 47
},
{
"epoch": 0.06388022457891454,
"grad_norm": 4.938004970550537,
"learning_rate": 0.00013347557655474167,
"loss": 42.3119,
"step": 48
},
{
"epoch": 0.06521106259097526,
"grad_norm": 4.659527778625488,
"learning_rate": 0.0001323236790409386,
"loss": 46.6489,
"step": 49
},
{
"epoch": 0.06654190060303597,
"grad_norm": 4.204468727111816,
"learning_rate": 0.00013113830611283258,
"loss": 45.922,
"step": 50
},
{
"epoch": 0.06654190060303597,
"eval_loss": 1.4664535522460938,
"eval_runtime": 577.3931,
"eval_samples_per_second": 4.383,
"eval_steps_per_second": 2.193,
"step": 50
},
{
"epoch": 0.0678727386150967,
"grad_norm": 4.2126569747924805,
"learning_rate": 0.00012992014999483302,
"loss": 47.7475,
"step": 51
},
{
"epoch": 0.06920357662715741,
"grad_norm": 4.303404331207275,
"learning_rate": 0.00012866992205580908,
"loss": 45.0831,
"step": 52
},
{
"epoch": 0.07053441463921813,
"grad_norm": 4.410624027252197,
"learning_rate": 0.00012738835239367027,
"loss": 47.5925,
"step": 53
},
{
"epoch": 0.07186525265127885,
"grad_norm": 4.882404804229736,
"learning_rate": 0.00012607618940900977,
"loss": 46.8029,
"step": 54
},
{
"epoch": 0.07319609066333957,
"grad_norm": 4.176435947418213,
"learning_rate": 0.00012473419936805962,
"loss": 46.6091,
"step": 55
},
{
"epoch": 0.0745269286754003,
"grad_norm": 5.707892894744873,
"learning_rate": 0.0001233631659552128,
"loss": 46.4886,
"step": 56
},
{
"epoch": 0.07585776668746101,
"grad_norm": 5.236654758453369,
"learning_rate": 0.00012196388981537352,
"loss": 45.3293,
"step": 57
},
{
"epoch": 0.07718860469952173,
"grad_norm": 4.779541492462158,
"learning_rate": 0.00012053718808640333,
"loss": 46.4131,
"step": 58
},
{
"epoch": 0.07851944271158245,
"grad_norm": 4.2664666175842285,
"learning_rate": 0.00011908389392193547,
"loss": 44.697,
"step": 59
},
{
"epoch": 0.07985028072364317,
"grad_norm": 4.622812747955322,
"learning_rate": 0.00011760485600483667,
"loss": 42.2174,
"step": 60
},
{
"epoch": 0.08118111873570388,
"grad_norm": 6.746915817260742,
"learning_rate": 0.00011610093805160025,
"loss": 45.9152,
"step": 61
},
{
"epoch": 0.08251195674776461,
"grad_norm": 8.02103328704834,
"learning_rate": 0.00011457301830795994,
"loss": 46.4736,
"step": 62
},
{
"epoch": 0.08384279475982533,
"grad_norm": 4.841928958892822,
"learning_rate": 0.00011302198903601928,
"loss": 46.3832,
"step": 63
},
{
"epoch": 0.08517363277188604,
"grad_norm": 4.746523857116699,
"learning_rate": 0.00011144875599319543,
"loss": 48.0093,
"step": 64
},
{
"epoch": 0.08650447078394677,
"grad_norm": 4.790536403656006,
"learning_rate": 0.00010985423790328263,
"loss": 45.4722,
"step": 65
},
{
"epoch": 0.08783530879600748,
"grad_norm": 5.480260372161865,
"learning_rate": 0.0001082393659199431,
"loss": 48.5512,
"step": 66
},
{
"epoch": 0.08916614680806821,
"grad_norm": 4.939945220947266,
"learning_rate": 0.00010660508308293968,
"loss": 47.4139,
"step": 67
},
{
"epoch": 0.09049698482012893,
"grad_norm": 4.303680896759033,
"learning_rate": 0.00010495234376742714,
"loss": 46.2616,
"step": 68
},
{
"epoch": 0.09182782283218964,
"grad_norm": 4.947854518890381,
"learning_rate": 0.00010328211312662403,
"loss": 47.2555,
"step": 69
},
{
"epoch": 0.09315866084425037,
"grad_norm": 4.968333721160889,
"learning_rate": 0.00010159536652819016,
"loss": 47.186,
"step": 70
},
{
"epoch": 0.09448949885631108,
"grad_norm": 4.353626728057861,
"learning_rate": 9.989308898463963e-05,
"loss": 46.0646,
"step": 71
},
{
"epoch": 0.0958203368683718,
"grad_norm": 4.719557762145996,
"learning_rate": 9.817627457812105e-05,
"loss": 44.8426,
"step": 72
},
{
"epoch": 0.09715117488043253,
"grad_norm": 4.732901096343994,
"learning_rate": 9.644592587990168e-05,
"loss": 44.4109,
"step": 73
},
{
"epoch": 0.09848201289249324,
"grad_norm": 4.423262119293213,
"learning_rate": 9.470305336489401e-05,
"loss": 46.6247,
"step": 74
},
{
"epoch": 0.09981285090455397,
"grad_norm": 4.192385673522949,
"learning_rate": 9.294867482156682e-05,
"loss": 47.4476,
"step": 75
},
{
"epoch": 0.10114368891661468,
"grad_norm": 4.283864974975586,
"learning_rate": 9.118381475758547e-05,
"loss": 46.0139,
"step": 76
},
{
"epoch": 0.1024745269286754,
"grad_norm": 5.160930633544922,
"learning_rate": 8.940950380152812e-05,
"loss": 46.4454,
"step": 77
},
{
"epoch": 0.10380536494073613,
"grad_norm": 4.090420722961426,
"learning_rate": 8.762677810102787e-05,
"loss": 44.5024,
"step": 78
},
{
"epoch": 0.10513620295279684,
"grad_norm": 4.580371379852295,
"learning_rate": 8.583667871769158e-05,
"loss": 45.4198,
"step": 79
},
{
"epoch": 0.10646704096485755,
"grad_norm": 5.361691951751709,
"learning_rate": 8.404025101914921e-05,
"loss": 46.9717,
"step": 80
},
{
"epoch": 0.10779787897691828,
"grad_norm": 5.872640609741211,
"learning_rate": 8.223854406858862e-05,
"loss": 46.8967,
"step": 81
},
{
"epoch": 0.109128716988979,
"grad_norm": 4.439775466918945,
"learning_rate": 8.043261001213218e-05,
"loss": 45.777,
"step": 82
},
{
"epoch": 0.11045955500103971,
"grad_norm": 4.8212056159973145,
"learning_rate": 7.862350346441302e-05,
"loss": 46.7968,
"step": 83
},
{
"epoch": 0.11179039301310044,
"grad_norm": 4.29143762588501,
"learning_rate": 7.681228089270991e-05,
"loss": 44.3015,
"step": 84
},
{
"epoch": 0.11312123102516115,
"grad_norm": 4.222655773162842,
"learning_rate": 7.5e-05,
"loss": 45.1899,
"step": 85
},
{
"epoch": 0.11445206903722188,
"grad_norm": 4.317388534545898,
"learning_rate": 7.318771910729009e-05,
"loss": 45.5435,
"step": 86
},
{
"epoch": 0.1157829070492826,
"grad_norm": 4.136228561401367,
"learning_rate": 7.137649653558697e-05,
"loss": 47.2873,
"step": 87
},
{
"epoch": 0.11711374506134331,
"grad_norm": 4.299015998840332,
"learning_rate": 6.956738998786783e-05,
"loss": 47.1202,
"step": 88
},
{
"epoch": 0.11844458307340404,
"grad_norm": 5.2958784103393555,
"learning_rate": 6.776145593141136e-05,
"loss": 48.6786,
"step": 89
},
{
"epoch": 0.11977542108546475,
"grad_norm": 4.868504524230957,
"learning_rate": 6.595974898085078e-05,
"loss": 45.666,
"step": 90
},
{
"epoch": 0.12110625909752547,
"grad_norm": 5.617905139923096,
"learning_rate": 6.416332128230842e-05,
"loss": 44.0781,
"step": 91
},
{
"epoch": 0.1224370971095862,
"grad_norm": 5.250097751617432,
"learning_rate": 6.23732218989721e-05,
"loss": 45.062,
"step": 92
},
{
"epoch": 0.12376793512164691,
"grad_norm": 4.5629096031188965,
"learning_rate": 6.059049619847186e-05,
"loss": 45.0607,
"step": 93
},
{
"epoch": 0.12509877313370762,
"grad_norm": 5.416568279266357,
"learning_rate": 5.881618524241454e-05,
"loss": 46.1678,
"step": 94
},
{
"epoch": 0.12642961114576834,
"grad_norm": 4.383199691772461,
"learning_rate": 5.7051325178433185e-05,
"loss": 44.4619,
"step": 95
},
{
"epoch": 0.12776044915782908,
"grad_norm": 5.828498840332031,
"learning_rate": 5.5296946635105976e-05,
"loss": 44.6781,
"step": 96
},
{
"epoch": 0.1290912871698898,
"grad_norm": 5.047531604766846,
"learning_rate": 5.355407412009831e-05,
"loss": 45.2548,
"step": 97
},
{
"epoch": 0.1304221251819505,
"grad_norm": 4.3439507484436035,
"learning_rate": 5.182372542187895e-05,
"loss": 44.1937,
"step": 98
},
{
"epoch": 0.13175296319401122,
"grad_norm": 4.7746782302856445,
"learning_rate": 5.0106911015360376e-05,
"loss": 45.7256,
"step": 99
},
{
"epoch": 0.13308380120607194,
"grad_norm": 4.695093631744385,
"learning_rate": 4.840463347180982e-05,
"loss": 45.351,
"step": 100
},
{
"epoch": 0.13308380120607194,
"eval_loss": 1.4355225563049316,
"eval_runtime": 576.6715,
"eval_samples_per_second": 4.389,
"eval_steps_per_second": 2.195,
"step": 100
},
{
"epoch": 0.13441463921813268,
"grad_norm": 4.263814926147461,
"learning_rate": 4.671788687337597e-05,
"loss": 45.3437,
"step": 101
},
{
"epoch": 0.1357454772301934,
"grad_norm": 4.525152683258057,
"learning_rate": 4.5047656232572844e-05,
"loss": 45.6854,
"step": 102
},
{
"epoch": 0.1370763152422541,
"grad_norm": 4.433913230895996,
"learning_rate": 4.339491691706033e-05,
"loss": 46.3307,
"step": 103
},
{
"epoch": 0.13840715325431482,
"grad_norm": 4.97377347946167,
"learning_rate": 4.176063408005691e-05,
"loss": 47.4479,
"step": 104
},
{
"epoch": 0.13973799126637554,
"grad_norm": 5.846158504486084,
"learning_rate": 4.014576209671735e-05,
"loss": 46.6026,
"step": 105
},
{
"epoch": 0.14106882927843625,
"grad_norm": 6.544958591461182,
"learning_rate": 3.855124400680454e-05,
"loss": 47.0602,
"step": 106
},
{
"epoch": 0.142399667290497,
"grad_norm": 4.549857139587402,
"learning_rate": 3.697801096398074e-05,
"loss": 44.632,
"step": 107
},
{
"epoch": 0.1437305053025577,
"grad_norm": 4.476944446563721,
"learning_rate": 3.542698169204003e-05,
"loss": 44.4824,
"step": 108
},
{
"epoch": 0.14506134331461842,
"grad_norm": 5.5804314613342285,
"learning_rate": 3.389906194839976e-05,
"loss": 45.0248,
"step": 109
},
{
"epoch": 0.14639218132667914,
"grad_norm": 5.525055408477783,
"learning_rate": 3.239514399516332e-05,
"loss": 44.9731,
"step": 110
},
{
"epoch": 0.14772301933873985,
"grad_norm": 4.815814018249512,
"learning_rate": 3.091610607806452e-05,
"loss": 43.9473,
"step": 111
},
{
"epoch": 0.1490538573508006,
"grad_norm": 4.596780300140381,
"learning_rate": 2.946281191359666e-05,
"loss": 43.4745,
"step": 112
},
{
"epoch": 0.1503846953628613,
"grad_norm": 4.904426097869873,
"learning_rate": 2.803611018462647e-05,
"loss": 46.6434,
"step": 113
},
{
"epoch": 0.15171553337492202,
"grad_norm": 5.079036712646484,
"learning_rate": 2.663683404478722e-05,
"loss": 45.0433,
"step": 114
},
{
"epoch": 0.15304637138698274,
"grad_norm": 4.7992072105407715,
"learning_rate": 2.5265800631940373e-05,
"loss": 45.1488,
"step": 115
},
{
"epoch": 0.15437720939904345,
"grad_norm": 8.740615844726562,
"learning_rate": 2.3923810590990202e-05,
"loss": 46.0447,
"step": 116
},
{
"epoch": 0.15570804741110417,
"grad_norm": 4.366965293884277,
"learning_rate": 2.2611647606329732e-05,
"loss": 43.9194,
"step": 117
},
{
"epoch": 0.1570388854231649,
"grad_norm": 4.738860607147217,
"learning_rate": 2.1330077944190924e-05,
"loss": 44.4921,
"step": 118
},
{
"epoch": 0.15836972343522562,
"grad_norm": 5.531933307647705,
"learning_rate": 2.0079850005167007e-05,
"loss": 47.8288,
"step": 119
},
{
"epoch": 0.15970056144728634,
"grad_norm": 5.006840229034424,
"learning_rate": 1.8861693887167408e-05,
"loss": 47.1657,
"step": 120
},
{
"epoch": 0.16103139945934705,
"grad_norm": 4.700850963592529,
"learning_rate": 1.767632095906137e-05,
"loss": 45.0584,
"step": 121
},
{
"epoch": 0.16236223747140777,
"grad_norm": 5.489245414733887,
"learning_rate": 1.652442344525833e-05,
"loss": 45.8487,
"step": 122
},
{
"epoch": 0.1636930754834685,
"grad_norm": 4.946503162384033,
"learning_rate": 1.5406674021468438e-05,
"loss": 46.2189,
"step": 123
},
{
"epoch": 0.16502391349552922,
"grad_norm": 8.596658706665039,
"learning_rate": 1.4323725421878949e-05,
"loss": 44.8387,
"step": 124
},
{
"epoch": 0.16635475150758994,
"grad_norm": 4.896723747253418,
"learning_rate": 1.3276210057975772e-05,
"loss": 46.1841,
"step": 125
},
{
"epoch": 0.16768558951965065,
"grad_norm": 5.261892795562744,
"learning_rate": 1.2264739649232993e-05,
"loss": 44.6968,
"step": 126
},
{
"epoch": 0.16901642753171137,
"grad_norm": 5.7036027908325195,
"learning_rate": 1.1289904865885935e-05,
"loss": 43.3378,
"step": 127
},
{
"epoch": 0.17034726554377208,
"grad_norm": 4.333169460296631,
"learning_rate": 1.0352274983996303e-05,
"loss": 46.5715,
"step": 128
},
{
"epoch": 0.17167810355583282,
"grad_norm": 5.31616735458374,
"learning_rate": 9.452397553011157e-06,
"loss": 46.6349,
"step": 129
},
{
"epoch": 0.17300894156789354,
"grad_norm": 4.633855819702148,
"learning_rate": 8.590798076009264e-06,
"loss": 46.4312,
"step": 130
},
{
"epoch": 0.17433977957995425,
"grad_norm": 4.295351982116699,
"learning_rate": 7.767979702822217e-06,
"loss": 45.6427,
"step": 131
},
{
"epoch": 0.17567061759201497,
"grad_norm": 4.867600917816162,
"learning_rate": 6.984422936209094e-06,
"loss": 43.972,
"step": 132
},
{
"epoch": 0.17700145560407568,
"grad_norm": 5.0242695808410645,
"learning_rate": 6.240585351256319e-06,
"loss": 44.2976,
"step": 133
},
{
"epoch": 0.17833229361613642,
"grad_norm": 5.848794460296631,
"learning_rate": 5.536901328166773e-06,
"loss": 46.4279,
"step": 134
},
{
"epoch": 0.17966313162819714,
"grad_norm": 4.636799335479736,
"learning_rate": 4.8737817985938955e-06,
"loss": 43.4801,
"step": 135
},
{
"epoch": 0.18099396964025785,
"grad_norm": 4.535057544708252,
"learning_rate": 4.251614005669263e-06,
"loss": 44.8191,
"step": 136
},
{
"epoch": 0.18232480765231857,
"grad_norm": 7.119007587432861,
"learning_rate": 3.670761277863485e-06,
"loss": 43.3487,
"step": 137
},
{
"epoch": 0.18365564566437928,
"grad_norm": 5.639936923980713,
"learning_rate": 3.131562816812533e-06,
"loss": 46.6444,
"step": 138
},
{
"epoch": 0.18498648367644,
"grad_norm": 5.142310619354248,
"learning_rate": 2.6343334992336485e-06,
"loss": 43.399,
"step": 139
},
{
"epoch": 0.18631732168850074,
"grad_norm": 4.855108737945557,
"learning_rate": 2.179363693046099e-06,
"loss": 44.3601,
"step": 140
},
{
"epoch": 0.18764815970056145,
"grad_norm": 4.390917778015137,
"learning_rate": 1.7669190878045914e-06,
"loss": 45.47,
"step": 141
},
{
"epoch": 0.18897899771262217,
"grad_norm": 5.170991897583008,
"learning_rate": 1.3972405395439922e-06,
"loss": 45.9972,
"step": 142
},
{
"epoch": 0.19030983572468288,
"grad_norm": 4.540877342224121,
"learning_rate": 1.0705439301261887e-06,
"loss": 45.5314,
"step": 143
},
{
"epoch": 0.1916406737367436,
"grad_norm": 5.012913703918457,
"learning_rate": 7.870200411711658e-07,
"loss": 44.3529,
"step": 144
},
{
"epoch": 0.19297151174880434,
"grad_norm": 4.687958240509033,
"learning_rate": 5.46834442645952e-07,
"loss": 45.6731,
"step": 145
},
{
"epoch": 0.19430234976086505,
"grad_norm": 4.852694988250732,
"learning_rate": 3.501273961763529e-07,
"loss": 43.5008,
"step": 146
},
{
"epoch": 0.19563318777292577,
"grad_norm": 4.63925838470459,
"learning_rate": 1.9701377313817158e-07,
"loss": 45.8929,
"step": 147
},
{
"epoch": 0.19696402578498648,
"grad_norm": 4.218976020812988,
"learning_rate": 8.758298757550186e-08,
"loss": 45.9191,
"step": 148
},
{
"epoch": 0.1982948637970472,
"grad_norm": 5.067894458770752,
"learning_rate": 2.1898943985529384e-08,
"loss": 46.9714,
"step": 149
},
{
"epoch": 0.19962570180910794,
"grad_norm": 5.027199745178223,
"learning_rate": 0.0,
"loss": 46.4302,
"step": 150
},
{
"epoch": 0.19962570180910794,
"eval_loss": 1.4293036460876465,
"eval_runtime": 573.7255,
"eval_samples_per_second": 4.412,
"eval_steps_per_second": 2.207,
"step": 150
}
],
"logging_steps": 1,
"max_steps": 150,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 13,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 8.47874607229698e+17,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}