{
"best_metric": null,
"best_model_checkpoint": "miner_id_24/checkpoint-50",
"epoch": 0.38113387327298714,
"eval_steps": 50,
"global_step": 200,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0019056693663649356,
"grad_norm": 1.625303864479065,
"learning_rate": 1e-05,
"loss": 2.0806,
"step": 1
},
{
"epoch": 0.0019056693663649356,
"eval_loss": null,
"eval_runtime": 121.9902,
"eval_samples_per_second": 7.246,
"eval_steps_per_second": 1.812,
"step": 1
},
{
"epoch": 0.003811338732729871,
"grad_norm": 1.8919113874435425,
"learning_rate": 2e-05,
"loss": 2.4173,
"step": 2
},
{
"epoch": 0.005717008099094807,
"grad_norm": 2.010105848312378,
"learning_rate": 3e-05,
"loss": 2.6527,
"step": 3
},
{
"epoch": 0.007622677465459742,
"grad_norm": 1.7687952518463135,
"learning_rate": 4e-05,
"loss": 2.6151,
"step": 4
},
{
"epoch": 0.009528346831824679,
"grad_norm": 1.6465333700180054,
"learning_rate": 5e-05,
"loss": 2.6334,
"step": 5
},
{
"epoch": 0.011434016198189614,
"grad_norm": 1.404477834701538,
"learning_rate": 6e-05,
"loss": 2.547,
"step": 6
},
{
"epoch": 0.01333968556455455,
"grad_norm": 1.258142352104187,
"learning_rate": 7e-05,
"loss": 2.5915,
"step": 7
},
{
"epoch": 0.015245354930919485,
"grad_norm": 1.5234754085540771,
"learning_rate": 8e-05,
"loss": 2.5803,
"step": 8
},
{
"epoch": 0.01715102429728442,
"grad_norm": 1.6583815813064575,
"learning_rate": 9e-05,
"loss": 2.5519,
"step": 9
},
{
"epoch": 0.019056693663649357,
"grad_norm": 1.2219481468200684,
"learning_rate": 0.0001,
"loss": 2.3874,
"step": 10
},
{
"epoch": 0.02096236303001429,
"grad_norm": 1.083174467086792,
"learning_rate": 9.999316524962345e-05,
"loss": 2.4246,
"step": 11
},
{
"epoch": 0.022868032396379228,
"grad_norm": 1.1686445474624634,
"learning_rate": 9.997266286704631e-05,
"loss": 2.5171,
"step": 12
},
{
"epoch": 0.024773701762744165,
"grad_norm": 1.1928153038024902,
"learning_rate": 9.993849845741524e-05,
"loss": 2.3594,
"step": 13
},
{
"epoch": 0.0266793711291091,
"grad_norm": 1.1012721061706543,
"learning_rate": 9.989068136093873e-05,
"loss": 2.2893,
"step": 14
},
{
"epoch": 0.028585040495474036,
"grad_norm": 1.0885003805160522,
"learning_rate": 9.98292246503335e-05,
"loss": 2.4461,
"step": 15
},
{
"epoch": 0.03049070986183897,
"grad_norm": 1.0348674058914185,
"learning_rate": 9.975414512725057e-05,
"loss": 2.2247,
"step": 16
},
{
"epoch": 0.03239637922820391,
"grad_norm": 1.0466992855072021,
"learning_rate": 9.966546331768191e-05,
"loss": 2.3983,
"step": 17
},
{
"epoch": 0.03430204859456884,
"grad_norm": 1.0777167081832886,
"learning_rate": 9.956320346634876e-05,
"loss": 2.3311,
"step": 18
},
{
"epoch": 0.03620771796093378,
"grad_norm": 1.0364409685134888,
"learning_rate": 9.944739353007344e-05,
"loss": 2.3972,
"step": 19
},
{
"epoch": 0.038113387327298714,
"grad_norm": 1.2143179178237915,
"learning_rate": 9.931806517013612e-05,
"loss": 2.3061,
"step": 20
},
{
"epoch": 0.04001905669366365,
"grad_norm": 1.1169112920761108,
"learning_rate": 9.917525374361912e-05,
"loss": 2.2864,
"step": 21
},
{
"epoch": 0.04192472606002858,
"grad_norm": 2.8071861267089844,
"learning_rate": 9.901899829374047e-05,
"loss": 2.4627,
"step": 22
},
{
"epoch": 0.04383039542639352,
"grad_norm": 1.6181660890579224,
"learning_rate": 9.884934153917997e-05,
"loss": 2.4329,
"step": 23
},
{
"epoch": 0.045736064792758456,
"grad_norm": 7.056554317474365,
"learning_rate": 9.86663298624003e-05,
"loss": 2.3044,
"step": 24
},
{
"epoch": 0.04764173415912339,
"grad_norm": 1.5961129665374756,
"learning_rate": 9.847001329696653e-05,
"loss": 2.311,
"step": 25
},
{
"epoch": 0.04954740352548833,
"grad_norm": 1.412833571434021,
"learning_rate": 9.826044551386744e-05,
"loss": 2.3759,
"step": 26
},
{
"epoch": 0.05145307289185326,
"grad_norm": 1.2436467409133911,
"learning_rate": 9.803768380684242e-05,
"loss": 2.2569,
"step": 27
},
{
"epoch": 0.0533587422582182,
"grad_norm": 1.2787665128707886,
"learning_rate": 9.780178907671789e-05,
"loss": 2.415,
"step": 28
},
{
"epoch": 0.055264411624583135,
"grad_norm": 1.1875362396240234,
"learning_rate": 9.755282581475769e-05,
"loss": 2.0744,
"step": 29
},
{
"epoch": 0.05717008099094807,
"grad_norm": 1.468946933746338,
"learning_rate": 9.729086208503174e-05,
"loss": 2.3473,
"step": 30
},
{
"epoch": 0.05907575035731301,
"grad_norm": 1.3427952527999878,
"learning_rate": 9.701596950580806e-05,
"loss": 2.2288,
"step": 31
},
{
"epoch": 0.06098141972367794,
"grad_norm": 1.6122334003448486,
"learning_rate": 9.672822322997305e-05,
"loss": 2.427,
"step": 32
},
{
"epoch": 0.06288708909004288,
"grad_norm": 2.7167866230010986,
"learning_rate": 9.642770192448536e-05,
"loss": 2.4146,
"step": 33
},
{
"epoch": 0.06479275845640782,
"grad_norm": 1.385132074356079,
"learning_rate": 9.611448774886924e-05,
"loss": 2.1858,
"step": 34
},
{
"epoch": 0.06669842782277274,
"grad_norm": 2.5712013244628906,
"learning_rate": 9.578866633275288e-05,
"loss": 2.3198,
"step": 35
},
{
"epoch": 0.06860409718913768,
"grad_norm": 2.7239487171173096,
"learning_rate": 9.545032675245813e-05,
"loss": 2.302,
"step": 36
},
{
"epoch": 0.07050976655550262,
"grad_norm": 1.5528560876846313,
"learning_rate": 9.509956150664796e-05,
"loss": 2.1923,
"step": 37
},
{
"epoch": 0.07241543592186755,
"grad_norm": 1.9108004570007324,
"learning_rate": 9.473646649103818e-05,
"loss": 2.4346,
"step": 38
},
{
"epoch": 0.07432110528823249,
"grad_norm": 1.7357029914855957,
"learning_rate": 9.43611409721806e-05,
"loss": 2.3665,
"step": 39
},
{
"epoch": 0.07622677465459743,
"grad_norm": 1.6708160638809204,
"learning_rate": 9.397368756032445e-05,
"loss": 2.3946,
"step": 40
},
{
"epoch": 0.07813244402096237,
"grad_norm": 1.7986937761306763,
"learning_rate": 9.357421218136386e-05,
"loss": 2.5051,
"step": 41
},
{
"epoch": 0.0800381133873273,
"grad_norm": 1.7630176544189453,
"learning_rate": 9.316282404787871e-05,
"loss": 2.2678,
"step": 42
},
{
"epoch": 0.08194378275369224,
"grad_norm": 1.8785440921783447,
"learning_rate": 9.273963562927695e-05,
"loss": 2.1948,
"step": 43
},
{
"epoch": 0.08384945212005716,
"grad_norm": 2.319279193878174,
"learning_rate": 9.230476262104677e-05,
"loss": 2.3691,
"step": 44
},
{
"epoch": 0.0857551214864221,
"grad_norm": 4.61505651473999,
"learning_rate": 9.185832391312644e-05,
"loss": 3.0313,
"step": 45
},
{
"epoch": 0.08766079085278704,
"grad_norm": 0.0,
"learning_rate": 9.140044155740101e-05,
"loss": 0.0,
"step": 46
},
{
"epoch": 0.08956646021915197,
"grad_norm": 0.0,
"learning_rate": 9.093124073433463e-05,
"loss": 0.0,
"step": 47
},
{
"epoch": 0.09147212958551691,
"grad_norm": 0.0,
"learning_rate": 9.045084971874738e-05,
"loss": 0.0,
"step": 48
},
{
"epoch": 0.09337779895188185,
"grad_norm": 0.0,
"learning_rate": 8.995939984474624e-05,
"loss": 0.0,
"step": 49
},
{
"epoch": 0.09528346831824679,
"grad_norm": 3.32122540473938,
"learning_rate": 8.945702546981969e-05,
"loss": 2.6096,
"step": 50
},
{
"epoch": 0.09528346831824679,
"eval_loss": null,
"eval_runtime": 124.017,
"eval_samples_per_second": 7.128,
"eval_steps_per_second": 1.782,
"step": 50
},
{
"epoch": 0.09718913768461172,
"grad_norm": 3.257054090499878,
"learning_rate": 8.894386393810563e-05,
"loss": 2.475,
"step": 51
},
{
"epoch": 0.09909480705097666,
"grad_norm": 1.825034737586975,
"learning_rate": 8.842005554284296e-05,
"loss": 2.1688,
"step": 52
},
{
"epoch": 0.1010004764173416,
"grad_norm": 1.2291990518569946,
"learning_rate": 8.788574348801675e-05,
"loss": 2.2075,
"step": 53
},
{
"epoch": 0.10290614578370652,
"grad_norm": 0.8810988664627075,
"learning_rate": 8.73410738492077e-05,
"loss": 2.0334,
"step": 54
},
{
"epoch": 0.10481181515007146,
"grad_norm": 0.8452022671699524,
"learning_rate": 8.678619553365659e-05,
"loss": 2.2784,
"step": 55
},
{
"epoch": 0.1067174845164364,
"grad_norm": 0.8677942752838135,
"learning_rate": 8.622126023955446e-05,
"loss": 2.3311,
"step": 56
},
{
"epoch": 0.10862315388280133,
"grad_norm": 0.8011043071746826,
"learning_rate": 8.564642241456986e-05,
"loss": 2.1818,
"step": 57
},
{
"epoch": 0.11052882324916627,
"grad_norm": 0.8731439709663391,
"learning_rate": 8.506183921362443e-05,
"loss": 2.2584,
"step": 58
},
{
"epoch": 0.1124344926155312,
"grad_norm": 0.903266966342926,
"learning_rate": 8.44676704559283e-05,
"loss": 2.1772,
"step": 59
},
{
"epoch": 0.11434016198189614,
"grad_norm": 0.8754334449768066,
"learning_rate": 8.386407858128706e-05,
"loss": 2.2054,
"step": 60
},
{
"epoch": 0.11624583134826108,
"grad_norm": 1.0421199798583984,
"learning_rate": 8.32512286056924e-05,
"loss": 2.1207,
"step": 61
},
{
"epoch": 0.11815150071462602,
"grad_norm": 0.8233661651611328,
"learning_rate": 8.262928807620843e-05,
"loss": 2.0804,
"step": 62
},
{
"epoch": 0.12005717008099095,
"grad_norm": 0.9691787362098694,
"learning_rate": 8.199842702516583e-05,
"loss": 2.264,
"step": 63
},
{
"epoch": 0.12196283944735588,
"grad_norm": 0.9734489321708679,
"learning_rate": 8.135881792367686e-05,
"loss": 2.2164,
"step": 64
},
{
"epoch": 0.12386850881372082,
"grad_norm": 1.0550222396850586,
"learning_rate": 8.07106356344834e-05,
"loss": 2.3509,
"step": 65
},
{
"epoch": 0.12577417818008577,
"grad_norm": 0.9587940573692322,
"learning_rate": 8.005405736415126e-05,
"loss": 2.2228,
"step": 66
},
{
"epoch": 0.1276798475464507,
"grad_norm": 0.966098427772522,
"learning_rate": 7.938926261462366e-05,
"loss": 2.2955,
"step": 67
},
{
"epoch": 0.12958551691281564,
"grad_norm": 0.9006718993186951,
"learning_rate": 7.871643313414718e-05,
"loss": 2.2223,
"step": 68
},
{
"epoch": 0.13149118627918055,
"grad_norm": 0.9012450575828552,
"learning_rate": 7.803575286758364e-05,
"loss": 2.2281,
"step": 69
},
{
"epoch": 0.1333968556455455,
"grad_norm": 0.9210295677185059,
"learning_rate": 7.734740790612136e-05,
"loss": 2.1927,
"step": 70
},
{
"epoch": 0.13530252501191042,
"grad_norm": 0.9985268712043762,
"learning_rate": 7.66515864363997e-05,
"loss": 2.1856,
"step": 71
},
{
"epoch": 0.13720819437827536,
"grad_norm": 1.0568547248840332,
"learning_rate": 7.594847868906076e-05,
"loss": 2.2347,
"step": 72
},
{
"epoch": 0.1391138637446403,
"grad_norm": 1.3930721282958984,
"learning_rate": 7.52382768867422e-05,
"loss": 2.2211,
"step": 73
},
{
"epoch": 0.14101953311100524,
"grad_norm": 1.1122373342514038,
"learning_rate": 7.452117519152542e-05,
"loss": 2.2579,
"step": 74
},
{
"epoch": 0.14292520247737017,
"grad_norm": 1.0553274154663086,
"learning_rate": 7.379736965185368e-05,
"loss": 2.2314,
"step": 75
},
{
"epoch": 0.1448308718437351,
"grad_norm": 1.0698802471160889,
"learning_rate": 7.30670581489344e-05,
"loss": 2.1543,
"step": 76
},
{
"epoch": 0.14673654121010005,
"grad_norm": 1.6581573486328125,
"learning_rate": 7.233044034264034e-05,
"loss": 2.2807,
"step": 77
},
{
"epoch": 0.14864221057646498,
"grad_norm": 1.1972084045410156,
"learning_rate": 7.158771761692464e-05,
"loss": 2.2719,
"step": 78
},
{
"epoch": 0.15054787994282992,
"grad_norm": 1.1362383365631104,
"learning_rate": 7.083909302476453e-05,
"loss": 2.1507,
"step": 79
},
{
"epoch": 0.15245354930919486,
"grad_norm": 1.153376579284668,
"learning_rate": 7.008477123264848e-05,
"loss": 2.2695,
"step": 80
},
{
"epoch": 0.1543592186755598,
"grad_norm": 1.2021276950836182,
"learning_rate": 6.932495846462261e-05,
"loss": 2.305,
"step": 81
},
{
"epoch": 0.15626488804192473,
"grad_norm": 1.243454098701477,
"learning_rate": 6.855986244591104e-05,
"loss": 2.1861,
"step": 82
},
{
"epoch": 0.15817055740828967,
"grad_norm": 1.4724884033203125,
"learning_rate": 6.778969234612584e-05,
"loss": 2.1153,
"step": 83
},
{
"epoch": 0.1600762267746546,
"grad_norm": 1.2578407526016235,
"learning_rate": 6.701465872208216e-05,
"loss": 2.1895,
"step": 84
},
{
"epoch": 0.16198189614101954,
"grad_norm": 1.3038272857666016,
"learning_rate": 6.623497346023418e-05,
"loss": 2.1578,
"step": 85
},
{
"epoch": 0.16388756550738448,
"grad_norm": 1.8126806020736694,
"learning_rate": 6.545084971874738e-05,
"loss": 2.1949,
"step": 86
},
{
"epoch": 0.16579323487374942,
"grad_norm": 1.3623523712158203,
"learning_rate": 6.466250186922325e-05,
"loss": 2.3411,
"step": 87
},
{
"epoch": 0.16769890424011433,
"grad_norm": 1.370890498161316,
"learning_rate": 6.387014543809223e-05,
"loss": 2.2452,
"step": 88
},
{
"epoch": 0.16960457360647926,
"grad_norm": 1.3815104961395264,
"learning_rate": 6.307399704769099e-05,
"loss": 2.1947,
"step": 89
},
{
"epoch": 0.1715102429728442,
"grad_norm": 1.5048907995224,
"learning_rate": 6.227427435703997e-05,
"loss": 2.3206,
"step": 90
},
{
"epoch": 0.17341591233920914,
"grad_norm": 1.8256431818008423,
"learning_rate": 6.147119600233758e-05,
"loss": 2.2274,
"step": 91
},
{
"epoch": 0.17532158170557408,
"grad_norm": 1.8468575477600098,
"learning_rate": 6.066498153718735e-05,
"loss": 2.2922,
"step": 92
},
{
"epoch": 0.177227251071939,
"grad_norm": 1.6818069219589233,
"learning_rate": 5.985585137257401e-05,
"loss": 2.2491,
"step": 93
},
{
"epoch": 0.17913292043830395,
"grad_norm": 2.0240490436553955,
"learning_rate": 5.90440267166055e-05,
"loss": 2.575,
"step": 94
},
{
"epoch": 0.1810385898046689,
"grad_norm": 2.463879346847534,
"learning_rate": 5.8229729514036705e-05,
"loss": 2.186,
"step": 95
},
{
"epoch": 0.18294425917103382,
"grad_norm": 0.0,
"learning_rate": 5.74131823855921e-05,
"loss": 0.0,
"step": 96
},
{
"epoch": 0.18484992853739876,
"grad_norm": 0.0,
"learning_rate": 5.6594608567103456e-05,
"loss": 0.0,
"step": 97
},
{
"epoch": 0.1867555979037637,
"grad_norm": 0.0,
"learning_rate": 5.577423184847932e-05,
"loss": 0.0,
"step": 98
},
{
"epoch": 0.18866126727012864,
"grad_norm": 0.0,
"learning_rate": 5.495227651252315e-05,
"loss": 0.0,
"step": 99
},
{
"epoch": 0.19056693663649357,
"grad_norm": 7.715859413146973,
"learning_rate": 5.4128967273616625e-05,
"loss": 2.6799,
"step": 100
},
{
"epoch": 0.19056693663649357,
"eval_loss": null,
"eval_runtime": 123.8787,
"eval_samples_per_second": 7.136,
"eval_steps_per_second": 1.784,
"step": 100
},
{
"epoch": 0.1924726060028585,
"grad_norm": 6.374715805053711,
"learning_rate": 5.330452921628497e-05,
"loss": 2.4025,
"step": 101
},
{
"epoch": 0.19437827536922345,
"grad_norm": 3.087235927581787,
"learning_rate": 5.247918773366112e-05,
"loss": 2.3568,
"step": 102
},
{
"epoch": 0.19628394473558838,
"grad_norm": 2.604992151260376,
"learning_rate": 5.165316846586541e-05,
"loss": 2.4599,
"step": 103
},
{
"epoch": 0.19818961410195332,
"grad_norm": 2.30696964263916,
"learning_rate": 5.0826697238317935e-05,
"loss": 2.135,
"step": 104
},
{
"epoch": 0.20009528346831826,
"grad_norm": 1.6723530292510986,
"learning_rate": 5e-05,
"loss": 2.1672,
"step": 105
},
{
"epoch": 0.2020009528346832,
"grad_norm": 1.3099277019500732,
"learning_rate": 4.917330276168208e-05,
"loss": 2.1903,
"step": 106
},
{
"epoch": 0.2039066222010481,
"grad_norm": 0.9453718662261963,
"learning_rate": 4.834683153413459e-05,
"loss": 2.2202,
"step": 107
},
{
"epoch": 0.20581229156741304,
"grad_norm": 0.7894024848937988,
"learning_rate": 4.7520812266338885e-05,
"loss": 2.1519,
"step": 108
},
{
"epoch": 0.20771796093377798,
"grad_norm": 0.7461780309677124,
"learning_rate": 4.669547078371504e-05,
"loss": 2.1794,
"step": 109
},
{
"epoch": 0.20962363030014292,
"grad_norm": 0.8102741837501526,
"learning_rate": 4.5871032726383386e-05,
"loss": 2.1812,
"step": 110
},
{
"epoch": 0.21152929966650785,
"grad_norm": 0.8100122213363647,
"learning_rate": 4.504772348747687e-05,
"loss": 2.1625,
"step": 111
},
{
"epoch": 0.2134349690328728,
"grad_norm": 0.8052271604537964,
"learning_rate": 4.4225768151520694e-05,
"loss": 2.2185,
"step": 112
},
{
"epoch": 0.21534063839923773,
"grad_norm": 0.8258956074714661,
"learning_rate": 4.3405391432896555e-05,
"loss": 2.228,
"step": 113
},
{
"epoch": 0.21724630776560266,
"grad_norm": 0.8672061562538147,
"learning_rate": 4.2586817614407895e-05,
"loss": 2.2591,
"step": 114
},
{
"epoch": 0.2191519771319676,
"grad_norm": 0.9598968625068665,
"learning_rate": 4.17702704859633e-05,
"loss": 2.266,
"step": 115
},
{
"epoch": 0.22105764649833254,
"grad_norm": 0.9424980878829956,
"learning_rate": 4.095597328339452e-05,
"loss": 2.1901,
"step": 116
},
{
"epoch": 0.22296331586469748,
"grad_norm": 0.8500011563301086,
"learning_rate": 4.0144148627425993e-05,
"loss": 2.1516,
"step": 117
},
{
"epoch": 0.2248689852310624,
"grad_norm": 0.9570940732955933,
"learning_rate": 3.933501846281267e-05,
"loss": 2.2747,
"step": 118
},
{
"epoch": 0.22677465459742735,
"grad_norm": 0.9292868971824646,
"learning_rate": 3.852880399766243e-05,
"loss": 2.1949,
"step": 119
},
{
"epoch": 0.2286803239637923,
"grad_norm": 0.9754165410995483,
"learning_rate": 3.772572564296005e-05,
"loss": 2.2281,
"step": 120
},
{
"epoch": 0.23058599333015722,
"grad_norm": 1.0084413290023804,
"learning_rate": 3.6926002952309016e-05,
"loss": 2.4057,
"step": 121
},
{
"epoch": 0.23249166269652216,
"grad_norm": 0.9409120678901672,
"learning_rate": 3.612985456190778e-05,
"loss": 2.2361,
"step": 122
},
{
"epoch": 0.2343973320628871,
"grad_norm": 1.0411348342895508,
"learning_rate": 3.533749813077677e-05,
"loss": 2.2445,
"step": 123
},
{
"epoch": 0.23630300142925204,
"grad_norm": 0.989592969417572,
"learning_rate": 3.4549150281252636e-05,
"loss": 2.3068,
"step": 124
},
{
"epoch": 0.23820867079561697,
"grad_norm": 0.9536654353141785,
"learning_rate": 3.3765026539765834e-05,
"loss": 2.3733,
"step": 125
},
{
"epoch": 0.2401143401619819,
"grad_norm": 1.0774953365325928,
"learning_rate": 3.298534127791785e-05,
"loss": 2.1134,
"step": 126
},
{
"epoch": 0.24202000952834682,
"grad_norm": 1.1679447889328003,
"learning_rate": 3.221030765387417e-05,
"loss": 2.3144,
"step": 127
},
{
"epoch": 0.24392567889471176,
"grad_norm": 1.1911946535110474,
"learning_rate": 3.144013755408895e-05,
"loss": 2.2425,
"step": 128
},
{
"epoch": 0.2458313482610767,
"grad_norm": 1.1719609498977661,
"learning_rate": 3.0675041535377405e-05,
"loss": 2.4016,
"step": 129
},
{
"epoch": 0.24773701762744163,
"grad_norm": 1.1523762941360474,
"learning_rate": 2.991522876735154e-05,
"loss": 2.4412,
"step": 130
},
{
"epoch": 0.24964268699380657,
"grad_norm": 1.119282603263855,
"learning_rate": 2.916090697523549e-05,
"loss": 2.0928,
"step": 131
},
{
"epoch": 0.25154835636017153,
"grad_norm": 1.4509676694869995,
"learning_rate": 2.8412282383075363e-05,
"loss": 2.282,
"step": 132
},
{
"epoch": 0.25345402572653647,
"grad_norm": 1.133516550064087,
"learning_rate": 2.766955965735968e-05,
"loss": 2.2687,
"step": 133
},
{
"epoch": 0.2553596950929014,
"grad_norm": 1.1817891597747803,
"learning_rate": 2.693294185106562e-05,
"loss": 2.2668,
"step": 134
},
{
"epoch": 0.25726536445926634,
"grad_norm": 1.3289722204208374,
"learning_rate": 2.6202630348146324e-05,
"loss": 2.1709,
"step": 135
},
{
"epoch": 0.2591710338256313,
"grad_norm": 1.2520653009414673,
"learning_rate": 2.547882480847461e-05,
"loss": 2.285,
"step": 136
},
{
"epoch": 0.26107670319199616,
"grad_norm": 2.837770938873291,
"learning_rate": 2.476172311325783e-05,
"loss": 2.3204,
"step": 137
},
{
"epoch": 0.2629823725583611,
"grad_norm": 1.3282185792922974,
"learning_rate": 2.405152131093926e-05,
"loss": 2.2622,
"step": 138
},
{
"epoch": 0.26488804192472604,
"grad_norm": 1.3785673379898071,
"learning_rate": 2.3348413563600325e-05,
"loss": 2.2191,
"step": 139
},
{
"epoch": 0.266793711291091,
"grad_norm": 1.4621028900146484,
"learning_rate": 2.2652592093878666e-05,
"loss": 2.1923,
"step": 140
},
{
"epoch": 0.2686993806574559,
"grad_norm": 1.4957550764083862,
"learning_rate": 2.196424713241637e-05,
"loss": 2.223,
"step": 141
},
{
"epoch": 0.27060505002382085,
"grad_norm": 1.566874623298645,
"learning_rate": 2.128356686585282e-05,
"loss": 2.1457,
"step": 142
},
{
"epoch": 0.2725107193901858,
"grad_norm": 1.581588625907898,
"learning_rate": 2.061073738537635e-05,
"loss": 2.3324,
"step": 143
},
{
"epoch": 0.2744163887565507,
"grad_norm": 1.7475031614303589,
"learning_rate": 1.9945942635848748e-05,
"loss": 2.396,
"step": 144
},
{
"epoch": 0.27632205812291566,
"grad_norm": 2.1765027046203613,
"learning_rate": 1.928936436551661e-05,
"loss": 2.3309,
"step": 145
},
{
"epoch": 0.2782277274892806,
"grad_norm": 5.112452983856201,
"learning_rate": 1.8641182076323148e-05,
"loss": 3.0045,
"step": 146
},
{
"epoch": 0.28013339685564553,
"grad_norm": 0.0,
"learning_rate": 1.800157297483417e-05,
"loss": 0.0,
"step": 147
},
{
"epoch": 0.28203906622201047,
"grad_norm": 0.0,
"learning_rate": 1.7370711923791567e-05,
"loss": 0.0,
"step": 148
},
{
"epoch": 0.2839447355883754,
"grad_norm": 0.0,
"learning_rate": 1.6748771394307585e-05,
"loss": 0.0,
"step": 149
},
{
"epoch": 0.28585040495474034,
"grad_norm": 1.6631611585617065,
"learning_rate": 1.6135921418712956e-05,
"loss": 2.2213,
"step": 150
},
{
"epoch": 0.28585040495474034,
"eval_loss": null,
"eval_runtime": 123.886,
"eval_samples_per_second": 7.136,
"eval_steps_per_second": 1.784,
"step": 150
},
{
"epoch": 0.2877560743211053,
"grad_norm": 1.2977925539016724,
"learning_rate": 1.553232954407171e-05,
"loss": 1.8845,
"step": 151
},
{
"epoch": 0.2896617436874702,
"grad_norm": 1.4739603996276855,
"learning_rate": 1.4938160786375572e-05,
"loss": 2.1382,
"step": 152
},
{
"epoch": 0.29156741305383516,
"grad_norm": 1.3540451526641846,
"learning_rate": 1.435357758543015e-05,
"loss": 2.0628,
"step": 153
},
{
"epoch": 0.2934730824202001,
"grad_norm": 1.192720651626587,
"learning_rate": 1.3778739760445552e-05,
"loss": 2.1649,
"step": 154
},
{
"epoch": 0.29537875178656503,
"grad_norm": 1.1411631107330322,
"learning_rate": 1.3213804466343421e-05,
"loss": 2.2098,
"step": 155
},
{
"epoch": 0.29728442115292997,
"grad_norm": 1.1121792793273926,
"learning_rate": 1.2658926150792322e-05,
"loss": 2.3091,
"step": 156
},
{
"epoch": 0.2991900905192949,
"grad_norm": 0.9577322602272034,
"learning_rate": 1.2114256511983274e-05,
"loss": 2.1103,
"step": 157
},
{
"epoch": 0.30109575988565984,
"grad_norm": 1.0152143239974976,
"learning_rate": 1.157994445715706e-05,
"loss": 2.1191,
"step": 158
},
{
"epoch": 0.3030014292520248,
"grad_norm": 0.9139170050621033,
"learning_rate": 1.1056136061894384e-05,
"loss": 2.2654,
"step": 159
},
{
"epoch": 0.3049070986183897,
"grad_norm": 0.8486616611480713,
"learning_rate": 1.0542974530180327e-05,
"loss": 2.1172,
"step": 160
},
{
"epoch": 0.30681276798475465,
"grad_norm": 0.8078973293304443,
"learning_rate": 1.0040600155253765e-05,
"loss": 2.1286,
"step": 161
},
{
"epoch": 0.3087184373511196,
"grad_norm": 0.8643123507499695,
"learning_rate": 9.549150281252633e-06,
"loss": 2.2474,
"step": 162
},
{
"epoch": 0.3106241067174845,
"grad_norm": 0.8092776536941528,
"learning_rate": 9.068759265665384e-06,
"loss": 2.1999,
"step": 163
},
{
"epoch": 0.31252977608384946,
"grad_norm": 0.8323934674263,
"learning_rate": 8.599558442598998e-06,
"loss": 2.1502,
"step": 164
},
{
"epoch": 0.3144354454502144,
"grad_norm": 0.8425267934799194,
"learning_rate": 8.141676086873572e-06,
"loss": 2.2538,
"step": 165
},
{
"epoch": 0.31634111481657934,
"grad_norm": 0.8372315168380737,
"learning_rate": 7.695237378953223e-06,
"loss": 2.0642,
"step": 166
},
{
"epoch": 0.3182467841829443,
"grad_norm": 0.8230723738670349,
"learning_rate": 7.260364370723044e-06,
"loss": 2.0993,
"step": 167
},
{
"epoch": 0.3201524535493092,
"grad_norm": 0.8597812056541443,
"learning_rate": 6.837175952121306e-06,
"loss": 2.1144,
"step": 168
},
{
"epoch": 0.32205812291567415,
"grad_norm": 0.8669524192810059,
"learning_rate": 6.425787818636131e-06,
"loss": 2.2526,
"step": 169
},
{
"epoch": 0.3239637922820391,
"grad_norm": 0.8641037344932556,
"learning_rate": 6.026312439675552e-06,
"loss": 2.1673,
"step": 170
},
{
"epoch": 0.325869461648404,
"grad_norm": 0.8340052962303162,
"learning_rate": 5.6388590278194096e-06,
"loss": 2.2321,
"step": 171
},
{
"epoch": 0.32777513101476896,
"grad_norm": 0.894442081451416,
"learning_rate": 5.263533508961827e-06,
"loss": 2.0982,
"step": 172
},
{
"epoch": 0.3296808003811339,
"grad_norm": 0.9692354798316956,
"learning_rate": 4.900438493352055e-06,
"loss": 2.3198,
"step": 173
},
{
"epoch": 0.33158646974749884,
"grad_norm": 0.9209113717079163,
"learning_rate": 4.549673247541875e-06,
"loss": 2.0399,
"step": 174
},
{
"epoch": 0.3334921391138637,
"grad_norm": 0.9890367984771729,
"learning_rate": 4.2113336672471245e-06,
"loss": 2.2726,
"step": 175
},
{
"epoch": 0.33539780848022865,
"grad_norm": 1.0139867067337036,
"learning_rate": 3.885512251130763e-06,
"loss": 2.2643,
"step": 176
},
{
"epoch": 0.3373034778465936,
"grad_norm": 1.0811392068862915,
"learning_rate": 3.5722980755146517e-06,
"loss": 2.2314,
"step": 177
},
{
"epoch": 0.33920914721295853,
"grad_norm": 0.9895434975624084,
"learning_rate": 3.271776770026963e-06,
"loss": 2.1285,
"step": 178
},
{
"epoch": 0.34111481657932347,
"grad_norm": 1.0205448865890503,
"learning_rate": 2.9840304941919415e-06,
"loss": 2.2578,
"step": 179
},
{
"epoch": 0.3430204859456884,
"grad_norm": 1.1815122365951538,
"learning_rate": 2.7091379149682685e-06,
"loss": 2.2026,
"step": 180
},
{
"epoch": 0.34492615531205334,
"grad_norm": 1.0534892082214355,
"learning_rate": 2.4471741852423237e-06,
"loss": 2.0888,
"step": 181
},
{
"epoch": 0.3468318246784183,
"grad_norm": 1.1304489374160767,
"learning_rate": 2.1982109232821178e-06,
"loss": 2.2371,
"step": 182
},
{
"epoch": 0.3487374940447832,
"grad_norm": 1.1426349878311157,
"learning_rate": 1.962316193157593e-06,
"loss": 2.1717,
"step": 183
},
{
"epoch": 0.35064316341114815,
"grad_norm": 1.275299072265625,
"learning_rate": 1.7395544861325718e-06,
"loss": 2.312,
"step": 184
},
{
"epoch": 0.3525488327775131,
"grad_norm": 1.1940147876739502,
"learning_rate": 1.5299867030334814e-06,
"loss": 2.2908,
"step": 185
},
{
"epoch": 0.354454502143878,
"grad_norm": 2.9150757789611816,
"learning_rate": 1.333670137599713e-06,
"loss": 2.371,
"step": 186
},
{
"epoch": 0.35636017151024296,
"grad_norm": 1.4302035570144653,
"learning_rate": 1.1506584608200367e-06,
"loss": 2.1789,
"step": 187
},
{
"epoch": 0.3582658408766079,
"grad_norm": 1.333971381187439,
"learning_rate": 9.810017062595322e-07,
"loss": 2.0292,
"step": 188
},
{
"epoch": 0.36017151024297284,
"grad_norm": 1.4303479194641113,
"learning_rate": 8.247462563808817e-07,
"loss": 2.0514,
"step": 189
},
{
"epoch": 0.3620771796093378,
"grad_norm": 1.6177750825881958,
"learning_rate": 6.819348298638839e-07,
"loss": 2.1123,
"step": 190
},
{
"epoch": 0.3639828489757027,
"grad_norm": 1.672381043434143,
"learning_rate": 5.526064699265753e-07,
"loss": 2.1767,
"step": 191
},
{
"epoch": 0.36588851834206765,
"grad_norm": 1.703062653541565,
"learning_rate": 4.367965336512403e-07,
"loss": 2.141,
"step": 192
},
{
"epoch": 0.3677941877084326,
"grad_norm": 1.9463555812835693,
"learning_rate": 3.3453668231809286e-07,
"loss": 2.5574,
"step": 193
},
{
"epoch": 0.3696998570747975,
"grad_norm": 2.1811716556549072,
"learning_rate": 2.458548727494292e-07,
"loss": 2.4226,
"step": 194
},
{
"epoch": 0.37160552644116246,
"grad_norm": 2.491175889968872,
"learning_rate": 1.7077534966650766e-07,
"loss": 2.0546,
"step": 195
},
{
"epoch": 0.3735111958075274,
"grad_norm": 0.0,
"learning_rate": 1.0931863906127327e-07,
"loss": 0.0,
"step": 196
},
{
"epoch": 0.37541686517389233,
"grad_norm": 0.0,
"learning_rate": 6.150154258476315e-08,
"loss": 0.0,
"step": 197
},
{
"epoch": 0.37732253454025727,
"grad_norm": 0.0,
"learning_rate": 2.7337132953697554e-08,
"loss": 0.0,
"step": 198
},
{
"epoch": 0.3792282039066222,
"grad_norm": 3.1555798053741455,
"learning_rate": 6.834750376549792e-09,
"loss": 1.9376,
"step": 199
},
{
"epoch": 0.38113387327298714,
"grad_norm": 0.9697321057319641,
"learning_rate": 0.0,
"loss": 2.3541,
"step": 200
},
{
"epoch": 0.38113387327298714,
"eval_loss": null,
"eval_runtime": 124.2367,
"eval_samples_per_second": 7.115,
"eval_steps_per_second": 1.779,
"step": 200
}
],
"logging_steps": 1,
"max_steps": 200,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 50,
"stateful_callbacks": {
"EarlyStoppingCallback": {
"args": {
"early_stopping_patience": 5,
"early_stopping_threshold": 0.0
},
"attributes": {
"early_stopping_patience_counter": 3
}
},
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 5.717266463446794e+17,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}