{
"best_metric": 0.8628646731376648,
"best_model_checkpoint": "miner_id_24/checkpoint-200",
"epoch": 0.11114198388441233,
"eval_steps": 50,
"global_step": 200,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0005557099194220616,
"grad_norm": 0.3096221387386322,
"learning_rate": 1e-05,
"loss": 0.6874,
"step": 1
},
{
"epoch": 0.0011114198388441233,
"grad_norm": 0.3445380926132202,
"learning_rate": 2e-05,
"loss": 0.8533,
"step": 2
},
{
"epoch": 0.0016671297582661851,
"grad_norm": 0.38732942938804626,
"learning_rate": 3e-05,
"loss": 0.8416,
"step": 3
},
{
"epoch": 0.0022228396776882466,
"grad_norm": 0.40373197197914124,
"learning_rate": 4e-05,
"loss": 0.9278,
"step": 4
},
{
"epoch": 0.0027785495971103086,
"grad_norm": 0.5323252081871033,
"learning_rate": 5e-05,
"loss": 0.8707,
"step": 5
},
{
"epoch": 0.0033342595165323703,
"grad_norm": 0.40320611000061035,
"learning_rate": 6e-05,
"loss": 0.8885,
"step": 6
},
{
"epoch": 0.003889969435954432,
"grad_norm": 0.4491368234157562,
"learning_rate": 7e-05,
"loss": 0.9183,
"step": 7
},
{
"epoch": 0.004445679355376493,
"grad_norm": 0.4458727538585663,
"learning_rate": 8e-05,
"loss": 0.8906,
"step": 8
},
{
"epoch": 0.005001389274798555,
"grad_norm": 0.48434558510780334,
"learning_rate": 9e-05,
"loss": 0.9905,
"step": 9
},
{
"epoch": 0.005557099194220617,
"grad_norm": 0.48024508357048035,
"learning_rate": 0.0001,
"loss": 0.976,
"step": 10
},
{
"epoch": 0.006112809113642679,
"grad_norm": 0.4710797965526581,
"learning_rate": 9.999316524962345e-05,
"loss": 0.8832,
"step": 11
},
{
"epoch": 0.0066685190330647405,
"grad_norm": 0.6608445644378662,
"learning_rate": 9.997266286704631e-05,
"loss": 0.8135,
"step": 12
},
{
"epoch": 0.007224228952486802,
"grad_norm": 0.7733985185623169,
"learning_rate": 9.993849845741524e-05,
"loss": 0.9128,
"step": 13
},
{
"epoch": 0.007779938871908864,
"grad_norm": 0.7563806772232056,
"learning_rate": 9.989068136093873e-05,
"loss": 0.8965,
"step": 14
},
{
"epoch": 0.008335648791330925,
"grad_norm": 0.8561490178108215,
"learning_rate": 9.98292246503335e-05,
"loss": 1.0446,
"step": 15
},
{
"epoch": 0.008891358710752986,
"grad_norm": 0.5314223170280457,
"learning_rate": 9.975414512725057e-05,
"loss": 0.8947,
"step": 16
},
{
"epoch": 0.009447068630175049,
"grad_norm": 0.5220498442649841,
"learning_rate": 9.966546331768191e-05,
"loss": 0.9007,
"step": 17
},
{
"epoch": 0.01000277854959711,
"grad_norm": 0.4629230499267578,
"learning_rate": 9.956320346634876e-05,
"loss": 0.8739,
"step": 18
},
{
"epoch": 0.010558488469019172,
"grad_norm": 0.4347263276576996,
"learning_rate": 9.944739353007344e-05,
"loss": 0.9759,
"step": 19
},
{
"epoch": 0.011114198388441235,
"grad_norm": 0.4404182434082031,
"learning_rate": 9.931806517013612e-05,
"loss": 0.879,
"step": 20
},
{
"epoch": 0.011669908307863295,
"grad_norm": 0.4404293894767761,
"learning_rate": 9.917525374361912e-05,
"loss": 0.9113,
"step": 21
},
{
"epoch": 0.012225618227285358,
"grad_norm": 0.5099473595619202,
"learning_rate": 9.901899829374047e-05,
"loss": 0.9572,
"step": 22
},
{
"epoch": 0.012781328146707419,
"grad_norm": 0.49983498454093933,
"learning_rate": 9.884934153917997e-05,
"loss": 0.9505,
"step": 23
},
{
"epoch": 0.013337038066129481,
"grad_norm": 0.45751750469207764,
"learning_rate": 9.86663298624003e-05,
"loss": 0.9365,
"step": 24
},
{
"epoch": 0.013892747985551542,
"grad_norm": 0.4815827012062073,
"learning_rate": 9.847001329696653e-05,
"loss": 0.9681,
"step": 25
},
{
"epoch": 0.014448457904973604,
"grad_norm": 0.49498870968818665,
"learning_rate": 9.826044551386744e-05,
"loss": 0.8768,
"step": 26
},
{
"epoch": 0.015004167824395665,
"grad_norm": 0.5356186032295227,
"learning_rate": 9.803768380684242e-05,
"loss": 0.9148,
"step": 27
},
{
"epoch": 0.015559877743817728,
"grad_norm": 0.5918698310852051,
"learning_rate": 9.780178907671789e-05,
"loss": 0.9016,
"step": 28
},
{
"epoch": 0.01611558766323979,
"grad_norm": 0.5985519289970398,
"learning_rate": 9.755282581475769e-05,
"loss": 0.9812,
"step": 29
},
{
"epoch": 0.01667129758266185,
"grad_norm": 0.6009053587913513,
"learning_rate": 9.729086208503174e-05,
"loss": 0.9144,
"step": 30
},
{
"epoch": 0.01722700750208391,
"grad_norm": 0.6077443957328796,
"learning_rate": 9.701596950580806e-05,
"loss": 1.0156,
"step": 31
},
{
"epoch": 0.017782717421505972,
"grad_norm": 0.5306153893470764,
"learning_rate": 9.672822322997305e-05,
"loss": 0.8645,
"step": 32
},
{
"epoch": 0.018338427340928037,
"grad_norm": 0.6579713821411133,
"learning_rate": 9.642770192448536e-05,
"loss": 0.912,
"step": 33
},
{
"epoch": 0.018894137260350097,
"grad_norm": 0.6237910389900208,
"learning_rate": 9.611448774886924e-05,
"loss": 0.9448,
"step": 34
},
{
"epoch": 0.019449847179772158,
"grad_norm": 0.7714456915855408,
"learning_rate": 9.578866633275288e-05,
"loss": 1.013,
"step": 35
},
{
"epoch": 0.02000555709919422,
"grad_norm": 0.8368968367576599,
"learning_rate": 9.545032675245813e-05,
"loss": 1.0158,
"step": 36
},
{
"epoch": 0.020561267018616283,
"grad_norm": 0.7869086265563965,
"learning_rate": 9.509956150664796e-05,
"loss": 0.9657,
"step": 37
},
{
"epoch": 0.021116976938038344,
"grad_norm": 0.868666410446167,
"learning_rate": 9.473646649103818e-05,
"loss": 1.0059,
"step": 38
},
{
"epoch": 0.021672686857460405,
"grad_norm": 0.8402222394943237,
"learning_rate": 9.43611409721806e-05,
"loss": 0.9648,
"step": 39
},
{
"epoch": 0.02222839677688247,
"grad_norm": 1.085195779800415,
"learning_rate": 9.397368756032445e-05,
"loss": 0.914,
"step": 40
},
{
"epoch": 0.02278410669630453,
"grad_norm": 1.0071593523025513,
"learning_rate": 9.357421218136386e-05,
"loss": 1.0821,
"step": 41
},
{
"epoch": 0.02333981661572659,
"grad_norm": 0.9272382259368896,
"learning_rate": 9.316282404787871e-05,
"loss": 0.9226,
"step": 42
},
{
"epoch": 0.02389552653514865,
"grad_norm": 1.150288701057434,
"learning_rate": 9.273963562927695e-05,
"loss": 0.9991,
"step": 43
},
{
"epoch": 0.024451236454570716,
"grad_norm": 1.181030511856079,
"learning_rate": 9.230476262104677e-05,
"loss": 1.0868,
"step": 44
},
{
"epoch": 0.025006946373992776,
"grad_norm": 1.2696013450622559,
"learning_rate": 9.185832391312644e-05,
"loss": 0.9505,
"step": 45
},
{
"epoch": 0.025562656293414837,
"grad_norm": 1.4249521493911743,
"learning_rate": 9.140044155740101e-05,
"loss": 0.9655,
"step": 46
},
{
"epoch": 0.026118366212836898,
"grad_norm": 1.7254620790481567,
"learning_rate": 9.093124073433463e-05,
"loss": 1.0955,
"step": 47
},
{
"epoch": 0.026674076132258962,
"grad_norm": 1.5540577173233032,
"learning_rate": 9.045084971874738e-05,
"loss": 1.144,
"step": 48
},
{
"epoch": 0.027229786051681023,
"grad_norm": 1.849468469619751,
"learning_rate": 8.995939984474624e-05,
"loss": 1.0674,
"step": 49
},
{
"epoch": 0.027785495971103084,
"grad_norm": 2.3807289600372314,
"learning_rate": 8.945702546981969e-05,
"loss": 1.1557,
"step": 50
},
{
"epoch": 0.027785495971103084,
"eval_loss": 0.9262280464172363,
"eval_runtime": 485.2385,
"eval_samples_per_second": 6.246,
"eval_steps_per_second": 1.562,
"step": 50
},
{
"epoch": 0.028341205890525144,
"grad_norm": 0.6152607798576355,
"learning_rate": 8.894386393810563e-05,
"loss": 0.6218,
"step": 51
},
{
"epoch": 0.02889691580994721,
"grad_norm": 0.6119871735572815,
"learning_rate": 8.842005554284296e-05,
"loss": 0.7201,
"step": 52
},
{
"epoch": 0.02945262572936927,
"grad_norm": 0.6875957250595093,
"learning_rate": 8.788574348801675e-05,
"loss": 0.8161,
"step": 53
},
{
"epoch": 0.03000833564879133,
"grad_norm": 0.5192121863365173,
"learning_rate": 8.73410738492077e-05,
"loss": 0.7156,
"step": 54
},
{
"epoch": 0.03056404556821339,
"grad_norm": 0.5001811981201172,
"learning_rate": 8.678619553365659e-05,
"loss": 0.7691,
"step": 55
},
{
"epoch": 0.031119755487635455,
"grad_norm": 0.44354963302612305,
"learning_rate": 8.622126023955446e-05,
"loss": 0.7391,
"step": 56
},
{
"epoch": 0.03167546540705751,
"grad_norm": 0.4128439724445343,
"learning_rate": 8.564642241456986e-05,
"loss": 0.7258,
"step": 57
},
{
"epoch": 0.03223117532647958,
"grad_norm": 0.4332227110862732,
"learning_rate": 8.506183921362443e-05,
"loss": 0.8114,
"step": 58
},
{
"epoch": 0.03278688524590164,
"grad_norm": 0.49826887249946594,
"learning_rate": 8.44676704559283e-05,
"loss": 0.7566,
"step": 59
},
{
"epoch": 0.0333425951653237,
"grad_norm": 0.5935742259025574,
"learning_rate": 8.386407858128706e-05,
"loss": 0.8343,
"step": 60
},
{
"epoch": 0.03389830508474576,
"grad_norm": 0.5643736124038696,
"learning_rate": 8.32512286056924e-05,
"loss": 0.89,
"step": 61
},
{
"epoch": 0.03445401500416782,
"grad_norm": 0.46135735511779785,
"learning_rate": 8.262928807620843e-05,
"loss": 0.8583,
"step": 62
},
{
"epoch": 0.035009724923589884,
"grad_norm": 0.46768918633461,
"learning_rate": 8.199842702516583e-05,
"loss": 0.8019,
"step": 63
},
{
"epoch": 0.035565434843011945,
"grad_norm": 1.4605656862258911,
"learning_rate": 8.135881792367686e-05,
"loss": 0.9433,
"step": 64
},
{
"epoch": 0.03612114476243401,
"grad_norm": 0.643073320388794,
"learning_rate": 8.07106356344834e-05,
"loss": 0.8167,
"step": 65
},
{
"epoch": 0.03667685468185607,
"grad_norm": 0.5446807146072388,
"learning_rate": 8.005405736415126e-05,
"loss": 0.899,
"step": 66
},
{
"epoch": 0.037232564601278134,
"grad_norm": 0.687372624874115,
"learning_rate": 7.938926261462366e-05,
"loss": 0.8572,
"step": 67
},
{
"epoch": 0.037788274520700195,
"grad_norm": 0.5308534502983093,
"learning_rate": 7.871643313414718e-05,
"loss": 0.8851,
"step": 68
},
{
"epoch": 0.038343984440122256,
"grad_norm": 0.5406598448753357,
"learning_rate": 7.803575286758364e-05,
"loss": 0.9101,
"step": 69
},
{
"epoch": 0.038899694359544316,
"grad_norm": 0.5616788268089294,
"learning_rate": 7.734740790612136e-05,
"loss": 0.9062,
"step": 70
},
{
"epoch": 0.03945540427896638,
"grad_norm": 0.5090973377227783,
"learning_rate": 7.66515864363997e-05,
"loss": 0.9327,
"step": 71
},
{
"epoch": 0.04001111419838844,
"grad_norm": 0.5834062695503235,
"learning_rate": 7.594847868906076e-05,
"loss": 0.9146,
"step": 72
},
{
"epoch": 0.040566824117810506,
"grad_norm": 0.5961350202560425,
"learning_rate": 7.52382768867422e-05,
"loss": 0.8507,
"step": 73
},
{
"epoch": 0.041122534037232567,
"grad_norm": 0.5611476302146912,
"learning_rate": 7.452117519152542e-05,
"loss": 0.9023,
"step": 74
},
{
"epoch": 0.04167824395665463,
"grad_norm": 0.5981151461601257,
"learning_rate": 7.379736965185368e-05,
"loss": 0.9309,
"step": 75
},
{
"epoch": 0.04223395387607669,
"grad_norm": 0.6651081442832947,
"learning_rate": 7.30670581489344e-05,
"loss": 0.8387,
"step": 76
},
{
"epoch": 0.04278966379549875,
"grad_norm": 0.5755109190940857,
"learning_rate": 7.233044034264034e-05,
"loss": 0.9094,
"step": 77
},
{
"epoch": 0.04334537371492081,
"grad_norm": 0.6457306742668152,
"learning_rate": 7.158771761692464e-05,
"loss": 0.9344,
"step": 78
},
{
"epoch": 0.04390108363434287,
"grad_norm": 0.6681315898895264,
"learning_rate": 7.083909302476453e-05,
"loss": 0.9146,
"step": 79
},
{
"epoch": 0.04445679355376494,
"grad_norm": 0.6412218809127808,
"learning_rate": 7.008477123264848e-05,
"loss": 0.9006,
"step": 80
},
{
"epoch": 0.045012503473187,
"grad_norm": 0.7244966626167297,
"learning_rate": 6.932495846462261e-05,
"loss": 0.9162,
"step": 81
},
{
"epoch": 0.04556821339260906,
"grad_norm": 0.6981307864189148,
"learning_rate": 6.855986244591104e-05,
"loss": 0.9197,
"step": 82
},
{
"epoch": 0.04612392331203112,
"grad_norm": 0.6621314287185669,
"learning_rate": 6.778969234612584e-05,
"loss": 0.8846,
"step": 83
},
{
"epoch": 0.04667963323145318,
"grad_norm": 0.6982636451721191,
"learning_rate": 6.701465872208216e-05,
"loss": 0.9075,
"step": 84
},
{
"epoch": 0.04723534315087524,
"grad_norm": 0.8870342373847961,
"learning_rate": 6.623497346023418e-05,
"loss": 0.8958,
"step": 85
},
{
"epoch": 0.0477910530702973,
"grad_norm": 0.7478469610214233,
"learning_rate": 6.545084971874738e-05,
"loss": 0.9219,
"step": 86
},
{
"epoch": 0.048346762989719363,
"grad_norm": 0.8852052092552185,
"learning_rate": 6.466250186922325e-05,
"loss": 0.9669,
"step": 87
},
{
"epoch": 0.04890247290914143,
"grad_norm": 0.9566718935966492,
"learning_rate": 6.387014543809223e-05,
"loss": 0.9394,
"step": 88
},
{
"epoch": 0.04945818282856349,
"grad_norm": 0.96366947889328,
"learning_rate": 6.307399704769099e-05,
"loss": 0.8851,
"step": 89
},
{
"epoch": 0.05001389274798555,
"grad_norm": 1.0311588048934937,
"learning_rate": 6.227427435703997e-05,
"loss": 0.9561,
"step": 90
},
{
"epoch": 0.050569602667407614,
"grad_norm": 1.0592992305755615,
"learning_rate": 6.147119600233758e-05,
"loss": 1.003,
"step": 91
},
{
"epoch": 0.051125312586829674,
"grad_norm": 0.9809575080871582,
"learning_rate": 6.066498153718735e-05,
"loss": 1.0925,
"step": 92
},
{
"epoch": 0.051681022506251735,
"grad_norm": 1.1811975240707397,
"learning_rate": 5.985585137257401e-05,
"loss": 1.0152,
"step": 93
},
{
"epoch": 0.052236732425673796,
"grad_norm": 1.2322711944580078,
"learning_rate": 5.90440267166055e-05,
"loss": 1.0615,
"step": 94
},
{
"epoch": 0.05279244234509586,
"grad_norm": 1.2346220016479492,
"learning_rate": 5.8229729514036705e-05,
"loss": 0.9731,
"step": 95
},
{
"epoch": 0.053348152264517924,
"grad_norm": 1.3589791059494019,
"learning_rate": 5.74131823855921e-05,
"loss": 1.071,
"step": 96
},
{
"epoch": 0.053903862183939985,
"grad_norm": 1.3197028636932373,
"learning_rate": 5.6594608567103456e-05,
"loss": 0.975,
"step": 97
},
{
"epoch": 0.054459572103362046,
"grad_norm": 1.6303969621658325,
"learning_rate": 5.577423184847932e-05,
"loss": 1.0707,
"step": 98
},
{
"epoch": 0.05501528202278411,
"grad_norm": 1.7257357835769653,
"learning_rate": 5.495227651252315e-05,
"loss": 1.0774,
"step": 99
},
{
"epoch": 0.05557099194220617,
"grad_norm": 2.2941479682922363,
"learning_rate": 5.4128967273616625e-05,
"loss": 1.2457,
"step": 100
},
{
"epoch": 0.05557099194220617,
"eval_loss": 0.8977280855178833,
"eval_runtime": 485.037,
"eval_samples_per_second": 6.249,
"eval_steps_per_second": 1.563,
"step": 100
},
{
"epoch": 0.05612670186162823,
"grad_norm": 0.4302062392234802,
"learning_rate": 5.330452921628497e-05,
"loss": 0.497,
"step": 101
},
{
"epoch": 0.05668241178105029,
"grad_norm": 0.5863476991653442,
"learning_rate": 5.247918773366112e-05,
"loss": 0.6127,
"step": 102
},
{
"epoch": 0.05723812170047236,
"grad_norm": 0.6520615816116333,
"learning_rate": 5.165316846586541e-05,
"loss": 0.7166,
"step": 103
},
{
"epoch": 0.05779383161989442,
"grad_norm": 0.6201066374778748,
"learning_rate": 5.0826697238317935e-05,
"loss": 0.6871,
"step": 104
},
{
"epoch": 0.05834954153931648,
"grad_norm": 0.6387415528297424,
"learning_rate": 5e-05,
"loss": 0.7832,
"step": 105
},
{
"epoch": 0.05890525145873854,
"grad_norm": 0.5291295647621155,
"learning_rate": 4.917330276168208e-05,
"loss": 0.7588,
"step": 106
},
{
"epoch": 0.0594609613781606,
"grad_norm": 0.5174488425254822,
"learning_rate": 4.834683153413459e-05,
"loss": 0.7953,
"step": 107
},
{
"epoch": 0.06001667129758266,
"grad_norm": 0.5068111419677734,
"learning_rate": 4.7520812266338885e-05,
"loss": 0.7733,
"step": 108
},
{
"epoch": 0.06057238121700472,
"grad_norm": 0.483059823513031,
"learning_rate": 4.669547078371504e-05,
"loss": 0.7016,
"step": 109
},
{
"epoch": 0.06112809113642678,
"grad_norm": 0.5098429918289185,
"learning_rate": 4.5871032726383386e-05,
"loss": 0.8259,
"step": 110
},
{
"epoch": 0.06168380105584885,
"grad_norm": 0.48790547251701355,
"learning_rate": 4.504772348747687e-05,
"loss": 0.7469,
"step": 111
},
{
"epoch": 0.06223951097527091,
"grad_norm": 0.47628065943717957,
"learning_rate": 4.4225768151520694e-05,
"loss": 0.6838,
"step": 112
},
{
"epoch": 0.06279522089469297,
"grad_norm": 0.45170220732688904,
"learning_rate": 4.3405391432896555e-05,
"loss": 0.7768,
"step": 113
},
{
"epoch": 0.06335093081411503,
"grad_norm": 0.5596415400505066,
"learning_rate": 4.2586817614407895e-05,
"loss": 0.7799,
"step": 114
},
{
"epoch": 0.06390664073353709,
"grad_norm": 0.6097323894500732,
"learning_rate": 4.17702704859633e-05,
"loss": 0.745,
"step": 115
},
{
"epoch": 0.06446235065295916,
"grad_norm": 0.6196081042289734,
"learning_rate": 4.095597328339452e-05,
"loss": 0.8016,
"step": 116
},
{
"epoch": 0.06501806057238121,
"grad_norm": 0.5493971705436707,
"learning_rate": 4.0144148627425993e-05,
"loss": 0.8414,
"step": 117
},
{
"epoch": 0.06557377049180328,
"grad_norm": 0.5350998044013977,
"learning_rate": 3.933501846281267e-05,
"loss": 0.8307,
"step": 118
},
{
"epoch": 0.06612948041122534,
"grad_norm": 0.5519810318946838,
"learning_rate": 3.852880399766243e-05,
"loss": 0.8961,
"step": 119
},
{
"epoch": 0.0666851903306474,
"grad_norm": 0.5195711851119995,
"learning_rate": 3.772572564296005e-05,
"loss": 0.8139,
"step": 120
},
{
"epoch": 0.06724090025006946,
"grad_norm": 0.5278419852256775,
"learning_rate": 3.6926002952309016e-05,
"loss": 0.8392,
"step": 121
},
{
"epoch": 0.06779661016949153,
"grad_norm": 0.5770994424819946,
"learning_rate": 3.612985456190778e-05,
"loss": 0.8381,
"step": 122
},
{
"epoch": 0.06835232008891359,
"grad_norm": 0.5510872602462769,
"learning_rate": 3.533749813077677e-05,
"loss": 0.9033,
"step": 123
},
{
"epoch": 0.06890803000833565,
"grad_norm": 0.5439556241035461,
"learning_rate": 3.4549150281252636e-05,
"loss": 0.8519,
"step": 124
},
{
"epoch": 0.06946373992775771,
"grad_norm": 0.6017346978187561,
"learning_rate": 3.3765026539765834e-05,
"loss": 0.9056,
"step": 125
},
{
"epoch": 0.07001944984717977,
"grad_norm": 0.5570154786109924,
"learning_rate": 3.298534127791785e-05,
"loss": 0.8463,
"step": 126
},
{
"epoch": 0.07057515976660184,
"grad_norm": 0.5789839029312134,
"learning_rate": 3.221030765387417e-05,
"loss": 0.8722,
"step": 127
},
{
"epoch": 0.07113086968602389,
"grad_norm": 0.615638792514801,
"learning_rate": 3.144013755408895e-05,
"loss": 0.8581,
"step": 128
},
{
"epoch": 0.07168657960544596,
"grad_norm": 0.645267903804779,
"learning_rate": 3.0675041535377405e-05,
"loss": 0.8193,
"step": 129
},
{
"epoch": 0.07224228952486803,
"grad_norm": 0.5697069764137268,
"learning_rate": 2.991522876735154e-05,
"loss": 0.7402,
"step": 130
},
{
"epoch": 0.07279799944429008,
"grad_norm": 0.7123498916625977,
"learning_rate": 2.916090697523549e-05,
"loss": 0.7547,
"step": 131
},
{
"epoch": 0.07335370936371215,
"grad_norm": 0.6562145948410034,
"learning_rate": 2.8412282383075363e-05,
"loss": 0.841,
"step": 132
},
{
"epoch": 0.0739094192831342,
"grad_norm": 0.7559430599212646,
"learning_rate": 2.766955965735968e-05,
"loss": 0.8641,
"step": 133
},
{
"epoch": 0.07446512920255627,
"grad_norm": 0.7525032162666321,
"learning_rate": 2.693294185106562e-05,
"loss": 0.8629,
"step": 134
},
{
"epoch": 0.07502083912197832,
"grad_norm": 0.7517706751823425,
"learning_rate": 2.6202630348146324e-05,
"loss": 0.8493,
"step": 135
},
{
"epoch": 0.07557654904140039,
"grad_norm": 0.9117544889450073,
"learning_rate": 2.547882480847461e-05,
"loss": 0.979,
"step": 136
},
{
"epoch": 0.07613225896082246,
"grad_norm": 0.7862403988838196,
"learning_rate": 2.476172311325783e-05,
"loss": 0.9017,
"step": 137
},
{
"epoch": 0.07668796888024451,
"grad_norm": 0.8671106696128845,
"learning_rate": 2.405152131093926e-05,
"loss": 0.9356,
"step": 138
},
{
"epoch": 0.07724367879966658,
"grad_norm": 0.9080340266227722,
"learning_rate": 2.3348413563600325e-05,
"loss": 0.9758,
"step": 139
},
{
"epoch": 0.07779938871908863,
"grad_norm": 0.9741995930671692,
"learning_rate": 2.2652592093878666e-05,
"loss": 0.9595,
"step": 140
},
{
"epoch": 0.0783550986385107,
"grad_norm": 1.0070784091949463,
"learning_rate": 2.196424713241637e-05,
"loss": 0.9375,
"step": 141
},
{
"epoch": 0.07891080855793275,
"grad_norm": 1.0656945705413818,
"learning_rate": 2.128356686585282e-05,
"loss": 0.8457,
"step": 142
},
{
"epoch": 0.07946651847735482,
"grad_norm": 1.159580111503601,
"learning_rate": 2.061073738537635e-05,
"loss": 1.0,
"step": 143
},
{
"epoch": 0.08002222839677688,
"grad_norm": 1.2903732061386108,
"learning_rate": 1.9945942635848748e-05,
"loss": 0.9655,
"step": 144
},
{
"epoch": 0.08057793831619894,
"grad_norm": 1.2341926097869873,
"learning_rate": 1.928936436551661e-05,
"loss": 1.0107,
"step": 145
},
{
"epoch": 0.08113364823562101,
"grad_norm": 1.243120789527893,
"learning_rate": 1.8641182076323148e-05,
"loss": 0.9856,
"step": 146
},
{
"epoch": 0.08168935815504307,
"grad_norm": 1.4137181043624878,
"learning_rate": 1.800157297483417e-05,
"loss": 0.8974,
"step": 147
},
{
"epoch": 0.08224506807446513,
"grad_norm": 1.4438540935516357,
"learning_rate": 1.7370711923791567e-05,
"loss": 1.0212,
"step": 148
},
{
"epoch": 0.08280077799388719,
"grad_norm": 1.699130654335022,
"learning_rate": 1.6748771394307585e-05,
"loss": 1.0967,
"step": 149
},
{
"epoch": 0.08335648791330925,
"grad_norm": 2.626094341278076,
"learning_rate": 1.6135921418712956e-05,
"loss": 0.9766,
"step": 150
},
{
"epoch": 0.08335648791330925,
"eval_loss": 0.866513729095459,
"eval_runtime": 483.5807,
"eval_samples_per_second": 6.268,
"eval_steps_per_second": 1.567,
"step": 150
},
{
"epoch": 0.08391219783273131,
"grad_norm": 0.43071356415748596,
"learning_rate": 1.553232954407171e-05,
"loss": 0.6212,
"step": 151
},
{
"epoch": 0.08446790775215338,
"grad_norm": 0.4564357399940491,
"learning_rate": 1.4938160786375572e-05,
"loss": 0.5791,
"step": 152
},
{
"epoch": 0.08502361767157544,
"grad_norm": 0.4825187623500824,
"learning_rate": 1.435357758543015e-05,
"loss": 0.6476,
"step": 153
},
{
"epoch": 0.0855793275909975,
"grad_norm": 0.604847252368927,
"learning_rate": 1.3778739760445552e-05,
"loss": 0.8215,
"step": 154
},
{
"epoch": 0.08613503751041957,
"grad_norm": 0.5483651161193848,
"learning_rate": 1.3213804466343421e-05,
"loss": 0.7464,
"step": 155
},
{
"epoch": 0.08669074742984162,
"grad_norm": 0.5268886089324951,
"learning_rate": 1.2658926150792322e-05,
"loss": 0.8204,
"step": 156
},
{
"epoch": 0.08724645734926369,
"grad_norm": 0.5907043218612671,
"learning_rate": 1.2114256511983274e-05,
"loss": 0.7811,
"step": 157
},
{
"epoch": 0.08780216726868574,
"grad_norm": 0.5238073468208313,
"learning_rate": 1.157994445715706e-05,
"loss": 0.7253,
"step": 158
},
{
"epoch": 0.08835787718810781,
"grad_norm": 0.5322824716567993,
"learning_rate": 1.1056136061894384e-05,
"loss": 0.7817,
"step": 159
},
{
"epoch": 0.08891358710752988,
"grad_norm": 0.515514075756073,
"learning_rate": 1.0542974530180327e-05,
"loss": 0.8549,
"step": 160
},
{
"epoch": 0.08946929702695193,
"grad_norm": 0.49722638726234436,
"learning_rate": 1.0040600155253765e-05,
"loss": 0.7593,
"step": 161
},
{
"epoch": 0.090025006946374,
"grad_norm": 0.4791252911090851,
"learning_rate": 9.549150281252633e-06,
"loss": 0.7211,
"step": 162
},
{
"epoch": 0.09058071686579605,
"grad_norm": 0.509143590927124,
"learning_rate": 9.068759265665384e-06,
"loss": 0.7829,
"step": 163
},
{
"epoch": 0.09113642678521812,
"grad_norm": 0.5921031832695007,
"learning_rate": 8.599558442598998e-06,
"loss": 0.7481,
"step": 164
},
{
"epoch": 0.09169213670464017,
"grad_norm": 0.6113850474357605,
"learning_rate": 8.141676086873572e-06,
"loss": 0.7718,
"step": 165
},
{
"epoch": 0.09224784662406224,
"grad_norm": 0.6630951762199402,
"learning_rate": 7.695237378953223e-06,
"loss": 0.8883,
"step": 166
},
{
"epoch": 0.0928035565434843,
"grad_norm": 0.6536318063735962,
"learning_rate": 7.260364370723044e-06,
"loss": 0.8645,
"step": 167
},
{
"epoch": 0.09335926646290636,
"grad_norm": 0.5307266712188721,
"learning_rate": 6.837175952121306e-06,
"loss": 0.8854,
"step": 168
},
{
"epoch": 0.09391497638232843,
"grad_norm": 0.5809971690177917,
"learning_rate": 6.425787818636131e-06,
"loss": 0.8454,
"step": 169
},
{
"epoch": 0.09447068630175048,
"grad_norm": 0.5535210371017456,
"learning_rate": 6.026312439675552e-06,
"loss": 0.7561,
"step": 170
},
{
"epoch": 0.09502639622117255,
"grad_norm": 0.5506969690322876,
"learning_rate": 5.6388590278194096e-06,
"loss": 0.7905,
"step": 171
},
{
"epoch": 0.0955821061405946,
"grad_norm": 0.6618872284889221,
"learning_rate": 5.263533508961827e-06,
"loss": 0.8784,
"step": 172
},
{
"epoch": 0.09613781606001667,
"grad_norm": 0.5788018703460693,
"learning_rate": 4.900438493352055e-06,
"loss": 0.9002,
"step": 173
},
{
"epoch": 0.09669352597943873,
"grad_norm": 0.5524190068244934,
"learning_rate": 4.549673247541875e-06,
"loss": 0.9219,
"step": 174
},
{
"epoch": 0.0972492358988608,
"grad_norm": 0.5713950991630554,
"learning_rate": 4.2113336672471245e-06,
"loss": 0.854,
"step": 175
},
{
"epoch": 0.09780494581828286,
"grad_norm": 0.6136912703514099,
"learning_rate": 3.885512251130763e-06,
"loss": 0.8333,
"step": 176
},
{
"epoch": 0.09836065573770492,
"grad_norm": 0.6428893208503723,
"learning_rate": 3.5722980755146517e-06,
"loss": 0.8411,
"step": 177
},
{
"epoch": 0.09891636565712698,
"grad_norm": 0.6516892313957214,
"learning_rate": 3.271776770026963e-06,
"loss": 0.9672,
"step": 178
},
{
"epoch": 0.09947207557654904,
"grad_norm": 0.6879913210868835,
"learning_rate": 2.9840304941919415e-06,
"loss": 0.8086,
"step": 179
},
{
"epoch": 0.1000277854959711,
"grad_norm": 0.6599001288414001,
"learning_rate": 2.7091379149682685e-06,
"loss": 0.7506,
"step": 180
},
{
"epoch": 0.10058349541539316,
"grad_norm": 0.6834718585014343,
"learning_rate": 2.4471741852423237e-06,
"loss": 0.9374,
"step": 181
},
{
"epoch": 0.10113920533481523,
"grad_norm": 0.6849175095558167,
"learning_rate": 2.1982109232821178e-06,
"loss": 0.8561,
"step": 182
},
{
"epoch": 0.1016949152542373,
"grad_norm": 0.7637895941734314,
"learning_rate": 1.962316193157593e-06,
"loss": 0.8923,
"step": 183
},
{
"epoch": 0.10225062517365935,
"grad_norm": 0.7412274479866028,
"learning_rate": 1.7395544861325718e-06,
"loss": 0.8883,
"step": 184
},
{
"epoch": 0.10280633509308142,
"grad_norm": 0.8586113452911377,
"learning_rate": 1.5299867030334814e-06,
"loss": 0.964,
"step": 185
},
{
"epoch": 0.10336204501250347,
"grad_norm": 0.8107852935791016,
"learning_rate": 1.333670137599713e-06,
"loss": 0.9385,
"step": 186
},
{
"epoch": 0.10391775493192554,
"grad_norm": 1.0877127647399902,
"learning_rate": 1.1506584608200367e-06,
"loss": 0.9892,
"step": 187
},
{
"epoch": 0.10447346485134759,
"grad_norm": 0.9704486727714539,
"learning_rate": 9.810017062595322e-07,
"loss": 0.9466,
"step": 188
},
{
"epoch": 0.10502917477076966,
"grad_norm": 1.058051347732544,
"learning_rate": 8.247462563808817e-07,
"loss": 0.9002,
"step": 189
},
{
"epoch": 0.10558488469019171,
"grad_norm": 1.0455724000930786,
"learning_rate": 6.819348298638839e-07,
"loss": 0.936,
"step": 190
},
{
"epoch": 0.10614059460961378,
"grad_norm": 1.0386101007461548,
"learning_rate": 5.526064699265753e-07,
"loss": 0.9007,
"step": 191
},
{
"epoch": 0.10669630452903585,
"grad_norm": 1.10919988155365,
"learning_rate": 4.367965336512403e-07,
"loss": 0.9403,
"step": 192
},
{
"epoch": 0.1072520144484579,
"grad_norm": 1.2226141691207886,
"learning_rate": 3.3453668231809286e-07,
"loss": 0.9448,
"step": 193
},
{
"epoch": 0.10780772436787997,
"grad_norm": 1.201768159866333,
"learning_rate": 2.458548727494292e-07,
"loss": 0.9943,
"step": 194
},
{
"epoch": 0.10836343428730202,
"grad_norm": 1.2886711359024048,
"learning_rate": 1.7077534966650766e-07,
"loss": 0.9768,
"step": 195
},
{
"epoch": 0.10891914420672409,
"grad_norm": 1.3844603300094604,
"learning_rate": 1.0931863906127327e-07,
"loss": 1.0332,
"step": 196
},
{
"epoch": 0.10947485412614615,
"grad_norm": 1.5774954557418823,
"learning_rate": 6.150154258476315e-08,
"loss": 1.0535,
"step": 197
},
{
"epoch": 0.11003056404556821,
"grad_norm": 1.778183937072754,
"learning_rate": 2.7337132953697554e-08,
"loss": 1.0084,
"step": 198
},
{
"epoch": 0.11058627396499028,
"grad_norm": 1.7170872688293457,
"learning_rate": 6.834750376549792e-09,
"loss": 1.1168,
"step": 199
},
{
"epoch": 0.11114198388441233,
"grad_norm": 2.3412373065948486,
"learning_rate": 0.0,
"loss": 1.0962,
"step": 200
},
{
"epoch": 0.11114198388441233,
"eval_loss": 0.8628646731376648,
"eval_runtime": 485.1156,
"eval_samples_per_second": 6.248,
"eval_steps_per_second": 1.563,
"step": 200
}
],
"logging_steps": 1,
"max_steps": 200,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 50,
"stateful_callbacks": {
"EarlyStoppingCallback": {
"args": {
"early_stopping_patience": 5,
"early_stopping_threshold": 0.0
},
"attributes": {
"early_stopping_patience_counter": 0
}
},
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 6.820085038369997e+17,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}