{
"best_metric": 0.4233092665672302,
"best_model_checkpoint": "miner_id_24/checkpoint-200",
"epoch": 0.25698682942499196,
"eval_steps": 50,
"global_step": 200,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.00128493414712496,
"grad_norm": 0.3229522705078125,
"learning_rate": 1e-05,
"loss": 0.3839,
"step": 1
},
{
"epoch": 0.00128493414712496,
"eval_loss": 0.8631691336631775,
"eval_runtime": 17.9164,
"eval_samples_per_second": 73.173,
"eval_steps_per_second": 18.307,
"step": 1
},
{
"epoch": 0.00256986829424992,
"grad_norm": 0.16709324717521667,
"learning_rate": 2e-05,
"loss": 0.4978,
"step": 2
},
{
"epoch": 0.0038548024413748794,
"grad_norm": 0.19415687024593353,
"learning_rate": 3e-05,
"loss": 0.3871,
"step": 3
},
{
"epoch": 0.00513973658849984,
"grad_norm": 0.1970839649438858,
"learning_rate": 4e-05,
"loss": 0.4253,
"step": 4
},
{
"epoch": 0.006424670735624799,
"grad_norm": 0.17750820517539978,
"learning_rate": 5e-05,
"loss": 0.5717,
"step": 5
},
{
"epoch": 0.007709604882749759,
"grad_norm": 0.17491599917411804,
"learning_rate": 6e-05,
"loss": 0.5692,
"step": 6
},
{
"epoch": 0.008994539029874718,
"grad_norm": 0.19761942327022552,
"learning_rate": 7e-05,
"loss": 0.6119,
"step": 7
},
{
"epoch": 0.01027947317699968,
"grad_norm": 0.20383700728416443,
"learning_rate": 8e-05,
"loss": 0.6689,
"step": 8
},
{
"epoch": 0.011564407324124639,
"grad_norm": 0.21404898166656494,
"learning_rate": 9e-05,
"loss": 0.5594,
"step": 9
},
{
"epoch": 0.012849341471249599,
"grad_norm": 0.20084555447101593,
"learning_rate": 0.0001,
"loss": 0.6192,
"step": 10
},
{
"epoch": 0.014134275618374558,
"grad_norm": 0.2073499709367752,
"learning_rate": 9.999316524962345e-05,
"loss": 0.5921,
"step": 11
},
{
"epoch": 0.015419209765499518,
"grad_norm": 0.23335210978984833,
"learning_rate": 9.997266286704631e-05,
"loss": 0.6201,
"step": 12
},
{
"epoch": 0.016704143912624477,
"grad_norm": 0.18713581562042236,
"learning_rate": 9.993849845741524e-05,
"loss": 0.6113,
"step": 13
},
{
"epoch": 0.017989078059749437,
"grad_norm": 0.21381498873233795,
"learning_rate": 9.989068136093873e-05,
"loss": 0.6457,
"step": 14
},
{
"epoch": 0.019274012206874396,
"grad_norm": 0.18820427358150482,
"learning_rate": 9.98292246503335e-05,
"loss": 0.446,
"step": 15
},
{
"epoch": 0.02055894635399936,
"grad_norm": 0.19762559235095978,
"learning_rate": 9.975414512725057e-05,
"loss": 0.4946,
"step": 16
},
{
"epoch": 0.02184388050112432,
"grad_norm": 0.2400219589471817,
"learning_rate": 9.966546331768191e-05,
"loss": 0.5926,
"step": 17
},
{
"epoch": 0.023128814648249278,
"grad_norm": 0.21495814621448517,
"learning_rate": 9.956320346634876e-05,
"loss": 0.5148,
"step": 18
},
{
"epoch": 0.024413748795374238,
"grad_norm": 0.2426641285419464,
"learning_rate": 9.944739353007344e-05,
"loss": 0.5235,
"step": 19
},
{
"epoch": 0.025698682942499197,
"grad_norm": 0.2488909512758255,
"learning_rate": 9.931806517013612e-05,
"loss": 0.4619,
"step": 20
},
{
"epoch": 0.026983617089624157,
"grad_norm": 0.2871483862400055,
"learning_rate": 9.917525374361912e-05,
"loss": 0.6898,
"step": 21
},
{
"epoch": 0.028268551236749116,
"grad_norm": 0.26098084449768066,
"learning_rate": 9.901899829374047e-05,
"loss": 0.5525,
"step": 22
},
{
"epoch": 0.029553485383874076,
"grad_norm": 0.2175123542547226,
"learning_rate": 9.884934153917997e-05,
"loss": 0.4632,
"step": 23
},
{
"epoch": 0.030838419530999035,
"grad_norm": 0.18731410801410675,
"learning_rate": 9.86663298624003e-05,
"loss": 0.4726,
"step": 24
},
{
"epoch": 0.032123353678123995,
"grad_norm": 0.2108001559972763,
"learning_rate": 9.847001329696653e-05,
"loss": 0.5506,
"step": 25
},
{
"epoch": 0.033408287825248954,
"grad_norm": 0.17776276171207428,
"learning_rate": 9.826044551386744e-05,
"loss": 0.4597,
"step": 26
},
{
"epoch": 0.034693221972373914,
"grad_norm": 0.19678327441215515,
"learning_rate": 9.803768380684242e-05,
"loss": 0.4534,
"step": 27
},
{
"epoch": 0.03597815611949887,
"grad_norm": 0.20894145965576172,
"learning_rate": 9.780178907671789e-05,
"loss": 0.543,
"step": 28
},
{
"epoch": 0.03726309026662383,
"grad_norm": 0.19850239157676697,
"learning_rate": 9.755282581475769e-05,
"loss": 0.5352,
"step": 29
},
{
"epoch": 0.03854802441374879,
"grad_norm": 0.19896063208580017,
"learning_rate": 9.729086208503174e-05,
"loss": 0.4398,
"step": 30
},
{
"epoch": 0.03983295856087375,
"grad_norm": 0.235293447971344,
"learning_rate": 9.701596950580806e-05,
"loss": 0.5891,
"step": 31
},
{
"epoch": 0.04111789270799872,
"grad_norm": 0.2565675973892212,
"learning_rate": 9.672822322997305e-05,
"loss": 0.5447,
"step": 32
},
{
"epoch": 0.04240282685512368,
"grad_norm": 0.21789588034152985,
"learning_rate": 9.642770192448536e-05,
"loss": 0.5234,
"step": 33
},
{
"epoch": 0.04368776100224864,
"grad_norm": 0.2817614674568176,
"learning_rate": 9.611448774886924e-05,
"loss": 0.585,
"step": 34
},
{
"epoch": 0.0449726951493736,
"grad_norm": 0.2952912151813507,
"learning_rate": 9.578866633275288e-05,
"loss": 0.5613,
"step": 35
},
{
"epoch": 0.046257629296498556,
"grad_norm": 0.24273993074893951,
"learning_rate": 9.545032675245813e-05,
"loss": 0.3903,
"step": 36
},
{
"epoch": 0.047542563443623516,
"grad_norm": 0.28870531916618347,
"learning_rate": 9.509956150664796e-05,
"loss": 0.5704,
"step": 37
},
{
"epoch": 0.048827497590748475,
"grad_norm": 0.26661643385887146,
"learning_rate": 9.473646649103818e-05,
"loss": 0.5572,
"step": 38
},
{
"epoch": 0.050112431737873435,
"grad_norm": 0.2969691753387451,
"learning_rate": 9.43611409721806e-05,
"loss": 0.5358,
"step": 39
},
{
"epoch": 0.051397365884998394,
"grad_norm": 0.2983742952346802,
"learning_rate": 9.397368756032445e-05,
"loss": 0.612,
"step": 40
},
{
"epoch": 0.052682300032123354,
"grad_norm": 0.31109169125556946,
"learning_rate": 9.357421218136386e-05,
"loss": 0.57,
"step": 41
},
{
"epoch": 0.05396723417924831,
"grad_norm": 0.2789413332939148,
"learning_rate": 9.316282404787871e-05,
"loss": 0.5614,
"step": 42
},
{
"epoch": 0.05525216832637327,
"grad_norm": 0.2885047197341919,
"learning_rate": 9.273963562927695e-05,
"loss": 0.5403,
"step": 43
},
{
"epoch": 0.05653710247349823,
"grad_norm": 0.32259103655815125,
"learning_rate": 9.230476262104677e-05,
"loss": 0.6245,
"step": 44
},
{
"epoch": 0.05782203662062319,
"grad_norm": 0.33678483963012695,
"learning_rate": 9.185832391312644e-05,
"loss": 0.5752,
"step": 45
},
{
"epoch": 0.05910697076774815,
"grad_norm": 0.28375566005706787,
"learning_rate": 9.140044155740101e-05,
"loss": 0.496,
"step": 46
},
{
"epoch": 0.06039190491487311,
"grad_norm": 0.29486775398254395,
"learning_rate": 9.093124073433463e-05,
"loss": 0.4325,
"step": 47
},
{
"epoch": 0.06167683906199807,
"grad_norm": 0.3866497278213501,
"learning_rate": 9.045084971874738e-05,
"loss": 0.5662,
"step": 48
},
{
"epoch": 0.06296177320912304,
"grad_norm": 0.41391828656196594,
"learning_rate": 8.995939984474624e-05,
"loss": 0.5712,
"step": 49
},
{
"epoch": 0.06424670735624799,
"grad_norm": 0.6042854189872742,
"learning_rate": 8.945702546981969e-05,
"loss": 0.6938,
"step": 50
},
{
"epoch": 0.06424670735624799,
"eval_loss": 0.4823625683784485,
"eval_runtime": 17.9066,
"eval_samples_per_second": 73.213,
"eval_steps_per_second": 18.317,
"step": 50
},
{
"epoch": 0.06553164150337296,
"grad_norm": 0.41886407136917114,
"learning_rate": 8.894386393810563e-05,
"loss": 0.3765,
"step": 51
},
{
"epoch": 0.06681657565049791,
"grad_norm": 0.3565922975540161,
"learning_rate": 8.842005554284296e-05,
"loss": 0.3995,
"step": 52
},
{
"epoch": 0.06810150979762288,
"grad_norm": 0.3009561598300934,
"learning_rate": 8.788574348801675e-05,
"loss": 0.4963,
"step": 53
},
{
"epoch": 0.06938644394474783,
"grad_norm": 0.30226537585258484,
"learning_rate": 8.73410738492077e-05,
"loss": 0.3823,
"step": 54
},
{
"epoch": 0.0706713780918728,
"grad_norm": 0.24455474317073822,
"learning_rate": 8.678619553365659e-05,
"loss": 0.3055,
"step": 55
},
{
"epoch": 0.07195631223899775,
"grad_norm": 0.18948034942150116,
"learning_rate": 8.622126023955446e-05,
"loss": 0.4758,
"step": 56
},
{
"epoch": 0.07324124638612271,
"grad_norm": 0.20388995110988617,
"learning_rate": 8.564642241456986e-05,
"loss": 0.5025,
"step": 57
},
{
"epoch": 0.07452618053324767,
"grad_norm": 0.16348803043365479,
"learning_rate": 8.506183921362443e-05,
"loss": 0.3856,
"step": 58
},
{
"epoch": 0.07581111468037263,
"grad_norm": 0.1716645210981369,
"learning_rate": 8.44676704559283e-05,
"loss": 0.5104,
"step": 59
},
{
"epoch": 0.07709604882749758,
"grad_norm": 0.18084391951560974,
"learning_rate": 8.386407858128706e-05,
"loss": 0.4021,
"step": 60
},
{
"epoch": 0.07838098297462255,
"grad_norm": 0.18903827667236328,
"learning_rate": 8.32512286056924e-05,
"loss": 0.5257,
"step": 61
},
{
"epoch": 0.0796659171217475,
"grad_norm": 0.17917057871818542,
"learning_rate": 8.262928807620843e-05,
"loss": 0.405,
"step": 62
},
{
"epoch": 0.08095085126887247,
"grad_norm": 0.13263970613479614,
"learning_rate": 8.199842702516583e-05,
"loss": 0.3336,
"step": 63
},
{
"epoch": 0.08223578541599744,
"grad_norm": 0.19856888055801392,
"learning_rate": 8.135881792367686e-05,
"loss": 0.4876,
"step": 64
},
{
"epoch": 0.08352071956312239,
"grad_norm": 0.17072618007659912,
"learning_rate": 8.07106356344834e-05,
"loss": 0.4405,
"step": 65
},
{
"epoch": 0.08480565371024736,
"grad_norm": 0.17208395898342133,
"learning_rate": 8.005405736415126e-05,
"loss": 0.4741,
"step": 66
},
{
"epoch": 0.08609058785737231,
"grad_norm": 0.1645234227180481,
"learning_rate": 7.938926261462366e-05,
"loss": 0.3383,
"step": 67
},
{
"epoch": 0.08737552200449727,
"grad_norm": 0.16286449134349823,
"learning_rate": 7.871643313414718e-05,
"loss": 0.413,
"step": 68
},
{
"epoch": 0.08866045615162223,
"grad_norm": 0.1754399538040161,
"learning_rate": 7.803575286758364e-05,
"loss": 0.5182,
"step": 69
},
{
"epoch": 0.0899453902987472,
"grad_norm": 0.1368543952703476,
"learning_rate": 7.734740790612136e-05,
"loss": 0.3331,
"step": 70
},
{
"epoch": 0.09123032444587215,
"grad_norm": 0.13811126351356506,
"learning_rate": 7.66515864363997e-05,
"loss": 0.3799,
"step": 71
},
{
"epoch": 0.09251525859299711,
"grad_norm": 0.1286497414112091,
"learning_rate": 7.594847868906076e-05,
"loss": 0.2911,
"step": 72
},
{
"epoch": 0.09380019274012207,
"grad_norm": 0.16766689717769623,
"learning_rate": 7.52382768867422e-05,
"loss": 0.4131,
"step": 73
},
{
"epoch": 0.09508512688724703,
"grad_norm": 0.15062600374221802,
"learning_rate": 7.452117519152542e-05,
"loss": 0.3549,
"step": 74
},
{
"epoch": 0.09637006103437198,
"grad_norm": 0.17661640048027039,
"learning_rate": 7.379736965185368e-05,
"loss": 0.4628,
"step": 75
},
{
"epoch": 0.09765499518149695,
"grad_norm": 0.18976455926895142,
"learning_rate": 7.30670581489344e-05,
"loss": 0.447,
"step": 76
},
{
"epoch": 0.0989399293286219,
"grad_norm": 0.17289698123931885,
"learning_rate": 7.233044034264034e-05,
"loss": 0.4547,
"step": 77
},
{
"epoch": 0.10022486347574687,
"grad_norm": 0.1608312577009201,
"learning_rate": 7.158771761692464e-05,
"loss": 0.4095,
"step": 78
},
{
"epoch": 0.10150979762287182,
"grad_norm": 0.16891804337501526,
"learning_rate": 7.083909302476453e-05,
"loss": 0.4182,
"step": 79
},
{
"epoch": 0.10279473176999679,
"grad_norm": 0.19559670984745026,
"learning_rate": 7.008477123264848e-05,
"loss": 0.4894,
"step": 80
},
{
"epoch": 0.10407966591712174,
"grad_norm": 0.16868546605110168,
"learning_rate": 6.932495846462261e-05,
"loss": 0.3961,
"step": 81
},
{
"epoch": 0.10536460006424671,
"grad_norm": 0.1829994171857834,
"learning_rate": 6.855986244591104e-05,
"loss": 0.446,
"step": 82
},
{
"epoch": 0.10664953421137166,
"grad_norm": 0.1991136521100998,
"learning_rate": 6.778969234612584e-05,
"loss": 0.4248,
"step": 83
},
{
"epoch": 0.10793446835849663,
"grad_norm": 0.22807911038398743,
"learning_rate": 6.701465872208216e-05,
"loss": 0.5467,
"step": 84
},
{
"epoch": 0.1092194025056216,
"grad_norm": 0.1949082463979721,
"learning_rate": 6.623497346023418e-05,
"loss": 0.4612,
"step": 85
},
{
"epoch": 0.11050433665274655,
"grad_norm": 0.22015252709388733,
"learning_rate": 6.545084971874738e-05,
"loss": 0.4521,
"step": 86
},
{
"epoch": 0.11178927079987151,
"grad_norm": 0.19183842837810516,
"learning_rate": 6.466250186922325e-05,
"loss": 0.4972,
"step": 87
},
{
"epoch": 0.11307420494699646,
"grad_norm": 0.19982464611530304,
"learning_rate": 6.387014543809223e-05,
"loss": 0.4231,
"step": 88
},
{
"epoch": 0.11435913909412143,
"grad_norm": 0.19909873604774475,
"learning_rate": 6.307399704769099e-05,
"loss": 0.3886,
"step": 89
},
{
"epoch": 0.11564407324124638,
"grad_norm": 0.2026892900466919,
"learning_rate": 6.227427435703997e-05,
"loss": 0.4575,
"step": 90
},
{
"epoch": 0.11692900738837135,
"grad_norm": 0.23532840609550476,
"learning_rate": 6.147119600233758e-05,
"loss": 0.5401,
"step": 91
},
{
"epoch": 0.1182139415354963,
"grad_norm": 0.23304833471775055,
"learning_rate": 6.066498153718735e-05,
"loss": 0.4831,
"step": 92
},
{
"epoch": 0.11949887568262127,
"grad_norm": 0.24611881375312805,
"learning_rate": 5.985585137257401e-05,
"loss": 0.6095,
"step": 93
},
{
"epoch": 0.12078380982974622,
"grad_norm": 0.2635016441345215,
"learning_rate": 5.90440267166055e-05,
"loss": 0.4649,
"step": 94
},
{
"epoch": 0.12206874397687119,
"grad_norm": 0.24433648586273193,
"learning_rate": 5.8229729514036705e-05,
"loss": 0.3904,
"step": 95
},
{
"epoch": 0.12335367812399614,
"grad_norm": 0.2562096118927002,
"learning_rate": 5.74131823855921e-05,
"loss": 0.4316,
"step": 96
},
{
"epoch": 0.12463861227112111,
"grad_norm": 0.26686418056488037,
"learning_rate": 5.6594608567103456e-05,
"loss": 0.4104,
"step": 97
},
{
"epoch": 0.12592354641824607,
"grad_norm": 0.2688847482204437,
"learning_rate": 5.577423184847932e-05,
"loss": 0.4729,
"step": 98
},
{
"epoch": 0.127208480565371,
"grad_norm": 0.31322845816612244,
"learning_rate": 5.495227651252315e-05,
"loss": 0.5112,
"step": 99
},
{
"epoch": 0.12849341471249598,
"grad_norm": 0.4091756343841553,
"learning_rate": 5.4128967273616625e-05,
"loss": 0.7067,
"step": 100
},
{
"epoch": 0.12849341471249598,
"eval_loss": 0.43862655758857727,
"eval_runtime": 17.9241,
"eval_samples_per_second": 73.142,
"eval_steps_per_second": 18.299,
"step": 100
},
{
"epoch": 0.12977834885962095,
"grad_norm": 0.14330808818340302,
"learning_rate": 5.330452921628497e-05,
"loss": 0.386,
"step": 101
},
{
"epoch": 0.1310632830067459,
"grad_norm": 0.16079817712306976,
"learning_rate": 5.247918773366112e-05,
"loss": 0.3885,
"step": 102
},
{
"epoch": 0.13234821715387085,
"grad_norm": 0.16479887068271637,
"learning_rate": 5.165316846586541e-05,
"loss": 0.3111,
"step": 103
},
{
"epoch": 0.13363315130099582,
"grad_norm": 0.16989882290363312,
"learning_rate": 5.0826697238317935e-05,
"loss": 0.4324,
"step": 104
},
{
"epoch": 0.13491808544812078,
"grad_norm": 0.13408862054347992,
"learning_rate": 5e-05,
"loss": 0.4253,
"step": 105
},
{
"epoch": 0.13620301959524575,
"grad_norm": 0.1488083451986313,
"learning_rate": 4.917330276168208e-05,
"loss": 0.4273,
"step": 106
},
{
"epoch": 0.13748795374237072,
"grad_norm": 0.14648298919200897,
"learning_rate": 4.834683153413459e-05,
"loss": 0.4275,
"step": 107
},
{
"epoch": 0.13877288788949566,
"grad_norm": 0.17291350662708282,
"learning_rate": 4.7520812266338885e-05,
"loss": 0.4815,
"step": 108
},
{
"epoch": 0.14005782203662062,
"grad_norm": 0.14816628396511078,
"learning_rate": 4.669547078371504e-05,
"loss": 0.4189,
"step": 109
},
{
"epoch": 0.1413427561837456,
"grad_norm": 0.15040449798107147,
"learning_rate": 4.5871032726383386e-05,
"loss": 0.4407,
"step": 110
},
{
"epoch": 0.14262769033087055,
"grad_norm": 0.16331325471401215,
"learning_rate": 4.504772348747687e-05,
"loss": 0.3577,
"step": 111
},
{
"epoch": 0.1439126244779955,
"grad_norm": 0.1523352414369583,
"learning_rate": 4.4225768151520694e-05,
"loss": 0.4538,
"step": 112
},
{
"epoch": 0.14519755862512046,
"grad_norm": 0.15538831055164337,
"learning_rate": 4.3405391432896555e-05,
"loss": 0.4939,
"step": 113
},
{
"epoch": 0.14648249277224543,
"grad_norm": 0.17550474405288696,
"learning_rate": 4.2586817614407895e-05,
"loss": 0.3471,
"step": 114
},
{
"epoch": 0.1477674269193704,
"grad_norm": 0.16642166674137115,
"learning_rate": 4.17702704859633e-05,
"loss": 0.3598,
"step": 115
},
{
"epoch": 0.14905236106649533,
"grad_norm": 0.1335379034280777,
"learning_rate": 4.095597328339452e-05,
"loss": 0.4309,
"step": 116
},
{
"epoch": 0.1503372952136203,
"grad_norm": 0.1894882321357727,
"learning_rate": 4.0144148627425993e-05,
"loss": 0.3114,
"step": 117
},
{
"epoch": 0.15162222936074526,
"grad_norm": 0.18704906105995178,
"learning_rate": 3.933501846281267e-05,
"loss": 0.5558,
"step": 118
},
{
"epoch": 0.15290716350787023,
"grad_norm": 0.1511276811361313,
"learning_rate": 3.852880399766243e-05,
"loss": 0.4341,
"step": 119
},
{
"epoch": 0.15419209765499517,
"grad_norm": 0.14488156139850616,
"learning_rate": 3.772572564296005e-05,
"loss": 0.3553,
"step": 120
},
{
"epoch": 0.15547703180212014,
"grad_norm": 0.16023483872413635,
"learning_rate": 3.6926002952309016e-05,
"loss": 0.364,
"step": 121
},
{
"epoch": 0.1567619659492451,
"grad_norm": 0.15687936544418335,
"learning_rate": 3.612985456190778e-05,
"loss": 0.4378,
"step": 122
},
{
"epoch": 0.15804690009637007,
"grad_norm": 0.1499202400445938,
"learning_rate": 3.533749813077677e-05,
"loss": 0.3253,
"step": 123
},
{
"epoch": 0.159331834243495,
"grad_norm": 0.16471999883651733,
"learning_rate": 3.4549150281252636e-05,
"loss": 0.4204,
"step": 124
},
{
"epoch": 0.16061676839061997,
"grad_norm": 0.17482925951480865,
"learning_rate": 3.3765026539765834e-05,
"loss": 0.4361,
"step": 125
},
{
"epoch": 0.16190170253774494,
"grad_norm": 0.15067900717258453,
"learning_rate": 3.298534127791785e-05,
"loss": 0.4064,
"step": 126
},
{
"epoch": 0.1631866366848699,
"grad_norm": 0.15777434408664703,
"learning_rate": 3.221030765387417e-05,
"loss": 0.3329,
"step": 127
},
{
"epoch": 0.16447157083199487,
"grad_norm": 0.14798729121685028,
"learning_rate": 3.144013755408895e-05,
"loss": 0.2936,
"step": 128
},
{
"epoch": 0.1657565049791198,
"grad_norm": 0.1725255399942398,
"learning_rate": 3.0675041535377405e-05,
"loss": 0.3613,
"step": 129
},
{
"epoch": 0.16704143912624478,
"grad_norm": 0.18094204366207123,
"learning_rate": 2.991522876735154e-05,
"loss": 0.4062,
"step": 130
},
{
"epoch": 0.16832637327336974,
"grad_norm": 0.17012368142604828,
"learning_rate": 2.916090697523549e-05,
"loss": 0.4061,
"step": 131
},
{
"epoch": 0.1696113074204947,
"grad_norm": 0.19924713671207428,
"learning_rate": 2.8412282383075363e-05,
"loss": 0.4565,
"step": 132
},
{
"epoch": 0.17089624156761965,
"grad_norm": 0.19040025770664215,
"learning_rate": 2.766955965735968e-05,
"loss": 0.424,
"step": 133
},
{
"epoch": 0.17218117571474462,
"grad_norm": 0.1983742117881775,
"learning_rate": 2.693294185106562e-05,
"loss": 0.3611,
"step": 134
},
{
"epoch": 0.17346610986186958,
"grad_norm": 0.21056251227855682,
"learning_rate": 2.6202630348146324e-05,
"loss": 0.453,
"step": 135
},
{
"epoch": 0.17475104400899455,
"grad_norm": 0.2103775441646576,
"learning_rate": 2.547882480847461e-05,
"loss": 0.4283,
"step": 136
},
{
"epoch": 0.1760359781561195,
"grad_norm": 0.25432565808296204,
"learning_rate": 2.476172311325783e-05,
"loss": 0.4887,
"step": 137
},
{
"epoch": 0.17732091230324445,
"grad_norm": 0.2193983495235443,
"learning_rate": 2.405152131093926e-05,
"loss": 0.5363,
"step": 138
},
{
"epoch": 0.17860584645036942,
"grad_norm": 0.2406087964773178,
"learning_rate": 2.3348413563600325e-05,
"loss": 0.4488,
"step": 139
},
{
"epoch": 0.1798907805974944,
"grad_norm": 0.2359459102153778,
"learning_rate": 2.2652592093878666e-05,
"loss": 0.4299,
"step": 140
},
{
"epoch": 0.18117571474461933,
"grad_norm": 0.25858861207962036,
"learning_rate": 2.196424713241637e-05,
"loss": 0.4911,
"step": 141
},
{
"epoch": 0.1824606488917443,
"grad_norm": 0.25297558307647705,
"learning_rate": 2.128356686585282e-05,
"loss": 0.4162,
"step": 142
},
{
"epoch": 0.18374558303886926,
"grad_norm": 0.24798721075057983,
"learning_rate": 2.061073738537635e-05,
"loss": 0.5344,
"step": 143
},
{
"epoch": 0.18503051718599423,
"grad_norm": 0.24184654653072357,
"learning_rate": 1.9945942635848748e-05,
"loss": 0.5321,
"step": 144
},
{
"epoch": 0.18631545133311916,
"grad_norm": 0.23573166131973267,
"learning_rate": 1.928936436551661e-05,
"loss": 0.4314,
"step": 145
},
{
"epoch": 0.18760038548024413,
"grad_norm": 0.2928771376609802,
"learning_rate": 1.8641182076323148e-05,
"loss": 0.4242,
"step": 146
},
{
"epoch": 0.1888853196273691,
"grad_norm": 0.274859219789505,
"learning_rate": 1.800157297483417e-05,
"loss": 0.3875,
"step": 147
},
{
"epoch": 0.19017025377449406,
"grad_norm": 0.3347971737384796,
"learning_rate": 1.7370711923791567e-05,
"loss": 0.4947,
"step": 148
},
{
"epoch": 0.19145518792161903,
"grad_norm": 0.3627684712409973,
"learning_rate": 1.6748771394307585e-05,
"loss": 0.6645,
"step": 149
},
{
"epoch": 0.19274012206874397,
"grad_norm": 0.48059049248695374,
"learning_rate": 1.6135921418712956e-05,
"loss": 0.7234,
"step": 150
},
{
"epoch": 0.19274012206874397,
"eval_loss": 0.427529901266098,
"eval_runtime": 17.9604,
"eval_samples_per_second": 72.994,
"eval_steps_per_second": 18.262,
"step": 150
},
{
"epoch": 0.19402505621586894,
"grad_norm": 0.16127873957157135,
"learning_rate": 1.553232954407171e-05,
"loss": 0.352,
"step": 151
},
{
"epoch": 0.1953099903629939,
"grad_norm": 0.1213570386171341,
"learning_rate": 1.4938160786375572e-05,
"loss": 0.284,
"step": 152
},
{
"epoch": 0.19659492451011887,
"grad_norm": 0.12937839329242706,
"learning_rate": 1.435357758543015e-05,
"loss": 0.3905,
"step": 153
},
{
"epoch": 0.1978798586572438,
"grad_norm": 0.13650377094745636,
"learning_rate": 1.3778739760445552e-05,
"loss": 0.3244,
"step": 154
},
{
"epoch": 0.19916479280436877,
"grad_norm": 0.1445707529783249,
"learning_rate": 1.3213804466343421e-05,
"loss": 0.3777,
"step": 155
},
{
"epoch": 0.20044972695149374,
"grad_norm": 0.15034304559230804,
"learning_rate": 1.2658926150792322e-05,
"loss": 0.4105,
"step": 156
},
{
"epoch": 0.2017346610986187,
"grad_norm": 0.13963250815868378,
"learning_rate": 1.2114256511983274e-05,
"loss": 0.3949,
"step": 157
},
{
"epoch": 0.20301959524574364,
"grad_norm": 0.17511266469955444,
"learning_rate": 1.157994445715706e-05,
"loss": 0.4454,
"step": 158
},
{
"epoch": 0.2043045293928686,
"grad_norm": 0.18399956822395325,
"learning_rate": 1.1056136061894384e-05,
"loss": 0.4273,
"step": 159
},
{
"epoch": 0.20558946353999358,
"grad_norm": 0.17723242938518524,
"learning_rate": 1.0542974530180327e-05,
"loss": 0.4896,
"step": 160
},
{
"epoch": 0.20687439768711854,
"grad_norm": 0.15420684218406677,
"learning_rate": 1.0040600155253765e-05,
"loss": 0.2953,
"step": 161
},
{
"epoch": 0.20815933183424348,
"grad_norm": 0.16175687313079834,
"learning_rate": 9.549150281252633e-06,
"loss": 0.4442,
"step": 162
},
{
"epoch": 0.20944426598136845,
"grad_norm": 0.1726570576429367,
"learning_rate": 9.068759265665384e-06,
"loss": 0.3781,
"step": 163
},
{
"epoch": 0.21072920012849342,
"grad_norm": 0.18467459082603455,
"learning_rate": 8.599558442598998e-06,
"loss": 0.4104,
"step": 164
},
{
"epoch": 0.21201413427561838,
"grad_norm": 0.1754767894744873,
"learning_rate": 8.141676086873572e-06,
"loss": 0.3249,
"step": 165
},
{
"epoch": 0.21329906842274332,
"grad_norm": 0.18807180225849152,
"learning_rate": 7.695237378953223e-06,
"loss": 0.5624,
"step": 166
},
{
"epoch": 0.2145840025698683,
"grad_norm": 0.18431425094604492,
"learning_rate": 7.260364370723044e-06,
"loss": 0.4022,
"step": 167
},
{
"epoch": 0.21586893671699325,
"grad_norm": 0.13891683518886566,
"learning_rate": 6.837175952121306e-06,
"loss": 0.3189,
"step": 168
},
{
"epoch": 0.21715387086411822,
"grad_norm": 0.16312813758850098,
"learning_rate": 6.425787818636131e-06,
"loss": 0.457,
"step": 169
},
{
"epoch": 0.2184388050112432,
"grad_norm": 0.1642637699842453,
"learning_rate": 6.026312439675552e-06,
"loss": 0.4609,
"step": 170
},
{
"epoch": 0.21972373915836813,
"grad_norm": 0.1412889063358307,
"learning_rate": 5.6388590278194096e-06,
"loss": 0.4007,
"step": 171
},
{
"epoch": 0.2210086733054931,
"grad_norm": 0.18311673402786255,
"learning_rate": 5.263533508961827e-06,
"loss": 0.5766,
"step": 172
},
{
"epoch": 0.22229360745261806,
"grad_norm": 0.15250040590763092,
"learning_rate": 4.900438493352055e-06,
"loss": 0.3739,
"step": 173
},
{
"epoch": 0.22357854159974302,
"grad_norm": 0.17925508320331573,
"learning_rate": 4.549673247541875e-06,
"loss": 0.399,
"step": 174
},
{
"epoch": 0.22486347574686796,
"grad_norm": 0.1857130378484726,
"learning_rate": 4.2113336672471245e-06,
"loss": 0.4792,
"step": 175
},
{
"epoch": 0.22614840989399293,
"grad_norm": 0.14979122579097748,
"learning_rate": 3.885512251130763e-06,
"loss": 0.3605,
"step": 176
},
{
"epoch": 0.2274333440411179,
"grad_norm": 0.18381118774414062,
"learning_rate": 3.5722980755146517e-06,
"loss": 0.5163,
"step": 177
},
{
"epoch": 0.22871827818824286,
"grad_norm": 0.19898559153079987,
"learning_rate": 3.271776770026963e-06,
"loss": 0.4975,
"step": 178
},
{
"epoch": 0.2300032123353678,
"grad_norm": 0.17745105922222137,
"learning_rate": 2.9840304941919415e-06,
"loss": 0.4046,
"step": 179
},
{
"epoch": 0.23128814648249277,
"grad_norm": 0.1690097600221634,
"learning_rate": 2.7091379149682685e-06,
"loss": 0.4185,
"step": 180
},
{
"epoch": 0.23257308062961773,
"grad_norm": 0.1791466772556305,
"learning_rate": 2.4471741852423237e-06,
"loss": 0.394,
"step": 181
},
{
"epoch": 0.2338580147767427,
"grad_norm": 0.1853507161140442,
"learning_rate": 2.1982109232821178e-06,
"loss": 0.3164,
"step": 182
},
{
"epoch": 0.23514294892386764,
"grad_norm": 0.20235706865787506,
"learning_rate": 1.962316193157593e-06,
"loss": 0.4857,
"step": 183
},
{
"epoch": 0.2364278830709926,
"grad_norm": 0.2068977952003479,
"learning_rate": 1.7395544861325718e-06,
"loss": 0.4459,
"step": 184
},
{
"epoch": 0.23771281721811757,
"grad_norm": 0.20159567892551422,
"learning_rate": 1.5299867030334814e-06,
"loss": 0.4653,
"step": 185
},
{
"epoch": 0.23899775136524254,
"grad_norm": 0.18035966157913208,
"learning_rate": 1.333670137599713e-06,
"loss": 0.3182,
"step": 186
},
{
"epoch": 0.24028268551236748,
"grad_norm": 0.19839312136173248,
"learning_rate": 1.1506584608200367e-06,
"loss": 0.4436,
"step": 187
},
{
"epoch": 0.24156761965949244,
"grad_norm": 0.18996292352676392,
"learning_rate": 9.810017062595322e-07,
"loss": 0.359,
"step": 188
},
{
"epoch": 0.2428525538066174,
"grad_norm": 0.2028089016675949,
"learning_rate": 8.247462563808817e-07,
"loss": 0.4321,
"step": 189
},
{
"epoch": 0.24413748795374238,
"grad_norm": 0.2218060940504074,
"learning_rate": 6.819348298638839e-07,
"loss": 0.4457,
"step": 190
},
{
"epoch": 0.24542242210086734,
"grad_norm": 0.2336476743221283,
"learning_rate": 5.526064699265753e-07,
"loss": 0.4117,
"step": 191
},
{
"epoch": 0.24670735624799228,
"grad_norm": 0.27392345666885376,
"learning_rate": 4.367965336512403e-07,
"loss": 0.5749,
"step": 192
},
{
"epoch": 0.24799229039511725,
"grad_norm": 0.2659059166908264,
"learning_rate": 3.3453668231809286e-07,
"loss": 0.436,
"step": 193
},
{
"epoch": 0.24927722454224222,
"grad_norm": 0.232716366648674,
"learning_rate": 2.458548727494292e-07,
"loss": 0.4579,
"step": 194
},
{
"epoch": 0.25056215868936715,
"grad_norm": 0.2634781002998352,
"learning_rate": 1.7077534966650766e-07,
"loss": 0.3846,
"step": 195
},
{
"epoch": 0.25184709283649215,
"grad_norm": 0.23306170105934143,
"learning_rate": 1.0931863906127327e-07,
"loss": 0.4111,
"step": 196
},
{
"epoch": 0.2531320269836171,
"grad_norm": 0.31027573347091675,
"learning_rate": 6.150154258476315e-08,
"loss": 0.4865,
"step": 197
},
{
"epoch": 0.254416961130742,
"grad_norm": 0.35948267579078674,
"learning_rate": 2.7337132953697554e-08,
"loss": 0.5539,
"step": 198
},
{
"epoch": 0.255701895277867,
"grad_norm": 0.32407116889953613,
"learning_rate": 6.834750376549792e-09,
"loss": 0.5199,
"step": 199
},
{
"epoch": 0.25698682942499196,
"grad_norm": 0.47398993372917175,
"learning_rate": 0.0,
"loss": 0.6213,
"step": 200
},
{
"epoch": 0.25698682942499196,
"eval_loss": 0.4233092665672302,
"eval_runtime": 17.8979,
"eval_samples_per_second": 73.249,
"eval_steps_per_second": 18.326,
"step": 200
}
],
"logging_steps": 1,
"max_steps": 200,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 50,
"stateful_callbacks": {
"EarlyStoppingCallback": {
"args": {
"early_stopping_patience": 5,
"early_stopping_threshold": 0.0
},
"attributes": {
"early_stopping_patience_counter": 0
}
},
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 5061780452671488.0,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}