{
"best_metric": 0.41522201895713806,
"best_model_checkpoint": "miner_id_24/checkpoint-150",
"epoch": 0.029414648494950485,
"eval_steps": 50,
"global_step": 150,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.00019609765663300324,
"grad_norm": 7.468555927276611,
"learning_rate": 1.013e-05,
"loss": 1.5524,
"step": 1
},
{
"epoch": 0.00019609765663300324,
"eval_loss": 1.2237062454223633,
"eval_runtime": 243.2276,
"eval_samples_per_second": 8.831,
"eval_steps_per_second": 2.208,
"step": 1
},
{
"epoch": 0.0003921953132660065,
"grad_norm": 7.6385884284973145,
"learning_rate": 2.026e-05,
"loss": 1.7279,
"step": 2
},
{
"epoch": 0.0005882929698990097,
"grad_norm": 10.681758880615234,
"learning_rate": 3.039e-05,
"loss": 1.5164,
"step": 3
},
{
"epoch": 0.000784390626532013,
"grad_norm": 9.887133598327637,
"learning_rate": 4.052e-05,
"loss": 1.1316,
"step": 4
},
{
"epoch": 0.0009804882831650162,
"grad_norm": 8.374829292297363,
"learning_rate": 5.065e-05,
"loss": 1.1586,
"step": 5
},
{
"epoch": 0.0011765859397980193,
"grad_norm": 7.310905456542969,
"learning_rate": 6.078e-05,
"loss": 0.8968,
"step": 6
},
{
"epoch": 0.0013726835964310226,
"grad_norm": 10.953272819519043,
"learning_rate": 7.091e-05,
"loss": 1.1154,
"step": 7
},
{
"epoch": 0.001568781253064026,
"grad_norm": 7.230965614318848,
"learning_rate": 8.104e-05,
"loss": 1.0405,
"step": 8
},
{
"epoch": 0.0017648789096970292,
"grad_norm": 8.350433349609375,
"learning_rate": 9.117e-05,
"loss": 1.2122,
"step": 9
},
{
"epoch": 0.0019609765663300325,
"grad_norm": 6.164297580718994,
"learning_rate": 0.0001013,
"loss": 0.9343,
"step": 10
},
{
"epoch": 0.0021570742229630358,
"grad_norm": 8.079084396362305,
"learning_rate": 0.00010076684210526316,
"loss": 0.7113,
"step": 11
},
{
"epoch": 0.0023531718795960386,
"grad_norm": 9.09571361541748,
"learning_rate": 0.0001002336842105263,
"loss": 1.0389,
"step": 12
},
{
"epoch": 0.002549269536229042,
"grad_norm": 9.438547134399414,
"learning_rate": 9.970052631578946e-05,
"loss": 1.2697,
"step": 13
},
{
"epoch": 0.002745367192862045,
"grad_norm": 9.123111724853516,
"learning_rate": 9.916736842105263e-05,
"loss": 1.2704,
"step": 14
},
{
"epoch": 0.0029414648494950485,
"grad_norm": 7.670680999755859,
"learning_rate": 9.863421052631579e-05,
"loss": 0.9233,
"step": 15
},
{
"epoch": 0.003137562506128052,
"grad_norm": 6.911023139953613,
"learning_rate": 9.810105263157895e-05,
"loss": 0.8698,
"step": 16
},
{
"epoch": 0.003333660162761055,
"grad_norm": 9.797855377197266,
"learning_rate": 9.756789473684211e-05,
"loss": 1.1806,
"step": 17
},
{
"epoch": 0.0035297578193940584,
"grad_norm": 8.471273422241211,
"learning_rate": 9.703473684210525e-05,
"loss": 1.0096,
"step": 18
},
{
"epoch": 0.0037258554760270617,
"grad_norm": 12.676338195800781,
"learning_rate": 9.650157894736842e-05,
"loss": 1.1519,
"step": 19
},
{
"epoch": 0.003921953132660065,
"grad_norm": 7.387173652648926,
"learning_rate": 9.596842105263158e-05,
"loss": 0.7844,
"step": 20
},
{
"epoch": 0.004118050789293068,
"grad_norm": 35.67609786987305,
"learning_rate": 9.543526315789474e-05,
"loss": 0.8251,
"step": 21
},
{
"epoch": 0.0043141484459260715,
"grad_norm": 21.55261993408203,
"learning_rate": 9.49021052631579e-05,
"loss": 0.5756,
"step": 22
},
{
"epoch": 0.004510246102559075,
"grad_norm": 17.739107131958008,
"learning_rate": 9.436894736842105e-05,
"loss": 1.4716,
"step": 23
},
{
"epoch": 0.004706343759192077,
"grad_norm": 16.76211166381836,
"learning_rate": 9.38357894736842e-05,
"loss": 1.2926,
"step": 24
},
{
"epoch": 0.0049024414158250805,
"grad_norm": 19.425182342529297,
"learning_rate": 9.330263157894737e-05,
"loss": 1.2513,
"step": 25
},
{
"epoch": 0.005098539072458084,
"grad_norm": 5.306024074554443,
"learning_rate": 9.276947368421051e-05,
"loss": 0.4769,
"step": 26
},
{
"epoch": 0.005294636729091087,
"grad_norm": 15.570332527160645,
"learning_rate": 9.223631578947369e-05,
"loss": 0.869,
"step": 27
},
{
"epoch": 0.00549073438572409,
"grad_norm": 16.34988784790039,
"learning_rate": 9.170315789473684e-05,
"loss": 1.0487,
"step": 28
},
{
"epoch": 0.005686832042357094,
"grad_norm": 7.214115142822266,
"learning_rate": 9.117e-05,
"loss": 0.6652,
"step": 29
},
{
"epoch": 0.005882929698990097,
"grad_norm": 8.671172142028809,
"learning_rate": 9.063684210526316e-05,
"loss": 0.5766,
"step": 30
},
{
"epoch": 0.0060790273556231,
"grad_norm": 14.24565315246582,
"learning_rate": 9.010368421052632e-05,
"loss": 0.7564,
"step": 31
},
{
"epoch": 0.006275125012256104,
"grad_norm": 9.223489761352539,
"learning_rate": 8.957052631578946e-05,
"loss": 0.5476,
"step": 32
},
{
"epoch": 0.006471222668889107,
"grad_norm": 12.95165729522705,
"learning_rate": 8.903736842105263e-05,
"loss": 0.7006,
"step": 33
},
{
"epoch": 0.00666732032552211,
"grad_norm": 11.666534423828125,
"learning_rate": 8.850421052631579e-05,
"loss": 0.7866,
"step": 34
},
{
"epoch": 0.0068634179821551134,
"grad_norm": 10.406961441040039,
"learning_rate": 8.797105263157895e-05,
"loss": 0.7515,
"step": 35
},
{
"epoch": 0.007059515638788117,
"grad_norm": 13.311921119689941,
"learning_rate": 8.743789473684211e-05,
"loss": 0.6337,
"step": 36
},
{
"epoch": 0.00725561329542112,
"grad_norm": 11.749580383300781,
"learning_rate": 8.690473684210526e-05,
"loss": 0.8481,
"step": 37
},
{
"epoch": 0.007451710952054123,
"grad_norm": 6.961912155151367,
"learning_rate": 8.637157894736842e-05,
"loss": 0.6709,
"step": 38
},
{
"epoch": 0.007647808608687127,
"grad_norm": 22.275279998779297,
"learning_rate": 8.583842105263158e-05,
"loss": 1.2376,
"step": 39
},
{
"epoch": 0.00784390626532013,
"grad_norm": 67.40861511230469,
"learning_rate": 8.530526315789472e-05,
"loss": 1.8463,
"step": 40
},
{
"epoch": 0.008040003921953132,
"grad_norm": 11.63316822052002,
"learning_rate": 8.47721052631579e-05,
"loss": 1.0184,
"step": 41
},
{
"epoch": 0.008236101578586136,
"grad_norm": 33.797874450683594,
"learning_rate": 8.423894736842105e-05,
"loss": 1.7594,
"step": 42
},
{
"epoch": 0.008432199235219139,
"grad_norm": 14.965246200561523,
"learning_rate": 8.37057894736842e-05,
"loss": 0.9693,
"step": 43
},
{
"epoch": 0.008628296891852143,
"grad_norm": 10.041715621948242,
"learning_rate": 8.317263157894737e-05,
"loss": 0.7927,
"step": 44
},
{
"epoch": 0.008824394548485145,
"grad_norm": 22.70721435546875,
"learning_rate": 8.263947368421053e-05,
"loss": 1.3944,
"step": 45
},
{
"epoch": 0.00902049220511815,
"grad_norm": 49.67770767211914,
"learning_rate": 8.210631578947368e-05,
"loss": 1.8496,
"step": 46
},
{
"epoch": 0.009216589861751152,
"grad_norm": 23.283432006835938,
"learning_rate": 8.157315789473684e-05,
"loss": 1.057,
"step": 47
},
{
"epoch": 0.009412687518384154,
"grad_norm": 23.300397872924805,
"learning_rate": 8.104e-05,
"loss": 1.0415,
"step": 48
},
{
"epoch": 0.009608785175017159,
"grad_norm": 14.141422271728516,
"learning_rate": 8.050684210526316e-05,
"loss": 1.0551,
"step": 49
},
{
"epoch": 0.009804882831650161,
"grad_norm": 45.43354415893555,
"learning_rate": 7.997368421052632e-05,
"loss": 2.293,
"step": 50
},
{
"epoch": 0.009804882831650161,
"eval_loss": 0.5220252871513367,
"eval_runtime": 242.92,
"eval_samples_per_second": 8.842,
"eval_steps_per_second": 2.211,
"step": 50
},
{
"epoch": 0.010000980488283165,
"grad_norm": 6.259135723114014,
"learning_rate": 7.944052631578947e-05,
"loss": 1.9433,
"step": 51
},
{
"epoch": 0.010197078144916168,
"grad_norm": 3.87833833694458,
"learning_rate": 7.890736842105263e-05,
"loss": 0.925,
"step": 52
},
{
"epoch": 0.010393175801549172,
"grad_norm": 3.702810764312744,
"learning_rate": 7.837421052631579e-05,
"loss": 0.8375,
"step": 53
},
{
"epoch": 0.010589273458182174,
"grad_norm": 3.9069197177886963,
"learning_rate": 7.784105263157893e-05,
"loss": 0.7016,
"step": 54
},
{
"epoch": 0.010785371114815178,
"grad_norm": 4.089141845703125,
"learning_rate": 7.730789473684211e-05,
"loss": 0.736,
"step": 55
},
{
"epoch": 0.01098146877144818,
"grad_norm": 4.299583435058594,
"learning_rate": 7.677473684210526e-05,
"loss": 0.9623,
"step": 56
},
{
"epoch": 0.011177566428081185,
"grad_norm": 4.404727458953857,
"learning_rate": 7.624157894736842e-05,
"loss": 0.9444,
"step": 57
},
{
"epoch": 0.011373664084714187,
"grad_norm": 3.662179470062256,
"learning_rate": 7.570842105263158e-05,
"loss": 0.7267,
"step": 58
},
{
"epoch": 0.011569761741347192,
"grad_norm": 5.034608364105225,
"learning_rate": 7.517526315789474e-05,
"loss": 0.8414,
"step": 59
},
{
"epoch": 0.011765859397980194,
"grad_norm": 4.015024185180664,
"learning_rate": 7.464210526315789e-05,
"loss": 0.7682,
"step": 60
},
{
"epoch": 0.011961957054613198,
"grad_norm": 3.8569846153259277,
"learning_rate": 7.410894736842106e-05,
"loss": 0.7419,
"step": 61
},
{
"epoch": 0.0121580547112462,
"grad_norm": 5.50112247467041,
"learning_rate": 7.35757894736842e-05,
"loss": 0.789,
"step": 62
},
{
"epoch": 0.012354152367879203,
"grad_norm": 4.346161842346191,
"learning_rate": 7.304263157894737e-05,
"loss": 0.7292,
"step": 63
},
{
"epoch": 0.012550250024512207,
"grad_norm": 12.474294662475586,
"learning_rate": 7.250947368421053e-05,
"loss": 0.8405,
"step": 64
},
{
"epoch": 0.01274634768114521,
"grad_norm": 6.171235084533691,
"learning_rate": 7.197631578947368e-05,
"loss": 0.9972,
"step": 65
},
{
"epoch": 0.012942445337778214,
"grad_norm": 10.865174293518066,
"learning_rate": 7.144315789473684e-05,
"loss": 0.6531,
"step": 66
},
{
"epoch": 0.013138542994411216,
"grad_norm": 5.450807094573975,
"learning_rate": 7.091e-05,
"loss": 0.9603,
"step": 67
},
{
"epoch": 0.01333464065104422,
"grad_norm": 5.7336015701293945,
"learning_rate": 7.037684210526316e-05,
"loss": 0.9774,
"step": 68
},
{
"epoch": 0.013530738307677223,
"grad_norm": 5.6365156173706055,
"learning_rate": 6.984368421052632e-05,
"loss": 0.7666,
"step": 69
},
{
"epoch": 0.013726835964310227,
"grad_norm": 7.478610992431641,
"learning_rate": 6.931052631578947e-05,
"loss": 0.7726,
"step": 70
},
{
"epoch": 0.01392293362094323,
"grad_norm": 5.047933578491211,
"learning_rate": 6.877736842105263e-05,
"loss": 0.5822,
"step": 71
},
{
"epoch": 0.014119031277576233,
"grad_norm": 6.480734348297119,
"learning_rate": 6.824421052631579e-05,
"loss": 0.7629,
"step": 72
},
{
"epoch": 0.014315128934209236,
"grad_norm": 7.191463470458984,
"learning_rate": 6.771105263157895e-05,
"loss": 0.6803,
"step": 73
},
{
"epoch": 0.01451122659084224,
"grad_norm": 4.883853435516357,
"learning_rate": 6.71778947368421e-05,
"loss": 0.6785,
"step": 74
},
{
"epoch": 0.014707324247475242,
"grad_norm": 6.941567420959473,
"learning_rate": 6.664473684210527e-05,
"loss": 0.8324,
"step": 75
},
{
"epoch": 0.014903421904108247,
"grad_norm": 6.098438739776611,
"learning_rate": 6.611157894736842e-05,
"loss": 0.8325,
"step": 76
},
{
"epoch": 0.015099519560741249,
"grad_norm": 8.49299144744873,
"learning_rate": 6.557842105263158e-05,
"loss": 1.1349,
"step": 77
},
{
"epoch": 0.015295617217374253,
"grad_norm": 4.6161627769470215,
"learning_rate": 6.504526315789474e-05,
"loss": 0.3797,
"step": 78
},
{
"epoch": 0.015491714874007256,
"grad_norm": 5.683040142059326,
"learning_rate": 6.451210526315789e-05,
"loss": 0.4784,
"step": 79
},
{
"epoch": 0.01568781253064026,
"grad_norm": 7.138983249664307,
"learning_rate": 6.397894736842105e-05,
"loss": 0.6783,
"step": 80
},
{
"epoch": 0.01588391018727326,
"grad_norm": 5.301764488220215,
"learning_rate": 6.344578947368421e-05,
"loss": 0.4013,
"step": 81
},
{
"epoch": 0.016080007843906265,
"grad_norm": 5.394726753234863,
"learning_rate": 6.291263157894737e-05,
"loss": 0.472,
"step": 82
},
{
"epoch": 0.01627610550053927,
"grad_norm": 7.571673393249512,
"learning_rate": 6.237947368421053e-05,
"loss": 0.6222,
"step": 83
},
{
"epoch": 0.016472203157172273,
"grad_norm": 13.990236282348633,
"learning_rate": 6.184631578947368e-05,
"loss": 1.5111,
"step": 84
},
{
"epoch": 0.016668300813805274,
"grad_norm": 9.939437866210938,
"learning_rate": 6.131315789473684e-05,
"loss": 1.2702,
"step": 85
},
{
"epoch": 0.016864398470438278,
"grad_norm": 11.926310539245605,
"learning_rate": 6.078e-05,
"loss": 0.997,
"step": 86
},
{
"epoch": 0.017060496127071282,
"grad_norm": 7.9521379470825195,
"learning_rate": 6.024684210526315e-05,
"loss": 0.8637,
"step": 87
},
{
"epoch": 0.017256593783704286,
"grad_norm": 11.58044147491455,
"learning_rate": 5.9713684210526305e-05,
"loss": 1.3852,
"step": 88
},
{
"epoch": 0.017452691440337287,
"grad_norm": 9.782970428466797,
"learning_rate": 5.918052631578947e-05,
"loss": 0.8799,
"step": 89
},
{
"epoch": 0.01764878909697029,
"grad_norm": 10.114582061767578,
"learning_rate": 5.8647368421052634e-05,
"loss": 1.1961,
"step": 90
},
{
"epoch": 0.017844886753603295,
"grad_norm": 12.574527740478516,
"learning_rate": 5.811421052631579e-05,
"loss": 1.0894,
"step": 91
},
{
"epoch": 0.0180409844102363,
"grad_norm": 14.899230003356934,
"learning_rate": 5.758105263157894e-05,
"loss": 1.2862,
"step": 92
},
{
"epoch": 0.0182370820668693,
"grad_norm": 8.636293411254883,
"learning_rate": 5.70478947368421e-05,
"loss": 0.854,
"step": 93
},
{
"epoch": 0.018433179723502304,
"grad_norm": 11.84518814086914,
"learning_rate": 5.6514736842105256e-05,
"loss": 0.6895,
"step": 94
},
{
"epoch": 0.01862927738013531,
"grad_norm": 28.145517349243164,
"learning_rate": 5.5981578947368424e-05,
"loss": 1.5005,
"step": 95
},
{
"epoch": 0.01882537503676831,
"grad_norm": 13.958025932312012,
"learning_rate": 5.544842105263158e-05,
"loss": 0.4707,
"step": 96
},
{
"epoch": 0.019021472693401313,
"grad_norm": 14.421865463256836,
"learning_rate": 5.491526315789474e-05,
"loss": 1.3302,
"step": 97
},
{
"epoch": 0.019217570350034317,
"grad_norm": 19.330612182617188,
"learning_rate": 5.438210526315789e-05,
"loss": 1.0502,
"step": 98
},
{
"epoch": 0.01941366800666732,
"grad_norm": 16.62639045715332,
"learning_rate": 5.384894736842105e-05,
"loss": 1.2102,
"step": 99
},
{
"epoch": 0.019609765663300322,
"grad_norm": 26.352754592895508,
"learning_rate": 5.331578947368421e-05,
"loss": 1.9532,
"step": 100
},
{
"epoch": 0.019609765663300322,
"eval_loss": 0.4575375020503998,
"eval_runtime": 243.2626,
"eval_samples_per_second": 8.83,
"eval_steps_per_second": 2.207,
"step": 100
},
{
"epoch": 0.019805863319933326,
"grad_norm": 4.1386566162109375,
"learning_rate": 5.278263157894736e-05,
"loss": 1.3238,
"step": 101
},
{
"epoch": 0.02000196097656633,
"grad_norm": 3.137593984603882,
"learning_rate": 5.224947368421053e-05,
"loss": 0.9319,
"step": 102
},
{
"epoch": 0.020198058633199335,
"grad_norm": 3.7397894859313965,
"learning_rate": 5.171631578947368e-05,
"loss": 0.9041,
"step": 103
},
{
"epoch": 0.020394156289832335,
"grad_norm": 2.7134053707122803,
"learning_rate": 5.1183157894736844e-05,
"loss": 0.7395,
"step": 104
},
{
"epoch": 0.02059025394646534,
"grad_norm": 4.872076988220215,
"learning_rate": 5.065e-05,
"loss": 1.2704,
"step": 105
},
{
"epoch": 0.020786351603098344,
"grad_norm": 4.066561698913574,
"learning_rate": 5.011684210526315e-05,
"loss": 0.8238,
"step": 106
},
{
"epoch": 0.020982449259731348,
"grad_norm": 2.926990032196045,
"learning_rate": 4.958368421052631e-05,
"loss": 0.6825,
"step": 107
},
{
"epoch": 0.02117854691636435,
"grad_norm": 3.305487632751465,
"learning_rate": 4.9050526315789473e-05,
"loss": 0.6813,
"step": 108
},
{
"epoch": 0.021374644572997353,
"grad_norm": 5.23881196975708,
"learning_rate": 4.851736842105263e-05,
"loss": 0.9895,
"step": 109
},
{
"epoch": 0.021570742229630357,
"grad_norm": 3.481923818588257,
"learning_rate": 4.798421052631579e-05,
"loss": 0.6055,
"step": 110
},
{
"epoch": 0.021766839886263357,
"grad_norm": 2.770195722579956,
"learning_rate": 4.745105263157895e-05,
"loss": 0.4307,
"step": 111
},
{
"epoch": 0.02196293754289636,
"grad_norm": 5.2551398277282715,
"learning_rate": 4.69178947368421e-05,
"loss": 1.2838,
"step": 112
},
{
"epoch": 0.022159035199529366,
"grad_norm": 3.3223180770874023,
"learning_rate": 4.638473684210526e-05,
"loss": 0.7093,
"step": 113
},
{
"epoch": 0.02235513285616237,
"grad_norm": 4.730376720428467,
"learning_rate": 4.585157894736842e-05,
"loss": 0.8399,
"step": 114
},
{
"epoch": 0.02255123051279537,
"grad_norm": 5.192849636077881,
"learning_rate": 4.531842105263158e-05,
"loss": 0.7188,
"step": 115
},
{
"epoch": 0.022747328169428375,
"grad_norm": 3.2309155464172363,
"learning_rate": 4.478526315789473e-05,
"loss": 0.5142,
"step": 116
},
{
"epoch": 0.02294342582606138,
"grad_norm": 4.466787815093994,
"learning_rate": 4.425210526315789e-05,
"loss": 0.7535,
"step": 117
},
{
"epoch": 0.023139523482694383,
"grad_norm": 3.1393327713012695,
"learning_rate": 4.3718947368421054e-05,
"loss": 0.5282,
"step": 118
},
{
"epoch": 0.023335621139327384,
"grad_norm": 5.505728721618652,
"learning_rate": 4.318578947368421e-05,
"loss": 0.9189,
"step": 119
},
{
"epoch": 0.023531718795960388,
"grad_norm": 3.670624017715454,
"learning_rate": 4.265263157894736e-05,
"loss": 0.7202,
"step": 120
},
{
"epoch": 0.023727816452593392,
"grad_norm": 5.582329750061035,
"learning_rate": 4.211947368421052e-05,
"loss": 0.716,
"step": 121
},
{
"epoch": 0.023923914109226396,
"grad_norm": 4.540538787841797,
"learning_rate": 4.1586315789473684e-05,
"loss": 0.5689,
"step": 122
},
{
"epoch": 0.024120011765859397,
"grad_norm": 6.913193225860596,
"learning_rate": 4.105315789473684e-05,
"loss": 0.985,
"step": 123
},
{
"epoch": 0.0243161094224924,
"grad_norm": 6.866704940795898,
"learning_rate": 4.052e-05,
"loss": 0.7173,
"step": 124
},
{
"epoch": 0.024512207079125405,
"grad_norm": 8.457905769348145,
"learning_rate": 3.998684210526316e-05,
"loss": 0.7499,
"step": 125
},
{
"epoch": 0.024708304735758406,
"grad_norm": 6.656124591827393,
"learning_rate": 3.945368421052631e-05,
"loss": 1.0133,
"step": 126
},
{
"epoch": 0.02490440239239141,
"grad_norm": 6.976663112640381,
"learning_rate": 3.892052631578947e-05,
"loss": 0.8715,
"step": 127
},
{
"epoch": 0.025100500049024414,
"grad_norm": 8.578405380249023,
"learning_rate": 3.838736842105263e-05,
"loss": 0.6424,
"step": 128
},
{
"epoch": 0.02529659770565742,
"grad_norm": 7.345449924468994,
"learning_rate": 3.785421052631579e-05,
"loss": 0.8932,
"step": 129
},
{
"epoch": 0.02549269536229042,
"grad_norm": 5.459024906158447,
"learning_rate": 3.732105263157894e-05,
"loss": 0.5799,
"step": 130
},
{
"epoch": 0.025688793018923423,
"grad_norm": 6.494460105895996,
"learning_rate": 3.67878947368421e-05,
"loss": 0.4163,
"step": 131
},
{
"epoch": 0.025884890675556427,
"grad_norm": 5.555450916290283,
"learning_rate": 3.6254736842105264e-05,
"loss": 0.5469,
"step": 132
},
{
"epoch": 0.02608098833218943,
"grad_norm": 6.8839263916015625,
"learning_rate": 3.572157894736842e-05,
"loss": 0.9823,
"step": 133
},
{
"epoch": 0.026277085988822432,
"grad_norm": 12.403116226196289,
"learning_rate": 3.518842105263158e-05,
"loss": 0.6406,
"step": 134
},
{
"epoch": 0.026473183645455436,
"grad_norm": 7.836792469024658,
"learning_rate": 3.465526315789473e-05,
"loss": 1.1348,
"step": 135
},
{
"epoch": 0.02666928130208844,
"grad_norm": 7.983771800994873,
"learning_rate": 3.4122105263157894e-05,
"loss": 0.803,
"step": 136
},
{
"epoch": 0.026865378958721445,
"grad_norm": 7.406615734100342,
"learning_rate": 3.358894736842105e-05,
"loss": 0.6877,
"step": 137
},
{
"epoch": 0.027061476615354445,
"grad_norm": 7.180506706237793,
"learning_rate": 3.305578947368421e-05,
"loss": 0.9407,
"step": 138
},
{
"epoch": 0.02725757427198745,
"grad_norm": 10.637256622314453,
"learning_rate": 3.252263157894737e-05,
"loss": 1.0234,
"step": 139
},
{
"epoch": 0.027453671928620454,
"grad_norm": 12.093735694885254,
"learning_rate": 3.198947368421052e-05,
"loss": 0.9802,
"step": 140
},
{
"epoch": 0.027649769585253458,
"grad_norm": 11.916462898254395,
"learning_rate": 3.1456315789473684e-05,
"loss": 0.9617,
"step": 141
},
{
"epoch": 0.02784586724188646,
"grad_norm": 9.011691093444824,
"learning_rate": 3.092315789473684e-05,
"loss": 0.4959,
"step": 142
},
{
"epoch": 0.028041964898519463,
"grad_norm": 5.6275954246521,
"learning_rate": 3.039e-05,
"loss": 0.3209,
"step": 143
},
{
"epoch": 0.028238062555152467,
"grad_norm": 10.726852416992188,
"learning_rate": 2.9856842105263153e-05,
"loss": 1.2664,
"step": 144
},
{
"epoch": 0.028434160211785468,
"grad_norm": 12.800682067871094,
"learning_rate": 2.9323684210526317e-05,
"loss": 1.4327,
"step": 145
},
{
"epoch": 0.028630257868418472,
"grad_norm": 8.828701972961426,
"learning_rate": 2.879052631578947e-05,
"loss": 0.8394,
"step": 146
},
{
"epoch": 0.028826355525051476,
"grad_norm": 20.543346405029297,
"learning_rate": 2.8257368421052628e-05,
"loss": 2.1235,
"step": 147
},
{
"epoch": 0.02902245318168448,
"grad_norm": 24.438735961914062,
"learning_rate": 2.772421052631579e-05,
"loss": 1.5965,
"step": 148
},
{
"epoch": 0.02921855083831748,
"grad_norm": 21.095970153808594,
"learning_rate": 2.7191052631578946e-05,
"loss": 1.35,
"step": 149
},
{
"epoch": 0.029414648494950485,
"grad_norm": 18.338863372802734,
"learning_rate": 2.6657894736842104e-05,
"loss": 1.7666,
"step": 150
},
{
"epoch": 0.029414648494950485,
"eval_loss": 0.41522201895713806,
"eval_runtime": 243.3207,
"eval_samples_per_second": 8.828,
"eval_steps_per_second": 2.207,
"step": 150
}
],
"logging_steps": 1,
"max_steps": 200,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 50,
"stateful_callbacks": {
"EarlyStoppingCallback": {
"args": {
"early_stopping_patience": 5,
"early_stopping_threshold": 0.0
},
"attributes": {
"early_stopping_patience_counter": 0
}
},
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 5.652448497421517e+16,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}