{
"best_metric": 0.22683469951152802,
"best_model_checkpoint": "miner_id_24/checkpoint-200",
"epoch": 0.29027576197387517,
"eval_steps": 50,
"global_step": 200,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.001451378809869376,
"grad_norm": 0.11322330683469772,
"learning_rate": 1.0140000000000001e-05,
"loss": 0.2249,
"step": 1
},
{
"epoch": 0.001451378809869376,
"eval_loss": 0.27678582072257996,
"eval_runtime": 142.3329,
"eval_samples_per_second": 2.045,
"eval_steps_per_second": 0.513,
"step": 1
},
{
"epoch": 0.002902757619738752,
"grad_norm": 0.11262699961662292,
"learning_rate": 2.0280000000000002e-05,
"loss": 0.4195,
"step": 2
},
{
"epoch": 0.0043541364296081275,
"grad_norm": 0.17557258903980255,
"learning_rate": 3.0419999999999997e-05,
"loss": 0.3746,
"step": 3
},
{
"epoch": 0.005805515239477504,
"grad_norm": 0.13793011009693146,
"learning_rate": 4.0560000000000005e-05,
"loss": 0.3454,
"step": 4
},
{
"epoch": 0.00725689404934688,
"grad_norm": 0.11599768698215485,
"learning_rate": 5.07e-05,
"loss": 0.1971,
"step": 5
},
{
"epoch": 0.008708272859216255,
"grad_norm": 0.13049179315567017,
"learning_rate": 6.0839999999999993e-05,
"loss": 0.2833,
"step": 6
},
{
"epoch": 0.010159651669085631,
"grad_norm": 0.15671488642692566,
"learning_rate": 7.097999999999999e-05,
"loss": 0.4398,
"step": 7
},
{
"epoch": 0.011611030478955007,
"grad_norm": 0.14859157800674438,
"learning_rate": 8.112000000000001e-05,
"loss": 0.4155,
"step": 8
},
{
"epoch": 0.013062409288824383,
"grad_norm": 0.1236390471458435,
"learning_rate": 9.126e-05,
"loss": 0.3616,
"step": 9
},
{
"epoch": 0.01451378809869376,
"grad_norm": 0.13758322596549988,
"learning_rate": 0.0001014,
"loss": 0.7267,
"step": 10
},
{
"epoch": 0.015965166908563134,
"grad_norm": 0.16419778764247894,
"learning_rate": 0.00010086631578947368,
"loss": 0.5664,
"step": 11
},
{
"epoch": 0.01741654571843251,
"grad_norm": 0.13118581473827362,
"learning_rate": 0.00010033263157894736,
"loss": 0.329,
"step": 12
},
{
"epoch": 0.018867924528301886,
"grad_norm": 0.20077037811279297,
"learning_rate": 9.979894736842105e-05,
"loss": 0.6135,
"step": 13
},
{
"epoch": 0.020319303338171262,
"grad_norm": 0.17843510210514069,
"learning_rate": 9.926526315789475e-05,
"loss": 0.4948,
"step": 14
},
{
"epoch": 0.02177068214804064,
"grad_norm": 0.14726127684116364,
"learning_rate": 9.873157894736843e-05,
"loss": 0.2194,
"step": 15
},
{
"epoch": 0.023222060957910014,
"grad_norm": 0.1400846391916275,
"learning_rate": 9.81978947368421e-05,
"loss": 0.2336,
"step": 16
},
{
"epoch": 0.02467343976777939,
"grad_norm": 0.14046010375022888,
"learning_rate": 9.766421052631579e-05,
"loss": 0.3142,
"step": 17
},
{
"epoch": 0.026124818577648767,
"grad_norm": 0.14007939398288727,
"learning_rate": 9.713052631578947e-05,
"loss": 0.2546,
"step": 18
},
{
"epoch": 0.027576197387518143,
"grad_norm": 0.1631072461605072,
"learning_rate": 9.659684210526315e-05,
"loss": 0.5582,
"step": 19
},
{
"epoch": 0.02902757619738752,
"grad_norm": 0.11267740279436111,
"learning_rate": 9.606315789473684e-05,
"loss": 0.1945,
"step": 20
},
{
"epoch": 0.030478955007256895,
"grad_norm": 0.09841505438089371,
"learning_rate": 9.552947368421053e-05,
"loss": 0.1045,
"step": 21
},
{
"epoch": 0.03193033381712627,
"grad_norm": 0.14902962744235992,
"learning_rate": 9.499578947368422e-05,
"loss": 0.4456,
"step": 22
},
{
"epoch": 0.033381712626995644,
"grad_norm": 0.14722256362438202,
"learning_rate": 9.44621052631579e-05,
"loss": 0.4339,
"step": 23
},
{
"epoch": 0.03483309143686502,
"grad_norm": 0.1797783523797989,
"learning_rate": 9.392842105263158e-05,
"loss": 0.3443,
"step": 24
},
{
"epoch": 0.036284470246734396,
"grad_norm": 0.13160745799541473,
"learning_rate": 9.339473684210526e-05,
"loss": 0.3453,
"step": 25
},
{
"epoch": 0.03773584905660377,
"grad_norm": 0.1477287858724594,
"learning_rate": 9.286105263157894e-05,
"loss": 0.4933,
"step": 26
},
{
"epoch": 0.03918722786647315,
"grad_norm": 0.1731908768415451,
"learning_rate": 9.232736842105263e-05,
"loss": 0.5843,
"step": 27
},
{
"epoch": 0.040638606676342524,
"grad_norm": 0.11266452074050903,
"learning_rate": 9.179368421052632e-05,
"loss": 0.2571,
"step": 28
},
{
"epoch": 0.0420899854862119,
"grad_norm": 0.1734313815832138,
"learning_rate": 9.126e-05,
"loss": 0.4923,
"step": 29
},
{
"epoch": 0.04354136429608128,
"grad_norm": 0.26261186599731445,
"learning_rate": 9.072631578947368e-05,
"loss": 0.3936,
"step": 30
},
{
"epoch": 0.04499274310595065,
"grad_norm": 0.12735208868980408,
"learning_rate": 9.019263157894736e-05,
"loss": 0.3903,
"step": 31
},
{
"epoch": 0.04644412191582003,
"grad_norm": 0.10862109065055847,
"learning_rate": 8.965894736842104e-05,
"loss": 0.2795,
"step": 32
},
{
"epoch": 0.047895500725689405,
"grad_norm": 0.15703803300857544,
"learning_rate": 8.912526315789472e-05,
"loss": 0.4776,
"step": 33
},
{
"epoch": 0.04934687953555878,
"grad_norm": 0.12412229925394058,
"learning_rate": 8.859157894736842e-05,
"loss": 0.2616,
"step": 34
},
{
"epoch": 0.05079825834542816,
"grad_norm": 0.10488831996917725,
"learning_rate": 8.805789473684211e-05,
"loss": 0.2254,
"step": 35
},
{
"epoch": 0.05224963715529753,
"grad_norm": 0.1006138026714325,
"learning_rate": 8.752421052631579e-05,
"loss": 0.2064,
"step": 36
},
{
"epoch": 0.05370101596516691,
"grad_norm": 0.12277859449386597,
"learning_rate": 8.699052631578947e-05,
"loss": 0.2471,
"step": 37
},
{
"epoch": 0.055152394775036286,
"grad_norm": 0.11067472398281097,
"learning_rate": 8.645684210526315e-05,
"loss": 0.2185,
"step": 38
},
{
"epoch": 0.05660377358490566,
"grad_norm": 0.15399208664894104,
"learning_rate": 8.592315789473683e-05,
"loss": 0.3452,
"step": 39
},
{
"epoch": 0.05805515239477504,
"grad_norm": 0.15283828973770142,
"learning_rate": 8.538947368421051e-05,
"loss": 0.6742,
"step": 40
},
{
"epoch": 0.059506531204644414,
"grad_norm": 0.14193159341812134,
"learning_rate": 8.485578947368421e-05,
"loss": 0.216,
"step": 41
},
{
"epoch": 0.06095791001451379,
"grad_norm": 0.2340918630361557,
"learning_rate": 8.43221052631579e-05,
"loss": 0.3091,
"step": 42
},
{
"epoch": 0.062409288824383166,
"grad_norm": 0.12806956470012665,
"learning_rate": 8.378842105263158e-05,
"loss": 0.2117,
"step": 43
},
{
"epoch": 0.06386066763425254,
"grad_norm": 0.17580929398536682,
"learning_rate": 8.325473684210526e-05,
"loss": 0.6556,
"step": 44
},
{
"epoch": 0.06531204644412192,
"grad_norm": 0.25366875529289246,
"learning_rate": 8.272105263157894e-05,
"loss": 0.5201,
"step": 45
},
{
"epoch": 0.06676342525399129,
"grad_norm": 1.4643609523773193,
"learning_rate": 8.218736842105262e-05,
"loss": 0.9149,
"step": 46
},
{
"epoch": 0.06821480406386067,
"grad_norm": 0.31062501668930054,
"learning_rate": 8.165368421052632e-05,
"loss": 0.8123,
"step": 47
},
{
"epoch": 0.06966618287373004,
"grad_norm": 0.28281453251838684,
"learning_rate": 8.112000000000001e-05,
"loss": 0.61,
"step": 48
},
{
"epoch": 0.07111756168359942,
"grad_norm": 0.5076847672462463,
"learning_rate": 8.058631578947369e-05,
"loss": 0.9513,
"step": 49
},
{
"epoch": 0.07256894049346879,
"grad_norm": 0.488422691822052,
"learning_rate": 8.005263157894737e-05,
"loss": 1.1186,
"step": 50
},
{
"epoch": 0.07256894049346879,
"eval_loss": 0.24130938947200775,
"eval_runtime": 144.8327,
"eval_samples_per_second": 2.009,
"eval_steps_per_second": 0.504,
"step": 50
},
{
"epoch": 0.07402031930333818,
"grad_norm": 0.14887401461601257,
"learning_rate": 7.951894736842105e-05,
"loss": 0.5748,
"step": 51
},
{
"epoch": 0.07547169811320754,
"grad_norm": 0.1398821324110031,
"learning_rate": 7.898526315789473e-05,
"loss": 0.3816,
"step": 52
},
{
"epoch": 0.07692307692307693,
"grad_norm": 0.13132548332214355,
"learning_rate": 7.845157894736841e-05,
"loss": 0.3495,
"step": 53
},
{
"epoch": 0.0783744557329463,
"grad_norm": 0.18042461574077606,
"learning_rate": 7.79178947368421e-05,
"loss": 0.6343,
"step": 54
},
{
"epoch": 0.07982583454281568,
"grad_norm": 0.17077870666980743,
"learning_rate": 7.73842105263158e-05,
"loss": 0.4339,
"step": 55
},
{
"epoch": 0.08127721335268505,
"grad_norm": 0.17123785614967346,
"learning_rate": 7.685052631578948e-05,
"loss": 0.5185,
"step": 56
},
{
"epoch": 0.08272859216255443,
"grad_norm": 0.1359768807888031,
"learning_rate": 7.631684210526316e-05,
"loss": 0.3538,
"step": 57
},
{
"epoch": 0.0841799709724238,
"grad_norm": 0.20184911787509918,
"learning_rate": 7.578315789473684e-05,
"loss": 0.8425,
"step": 58
},
{
"epoch": 0.08563134978229318,
"grad_norm": 0.12804219126701355,
"learning_rate": 7.524947368421052e-05,
"loss": 0.3524,
"step": 59
},
{
"epoch": 0.08708272859216255,
"grad_norm": 0.14602139592170715,
"learning_rate": 7.47157894736842e-05,
"loss": 0.8212,
"step": 60
},
{
"epoch": 0.08853410740203194,
"grad_norm": 0.16759201884269714,
"learning_rate": 7.418210526315789e-05,
"loss": 0.5125,
"step": 61
},
{
"epoch": 0.0899854862119013,
"grad_norm": 0.124669648706913,
"learning_rate": 7.364842105263159e-05,
"loss": 0.5449,
"step": 62
},
{
"epoch": 0.09143686502177069,
"grad_norm": 0.17681315541267395,
"learning_rate": 7.311473684210527e-05,
"loss": 0.4928,
"step": 63
},
{
"epoch": 0.09288824383164006,
"grad_norm": 0.10935454815626144,
"learning_rate": 7.258105263157895e-05,
"loss": 0.2289,
"step": 64
},
{
"epoch": 0.09433962264150944,
"grad_norm": 0.11440805345773697,
"learning_rate": 7.204736842105263e-05,
"loss": 0.3037,
"step": 65
},
{
"epoch": 0.09579100145137881,
"grad_norm": 0.13342805206775665,
"learning_rate": 7.151368421052631e-05,
"loss": 0.4788,
"step": 66
},
{
"epoch": 0.09724238026124818,
"grad_norm": 0.14905494451522827,
"learning_rate": 7.097999999999999e-05,
"loss": 0.5567,
"step": 67
},
{
"epoch": 0.09869375907111756,
"grad_norm": 0.16177383065223694,
"learning_rate": 7.044631578947368e-05,
"loss": 0.2705,
"step": 68
},
{
"epoch": 0.10014513788098693,
"grad_norm": 0.1414748579263687,
"learning_rate": 6.991263157894738e-05,
"loss": 0.4054,
"step": 69
},
{
"epoch": 0.10159651669085631,
"grad_norm": 0.15447840094566345,
"learning_rate": 6.937894736842106e-05,
"loss": 0.5717,
"step": 70
},
{
"epoch": 0.10304789550072568,
"grad_norm": 0.1624661087989807,
"learning_rate": 6.884526315789474e-05,
"loss": 0.3306,
"step": 71
},
{
"epoch": 0.10449927431059507,
"grad_norm": 0.1577853560447693,
"learning_rate": 6.831157894736842e-05,
"loss": 0.7486,
"step": 72
},
{
"epoch": 0.10595065312046444,
"grad_norm": 0.10878780484199524,
"learning_rate": 6.77778947368421e-05,
"loss": 0.3239,
"step": 73
},
{
"epoch": 0.10740203193033382,
"grad_norm": 0.09321369230747223,
"learning_rate": 6.724421052631579e-05,
"loss": 0.2124,
"step": 74
},
{
"epoch": 0.10885341074020319,
"grad_norm": 0.12391191720962524,
"learning_rate": 6.671052631578948e-05,
"loss": 0.315,
"step": 75
},
{
"epoch": 0.11030478955007257,
"grad_norm": 0.1172720268368721,
"learning_rate": 6.617684210526316e-05,
"loss": 0.2476,
"step": 76
},
{
"epoch": 0.11175616835994194,
"grad_norm": 0.08327235281467438,
"learning_rate": 6.564315789473684e-05,
"loss": 0.1284,
"step": 77
},
{
"epoch": 0.11320754716981132,
"grad_norm": 0.11149916797876358,
"learning_rate": 6.510947368421052e-05,
"loss": 0.3052,
"step": 78
},
{
"epoch": 0.11465892597968069,
"grad_norm": 0.12076565623283386,
"learning_rate": 6.45757894736842e-05,
"loss": 0.5215,
"step": 79
},
{
"epoch": 0.11611030478955008,
"grad_norm": 0.18347236514091492,
"learning_rate": 6.404210526315789e-05,
"loss": 1.1367,
"step": 80
},
{
"epoch": 0.11756168359941944,
"grad_norm": 0.08764215558767319,
"learning_rate": 6.350842105263158e-05,
"loss": 0.223,
"step": 81
},
{
"epoch": 0.11901306240928883,
"grad_norm": 0.13922688364982605,
"learning_rate": 6.297473684210527e-05,
"loss": 0.4811,
"step": 82
},
{
"epoch": 0.1204644412191582,
"grad_norm": 0.12688542902469635,
"learning_rate": 6.244105263157895e-05,
"loss": 0.4067,
"step": 83
},
{
"epoch": 0.12191582002902758,
"grad_norm": 0.1191866546869278,
"learning_rate": 6.190736842105263e-05,
"loss": 0.484,
"step": 84
},
{
"epoch": 0.12336719883889695,
"grad_norm": 0.1730227768421173,
"learning_rate": 6.137368421052631e-05,
"loss": 0.6044,
"step": 85
},
{
"epoch": 0.12481857764876633,
"grad_norm": 0.12854455411434174,
"learning_rate": 6.0839999999999993e-05,
"loss": 0.597,
"step": 86
},
{
"epoch": 0.1262699564586357,
"grad_norm": 0.08396941423416138,
"learning_rate": 6.030631578947368e-05,
"loss": 0.104,
"step": 87
},
{
"epoch": 0.12772133526850507,
"grad_norm": 0.12541648745536804,
"learning_rate": 5.977263157894736e-05,
"loss": 0.1516,
"step": 88
},
{
"epoch": 0.12917271407837447,
"grad_norm": 0.08674124628305435,
"learning_rate": 5.9238947368421054e-05,
"loss": 0.1682,
"step": 89
},
{
"epoch": 0.13062409288824384,
"grad_norm": 0.15892113745212555,
"learning_rate": 5.870526315789474e-05,
"loss": 1.0767,
"step": 90
},
{
"epoch": 0.1320754716981132,
"grad_norm": 0.16107392311096191,
"learning_rate": 5.817157894736842e-05,
"loss": 0.8568,
"step": 91
},
{
"epoch": 0.13352685050798258,
"grad_norm": 0.13994719088077545,
"learning_rate": 5.76378947368421e-05,
"loss": 0.8352,
"step": 92
},
{
"epoch": 0.13497822931785197,
"grad_norm": 0.12204215675592422,
"learning_rate": 5.710421052631579e-05,
"loss": 0.2985,
"step": 93
},
{
"epoch": 0.13642960812772134,
"grad_norm": 0.16387738287448883,
"learning_rate": 5.657052631578947e-05,
"loss": 0.7261,
"step": 94
},
{
"epoch": 0.1378809869375907,
"grad_norm": 0.15970756113529205,
"learning_rate": 5.603684210526316e-05,
"loss": 0.5917,
"step": 95
},
{
"epoch": 0.13933236574746008,
"grad_norm": 0.24255751073360443,
"learning_rate": 5.550315789473684e-05,
"loss": 1.297,
"step": 96
},
{
"epoch": 0.14078374455732948,
"grad_norm": 0.31163647770881653,
"learning_rate": 5.496947368421053e-05,
"loss": 1.0701,
"step": 97
},
{
"epoch": 0.14223512336719885,
"grad_norm": 0.24494129419326782,
"learning_rate": 5.443578947368421e-05,
"loss": 0.6255,
"step": 98
},
{
"epoch": 0.14368650217706821,
"grad_norm": 0.36796122789382935,
"learning_rate": 5.390210526315789e-05,
"loss": 1.3235,
"step": 99
},
{
"epoch": 0.14513788098693758,
"grad_norm": 0.6409245729446411,
"learning_rate": 5.336842105263158e-05,
"loss": 0.9752,
"step": 100
},
{
"epoch": 0.14513788098693758,
"eval_loss": 0.22861960530281067,
"eval_runtime": 143.5547,
"eval_samples_per_second": 2.027,
"eval_steps_per_second": 0.509,
"step": 100
},
{
"epoch": 0.14658925979680695,
"grad_norm": 0.13961444795131683,
"learning_rate": 5.283473684210526e-05,
"loss": 0.7936,
"step": 101
},
{
"epoch": 0.14804063860667635,
"grad_norm": 0.15385086834430695,
"learning_rate": 5.230105263157895e-05,
"loss": 0.4879,
"step": 102
},
{
"epoch": 0.14949201741654572,
"grad_norm": 0.09357989579439163,
"learning_rate": 5.176736842105263e-05,
"loss": 0.1763,
"step": 103
},
{
"epoch": 0.1509433962264151,
"grad_norm": 0.1107134148478508,
"learning_rate": 5.123368421052632e-05,
"loss": 0.4355,
"step": 104
},
{
"epoch": 0.15239477503628446,
"grad_norm": 0.11032993346452713,
"learning_rate": 5.07e-05,
"loss": 0.3403,
"step": 105
},
{
"epoch": 0.15384615384615385,
"grad_norm": 0.09584687650203705,
"learning_rate": 5.016631578947368e-05,
"loss": 0.3298,
"step": 106
},
{
"epoch": 0.15529753265602322,
"grad_norm": 0.15111547708511353,
"learning_rate": 4.963263157894737e-05,
"loss": 0.7715,
"step": 107
},
{
"epoch": 0.1567489114658926,
"grad_norm": 0.16879358887672424,
"learning_rate": 4.909894736842105e-05,
"loss": 0.4804,
"step": 108
},
{
"epoch": 0.15820029027576196,
"grad_norm": 0.15038713812828064,
"learning_rate": 4.8565263157894734e-05,
"loss": 0.6739,
"step": 109
},
{
"epoch": 0.15965166908563136,
"grad_norm": 0.1444604992866516,
"learning_rate": 4.803157894736842e-05,
"loss": 0.2473,
"step": 110
},
{
"epoch": 0.16110304789550073,
"grad_norm": 0.12805120646953583,
"learning_rate": 4.749789473684211e-05,
"loss": 0.3842,
"step": 111
},
{
"epoch": 0.1625544267053701,
"grad_norm": 0.13309065997600555,
"learning_rate": 4.696421052631579e-05,
"loss": 0.4518,
"step": 112
},
{
"epoch": 0.16400580551523947,
"grad_norm": 0.12078673392534256,
"learning_rate": 4.643052631578947e-05,
"loss": 0.3201,
"step": 113
},
{
"epoch": 0.16545718432510886,
"grad_norm": 0.10837192833423615,
"learning_rate": 4.589684210526316e-05,
"loss": 0.2514,
"step": 114
},
{
"epoch": 0.16690856313497823,
"grad_norm": 0.1499970406293869,
"learning_rate": 4.536315789473684e-05,
"loss": 0.7187,
"step": 115
},
{
"epoch": 0.1683599419448476,
"grad_norm": 0.1287223845720291,
"learning_rate": 4.482947368421052e-05,
"loss": 0.5988,
"step": 116
},
{
"epoch": 0.16981132075471697,
"grad_norm": 0.17324185371398926,
"learning_rate": 4.429578947368421e-05,
"loss": 0.6624,
"step": 117
},
{
"epoch": 0.17126269956458637,
"grad_norm": 0.21229642629623413,
"learning_rate": 4.3762105263157896e-05,
"loss": 0.6378,
"step": 118
},
{
"epoch": 0.17271407837445574,
"grad_norm": 0.1443430334329605,
"learning_rate": 4.3228421052631576e-05,
"loss": 0.3911,
"step": 119
},
{
"epoch": 0.1741654571843251,
"grad_norm": 0.09278915822505951,
"learning_rate": 4.269473684210526e-05,
"loss": 0.2204,
"step": 120
},
{
"epoch": 0.17561683599419448,
"grad_norm": 0.10660077631473541,
"learning_rate": 4.216105263157895e-05,
"loss": 0.2652,
"step": 121
},
{
"epoch": 0.17706821480406387,
"grad_norm": 0.11987288296222687,
"learning_rate": 4.162736842105263e-05,
"loss": 0.1777,
"step": 122
},
{
"epoch": 0.17851959361393324,
"grad_norm": 0.11112989485263824,
"learning_rate": 4.109368421052631e-05,
"loss": 0.3522,
"step": 123
},
{
"epoch": 0.1799709724238026,
"grad_norm": 0.1165122240781784,
"learning_rate": 4.0560000000000005e-05,
"loss": 0.2828,
"step": 124
},
{
"epoch": 0.18142235123367198,
"grad_norm": 0.12267779558897018,
"learning_rate": 4.0026315789473685e-05,
"loss": 0.3735,
"step": 125
},
{
"epoch": 0.18287373004354138,
"grad_norm": 0.07691169530153275,
"learning_rate": 3.9492631578947365e-05,
"loss": 0.1477,
"step": 126
},
{
"epoch": 0.18432510885341075,
"grad_norm": 0.12377668917179108,
"learning_rate": 3.895894736842105e-05,
"loss": 0.3195,
"step": 127
},
{
"epoch": 0.18577648766328012,
"grad_norm": 0.137495219707489,
"learning_rate": 3.842526315789474e-05,
"loss": 0.3273,
"step": 128
},
{
"epoch": 0.18722786647314948,
"grad_norm": 0.11476119607686996,
"learning_rate": 3.789157894736842e-05,
"loss": 0.2213,
"step": 129
},
{
"epoch": 0.18867924528301888,
"grad_norm": 0.11729950457811356,
"learning_rate": 3.73578947368421e-05,
"loss": 0.2502,
"step": 130
},
{
"epoch": 0.19013062409288825,
"grad_norm": 0.11003285646438599,
"learning_rate": 3.682421052631579e-05,
"loss": 0.1764,
"step": 131
},
{
"epoch": 0.19158200290275762,
"grad_norm": 0.09452930092811584,
"learning_rate": 3.6290526315789474e-05,
"loss": 0.2065,
"step": 132
},
{
"epoch": 0.193033381712627,
"grad_norm": 0.1257011443376541,
"learning_rate": 3.5756842105263154e-05,
"loss": 0.1952,
"step": 133
},
{
"epoch": 0.19448476052249636,
"grad_norm": 0.11230431497097015,
"learning_rate": 3.522315789473684e-05,
"loss": 0.223,
"step": 134
},
{
"epoch": 0.19593613933236576,
"grad_norm": 0.10401938110589981,
"learning_rate": 3.468947368421053e-05,
"loss": 0.3397,
"step": 135
},
{
"epoch": 0.19738751814223512,
"grad_norm": 0.16281422972679138,
"learning_rate": 3.415578947368421e-05,
"loss": 0.2784,
"step": 136
},
{
"epoch": 0.1988388969521045,
"grad_norm": 0.08936594426631927,
"learning_rate": 3.3622105263157895e-05,
"loss": 0.139,
"step": 137
},
{
"epoch": 0.20029027576197386,
"grad_norm": 0.149820938706398,
"learning_rate": 3.308842105263158e-05,
"loss": 0.3641,
"step": 138
},
{
"epoch": 0.20174165457184326,
"grad_norm": 0.10825081914663315,
"learning_rate": 3.255473684210526e-05,
"loss": 0.1988,
"step": 139
},
{
"epoch": 0.20319303338171263,
"grad_norm": 0.09764596819877625,
"learning_rate": 3.202105263157894e-05,
"loss": 0.2893,
"step": 140
},
{
"epoch": 0.204644412191582,
"grad_norm": 0.1362355500459671,
"learning_rate": 3.1487368421052636e-05,
"loss": 0.8362,
"step": 141
},
{
"epoch": 0.20609579100145137,
"grad_norm": 0.2416013777256012,
"learning_rate": 3.0953684210526317e-05,
"loss": 0.6021,
"step": 142
},
{
"epoch": 0.20754716981132076,
"grad_norm": 0.12458783388137817,
"learning_rate": 3.0419999999999997e-05,
"loss": 0.4149,
"step": 143
},
{
"epoch": 0.20899854862119013,
"grad_norm": 0.1262422502040863,
"learning_rate": 2.988631578947368e-05,
"loss": 0.2399,
"step": 144
},
{
"epoch": 0.2104499274310595,
"grad_norm": 0.16632845997810364,
"learning_rate": 2.935263157894737e-05,
"loss": 0.3516,
"step": 145
},
{
"epoch": 0.21190130624092887,
"grad_norm": 0.197062149643898,
"learning_rate": 2.881894736842105e-05,
"loss": 0.5045,
"step": 146
},
{
"epoch": 0.21335268505079827,
"grad_norm": 0.26367273926734924,
"learning_rate": 2.8285263157894735e-05,
"loss": 0.7623,
"step": 147
},
{
"epoch": 0.21480406386066764,
"grad_norm": 0.2773095369338989,
"learning_rate": 2.775157894736842e-05,
"loss": 0.8716,
"step": 148
},
{
"epoch": 0.216255442670537,
"grad_norm": 0.3937135934829712,
"learning_rate": 2.7217894736842105e-05,
"loss": 0.8716,
"step": 149
},
{
"epoch": 0.21770682148040638,
"grad_norm": 0.37637463212013245,
"learning_rate": 2.668421052631579e-05,
"loss": 0.4929,
"step": 150
},
{
"epoch": 0.21770682148040638,
"eval_loss": 0.2300933599472046,
"eval_runtime": 142.3159,
"eval_samples_per_second": 2.045,
"eval_steps_per_second": 0.513,
"step": 150
},
{
"epoch": 0.21915820029027577,
"grad_norm": 0.15475043654441833,
"learning_rate": 2.6150526315789476e-05,
"loss": 1.0066,
"step": 151
},
{
"epoch": 0.22060957910014514,
"grad_norm": 0.17831435799598694,
"learning_rate": 2.561684210526316e-05,
"loss": 0.8342,
"step": 152
},
{
"epoch": 0.2220609579100145,
"grad_norm": 0.15772460401058197,
"learning_rate": 2.508315789473684e-05,
"loss": 0.5335,
"step": 153
},
{
"epoch": 0.22351233671988388,
"grad_norm": 0.1907489001750946,
"learning_rate": 2.4549473684210527e-05,
"loss": 1.0695,
"step": 154
},
{
"epoch": 0.22496371552975328,
"grad_norm": 0.12170762568712234,
"learning_rate": 2.401578947368421e-05,
"loss": 0.5123,
"step": 155
},
{
"epoch": 0.22641509433962265,
"grad_norm": 0.20167575776576996,
"learning_rate": 2.3482105263157894e-05,
"loss": 1.0599,
"step": 156
},
{
"epoch": 0.22786647314949202,
"grad_norm": 0.1575011909008026,
"learning_rate": 2.294842105263158e-05,
"loss": 0.2423,
"step": 157
},
{
"epoch": 0.22931785195936139,
"grad_norm": 0.164499431848526,
"learning_rate": 2.241473684210526e-05,
"loss": 0.7476,
"step": 158
},
{
"epoch": 0.23076923076923078,
"grad_norm": 0.123818539083004,
"learning_rate": 2.1881052631578948e-05,
"loss": 0.3213,
"step": 159
},
{
"epoch": 0.23222060957910015,
"grad_norm": 0.1462564766407013,
"learning_rate": 2.134736842105263e-05,
"loss": 0.3852,
"step": 160
},
{
"epoch": 0.23367198838896952,
"grad_norm": 0.10650084167718887,
"learning_rate": 2.0813684210526315e-05,
"loss": 0.4096,
"step": 161
},
{
"epoch": 0.2351233671988389,
"grad_norm": 0.14189405739307404,
"learning_rate": 2.0280000000000002e-05,
"loss": 0.4864,
"step": 162
},
{
"epoch": 0.2365747460087083,
"grad_norm": 0.15137618780136108,
"learning_rate": 1.9746315789473683e-05,
"loss": 0.3834,
"step": 163
},
{
"epoch": 0.23802612481857766,
"grad_norm": 0.14375939965248108,
"learning_rate": 1.921263157894737e-05,
"loss": 0.8234,
"step": 164
},
{
"epoch": 0.23947750362844702,
"grad_norm": 0.12783659994602203,
"learning_rate": 1.867894736842105e-05,
"loss": 0.3969,
"step": 165
},
{
"epoch": 0.2409288824383164,
"grad_norm": 0.18408963084220886,
"learning_rate": 1.8145263157894737e-05,
"loss": 1.0383,
"step": 166
},
{
"epoch": 0.24238026124818576,
"grad_norm": 0.22771526873111725,
"learning_rate": 1.761157894736842e-05,
"loss": 0.6109,
"step": 167
},
{
"epoch": 0.24383164005805516,
"grad_norm": 0.11981768161058426,
"learning_rate": 1.7077894736842104e-05,
"loss": 0.3539,
"step": 168
},
{
"epoch": 0.24528301886792453,
"grad_norm": 0.13763920962810516,
"learning_rate": 1.654421052631579e-05,
"loss": 0.5094,
"step": 169
},
{
"epoch": 0.2467343976777939,
"grad_norm": 0.10743150860071182,
"learning_rate": 1.601052631578947e-05,
"loss": 0.3303,
"step": 170
},
{
"epoch": 0.24818577648766327,
"grad_norm": 0.13086174428462982,
"learning_rate": 1.5476842105263158e-05,
"loss": 0.3147,
"step": 171
},
{
"epoch": 0.24963715529753266,
"grad_norm": 0.2016236037015915,
"learning_rate": 1.494315789473684e-05,
"loss": 0.4497,
"step": 172
},
{
"epoch": 0.251088534107402,
"grad_norm": 0.13244684040546417,
"learning_rate": 1.4409473684210525e-05,
"loss": 0.2323,
"step": 173
},
{
"epoch": 0.2525399129172714,
"grad_norm": 0.09831114858388901,
"learning_rate": 1.387578947368421e-05,
"loss": 0.3278,
"step": 174
},
{
"epoch": 0.2539912917271408,
"grad_norm": 0.10631395131349564,
"learning_rate": 1.3342105263157894e-05,
"loss": 0.3234,
"step": 175
},
{
"epoch": 0.25544267053701014,
"grad_norm": 0.12602189183235168,
"learning_rate": 1.280842105263158e-05,
"loss": 0.9905,
"step": 176
},
{
"epoch": 0.25689404934687954,
"grad_norm": 0.1074807196855545,
"learning_rate": 1.2274736842105263e-05,
"loss": 0.3108,
"step": 177
},
{
"epoch": 0.25834542815674894,
"grad_norm": 0.14480219781398773,
"learning_rate": 1.1741052631578947e-05,
"loss": 0.51,
"step": 178
},
{
"epoch": 0.2597968069666183,
"grad_norm": 0.1332997977733612,
"learning_rate": 1.120736842105263e-05,
"loss": 0.5009,
"step": 179
},
{
"epoch": 0.2612481857764877,
"grad_norm": 0.1115342527627945,
"learning_rate": 1.0673684210526314e-05,
"loss": 0.2856,
"step": 180
},
{
"epoch": 0.262699564586357,
"grad_norm": 0.15611930191516876,
"learning_rate": 1.0140000000000001e-05,
"loss": 0.3714,
"step": 181
},
{
"epoch": 0.2641509433962264,
"grad_norm": 0.1355818808078766,
"learning_rate": 9.606315789473685e-06,
"loss": 0.5347,
"step": 182
},
{
"epoch": 0.2656023222060958,
"grad_norm": 0.12218355387449265,
"learning_rate": 9.072631578947368e-06,
"loss": 0.2982,
"step": 183
},
{
"epoch": 0.26705370101596515,
"grad_norm": 0.09633472561836243,
"learning_rate": 8.538947368421052e-06,
"loss": 0.2164,
"step": 184
},
{
"epoch": 0.26850507982583455,
"grad_norm": 0.09966512769460678,
"learning_rate": 8.005263157894736e-06,
"loss": 0.6552,
"step": 185
},
{
"epoch": 0.26995645863570394,
"grad_norm": 0.09162217378616333,
"learning_rate": 7.47157894736842e-06,
"loss": 0.2767,
"step": 186
},
{
"epoch": 0.2714078374455733,
"grad_norm": 0.0985371544957161,
"learning_rate": 6.937894736842105e-06,
"loss": 0.2375,
"step": 187
},
{
"epoch": 0.2728592162554427,
"grad_norm": 0.127197727560997,
"learning_rate": 6.40421052631579e-06,
"loss": 0.2468,
"step": 188
},
{
"epoch": 0.274310595065312,
"grad_norm": 0.14667248725891113,
"learning_rate": 5.8705263157894735e-06,
"loss": 0.279,
"step": 189
},
{
"epoch": 0.2757619738751814,
"grad_norm": 0.1272004246711731,
"learning_rate": 5.336842105263157e-06,
"loss": 0.2025,
"step": 190
},
{
"epoch": 0.2772133526850508,
"grad_norm": 0.10167410224676132,
"learning_rate": 4.803157894736842e-06,
"loss": 0.2823,
"step": 191
},
{
"epoch": 0.27866473149492016,
"grad_norm": 0.09734392166137695,
"learning_rate": 4.269473684210526e-06,
"loss": 0.1672,
"step": 192
},
{
"epoch": 0.28011611030478956,
"grad_norm": 0.12383999675512314,
"learning_rate": 3.73578947368421e-06,
"loss": 0.4226,
"step": 193
},
{
"epoch": 0.28156748911465895,
"grad_norm": 0.19274388253688812,
"learning_rate": 3.202105263157895e-06,
"loss": 0.4254,
"step": 194
},
{
"epoch": 0.2830188679245283,
"grad_norm": 0.18453097343444824,
"learning_rate": 2.6684210526315785e-06,
"loss": 0.8314,
"step": 195
},
{
"epoch": 0.2844702467343977,
"grad_norm": 0.1844322383403778,
"learning_rate": 2.134736842105263e-06,
"loss": 0.8963,
"step": 196
},
{
"epoch": 0.28592162554426703,
"grad_norm": 0.23592235147953033,
"learning_rate": 1.6010526315789475e-06,
"loss": 0.6229,
"step": 197
},
{
"epoch": 0.28737300435413643,
"grad_norm": 0.2770744264125824,
"learning_rate": 1.0673684210526315e-06,
"loss": 0.6797,
"step": 198
},
{
"epoch": 0.2888243831640058,
"grad_norm": 0.46040070056915283,
"learning_rate": 5.336842105263158e-07,
"loss": 0.6804,
"step": 199
},
{
"epoch": 0.29027576197387517,
"grad_norm": 0.4375990033149719,
"learning_rate": 0.0,
"loss": 1.084,
"step": 200
},
{
"epoch": 0.29027576197387517,
"eval_loss": 0.22683469951152802,
"eval_runtime": 143.599,
"eval_samples_per_second": 2.026,
"eval_steps_per_second": 0.508,
"step": 200
}
],
"logging_steps": 1,
"max_steps": 200,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 50,
"stateful_callbacks": {
"EarlyStoppingCallback": {
"args": {
"early_stopping_patience": 5,
"early_stopping_threshold": 0.0
},
"attributes": {
"early_stopping_patience_counter": 0
}
},
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 2.4053840999350272e+17,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}