{
"best_metric": 0.22681911289691925,
"best_model_checkpoint": "miner_id_24/checkpoint-200",
"epoch": 0.29027576197387517,
"eval_steps": 50,
"global_step": 200,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.001451378809869376,
"grad_norm": 0.11486738920211792,
"learning_rate": 1.0110000000000001e-05,
"loss": 0.2249,
"step": 1
},
{
"epoch": 0.001451378809869376,
"eval_loss": 0.27678582072257996,
"eval_runtime": 142.974,
"eval_samples_per_second": 2.035,
"eval_steps_per_second": 0.511,
"step": 1
},
{
"epoch": 0.002902757619738752,
"grad_norm": 0.11387992650270462,
"learning_rate": 2.0220000000000003e-05,
"loss": 0.4195,
"step": 2
},
{
"epoch": 0.0043541364296081275,
"grad_norm": 0.1770201325416565,
"learning_rate": 3.033e-05,
"loss": 0.3746,
"step": 3
},
{
"epoch": 0.005805515239477504,
"grad_norm": 0.15089277923107147,
"learning_rate": 4.0440000000000006e-05,
"loss": 0.3461,
"step": 4
},
{
"epoch": 0.00725689404934688,
"grad_norm": 0.11748471856117249,
"learning_rate": 5.055e-05,
"loss": 0.1969,
"step": 5
},
{
"epoch": 0.008708272859216255,
"grad_norm": 0.1325271725654602,
"learning_rate": 6.066e-05,
"loss": 0.2835,
"step": 6
},
{
"epoch": 0.010159651669085631,
"grad_norm": 0.14989273250102997,
"learning_rate": 7.077e-05,
"loss": 0.44,
"step": 7
},
{
"epoch": 0.011611030478955007,
"grad_norm": 0.1532517969608307,
"learning_rate": 8.088000000000001e-05,
"loss": 0.4155,
"step": 8
},
{
"epoch": 0.013062409288824383,
"grad_norm": 0.12538368999958038,
"learning_rate": 9.099000000000001e-05,
"loss": 0.3613,
"step": 9
},
{
"epoch": 0.01451378809869376,
"grad_norm": 0.13840965926647186,
"learning_rate": 0.0001011,
"loss": 0.7261,
"step": 10
},
{
"epoch": 0.015965166908563134,
"grad_norm": 0.1670045405626297,
"learning_rate": 0.00010056789473684211,
"loss": 0.5663,
"step": 11
},
{
"epoch": 0.01741654571843251,
"grad_norm": 0.13095274567604065,
"learning_rate": 0.00010003578947368421,
"loss": 0.3288,
"step": 12
},
{
"epoch": 0.018867924528301886,
"grad_norm": 0.19772979617118835,
"learning_rate": 9.950368421052632e-05,
"loss": 0.613,
"step": 13
},
{
"epoch": 0.020319303338171262,
"grad_norm": 0.16937138140201569,
"learning_rate": 9.897157894736842e-05,
"loss": 0.4953,
"step": 14
},
{
"epoch": 0.02177068214804064,
"grad_norm": 0.1333189755678177,
"learning_rate": 9.843947368421053e-05,
"loss": 0.2195,
"step": 15
},
{
"epoch": 0.023222060957910014,
"grad_norm": 0.1428212672472,
"learning_rate": 9.790736842105264e-05,
"loss": 0.2334,
"step": 16
},
{
"epoch": 0.02467343976777939,
"grad_norm": 0.14330728352069855,
"learning_rate": 9.737526315789474e-05,
"loss": 0.3142,
"step": 17
},
{
"epoch": 0.026124818577648767,
"grad_norm": 0.1493055373430252,
"learning_rate": 9.684315789473684e-05,
"loss": 0.2551,
"step": 18
},
{
"epoch": 0.027576197387518143,
"grad_norm": 0.16706344485282898,
"learning_rate": 9.631105263157895e-05,
"loss": 0.5574,
"step": 19
},
{
"epoch": 0.02902757619738752,
"grad_norm": 0.11606133729219437,
"learning_rate": 9.577894736842105e-05,
"loss": 0.1948,
"step": 20
},
{
"epoch": 0.030478955007256895,
"grad_norm": 0.0992949903011322,
"learning_rate": 9.524684210526317e-05,
"loss": 0.1042,
"step": 21
},
{
"epoch": 0.03193033381712627,
"grad_norm": 0.15178389847278595,
"learning_rate": 9.471473684210526e-05,
"loss": 0.4452,
"step": 22
},
{
"epoch": 0.033381712626995644,
"grad_norm": 0.15305830538272858,
"learning_rate": 9.418263157894737e-05,
"loss": 0.4336,
"step": 23
},
{
"epoch": 0.03483309143686502,
"grad_norm": 0.21439702808856964,
"learning_rate": 9.365052631578948e-05,
"loss": 0.346,
"step": 24
},
{
"epoch": 0.036284470246734396,
"grad_norm": 0.1342068761587143,
"learning_rate": 9.311842105263157e-05,
"loss": 0.3454,
"step": 25
},
{
"epoch": 0.03773584905660377,
"grad_norm": 0.1589202582836151,
"learning_rate": 9.258631578947368e-05,
"loss": 0.4931,
"step": 26
},
{
"epoch": 0.03918722786647315,
"grad_norm": 0.16286569833755493,
"learning_rate": 9.20542105263158e-05,
"loss": 0.5843,
"step": 27
},
{
"epoch": 0.040638606676342524,
"grad_norm": 0.11265375465154648,
"learning_rate": 9.15221052631579e-05,
"loss": 0.2569,
"step": 28
},
{
"epoch": 0.0420899854862119,
"grad_norm": 0.18966278433799744,
"learning_rate": 9.099000000000001e-05,
"loss": 0.4922,
"step": 29
},
{
"epoch": 0.04354136429608128,
"grad_norm": 0.2602640688419342,
"learning_rate": 9.045789473684212e-05,
"loss": 0.3959,
"step": 30
},
{
"epoch": 0.04499274310595065,
"grad_norm": 0.12830835580825806,
"learning_rate": 8.992578947368421e-05,
"loss": 0.3903,
"step": 31
},
{
"epoch": 0.04644412191582003,
"grad_norm": 0.10974445939064026,
"learning_rate": 8.939368421052632e-05,
"loss": 0.2795,
"step": 32
},
{
"epoch": 0.047895500725689405,
"grad_norm": 0.16427350044250488,
"learning_rate": 8.886157894736841e-05,
"loss": 0.4776,
"step": 33
},
{
"epoch": 0.04934687953555878,
"grad_norm": 0.12569822371006012,
"learning_rate": 8.832947368421054e-05,
"loss": 0.2615,
"step": 34
},
{
"epoch": 0.05079825834542816,
"grad_norm": 0.10434585809707642,
"learning_rate": 8.779736842105264e-05,
"loss": 0.2248,
"step": 35
},
{
"epoch": 0.05224963715529753,
"grad_norm": 0.1004101112484932,
"learning_rate": 8.726526315789474e-05,
"loss": 0.2055,
"step": 36
},
{
"epoch": 0.05370101596516691,
"grad_norm": 0.12491065263748169,
"learning_rate": 8.673315789473685e-05,
"loss": 0.2482,
"step": 37
},
{
"epoch": 0.055152394775036286,
"grad_norm": 0.11319632083177567,
"learning_rate": 8.620105263157896e-05,
"loss": 0.2185,
"step": 38
},
{
"epoch": 0.05660377358490566,
"grad_norm": 0.15422259271144867,
"learning_rate": 8.566894736842105e-05,
"loss": 0.3448,
"step": 39
},
{
"epoch": 0.05805515239477504,
"grad_norm": 0.15866440534591675,
"learning_rate": 8.513684210526316e-05,
"loss": 0.6739,
"step": 40
},
{
"epoch": 0.059506531204644414,
"grad_norm": 0.1435173898935318,
"learning_rate": 8.460473684210527e-05,
"loss": 0.2159,
"step": 41
},
{
"epoch": 0.06095791001451379,
"grad_norm": 0.21297410130500793,
"learning_rate": 8.407263157894738e-05,
"loss": 0.3077,
"step": 42
},
{
"epoch": 0.062409288824383166,
"grad_norm": 0.1251886934041977,
"learning_rate": 8.354052631578948e-05,
"loss": 0.2113,
"step": 43
},
{
"epoch": 0.06386066763425254,
"grad_norm": 0.1815638542175293,
"learning_rate": 8.300842105263158e-05,
"loss": 0.6564,
"step": 44
},
{
"epoch": 0.06531204644412192,
"grad_norm": 0.2576989531517029,
"learning_rate": 8.247631578947369e-05,
"loss": 0.5205,
"step": 45
},
{
"epoch": 0.06676342525399129,
"grad_norm": 0.27275457978248596,
"learning_rate": 8.19442105263158e-05,
"loss": 0.9175,
"step": 46
},
{
"epoch": 0.06821480406386067,
"grad_norm": 0.26641345024108887,
"learning_rate": 8.141210526315789e-05,
"loss": 0.812,
"step": 47
},
{
"epoch": 0.06966618287373004,
"grad_norm": 0.2708298861980438,
"learning_rate": 8.088000000000001e-05,
"loss": 0.6076,
"step": 48
},
{
"epoch": 0.07111756168359942,
"grad_norm": 0.5135082602500916,
"learning_rate": 8.03478947368421e-05,
"loss": 0.9538,
"step": 49
},
{
"epoch": 0.07256894049346879,
"grad_norm": 0.46205997467041016,
"learning_rate": 7.981578947368421e-05,
"loss": 1.1091,
"step": 50
},
{
"epoch": 0.07256894049346879,
"eval_loss": 0.24128209054470062,
"eval_runtime": 146.2789,
"eval_samples_per_second": 1.989,
"eval_steps_per_second": 0.499,
"step": 50
},
{
"epoch": 0.07402031930333818,
"grad_norm": 0.15042653679847717,
"learning_rate": 7.928368421052632e-05,
"loss": 0.5749,
"step": 51
},
{
"epoch": 0.07547169811320754,
"grad_norm": 0.14070290327072144,
"learning_rate": 7.875157894736842e-05,
"loss": 0.3815,
"step": 52
},
{
"epoch": 0.07692307692307693,
"grad_norm": 0.12616680562496185,
"learning_rate": 7.821947368421053e-05,
"loss": 0.3485,
"step": 53
},
{
"epoch": 0.0783744557329463,
"grad_norm": 0.1807371824979782,
"learning_rate": 7.768736842105263e-05,
"loss": 0.6332,
"step": 54
},
{
"epoch": 0.07982583454281568,
"grad_norm": 0.1610403060913086,
"learning_rate": 7.715526315789474e-05,
"loss": 0.4334,
"step": 55
},
{
"epoch": 0.08127721335268505,
"grad_norm": 0.16740575432777405,
"learning_rate": 7.662315789473685e-05,
"loss": 0.5169,
"step": 56
},
{
"epoch": 0.08272859216255443,
"grad_norm": 0.13583853840827942,
"learning_rate": 7.609105263157895e-05,
"loss": 0.3515,
"step": 57
},
{
"epoch": 0.0841799709724238,
"grad_norm": 0.20852766931056976,
"learning_rate": 7.555894736842105e-05,
"loss": 0.8417,
"step": 58
},
{
"epoch": 0.08563134978229318,
"grad_norm": 0.12840092182159424,
"learning_rate": 7.502684210526316e-05,
"loss": 0.3527,
"step": 59
},
{
"epoch": 0.08708272859216255,
"grad_norm": 0.20794260501861572,
"learning_rate": 7.449473684210526e-05,
"loss": 0.8209,
"step": 60
},
{
"epoch": 0.08853410740203194,
"grad_norm": 0.15332041680812836,
"learning_rate": 7.396263157894738e-05,
"loss": 0.5123,
"step": 61
},
{
"epoch": 0.0899854862119013,
"grad_norm": 0.12152067571878433,
"learning_rate": 7.343052631578949e-05,
"loss": 0.5442,
"step": 62
},
{
"epoch": 0.09143686502177069,
"grad_norm": 0.15734325349330902,
"learning_rate": 7.289842105263158e-05,
"loss": 0.4926,
"step": 63
},
{
"epoch": 0.09288824383164006,
"grad_norm": 0.10759459435939789,
"learning_rate": 7.236631578947369e-05,
"loss": 0.2285,
"step": 64
},
{
"epoch": 0.09433962264150944,
"grad_norm": 0.11365488916635513,
"learning_rate": 7.183421052631579e-05,
"loss": 0.3032,
"step": 65
},
{
"epoch": 0.09579100145137881,
"grad_norm": 0.13057614862918854,
"learning_rate": 7.13021052631579e-05,
"loss": 0.4789,
"step": 66
},
{
"epoch": 0.09724238026124818,
"grad_norm": 0.1509314924478531,
"learning_rate": 7.077e-05,
"loss": 0.5571,
"step": 67
},
{
"epoch": 0.09869375907111756,
"grad_norm": 0.16016532480716705,
"learning_rate": 7.023789473684211e-05,
"loss": 0.2708,
"step": 68
},
{
"epoch": 0.10014513788098693,
"grad_norm": 0.2185145616531372,
"learning_rate": 6.970578947368422e-05,
"loss": 0.4046,
"step": 69
},
{
"epoch": 0.10159651669085631,
"grad_norm": 0.14823275804519653,
"learning_rate": 6.917368421052633e-05,
"loss": 0.5717,
"step": 70
},
{
"epoch": 0.10304789550072568,
"grad_norm": 0.16261740028858185,
"learning_rate": 6.864157894736842e-05,
"loss": 0.3313,
"step": 71
},
{
"epoch": 0.10449927431059507,
"grad_norm": 0.1639692485332489,
"learning_rate": 6.810947368421053e-05,
"loss": 0.7474,
"step": 72
},
{
"epoch": 0.10595065312046444,
"grad_norm": 0.11137572675943375,
"learning_rate": 6.757736842105264e-05,
"loss": 0.3237,
"step": 73
},
{
"epoch": 0.10740203193033382,
"grad_norm": 0.09403427690267563,
"learning_rate": 6.704526315789473e-05,
"loss": 0.213,
"step": 74
},
{
"epoch": 0.10885341074020319,
"grad_norm": 0.1198626309633255,
"learning_rate": 6.651315789473685e-05,
"loss": 0.3147,
"step": 75
},
{
"epoch": 0.11030478955007257,
"grad_norm": 0.18822187185287476,
"learning_rate": 6.598105263157895e-05,
"loss": 0.2479,
"step": 76
},
{
"epoch": 0.11175616835994194,
"grad_norm": 0.08338137716054916,
"learning_rate": 6.544894736842106e-05,
"loss": 0.1284,
"step": 77
},
{
"epoch": 0.11320754716981132,
"grad_norm": 0.11395663768053055,
"learning_rate": 6.491684210526317e-05,
"loss": 0.304,
"step": 78
},
{
"epoch": 0.11465892597968069,
"grad_norm": 0.12172822654247284,
"learning_rate": 6.438473684210526e-05,
"loss": 0.5225,
"step": 79
},
{
"epoch": 0.11611030478955008,
"grad_norm": 0.18436570465564728,
"learning_rate": 6.385263157894737e-05,
"loss": 1.1369,
"step": 80
},
{
"epoch": 0.11756168359941944,
"grad_norm": 0.08676780015230179,
"learning_rate": 6.332052631578948e-05,
"loss": 0.2241,
"step": 81
},
{
"epoch": 0.11901306240928883,
"grad_norm": 0.14665937423706055,
"learning_rate": 6.278842105263159e-05,
"loss": 0.4818,
"step": 82
},
{
"epoch": 0.1204644412191582,
"grad_norm": 0.12407947331666946,
"learning_rate": 6.22563157894737e-05,
"loss": 0.4061,
"step": 83
},
{
"epoch": 0.12191582002902758,
"grad_norm": 0.12156087160110474,
"learning_rate": 6.172421052631579e-05,
"loss": 0.4842,
"step": 84
},
{
"epoch": 0.12336719883889695,
"grad_norm": 0.164754256606102,
"learning_rate": 6.11921052631579e-05,
"loss": 0.6039,
"step": 85
},
{
"epoch": 0.12481857764876633,
"grad_norm": 0.13035471737384796,
"learning_rate": 6.066e-05,
"loss": 0.5963,
"step": 86
},
{
"epoch": 0.1262699564586357,
"grad_norm": 0.08667127043008804,
"learning_rate": 6.012789473684211e-05,
"loss": 0.104,
"step": 87
},
{
"epoch": 0.12772133526850507,
"grad_norm": 0.12500996887683868,
"learning_rate": 5.959578947368421e-05,
"loss": 0.1519,
"step": 88
},
{
"epoch": 0.12917271407837447,
"grad_norm": 0.1052846610546112,
"learning_rate": 5.9063684210526324e-05,
"loss": 0.168,
"step": 89
},
{
"epoch": 0.13062409288824384,
"grad_norm": 0.16326619684696198,
"learning_rate": 5.8531578947368425e-05,
"loss": 1.0778,
"step": 90
},
{
"epoch": 0.1320754716981132,
"grad_norm": 0.18031665682792664,
"learning_rate": 5.7999473684210527e-05,
"loss": 0.8578,
"step": 91
},
{
"epoch": 0.13352685050798258,
"grad_norm": 0.14327001571655273,
"learning_rate": 5.7467368421052635e-05,
"loss": 0.8367,
"step": 92
},
{
"epoch": 0.13497822931785197,
"grad_norm": 0.12158166617155075,
"learning_rate": 5.6935263157894736e-05,
"loss": 0.2978,
"step": 93
},
{
"epoch": 0.13642960812772134,
"grad_norm": 0.17898587882518768,
"learning_rate": 5.640315789473684e-05,
"loss": 0.7248,
"step": 94
},
{
"epoch": 0.1378809869375907,
"grad_norm": 0.1620013415813446,
"learning_rate": 5.587105263157895e-05,
"loss": 0.5909,
"step": 95
},
{
"epoch": 0.13933236574746008,
"grad_norm": 0.24596630036830902,
"learning_rate": 5.533894736842106e-05,
"loss": 1.3,
"step": 96
},
{
"epoch": 0.14078374455732948,
"grad_norm": 0.28974536061286926,
"learning_rate": 5.480684210526316e-05,
"loss": 1.0699,
"step": 97
},
{
"epoch": 0.14223512336719885,
"grad_norm": 0.2533659338951111,
"learning_rate": 5.4274736842105264e-05,
"loss": 0.6266,
"step": 98
},
{
"epoch": 0.14368650217706821,
"grad_norm": 0.36984333395957947,
"learning_rate": 5.374263157894737e-05,
"loss": 1.3249,
"step": 99
},
{
"epoch": 0.14513788098693758,
"grad_norm": 0.4221994876861572,
"learning_rate": 5.3210526315789474e-05,
"loss": 0.9726,
"step": 100
},
{
"epoch": 0.14513788098693758,
"eval_loss": 0.2286633849143982,
"eval_runtime": 145.162,
"eval_samples_per_second": 2.005,
"eval_steps_per_second": 0.503,
"step": 100
},
{
"epoch": 0.14658925979680695,
"grad_norm": 0.13860496878623962,
"learning_rate": 5.2678421052631576e-05,
"loss": 0.7931,
"step": 101
},
{
"epoch": 0.14804063860667635,
"grad_norm": 0.1530522257089615,
"learning_rate": 5.214631578947369e-05,
"loss": 0.4883,
"step": 102
},
{
"epoch": 0.14949201741654572,
"grad_norm": 0.09421262890100479,
"learning_rate": 5.161421052631579e-05,
"loss": 0.1757,
"step": 103
},
{
"epoch": 0.1509433962264151,
"grad_norm": 0.11253266036510468,
"learning_rate": 5.10821052631579e-05,
"loss": 0.4355,
"step": 104
},
{
"epoch": 0.15239477503628446,
"grad_norm": 0.11226090788841248,
"learning_rate": 5.055e-05,
"loss": 0.3406,
"step": 105
},
{
"epoch": 0.15384615384615385,
"grad_norm": 0.09969495981931686,
"learning_rate": 5.0017894736842104e-05,
"loss": 0.33,
"step": 106
},
{
"epoch": 0.15529753265602322,
"grad_norm": 0.14994554221630096,
"learning_rate": 4.948578947368421e-05,
"loss": 0.7712,
"step": 107
},
{
"epoch": 0.1567489114658926,
"grad_norm": 0.16747573018074036,
"learning_rate": 4.895368421052632e-05,
"loss": 0.4805,
"step": 108
},
{
"epoch": 0.15820029027576196,
"grad_norm": 0.15101346373558044,
"learning_rate": 4.842157894736842e-05,
"loss": 0.6745,
"step": 109
},
{
"epoch": 0.15965166908563136,
"grad_norm": 0.14325077831745148,
"learning_rate": 4.7889473684210523e-05,
"loss": 0.2481,
"step": 110
},
{
"epoch": 0.16110304789550073,
"grad_norm": 0.6589614152908325,
"learning_rate": 4.735736842105263e-05,
"loss": 0.385,
"step": 111
},
{
"epoch": 0.1625544267053701,
"grad_norm": 0.14817237854003906,
"learning_rate": 4.682526315789474e-05,
"loss": 0.4519,
"step": 112
},
{
"epoch": 0.16400580551523947,
"grad_norm": 0.12075196951627731,
"learning_rate": 4.629315789473684e-05,
"loss": 0.32,
"step": 113
},
{
"epoch": 0.16545718432510886,
"grad_norm": 0.10896284133195877,
"learning_rate": 4.576105263157895e-05,
"loss": 0.252,
"step": 114
},
{
"epoch": 0.16690856313497823,
"grad_norm": 0.15797024965286255,
"learning_rate": 4.522894736842106e-05,
"loss": 0.7193,
"step": 115
},
{
"epoch": 0.1683599419448476,
"grad_norm": 0.12811946868896484,
"learning_rate": 4.469684210526316e-05,
"loss": 0.5991,
"step": 116
},
{
"epoch": 0.16981132075471697,
"grad_norm": 0.17125706374645233,
"learning_rate": 4.416473684210527e-05,
"loss": 0.6612,
"step": 117
},
{
"epoch": 0.17126269956458637,
"grad_norm": 0.21033529937267303,
"learning_rate": 4.363263157894737e-05,
"loss": 0.637,
"step": 118
},
{
"epoch": 0.17271407837445574,
"grad_norm": 0.14343442022800446,
"learning_rate": 4.310052631578948e-05,
"loss": 0.3919,
"step": 119
},
{
"epoch": 0.1741654571843251,
"grad_norm": 0.09223167598247528,
"learning_rate": 4.256842105263158e-05,
"loss": 0.2206,
"step": 120
},
{
"epoch": 0.17561683599419448,
"grad_norm": 0.10201866924762726,
"learning_rate": 4.203631578947369e-05,
"loss": 0.2657,
"step": 121
},
{
"epoch": 0.17706821480406387,
"grad_norm": 0.09830533713102341,
"learning_rate": 4.150421052631579e-05,
"loss": 0.1767,
"step": 122
},
{
"epoch": 0.17851959361393324,
"grad_norm": 0.11305135488510132,
"learning_rate": 4.09721052631579e-05,
"loss": 0.3528,
"step": 123
},
{
"epoch": 0.1799709724238026,
"grad_norm": 0.11795450747013092,
"learning_rate": 4.0440000000000006e-05,
"loss": 0.2826,
"step": 124
},
{
"epoch": 0.18142235123367198,
"grad_norm": 0.10810278356075287,
"learning_rate": 3.990789473684211e-05,
"loss": 0.3728,
"step": 125
},
{
"epoch": 0.18287373004354138,
"grad_norm": 0.07889194041490555,
"learning_rate": 3.937578947368421e-05,
"loss": 0.1485,
"step": 126
},
{
"epoch": 0.18432510885341075,
"grad_norm": 0.12167462706565857,
"learning_rate": 3.884368421052632e-05,
"loss": 0.3191,
"step": 127
},
{
"epoch": 0.18577648766328012,
"grad_norm": 0.13652844727039337,
"learning_rate": 3.8311578947368426e-05,
"loss": 0.3272,
"step": 128
},
{
"epoch": 0.18722786647314948,
"grad_norm": 0.11355964839458466,
"learning_rate": 3.777947368421053e-05,
"loss": 0.2216,
"step": 129
},
{
"epoch": 0.18867924528301888,
"grad_norm": 0.12072530388832092,
"learning_rate": 3.724736842105263e-05,
"loss": 0.2511,
"step": 130
},
{
"epoch": 0.19013062409288825,
"grad_norm": 0.11075890809297562,
"learning_rate": 3.6715263157894744e-05,
"loss": 0.1769,
"step": 131
},
{
"epoch": 0.19158200290275762,
"grad_norm": 0.09322332590818405,
"learning_rate": 3.6183157894736845e-05,
"loss": 0.2052,
"step": 132
},
{
"epoch": 0.193033381712627,
"grad_norm": 0.12875616550445557,
"learning_rate": 3.565105263157895e-05,
"loss": 0.1961,
"step": 133
},
{
"epoch": 0.19448476052249636,
"grad_norm": 0.11314037442207336,
"learning_rate": 3.5118947368421055e-05,
"loss": 0.2227,
"step": 134
},
{
"epoch": 0.19593613933236576,
"grad_norm": 0.10332886129617691,
"learning_rate": 3.458684210526316e-05,
"loss": 0.339,
"step": 135
},
{
"epoch": 0.19738751814223512,
"grad_norm": 0.17729254066944122,
"learning_rate": 3.4054736842105265e-05,
"loss": 0.2802,
"step": 136
},
{
"epoch": 0.1988388969521045,
"grad_norm": 0.08777690678834915,
"learning_rate": 3.3522631578947366e-05,
"loss": 0.1391,
"step": 137
},
{
"epoch": 0.20029027576197386,
"grad_norm": 0.1536087989807129,
"learning_rate": 3.2990526315789475e-05,
"loss": 0.3643,
"step": 138
},
{
"epoch": 0.20174165457184326,
"grad_norm": 0.11051812022924423,
"learning_rate": 3.245842105263158e-05,
"loss": 0.1991,
"step": 139
},
{
"epoch": 0.20319303338171263,
"grad_norm": 0.09295262396335602,
"learning_rate": 3.1926315789473685e-05,
"loss": 0.2913,
"step": 140
},
{
"epoch": 0.204644412191582,
"grad_norm": 0.13505689799785614,
"learning_rate": 3.139421052631579e-05,
"loss": 0.8371,
"step": 141
},
{
"epoch": 0.20609579100145137,
"grad_norm": 0.1267169862985611,
"learning_rate": 3.0862105263157894e-05,
"loss": 0.6024,
"step": 142
},
{
"epoch": 0.20754716981132076,
"grad_norm": 0.13407814502716064,
"learning_rate": 3.033e-05,
"loss": 0.4159,
"step": 143
},
{
"epoch": 0.20899854862119013,
"grad_norm": 0.11469519138336182,
"learning_rate": 2.9797894736842104e-05,
"loss": 0.2402,
"step": 144
},
{
"epoch": 0.2104499274310595,
"grad_norm": 0.169059157371521,
"learning_rate": 2.9265789473684213e-05,
"loss": 0.3534,
"step": 145
},
{
"epoch": 0.21190130624092887,
"grad_norm": 0.19773763418197632,
"learning_rate": 2.8733684210526317e-05,
"loss": 0.5001,
"step": 146
},
{
"epoch": 0.21335268505079827,
"grad_norm": 0.4277418553829193,
"learning_rate": 2.820157894736842e-05,
"loss": 0.7581,
"step": 147
},
{
"epoch": 0.21480406386066764,
"grad_norm": 0.2791711688041687,
"learning_rate": 2.766947368421053e-05,
"loss": 0.8721,
"step": 148
},
{
"epoch": 0.216255442670537,
"grad_norm": 0.40901464223861694,
"learning_rate": 2.7137368421052632e-05,
"loss": 0.8766,
"step": 149
},
{
"epoch": 0.21770682148040638,
"grad_norm": 0.39813101291656494,
"learning_rate": 2.6605263157894737e-05,
"loss": 0.5074,
"step": 150
},
{
"epoch": 0.21770682148040638,
"eval_loss": 0.23037858307361603,
"eval_runtime": 143.626,
"eval_samples_per_second": 2.026,
"eval_steps_per_second": 0.508,
"step": 150
},
{
"epoch": 0.21915820029027577,
"grad_norm": 0.16082116961479187,
"learning_rate": 2.6073157894736845e-05,
"loss": 1.008,
"step": 151
},
{
"epoch": 0.22060957910014514,
"grad_norm": 0.17968745529651642,
"learning_rate": 2.554105263157895e-05,
"loss": 0.8331,
"step": 152
},
{
"epoch": 0.2220609579100145,
"grad_norm": 0.17362633347511292,
"learning_rate": 2.5008947368421052e-05,
"loss": 0.5327,
"step": 153
},
{
"epoch": 0.22351233671988388,
"grad_norm": 0.1643386036157608,
"learning_rate": 2.447684210526316e-05,
"loss": 1.0695,
"step": 154
},
{
"epoch": 0.22496371552975328,
"grad_norm": 0.12344465404748917,
"learning_rate": 2.3944736842105262e-05,
"loss": 0.5125,
"step": 155
},
{
"epoch": 0.22641509433962265,
"grad_norm": 0.2044529914855957,
"learning_rate": 2.341263157894737e-05,
"loss": 1.0597,
"step": 156
},
{
"epoch": 0.22786647314949202,
"grad_norm": 0.1536094695329666,
"learning_rate": 2.2880526315789475e-05,
"loss": 0.2413,
"step": 157
},
{
"epoch": 0.22931785195936139,
"grad_norm": 0.16920588910579681,
"learning_rate": 2.234842105263158e-05,
"loss": 0.7497,
"step": 158
},
{
"epoch": 0.23076923076923078,
"grad_norm": 0.12118630111217499,
"learning_rate": 2.1816315789473685e-05,
"loss": 0.3208,
"step": 159
},
{
"epoch": 0.23222060957910015,
"grad_norm": 0.14840605854988098,
"learning_rate": 2.128421052631579e-05,
"loss": 0.387,
"step": 160
},
{
"epoch": 0.23367198838896952,
"grad_norm": 0.10502210259437561,
"learning_rate": 2.0752105263157895e-05,
"loss": 0.4105,
"step": 161
},
{
"epoch": 0.2351233671988389,
"grad_norm": 0.23360522091388702,
"learning_rate": 2.0220000000000003e-05,
"loss": 0.4883,
"step": 162
},
{
"epoch": 0.2365747460087083,
"grad_norm": 0.1557024121284485,
"learning_rate": 1.9687894736842104e-05,
"loss": 0.3857,
"step": 163
},
{
"epoch": 0.23802612481857766,
"grad_norm": 0.15826496481895447,
"learning_rate": 1.9155789473684213e-05,
"loss": 0.8257,
"step": 164
},
{
"epoch": 0.23947750362844702,
"grad_norm": 0.12281662970781326,
"learning_rate": 1.8623684210526314e-05,
"loss": 0.398,
"step": 165
},
{
"epoch": 0.2409288824383164,
"grad_norm": 0.17840005457401276,
"learning_rate": 1.8091578947368423e-05,
"loss": 1.0399,
"step": 166
},
{
"epoch": 0.24238026124818576,
"grad_norm": 0.2307438999414444,
"learning_rate": 1.7559473684210528e-05,
"loss": 0.6128,
"step": 167
},
{
"epoch": 0.24383164005805516,
"grad_norm": 0.12274490296840668,
"learning_rate": 1.7027368421052632e-05,
"loss": 0.3564,
"step": 168
},
{
"epoch": 0.24528301886792453,
"grad_norm": 0.12504906952381134,
"learning_rate": 1.6495263157894737e-05,
"loss": 0.5111,
"step": 169
},
{
"epoch": 0.2467343976777939,
"grad_norm": 0.10843324661254883,
"learning_rate": 1.5963157894736842e-05,
"loss": 0.3324,
"step": 170
},
{
"epoch": 0.24818577648766327,
"grad_norm": 0.12919478118419647,
"learning_rate": 1.5431052631578947e-05,
"loss": 0.3152,
"step": 171
},
{
"epoch": 0.24963715529753266,
"grad_norm": 0.19524158537387848,
"learning_rate": 1.4898947368421052e-05,
"loss": 0.4494,
"step": 172
},
{
"epoch": 0.251088534107402,
"grad_norm": 0.13108614087104797,
"learning_rate": 1.4366842105263159e-05,
"loss": 0.2326,
"step": 173
},
{
"epoch": 0.2525399129172714,
"grad_norm": 0.09462467581033707,
"learning_rate": 1.3834736842105265e-05,
"loss": 0.3285,
"step": 174
},
{
"epoch": 0.2539912917271408,
"grad_norm": 0.11037827283143997,
"learning_rate": 1.3302631578947369e-05,
"loss": 0.3239,
"step": 175
},
{
"epoch": 0.25544267053701014,
"grad_norm": 0.13191723823547363,
"learning_rate": 1.2770526315789475e-05,
"loss": 0.9917,
"step": 176
},
{
"epoch": 0.25689404934687954,
"grad_norm": 0.11104253679513931,
"learning_rate": 1.223842105263158e-05,
"loss": 0.3113,
"step": 177
},
{
"epoch": 0.25834542815674894,
"grad_norm": 0.16667191684246063,
"learning_rate": 1.1706315789473685e-05,
"loss": 0.5103,
"step": 178
},
{
"epoch": 0.2597968069666183,
"grad_norm": 0.13359755277633667,
"learning_rate": 1.117421052631579e-05,
"loss": 0.5022,
"step": 179
},
{
"epoch": 0.2612481857764877,
"grad_norm": 0.11774896085262299,
"learning_rate": 1.0642105263157895e-05,
"loss": 0.2854,
"step": 180
},
{
"epoch": 0.262699564586357,
"grad_norm": 0.14967772364616394,
"learning_rate": 1.0110000000000001e-05,
"loss": 0.3724,
"step": 181
},
{
"epoch": 0.2641509433962264,
"grad_norm": 0.17375238239765167,
"learning_rate": 9.577894736842106e-06,
"loss": 0.5366,
"step": 182
},
{
"epoch": 0.2656023222060958,
"grad_norm": 0.12037857621908188,
"learning_rate": 9.045789473684211e-06,
"loss": 0.2978,
"step": 183
},
{
"epoch": 0.26705370101596515,
"grad_norm": 0.09681455045938492,
"learning_rate": 8.513684210526316e-06,
"loss": 0.2166,
"step": 184
},
{
"epoch": 0.26850507982583455,
"grad_norm": 0.09586142748594284,
"learning_rate": 7.981578947368421e-06,
"loss": 0.655,
"step": 185
},
{
"epoch": 0.26995645863570394,
"grad_norm": 0.09345651417970657,
"learning_rate": 7.449473684210526e-06,
"loss": 0.2769,
"step": 186
},
{
"epoch": 0.2714078374455733,
"grad_norm": 0.09641759097576141,
"learning_rate": 6.917368421052633e-06,
"loss": 0.2374,
"step": 187
},
{
"epoch": 0.2728592162554427,
"grad_norm": 0.12996430695056915,
"learning_rate": 6.385263157894738e-06,
"loss": 0.2477,
"step": 188
},
{
"epoch": 0.274310595065312,
"grad_norm": 0.14311741292476654,
"learning_rate": 5.8531578947368425e-06,
"loss": 0.2815,
"step": 189
},
{
"epoch": 0.2757619738751814,
"grad_norm": 0.12616676092147827,
"learning_rate": 5.321052631578947e-06,
"loss": 0.2024,
"step": 190
},
{
"epoch": 0.2772133526850508,
"grad_norm": 0.10585512220859528,
"learning_rate": 4.788947368421053e-06,
"loss": 0.2822,
"step": 191
},
{
"epoch": 0.27866473149492016,
"grad_norm": 0.1168060377240181,
"learning_rate": 4.256842105263158e-06,
"loss": 0.1666,
"step": 192
},
{
"epoch": 0.28011611030478956,
"grad_norm": 0.12357722222805023,
"learning_rate": 3.724736842105263e-06,
"loss": 0.4234,
"step": 193
},
{
"epoch": 0.28156748911465895,
"grad_norm": 0.1898827701807022,
"learning_rate": 3.192631578947369e-06,
"loss": 0.426,
"step": 194
},
{
"epoch": 0.2830188679245283,
"grad_norm": 0.2734042704105377,
"learning_rate": 2.6605263157894737e-06,
"loss": 0.8311,
"step": 195
},
{
"epoch": 0.2844702467343977,
"grad_norm": 0.23327644169330597,
"learning_rate": 2.128421052631579e-06,
"loss": 0.8963,
"step": 196
},
{
"epoch": 0.28592162554426703,
"grad_norm": 0.23755201697349548,
"learning_rate": 1.5963157894736844e-06,
"loss": 0.6194,
"step": 197
},
{
"epoch": 0.28737300435413643,
"grad_norm": 0.3003562390804291,
"learning_rate": 1.0642105263157895e-06,
"loss": 0.6794,
"step": 198
},
{
"epoch": 0.2888243831640058,
"grad_norm": 0.45043861865997314,
"learning_rate": 5.321052631578948e-07,
"loss": 0.6781,
"step": 199
},
{
"epoch": 0.29027576197387517,
"grad_norm": 0.43113571405410767,
"learning_rate": 0.0,
"loss": 1.0902,
"step": 200
},
{
"epoch": 0.29027576197387517,
"eval_loss": 0.22681911289691925,
"eval_runtime": 144.9273,
"eval_samples_per_second": 2.008,
"eval_steps_per_second": 0.504,
"step": 200
}
],
"logging_steps": 1,
"max_steps": 200,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 50,
"stateful_callbacks": {
"EarlyStoppingCallback": {
"args": {
"early_stopping_patience": 5,
"early_stopping_threshold": 0.0
},
"attributes": {
"early_stopping_patience_counter": 0
}
},
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 2.4053840999350272e+17,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}