{
"best_metric": 0.0005457898951135576,
"best_model_checkpoint": "miner_id_24/checkpoint-200",
"epoch": 0.4319654427645788,
"eval_steps": 50,
"global_step": 200,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0021598272138228943,
"grad_norm": 1.0908081531524658,
"learning_rate": 1.0060000000000002e-05,
"loss": 0.0443,
"step": 1
},
{
"epoch": 0.0021598272138228943,
"eval_loss": 0.14869527518749237,
"eval_runtime": 38.1641,
"eval_samples_per_second": 5.11,
"eval_steps_per_second": 1.284,
"step": 1
},
{
"epoch": 0.004319654427645789,
"grad_norm": 0.6184279322624207,
"learning_rate": 2.0120000000000004e-05,
"loss": 0.0345,
"step": 2
},
{
"epoch": 0.0064794816414686825,
"grad_norm": 0.3461187183856964,
"learning_rate": 3.018e-05,
"loss": 0.0337,
"step": 3
},
{
"epoch": 0.008639308855291577,
"grad_norm": 0.22748076915740967,
"learning_rate": 4.024000000000001e-05,
"loss": 0.0299,
"step": 4
},
{
"epoch": 0.01079913606911447,
"grad_norm": 0.11859659850597382,
"learning_rate": 5.03e-05,
"loss": 0.0259,
"step": 5
},
{
"epoch": 0.012958963282937365,
"grad_norm": 0.1670089215040207,
"learning_rate": 6.036e-05,
"loss": 0.0249,
"step": 6
},
{
"epoch": 0.01511879049676026,
"grad_norm": 0.1212053894996643,
"learning_rate": 7.042e-05,
"loss": 0.0189,
"step": 7
},
{
"epoch": 0.017278617710583154,
"grad_norm": 0.08793988823890686,
"learning_rate": 8.048000000000002e-05,
"loss": 0.0179,
"step": 8
},
{
"epoch": 0.019438444924406047,
"grad_norm": 0.2328803539276123,
"learning_rate": 9.054000000000001e-05,
"loss": 0.027,
"step": 9
},
{
"epoch": 0.02159827213822894,
"grad_norm": 0.17274922132492065,
"learning_rate": 0.0001006,
"loss": 0.0257,
"step": 10
},
{
"epoch": 0.023758099352051837,
"grad_norm": 0.28910505771636963,
"learning_rate": 0.00010007052631578948,
"loss": 0.0171,
"step": 11
},
{
"epoch": 0.02591792656587473,
"grad_norm": 0.13060903549194336,
"learning_rate": 9.954105263157895e-05,
"loss": 0.0085,
"step": 12
},
{
"epoch": 0.028077753779697623,
"grad_norm": 0.25134187936782837,
"learning_rate": 9.901157894736842e-05,
"loss": 0.0121,
"step": 13
},
{
"epoch": 0.03023758099352052,
"grad_norm": 0.1418023407459259,
"learning_rate": 9.84821052631579e-05,
"loss": 0.0035,
"step": 14
},
{
"epoch": 0.032397408207343416,
"grad_norm": 0.2569674253463745,
"learning_rate": 9.795263157894737e-05,
"loss": 0.0033,
"step": 15
},
{
"epoch": 0.03455723542116631,
"grad_norm": 0.46587738394737244,
"learning_rate": 9.742315789473686e-05,
"loss": 0.008,
"step": 16
},
{
"epoch": 0.0367170626349892,
"grad_norm": 0.10296951234340668,
"learning_rate": 9.689368421052633e-05,
"loss": 0.0015,
"step": 17
},
{
"epoch": 0.038876889848812095,
"grad_norm": 0.052655696868896484,
"learning_rate": 9.63642105263158e-05,
"loss": 0.0011,
"step": 18
},
{
"epoch": 0.04103671706263499,
"grad_norm": 0.1694544404745102,
"learning_rate": 9.583473684210527e-05,
"loss": 0.0028,
"step": 19
},
{
"epoch": 0.04319654427645788,
"grad_norm": 0.028250250965356827,
"learning_rate": 9.530526315789474e-05,
"loss": 0.0005,
"step": 20
},
{
"epoch": 0.04535637149028078,
"grad_norm": 0.30077144503593445,
"learning_rate": 9.477578947368422e-05,
"loss": 0.0043,
"step": 21
},
{
"epoch": 0.047516198704103674,
"grad_norm": 0.3806341886520386,
"learning_rate": 9.424631578947369e-05,
"loss": 0.0021,
"step": 22
},
{
"epoch": 0.04967602591792657,
"grad_norm": 0.3099500238895416,
"learning_rate": 9.371684210526316e-05,
"loss": 0.0041,
"step": 23
},
{
"epoch": 0.05183585313174946,
"grad_norm": 0.3820614218711853,
"learning_rate": 9.318736842105263e-05,
"loss": 0.0065,
"step": 24
},
{
"epoch": 0.05399568034557235,
"grad_norm": 0.3208763003349304,
"learning_rate": 9.26578947368421e-05,
"loss": 0.0044,
"step": 25
},
{
"epoch": 0.056155507559395246,
"grad_norm": 0.20953547954559326,
"learning_rate": 9.212842105263159e-05,
"loss": 0.0018,
"step": 26
},
{
"epoch": 0.058315334773218146,
"grad_norm": 0.11007744818925858,
"learning_rate": 9.159894736842107e-05,
"loss": 0.0016,
"step": 27
},
{
"epoch": 0.06047516198704104,
"grad_norm": 0.005611012689769268,
"learning_rate": 9.106947368421054e-05,
"loss": 0.0004,
"step": 28
},
{
"epoch": 0.06263498920086392,
"grad_norm": 0.3053535223007202,
"learning_rate": 9.054000000000001e-05,
"loss": 0.0068,
"step": 29
},
{
"epoch": 0.06479481641468683,
"grad_norm": 0.009484589099884033,
"learning_rate": 9.001052631578948e-05,
"loss": 0.0002,
"step": 30
},
{
"epoch": 0.06695464362850972,
"grad_norm": 0.025772912427783012,
"learning_rate": 8.948105263157895e-05,
"loss": 0.0008,
"step": 31
},
{
"epoch": 0.06911447084233262,
"grad_norm": 0.17834728956222534,
"learning_rate": 8.895157894736842e-05,
"loss": 0.0017,
"step": 32
},
{
"epoch": 0.07127429805615551,
"grad_norm": 0.4382934868335724,
"learning_rate": 8.842210526315789e-05,
"loss": 0.0108,
"step": 33
},
{
"epoch": 0.0734341252699784,
"grad_norm": 0.029989449307322502,
"learning_rate": 8.789263157894738e-05,
"loss": 0.0003,
"step": 34
},
{
"epoch": 0.0755939524838013,
"grad_norm": 0.296584814786911,
"learning_rate": 8.736315789473685e-05,
"loss": 0.0062,
"step": 35
},
{
"epoch": 0.07775377969762419,
"grad_norm": 0.004909132141619921,
"learning_rate": 8.683368421052632e-05,
"loss": 0.0003,
"step": 36
},
{
"epoch": 0.07991360691144708,
"grad_norm": 0.03281238302588463,
"learning_rate": 8.63042105263158e-05,
"loss": 0.0008,
"step": 37
},
{
"epoch": 0.08207343412526998,
"grad_norm": 0.2719196379184723,
"learning_rate": 8.577473684210527e-05,
"loss": 0.0061,
"step": 38
},
{
"epoch": 0.08423326133909287,
"grad_norm": 0.05495695397257805,
"learning_rate": 8.524526315789474e-05,
"loss": 0.0007,
"step": 39
},
{
"epoch": 0.08639308855291576,
"grad_norm": 0.22600561380386353,
"learning_rate": 8.471578947368421e-05,
"loss": 0.0053,
"step": 40
},
{
"epoch": 0.08855291576673865,
"grad_norm": 0.08108193427324295,
"learning_rate": 8.41863157894737e-05,
"loss": 0.0013,
"step": 41
},
{
"epoch": 0.09071274298056156,
"grad_norm": 0.15901321172714233,
"learning_rate": 8.365684210526317e-05,
"loss": 0.0034,
"step": 42
},
{
"epoch": 0.09287257019438445,
"grad_norm": 0.22143931686878204,
"learning_rate": 8.312736842105264e-05,
"loss": 0.0014,
"step": 43
},
{
"epoch": 0.09503239740820735,
"grad_norm": 0.18730013072490692,
"learning_rate": 8.259789473684211e-05,
"loss": 0.0019,
"step": 44
},
{
"epoch": 0.09719222462203024,
"grad_norm": 0.19464093446731567,
"learning_rate": 8.206842105263158e-05,
"loss": 0.0011,
"step": 45
},
{
"epoch": 0.09935205183585313,
"grad_norm": 0.09799759089946747,
"learning_rate": 8.153894736842105e-05,
"loss": 0.001,
"step": 46
},
{
"epoch": 0.10151187904967603,
"grad_norm": 0.5721032023429871,
"learning_rate": 8.100947368421053e-05,
"loss": 0.0025,
"step": 47
},
{
"epoch": 0.10367170626349892,
"grad_norm": 0.11062470078468323,
"learning_rate": 8.048000000000002e-05,
"loss": 0.0007,
"step": 48
},
{
"epoch": 0.10583153347732181,
"grad_norm": 0.36123791337013245,
"learning_rate": 7.995052631578949e-05,
"loss": 0.0018,
"step": 49
},
{
"epoch": 0.1079913606911447,
"grad_norm": 0.17662936449050903,
"learning_rate": 7.942105263157896e-05,
"loss": 0.0011,
"step": 50
},
{
"epoch": 0.1079913606911447,
"eval_loss": 0.003911234904080629,
"eval_runtime": 38.5635,
"eval_samples_per_second": 5.057,
"eval_steps_per_second": 1.271,
"step": 50
},
{
"epoch": 0.1101511879049676,
"grad_norm": 0.09929417818784714,
"learning_rate": 7.889157894736843e-05,
"loss": 0.0122,
"step": 51
},
{
"epoch": 0.11231101511879049,
"grad_norm": 0.0626210868358612,
"learning_rate": 7.83621052631579e-05,
"loss": 0.0117,
"step": 52
},
{
"epoch": 0.11447084233261338,
"grad_norm": 0.08357356488704681,
"learning_rate": 7.783263157894737e-05,
"loss": 0.013,
"step": 53
},
{
"epoch": 0.11663066954643629,
"grad_norm": 0.35985463857650757,
"learning_rate": 7.730315789473684e-05,
"loss": 0.0162,
"step": 54
},
{
"epoch": 0.11879049676025918,
"grad_norm": 0.06007225811481476,
"learning_rate": 7.677368421052632e-05,
"loss": 0.0126,
"step": 55
},
{
"epoch": 0.12095032397408208,
"grad_norm": 0.07496761530637741,
"learning_rate": 7.624421052631579e-05,
"loss": 0.0099,
"step": 56
},
{
"epoch": 0.12311015118790497,
"grad_norm": 0.05158822238445282,
"learning_rate": 7.571473684210526e-05,
"loss": 0.0044,
"step": 57
},
{
"epoch": 0.12526997840172785,
"grad_norm": 0.04859482869505882,
"learning_rate": 7.518526315789475e-05,
"loss": 0.0053,
"step": 58
},
{
"epoch": 0.12742980561555076,
"grad_norm": 0.08607001602649689,
"learning_rate": 7.465578947368422e-05,
"loss": 0.0016,
"step": 59
},
{
"epoch": 0.12958963282937366,
"grad_norm": 0.19171972572803497,
"learning_rate": 7.412631578947369e-05,
"loss": 0.0017,
"step": 60
},
{
"epoch": 0.13174946004319654,
"grad_norm": 0.1458485871553421,
"learning_rate": 7.359684210526317e-05,
"loss": 0.0018,
"step": 61
},
{
"epoch": 0.13390928725701945,
"grad_norm": 0.274966835975647,
"learning_rate": 7.306736842105264e-05,
"loss": 0.0027,
"step": 62
},
{
"epoch": 0.13606911447084233,
"grad_norm": 0.051928453147411346,
"learning_rate": 7.253789473684211e-05,
"loss": 0.001,
"step": 63
},
{
"epoch": 0.13822894168466524,
"grad_norm": 0.2043527066707611,
"learning_rate": 7.200842105263158e-05,
"loss": 0.0018,
"step": 64
},
{
"epoch": 0.14038876889848811,
"grad_norm": 0.0076795765198767185,
"learning_rate": 7.147894736842105e-05,
"loss": 0.0002,
"step": 65
},
{
"epoch": 0.14254859611231102,
"grad_norm": 0.011842038482427597,
"learning_rate": 7.094947368421052e-05,
"loss": 0.0004,
"step": 66
},
{
"epoch": 0.1447084233261339,
"grad_norm": 0.0678977221250534,
"learning_rate": 7.042e-05,
"loss": 0.0009,
"step": 67
},
{
"epoch": 0.1468682505399568,
"grad_norm": 0.5634580254554749,
"learning_rate": 6.989052631578948e-05,
"loss": 0.0021,
"step": 68
},
{
"epoch": 0.1490280777537797,
"grad_norm": 0.23194696009159088,
"learning_rate": 6.936105263157896e-05,
"loss": 0.0021,
"step": 69
},
{
"epoch": 0.1511879049676026,
"grad_norm": 0.0037620756775140762,
"learning_rate": 6.883157894736843e-05,
"loss": 0.0002,
"step": 70
},
{
"epoch": 0.15334773218142547,
"grad_norm": 0.14211751520633698,
"learning_rate": 6.83021052631579e-05,
"loss": 0.0039,
"step": 71
},
{
"epoch": 0.15550755939524838,
"grad_norm": 0.1930842101573944,
"learning_rate": 6.777263157894737e-05,
"loss": 0.0005,
"step": 72
},
{
"epoch": 0.15766738660907129,
"grad_norm": 0.004353968892246485,
"learning_rate": 6.724315789473684e-05,
"loss": 0.0002,
"step": 73
},
{
"epoch": 0.15982721382289417,
"grad_norm": 0.20285466313362122,
"learning_rate": 6.671368421052631e-05,
"loss": 0.0013,
"step": 74
},
{
"epoch": 0.16198704103671707,
"grad_norm": 0.0263319481164217,
"learning_rate": 6.61842105263158e-05,
"loss": 0.0029,
"step": 75
},
{
"epoch": 0.16414686825053995,
"grad_norm": 0.013903641141951084,
"learning_rate": 6.565473684210527e-05,
"loss": 0.0002,
"step": 76
},
{
"epoch": 0.16630669546436286,
"grad_norm": 0.02208566665649414,
"learning_rate": 6.512526315789474e-05,
"loss": 0.0005,
"step": 77
},
{
"epoch": 0.16846652267818574,
"grad_norm": 0.430324912071228,
"learning_rate": 6.459578947368421e-05,
"loss": 0.0043,
"step": 78
},
{
"epoch": 0.17062634989200864,
"grad_norm": 0.48172298073768616,
"learning_rate": 6.406631578947369e-05,
"loss": 0.0052,
"step": 79
},
{
"epoch": 0.17278617710583152,
"grad_norm": 0.02294292487204075,
"learning_rate": 6.353684210526316e-05,
"loss": 0.0003,
"step": 80
},
{
"epoch": 0.17494600431965443,
"grad_norm": 0.008779804222285748,
"learning_rate": 6.300736842105263e-05,
"loss": 0.0003,
"step": 81
},
{
"epoch": 0.1771058315334773,
"grad_norm": 0.025301771238446236,
"learning_rate": 6.247789473684212e-05,
"loss": 0.0003,
"step": 82
},
{
"epoch": 0.17926565874730022,
"grad_norm": 0.010768801905214787,
"learning_rate": 6.194842105263159e-05,
"loss": 0.0002,
"step": 83
},
{
"epoch": 0.18142548596112312,
"grad_norm": 0.01551789976656437,
"learning_rate": 6.141894736842106e-05,
"loss": 0.0003,
"step": 84
},
{
"epoch": 0.183585313174946,
"grad_norm": 0.1713397204875946,
"learning_rate": 6.088947368421053e-05,
"loss": 0.0023,
"step": 85
},
{
"epoch": 0.1857451403887689,
"grad_norm": 0.11895774304866791,
"learning_rate": 6.036e-05,
"loss": 0.001,
"step": 86
},
{
"epoch": 0.1879049676025918,
"grad_norm": 0.011528117582201958,
"learning_rate": 5.9830526315789475e-05,
"loss": 0.0003,
"step": 87
},
{
"epoch": 0.1900647948164147,
"grad_norm": 0.0027349735610187054,
"learning_rate": 5.9301052631578946e-05,
"loss": 0.0002,
"step": 88
},
{
"epoch": 0.19222462203023757,
"grad_norm": 0.0025566022377461195,
"learning_rate": 5.877157894736843e-05,
"loss": 0.0001,
"step": 89
},
{
"epoch": 0.19438444924406048,
"grad_norm": 0.010126935318112373,
"learning_rate": 5.82421052631579e-05,
"loss": 0.0003,
"step": 90
},
{
"epoch": 0.19654427645788336,
"grad_norm": 0.0021927962079644203,
"learning_rate": 5.771263157894737e-05,
"loss": 0.0001,
"step": 91
},
{
"epoch": 0.19870410367170627,
"grad_norm": 0.007295831106603146,
"learning_rate": 5.718315789473685e-05,
"loss": 0.0002,
"step": 92
},
{
"epoch": 0.20086393088552915,
"grad_norm": 0.3323706388473511,
"learning_rate": 5.665368421052632e-05,
"loss": 0.0008,
"step": 93
},
{
"epoch": 0.20302375809935205,
"grad_norm": 0.007849380373954773,
"learning_rate": 5.612421052631579e-05,
"loss": 0.0001,
"step": 94
},
{
"epoch": 0.20518358531317496,
"grad_norm": 0.5382089614868164,
"learning_rate": 5.559473684210527e-05,
"loss": 0.0054,
"step": 95
},
{
"epoch": 0.20734341252699784,
"grad_norm": 0.004848291631788015,
"learning_rate": 5.506526315789474e-05,
"loss": 0.0003,
"step": 96
},
{
"epoch": 0.20950323974082075,
"grad_norm": 0.009182745590806007,
"learning_rate": 5.453578947368421e-05,
"loss": 0.0003,
"step": 97
},
{
"epoch": 0.21166306695464362,
"grad_norm": 0.02678913250565529,
"learning_rate": 5.400631578947369e-05,
"loss": 0.0003,
"step": 98
},
{
"epoch": 0.21382289416846653,
"grad_norm": 0.10986873507499695,
"learning_rate": 5.347684210526316e-05,
"loss": 0.0009,
"step": 99
},
{
"epoch": 0.2159827213822894,
"grad_norm": 0.6294146180152893,
"learning_rate": 5.294736842105263e-05,
"loss": 0.0097,
"step": 100
},
{
"epoch": 0.2159827213822894,
"eval_loss": 0.004535946063697338,
"eval_runtime": 38.2221,
"eval_samples_per_second": 5.102,
"eval_steps_per_second": 1.282,
"step": 100
},
{
"epoch": 0.21814254859611232,
"grad_norm": 0.23118843138217926,
"learning_rate": 5.24178947368421e-05,
"loss": 0.0068,
"step": 101
},
{
"epoch": 0.2203023758099352,
"grad_norm": 0.07103104144334793,
"learning_rate": 5.1888421052631585e-05,
"loss": 0.0039,
"step": 102
},
{
"epoch": 0.2224622030237581,
"grad_norm": 0.03884083777666092,
"learning_rate": 5.135894736842106e-05,
"loss": 0.0021,
"step": 103
},
{
"epoch": 0.22462203023758098,
"grad_norm": 0.01880479045212269,
"learning_rate": 5.082947368421053e-05,
"loss": 0.0009,
"step": 104
},
{
"epoch": 0.2267818574514039,
"grad_norm": 0.0531153567135334,
"learning_rate": 5.03e-05,
"loss": 0.001,
"step": 105
},
{
"epoch": 0.22894168466522677,
"grad_norm": 0.053985074162483215,
"learning_rate": 4.977052631578947e-05,
"loss": 0.0013,
"step": 106
},
{
"epoch": 0.23110151187904968,
"grad_norm": 0.037867240607738495,
"learning_rate": 4.924105263157895e-05,
"loss": 0.0023,
"step": 107
},
{
"epoch": 0.23326133909287258,
"grad_norm": 0.11338239908218384,
"learning_rate": 4.871157894736843e-05,
"loss": 0.0024,
"step": 108
},
{
"epoch": 0.23542116630669546,
"grad_norm": 0.014712098054587841,
"learning_rate": 4.81821052631579e-05,
"loss": 0.0009,
"step": 109
},
{
"epoch": 0.23758099352051837,
"grad_norm": 0.03086118772625923,
"learning_rate": 4.765263157894737e-05,
"loss": 0.001,
"step": 110
},
{
"epoch": 0.23974082073434125,
"grad_norm": 0.013892450369894505,
"learning_rate": 4.7123157894736845e-05,
"loss": 0.0005,
"step": 111
},
{
"epoch": 0.24190064794816415,
"grad_norm": 0.2487848550081253,
"learning_rate": 4.6593684210526316e-05,
"loss": 0.0082,
"step": 112
},
{
"epoch": 0.24406047516198703,
"grad_norm": 0.11459369212388992,
"learning_rate": 4.606421052631579e-05,
"loss": 0.0037,
"step": 113
},
{
"epoch": 0.24622030237580994,
"grad_norm": 0.07744896411895752,
"learning_rate": 4.553473684210527e-05,
"loss": 0.0023,
"step": 114
},
{
"epoch": 0.24838012958963282,
"grad_norm": 0.10101820528507233,
"learning_rate": 4.500526315789474e-05,
"loss": 0.0034,
"step": 115
},
{
"epoch": 0.2505399568034557,
"grad_norm": 0.007767634466290474,
"learning_rate": 4.447578947368421e-05,
"loss": 0.0003,
"step": 116
},
{
"epoch": 0.2526997840172786,
"grad_norm": 0.10766242444515228,
"learning_rate": 4.394631578947369e-05,
"loss": 0.0012,
"step": 117
},
{
"epoch": 0.2548596112311015,
"grad_norm": 0.008573450148105621,
"learning_rate": 4.341684210526316e-05,
"loss": 0.0003,
"step": 118
},
{
"epoch": 0.2570194384449244,
"grad_norm": 0.0011212700046598911,
"learning_rate": 4.2887368421052636e-05,
"loss": 0.0001,
"step": 119
},
{
"epoch": 0.2591792656587473,
"grad_norm": 0.005315995309501886,
"learning_rate": 4.2357894736842106e-05,
"loss": 0.0002,
"step": 120
},
{
"epoch": 0.2613390928725702,
"grad_norm": 0.0370321199297905,
"learning_rate": 4.182842105263158e-05,
"loss": 0.0002,
"step": 121
},
{
"epoch": 0.2634989200863931,
"grad_norm": 0.0062796249985694885,
"learning_rate": 4.1298947368421053e-05,
"loss": 0.0002,
"step": 122
},
{
"epoch": 0.265658747300216,
"grad_norm": 0.06505784392356873,
"learning_rate": 4.0769473684210524e-05,
"loss": 0.0004,
"step": 123
},
{
"epoch": 0.2678185745140389,
"grad_norm": 0.11662036925554276,
"learning_rate": 4.024000000000001e-05,
"loss": 0.0003,
"step": 124
},
{
"epoch": 0.26997840172786175,
"grad_norm": 0.01756948232650757,
"learning_rate": 3.971052631578948e-05,
"loss": 0.0003,
"step": 125
},
{
"epoch": 0.27213822894168466,
"grad_norm": 0.01176679227501154,
"learning_rate": 3.918105263157895e-05,
"loss": 0.0002,
"step": 126
},
{
"epoch": 0.27429805615550756,
"grad_norm": 0.16879205405712128,
"learning_rate": 3.865157894736842e-05,
"loss": 0.0051,
"step": 127
},
{
"epoch": 0.27645788336933047,
"grad_norm": 0.015613168478012085,
"learning_rate": 3.8122105263157896e-05,
"loss": 0.0003,
"step": 128
},
{
"epoch": 0.2786177105831533,
"grad_norm": 0.001986750401556492,
"learning_rate": 3.759263157894737e-05,
"loss": 0.0001,
"step": 129
},
{
"epoch": 0.28077753779697623,
"grad_norm": 0.008711952716112137,
"learning_rate": 3.7063157894736844e-05,
"loss": 0.0003,
"step": 130
},
{
"epoch": 0.28293736501079914,
"grad_norm": 0.0025189965963363647,
"learning_rate": 3.653368421052632e-05,
"loss": 0.0001,
"step": 131
},
{
"epoch": 0.28509719222462204,
"grad_norm": 0.0006644928944297135,
"learning_rate": 3.600421052631579e-05,
"loss": 0.0001,
"step": 132
},
{
"epoch": 0.28725701943844495,
"grad_norm": 0.0026721965987235308,
"learning_rate": 3.547473684210526e-05,
"loss": 0.0002,
"step": 133
},
{
"epoch": 0.2894168466522678,
"grad_norm": 0.015398127026855946,
"learning_rate": 3.494526315789474e-05,
"loss": 0.0001,
"step": 134
},
{
"epoch": 0.2915766738660907,
"grad_norm": 0.014543671160936356,
"learning_rate": 3.4415789473684216e-05,
"loss": 0.0003,
"step": 135
},
{
"epoch": 0.2937365010799136,
"grad_norm": 0.006670842412859201,
"learning_rate": 3.3886315789473686e-05,
"loss": 0.0001,
"step": 136
},
{
"epoch": 0.2958963282937365,
"grad_norm": 0.005377969238907099,
"learning_rate": 3.3356842105263156e-05,
"loss": 0.0002,
"step": 137
},
{
"epoch": 0.2980561555075594,
"grad_norm": 0.0062574828043580055,
"learning_rate": 3.2827368421052634e-05,
"loss": 0.0002,
"step": 138
},
{
"epoch": 0.3002159827213823,
"grad_norm": 0.14523354172706604,
"learning_rate": 3.2297894736842104e-05,
"loss": 0.0145,
"step": 139
},
{
"epoch": 0.3023758099352052,
"grad_norm": 0.010900533758103848,
"learning_rate": 3.176842105263158e-05,
"loss": 0.0002,
"step": 140
},
{
"epoch": 0.3045356371490281,
"grad_norm": 0.010036949999630451,
"learning_rate": 3.123894736842106e-05,
"loss": 0.0001,
"step": 141
},
{
"epoch": 0.30669546436285094,
"grad_norm": 0.598667562007904,
"learning_rate": 3.070947368421053e-05,
"loss": 0.0035,
"step": 142
},
{
"epoch": 0.30885529157667385,
"grad_norm": 0.002981688594445586,
"learning_rate": 3.018e-05,
"loss": 0.0001,
"step": 143
},
{
"epoch": 0.31101511879049676,
"grad_norm": 0.0013599407393485308,
"learning_rate": 2.9650526315789473e-05,
"loss": 0.0001,
"step": 144
},
{
"epoch": 0.31317494600431967,
"grad_norm": 0.5247439742088318,
"learning_rate": 2.912105263157895e-05,
"loss": 0.0167,
"step": 145
},
{
"epoch": 0.31533477321814257,
"grad_norm": 0.004649030510336161,
"learning_rate": 2.8591578947368424e-05,
"loss": 0.0002,
"step": 146
},
{
"epoch": 0.3174946004319654,
"grad_norm": 0.0023146297316998243,
"learning_rate": 2.8062105263157894e-05,
"loss": 0.0001,
"step": 147
},
{
"epoch": 0.31965442764578833,
"grad_norm": 0.4256044328212738,
"learning_rate": 2.753263157894737e-05,
"loss": 0.0243,
"step": 148
},
{
"epoch": 0.32181425485961124,
"grad_norm": 0.22870223224163055,
"learning_rate": 2.7003157894736845e-05,
"loss": 0.0005,
"step": 149
},
{
"epoch": 0.32397408207343414,
"grad_norm": 5.829990386962891,
"learning_rate": 2.6473684210526315e-05,
"loss": 0.0135,
"step": 150
},
{
"epoch": 0.32397408207343414,
"eval_loss": 0.0008732560090720654,
"eval_runtime": 38.606,
"eval_samples_per_second": 5.051,
"eval_steps_per_second": 1.269,
"step": 150
},
{
"epoch": 0.326133909287257,
"grad_norm": 0.041695982217788696,
"learning_rate": 2.5944210526315793e-05,
"loss": 0.0009,
"step": 151
},
{
"epoch": 0.3282937365010799,
"grad_norm": 0.1370960921049118,
"learning_rate": 2.5414736842105266e-05,
"loss": 0.0018,
"step": 152
},
{
"epoch": 0.3304535637149028,
"grad_norm": 0.031126253306865692,
"learning_rate": 2.4885263157894737e-05,
"loss": 0.0009,
"step": 153
},
{
"epoch": 0.3326133909287257,
"grad_norm": 0.029885003343224525,
"learning_rate": 2.4355789473684214e-05,
"loss": 0.0009,
"step": 154
},
{
"epoch": 0.3347732181425486,
"grad_norm": 0.02018631622195244,
"learning_rate": 2.3826315789473684e-05,
"loss": 0.0005,
"step": 155
},
{
"epoch": 0.3369330453563715,
"grad_norm": 0.029901625588536263,
"learning_rate": 2.3296842105263158e-05,
"loss": 0.0009,
"step": 156
},
{
"epoch": 0.3390928725701944,
"grad_norm": 0.2343233823776245,
"learning_rate": 2.2767368421052635e-05,
"loss": 0.0029,
"step": 157
},
{
"epoch": 0.3412526997840173,
"grad_norm": 0.060517966747283936,
"learning_rate": 2.2237894736842105e-05,
"loss": 0.0034,
"step": 158
},
{
"epoch": 0.3434125269978402,
"grad_norm": 0.1326085478067398,
"learning_rate": 2.170842105263158e-05,
"loss": 0.0025,
"step": 159
},
{
"epoch": 0.34557235421166305,
"grad_norm": 0.14953480660915375,
"learning_rate": 2.1178947368421053e-05,
"loss": 0.0026,
"step": 160
},
{
"epoch": 0.34773218142548595,
"grad_norm": 0.0644599050283432,
"learning_rate": 2.0649473684210527e-05,
"loss": 0.0017,
"step": 161
},
{
"epoch": 0.34989200863930886,
"grad_norm": 0.03840641304850578,
"learning_rate": 2.0120000000000004e-05,
"loss": 0.0006,
"step": 162
},
{
"epoch": 0.35205183585313177,
"grad_norm": 0.034114349633455276,
"learning_rate": 1.9590526315789474e-05,
"loss": 0.0004,
"step": 163
},
{
"epoch": 0.3542116630669546,
"grad_norm": 0.05888564512133598,
"learning_rate": 1.9061052631578948e-05,
"loss": 0.0011,
"step": 164
},
{
"epoch": 0.3563714902807775,
"grad_norm": 0.008627941831946373,
"learning_rate": 1.8531578947368422e-05,
"loss": 0.0002,
"step": 165
},
{
"epoch": 0.35853131749460043,
"grad_norm": 0.0020207944326102734,
"learning_rate": 1.8002105263157896e-05,
"loss": 0.0001,
"step": 166
},
{
"epoch": 0.36069114470842334,
"grad_norm": 0.2896200120449066,
"learning_rate": 1.747263157894737e-05,
"loss": 0.0015,
"step": 167
},
{
"epoch": 0.36285097192224625,
"grad_norm": 0.003729312913492322,
"learning_rate": 1.6943157894736843e-05,
"loss": 0.0002,
"step": 168
},
{
"epoch": 0.3650107991360691,
"grad_norm": 0.0025894506834447384,
"learning_rate": 1.6413684210526317e-05,
"loss": 0.0002,
"step": 169
},
{
"epoch": 0.367170626349892,
"grad_norm": 0.05301901698112488,
"learning_rate": 1.588421052631579e-05,
"loss": 0.0007,
"step": 170
},
{
"epoch": 0.3693304535637149,
"grad_norm": 0.004341227002441883,
"learning_rate": 1.5354736842105264e-05,
"loss": 0.0002,
"step": 171
},
{
"epoch": 0.3714902807775378,
"grad_norm": 0.0037382582668215036,
"learning_rate": 1.4825263157894736e-05,
"loss": 0.0003,
"step": 172
},
{
"epoch": 0.37365010799136067,
"grad_norm": 0.008173175156116486,
"learning_rate": 1.4295789473684212e-05,
"loss": 0.0002,
"step": 173
},
{
"epoch": 0.3758099352051836,
"grad_norm": 0.004051721189171076,
"learning_rate": 1.3766315789473686e-05,
"loss": 0.0003,
"step": 174
},
{
"epoch": 0.3779697624190065,
"grad_norm": 0.006087577901780605,
"learning_rate": 1.3236842105263158e-05,
"loss": 0.0002,
"step": 175
},
{
"epoch": 0.3801295896328294,
"grad_norm": 0.013773689046502113,
"learning_rate": 1.2707368421052633e-05,
"loss": 0.0003,
"step": 176
},
{
"epoch": 0.38228941684665224,
"grad_norm": 0.003146266331896186,
"learning_rate": 1.2177894736842107e-05,
"loss": 0.0002,
"step": 177
},
{
"epoch": 0.38444924406047515,
"grad_norm": 0.030116969719529152,
"learning_rate": 1.1648421052631579e-05,
"loss": 0.0004,
"step": 178
},
{
"epoch": 0.38660907127429806,
"grad_norm": 0.00527324341237545,
"learning_rate": 1.1118947368421053e-05,
"loss": 0.0002,
"step": 179
},
{
"epoch": 0.38876889848812096,
"grad_norm": 0.0014988424954935908,
"learning_rate": 1.0589473684210526e-05,
"loss": 0.0001,
"step": 180
},
{
"epoch": 0.39092872570194387,
"grad_norm": 0.0027672979049384594,
"learning_rate": 1.0060000000000002e-05,
"loss": 0.0001,
"step": 181
},
{
"epoch": 0.3930885529157667,
"grad_norm": 0.02873503603041172,
"learning_rate": 9.530526315789474e-06,
"loss": 0.0002,
"step": 182
},
{
"epoch": 0.3952483801295896,
"grad_norm": 0.004887609276920557,
"learning_rate": 9.001052631578948e-06,
"loss": 0.0002,
"step": 183
},
{
"epoch": 0.39740820734341253,
"grad_norm": 0.02098269574344158,
"learning_rate": 8.471578947368422e-06,
"loss": 0.0003,
"step": 184
},
{
"epoch": 0.39956803455723544,
"grad_norm": 0.015324135310947895,
"learning_rate": 7.942105263157895e-06,
"loss": 0.0004,
"step": 185
},
{
"epoch": 0.4017278617710583,
"grad_norm": 0.009064553305506706,
"learning_rate": 7.412631578947368e-06,
"loss": 0.0002,
"step": 186
},
{
"epoch": 0.4038876889848812,
"grad_norm": 0.3744548559188843,
"learning_rate": 6.883157894736843e-06,
"loss": 0.0086,
"step": 187
},
{
"epoch": 0.4060475161987041,
"grad_norm": 0.0015008350601419806,
"learning_rate": 6.3536842105263166e-06,
"loss": 0.0001,
"step": 188
},
{
"epoch": 0.408207343412527,
"grad_norm": 0.001427732640877366,
"learning_rate": 5.8242105263157895e-06,
"loss": 0.0001,
"step": 189
},
{
"epoch": 0.4103671706263499,
"grad_norm": 0.003146109404042363,
"learning_rate": 5.294736842105263e-06,
"loss": 0.0001,
"step": 190
},
{
"epoch": 0.41252699784017277,
"grad_norm": 0.003537424374371767,
"learning_rate": 4.765263157894737e-06,
"loss": 0.0002,
"step": 191
},
{
"epoch": 0.4146868250539957,
"grad_norm": 0.04009627550840378,
"learning_rate": 4.235789473684211e-06,
"loss": 0.0011,
"step": 192
},
{
"epoch": 0.4168466522678186,
"grad_norm": 0.0008157025440596044,
"learning_rate": 3.706315789473684e-06,
"loss": 0.0001,
"step": 193
},
{
"epoch": 0.4190064794816415,
"grad_norm": 0.004071658011525869,
"learning_rate": 3.1768421052631583e-06,
"loss": 0.0004,
"step": 194
},
{
"epoch": 0.42116630669546434,
"grad_norm": 0.002962104743346572,
"learning_rate": 2.6473684210526316e-06,
"loss": 0.0001,
"step": 195
},
{
"epoch": 0.42332613390928725,
"grad_norm": 0.5851313471794128,
"learning_rate": 2.1178947368421054e-06,
"loss": 0.0009,
"step": 196
},
{
"epoch": 0.42548596112311016,
"grad_norm": 0.13741615414619446,
"learning_rate": 1.5884210526315791e-06,
"loss": 0.0006,
"step": 197
},
{
"epoch": 0.42764578833693306,
"grad_norm": 0.007148873992264271,
"learning_rate": 1.0589473684210527e-06,
"loss": 0.0002,
"step": 198
},
{
"epoch": 0.4298056155507559,
"grad_norm": 0.0032815311569720507,
"learning_rate": 5.294736842105263e-07,
"loss": 0.0001,
"step": 199
},
{
"epoch": 0.4319654427645788,
"grad_norm": 0.005409948993474245,
"learning_rate": 0.0,
"loss": 0.0003,
"step": 200
},
{
"epoch": 0.4319654427645788,
"eval_loss": 0.0005457898951135576,
"eval_runtime": 38.597,
"eval_samples_per_second": 5.052,
"eval_steps_per_second": 1.27,
"step": 200
}
],
"logging_steps": 1,
"max_steps": 200,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 50,
"stateful_callbacks": {
"EarlyStoppingCallback": {
"args": {
"early_stopping_patience": 5,
"early_stopping_threshold": 0.0
},
"attributes": {
"early_stopping_patience_counter": 0
}
},
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 9.880763940785357e+16,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}