{
"best_metric": 11.5,
"best_model_checkpoint": "miner_id_24/checkpoint-150",
"epoch": 1.0,
"eval_steps": 150,
"global_step": 169,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.005917159763313609,
"grad_norm": 3.417472544242628e-05,
"learning_rate": 5e-06,
"loss": 46.0,
"step": 1
},
{
"epoch": 0.005917159763313609,
"eval_loss": 11.5,
"eval_runtime": 1.4564,
"eval_samples_per_second": 195.691,
"eval_steps_per_second": 49.438,
"step": 1
},
{
"epoch": 0.011834319526627219,
"grad_norm": 3.082885086769238e-05,
"learning_rate": 1e-05,
"loss": 46.0,
"step": 2
},
{
"epoch": 0.01775147928994083,
"grad_norm": 1.8164773791795596e-05,
"learning_rate": 1.5e-05,
"loss": 46.0,
"step": 3
},
{
"epoch": 0.023668639053254437,
"grad_norm": 2.2182997781783342e-05,
"learning_rate": 2e-05,
"loss": 46.0,
"step": 4
},
{
"epoch": 0.029585798816568046,
"grad_norm": 3.892195672960952e-05,
"learning_rate": 2.5e-05,
"loss": 46.0,
"step": 5
},
{
"epoch": 0.03550295857988166,
"grad_norm": 3.778233440243639e-05,
"learning_rate": 3e-05,
"loss": 46.0,
"step": 6
},
{
"epoch": 0.04142011834319527,
"grad_norm": 3.2646468753227964e-05,
"learning_rate": 3.5e-05,
"loss": 46.0,
"step": 7
},
{
"epoch": 0.047337278106508875,
"grad_norm": 2.2486128727905452e-05,
"learning_rate": 4e-05,
"loss": 46.0,
"step": 8
},
{
"epoch": 0.05325443786982249,
"grad_norm": 4.4991043978370726e-05,
"learning_rate": 4.5e-05,
"loss": 46.0,
"step": 9
},
{
"epoch": 0.05917159763313609,
"grad_norm": 3.6158784496365115e-05,
"learning_rate": 5e-05,
"loss": 46.0,
"step": 10
},
{
"epoch": 0.0650887573964497,
"grad_norm": 3.0189292374416254e-05,
"learning_rate": 5.500000000000001e-05,
"loss": 46.0,
"step": 11
},
{
"epoch": 0.07100591715976332,
"grad_norm": 2.9184000595705584e-05,
"learning_rate": 6e-05,
"loss": 46.0,
"step": 12
},
{
"epoch": 0.07692307692307693,
"grad_norm": 2.1351072064135224e-05,
"learning_rate": 6.500000000000001e-05,
"loss": 46.0,
"step": 13
},
{
"epoch": 0.08284023668639054,
"grad_norm": 3.9580500015290454e-05,
"learning_rate": 7e-05,
"loss": 46.0,
"step": 14
},
{
"epoch": 0.08875739644970414,
"grad_norm": 3.6071560316486284e-05,
"learning_rate": 7.500000000000001e-05,
"loss": 46.0,
"step": 15
},
{
"epoch": 0.09467455621301775,
"grad_norm": 2.757324546109885e-05,
"learning_rate": 8e-05,
"loss": 46.0,
"step": 16
},
{
"epoch": 0.10059171597633136,
"grad_norm": 4.150742097408511e-05,
"learning_rate": 8.5e-05,
"loss": 46.0,
"step": 17
},
{
"epoch": 0.10650887573964497,
"grad_norm": 3.0252529541030526e-05,
"learning_rate": 9e-05,
"loss": 46.0,
"step": 18
},
{
"epoch": 0.11242603550295859,
"grad_norm": 4.47008824266959e-05,
"learning_rate": 9.5e-05,
"loss": 46.0,
"step": 19
},
{
"epoch": 0.11834319526627218,
"grad_norm": 4.676431854022667e-05,
"learning_rate": 0.0001,
"loss": 46.0,
"step": 20
},
{
"epoch": 0.1242603550295858,
"grad_norm": 3.7658752262359485e-05,
"learning_rate": 9.99888864929809e-05,
"loss": 46.0,
"step": 21
},
{
"epoch": 0.1301775147928994,
"grad_norm": 5.5233460443560034e-05,
"learning_rate": 9.995555091232516e-05,
"loss": 46.0,
"step": 22
},
{
"epoch": 0.13609467455621302,
"grad_norm": 5.739947300753556e-05,
"learning_rate": 9.990000807704114e-05,
"loss": 46.0,
"step": 23
},
{
"epoch": 0.14201183431952663,
"grad_norm": 4.8324771341867745e-05,
"learning_rate": 9.982228267815643e-05,
"loss": 46.0,
"step": 24
},
{
"epoch": 0.14792899408284024,
"grad_norm": 4.9891212256625295e-05,
"learning_rate": 9.972240926774168e-05,
"loss": 46.0,
"step": 25
},
{
"epoch": 0.15384615384615385,
"grad_norm": 3.683907198137604e-05,
"learning_rate": 9.96004322435508e-05,
"loss": 46.0,
"step": 26
},
{
"epoch": 0.15976331360946747,
"grad_norm": 5.67148053960409e-05,
"learning_rate": 9.945640582928437e-05,
"loss": 46.0,
"step": 27
},
{
"epoch": 0.16568047337278108,
"grad_norm": 6.611295975744724e-05,
"learning_rate": 9.929039405048501e-05,
"loss": 46.0,
"step": 28
},
{
"epoch": 0.17159763313609466,
"grad_norm": 4.908446135232225e-05,
"learning_rate": 9.910247070607552e-05,
"loss": 46.0,
"step": 29
},
{
"epoch": 0.17751479289940827,
"grad_norm": 6.706344720441848e-05,
"learning_rate": 9.889271933555213e-05,
"loss": 46.0,
"step": 30
},
{
"epoch": 0.1834319526627219,
"grad_norm": 8.02269860287197e-05,
"learning_rate": 9.866123318184803e-05,
"loss": 46.0,
"step": 31
},
{
"epoch": 0.1893491124260355,
"grad_norm": 6.0282156482571736e-05,
"learning_rate": 9.840811514988294e-05,
"loss": 46.0,
"step": 32
},
{
"epoch": 0.1952662721893491,
"grad_norm": 7.383022602880374e-05,
"learning_rate": 9.813347776081789e-05,
"loss": 46.0,
"step": 33
},
{
"epoch": 0.20118343195266272,
"grad_norm": 6.196251342771575e-05,
"learning_rate": 9.783744310203491e-05,
"loss": 46.0,
"step": 34
},
{
"epoch": 0.20710059171597633,
"grad_norm": 8.574208186473697e-05,
"learning_rate": 9.752014277286432e-05,
"loss": 46.0,
"step": 35
},
{
"epoch": 0.21301775147928995,
"grad_norm": 0.00013027463865000755,
"learning_rate": 9.718171782608356e-05,
"loss": 46.0,
"step": 36
},
{
"epoch": 0.21893491124260356,
"grad_norm": 0.00014059020031709224,
"learning_rate": 9.682231870521347e-05,
"loss": 46.0,
"step": 37
},
{
"epoch": 0.22485207100591717,
"grad_norm": 9.982455958379433e-05,
"learning_rate": 9.644210517764014e-05,
"loss": 46.0,
"step": 38
},
{
"epoch": 0.23076923076923078,
"grad_norm": 8.106425229925662e-05,
"learning_rate": 9.60412462635919e-05,
"loss": 46.0,
"step": 39
},
{
"epoch": 0.23668639053254437,
"grad_norm": 7.230762275867164e-05,
"learning_rate": 9.561992016100293e-05,
"loss": 46.0,
"step": 40
},
{
"epoch": 0.24260355029585798,
"grad_norm": 7.070993888191879e-05,
"learning_rate": 9.517831416629716e-05,
"loss": 46.0,
"step": 41
},
{
"epoch": 0.2485207100591716,
"grad_norm": 0.0001705414615571499,
"learning_rate": 9.471662459112747e-05,
"loss": 46.0,
"step": 42
},
{
"epoch": 0.25443786982248523,
"grad_norm": 0.0001224745938088745,
"learning_rate": 9.423505667510724e-05,
"loss": 46.0,
"step": 43
},
{
"epoch": 0.2603550295857988,
"grad_norm": 7.8432189184241e-05,
"learning_rate": 9.373382449457304e-05,
"loss": 46.0,
"step": 44
},
{
"epoch": 0.26627218934911245,
"grad_norm": 6.436308467527851e-05,
"learning_rate": 9.321315086741916e-05,
"loss": 46.0,
"step": 45
},
{
"epoch": 0.27218934911242604,
"grad_norm": 9.919815784087405e-05,
"learning_rate": 9.267326725404599e-05,
"loss": 46.0,
"step": 46
},
{
"epoch": 0.2781065088757396,
"grad_norm": 7.329176878556609e-05,
"learning_rate": 9.21144136544666e-05,
"loss": 46.0,
"step": 47
},
{
"epoch": 0.28402366863905326,
"grad_norm": 0.00013015043805353343,
"learning_rate": 9.153683850161706e-05,
"loss": 46.0,
"step": 48
},
{
"epoch": 0.28994082840236685,
"grad_norm": 0.000146089427289553,
"learning_rate": 9.094079855091797e-05,
"loss": 46.0,
"step": 49
},
{
"epoch": 0.2958579881656805,
"grad_norm": 0.00011953832290600985,
"learning_rate": 9.032655876613636e-05,
"loss": 46.0,
"step": 50
},
{
"epoch": 0.30177514792899407,
"grad_norm": 0.0001868321414804086,
"learning_rate": 8.96943922015986e-05,
"loss": 46.0,
"step": 51
},
{
"epoch": 0.3076923076923077,
"grad_norm": 9.340223914477974e-05,
"learning_rate": 8.904457988080681e-05,
"loss": 46.0,
"step": 52
},
{
"epoch": 0.3136094674556213,
"grad_norm": 0.00012810462794732302,
"learning_rate": 8.83774106715125e-05,
"loss": 46.0,
"step": 53
},
{
"epoch": 0.31952662721893493,
"grad_norm": 0.0001306675694650039,
"learning_rate": 8.76931811573033e-05,
"loss": 46.0,
"step": 54
},
{
"epoch": 0.3254437869822485,
"grad_norm": 0.00019835562852676958,
"learning_rate": 8.699219550575953e-05,
"loss": 46.0,
"step": 55
},
{
"epoch": 0.33136094674556216,
"grad_norm": 0.00010900765482801944,
"learning_rate": 8.627476533323957e-05,
"loss": 46.0,
"step": 56
},
{
"epoch": 0.33727810650887574,
"grad_norm": 0.00016158001380972564,
"learning_rate": 8.554120956635375e-05,
"loss": 46.0,
"step": 57
},
{
"epoch": 0.3431952662721893,
"grad_norm": 0.00012027497723465785,
"learning_rate": 8.479185430018858e-05,
"loss": 46.0,
"step": 58
},
{
"epoch": 0.34911242603550297,
"grad_norm": 0.00011706927034538239,
"learning_rate": 8.402703265334455e-05,
"loss": 46.0,
"step": 59
},
{
"epoch": 0.35502958579881655,
"grad_norm": 0.00020923654665239155,
"learning_rate": 8.324708461985124e-05,
"loss": 46.0,
"step": 60
},
{
"epoch": 0.3609467455621302,
"grad_norm": 0.00013141462113708258,
"learning_rate": 8.245235691802644e-05,
"loss": 46.0,
"step": 61
},
{
"epoch": 0.3668639053254438,
"grad_norm": 0.0001231282512890175,
"learning_rate": 8.164320283634585e-05,
"loss": 46.0,
"step": 62
},
{
"epoch": 0.3727810650887574,
"grad_norm": 0.00014354191080201417,
"learning_rate": 8.081998207639212e-05,
"loss": 46.0,
"step": 63
},
{
"epoch": 0.378698224852071,
"grad_norm": 0.00018254865426570177,
"learning_rate": 7.998306059295301e-05,
"loss": 46.0,
"step": 64
},
{
"epoch": 0.38461538461538464,
"grad_norm": 0.00015154361608438194,
"learning_rate": 7.913281043133978e-05,
"loss": 46.0,
"step": 65
},
{
"epoch": 0.3905325443786982,
"grad_norm": 0.00014137514517642558,
"learning_rate": 7.826960956199794e-05,
"loss": 46.0,
"step": 66
},
{
"epoch": 0.39644970414201186,
"grad_norm": 0.000186969613423571,
"learning_rate": 7.739384171248435e-05,
"loss": 46.0,
"step": 67
},
{
"epoch": 0.40236686390532544,
"grad_norm": 0.00019048571994062513,
"learning_rate": 7.650589619688469e-05,
"loss": 46.0,
"step": 68
},
{
"epoch": 0.40828402366863903,
"grad_norm": 0.000217212233110331,
"learning_rate": 7.560616774274775e-05,
"loss": 46.0,
"step": 69
},
{
"epoch": 0.41420118343195267,
"grad_norm": 0.00014128351176623255,
"learning_rate": 7.469505631561317e-05,
"loss": 46.0,
"step": 70
},
{
"epoch": 0.42011834319526625,
"grad_norm": 0.000143827244755812,
"learning_rate": 7.377296694121058e-05,
"loss": 46.0,
"step": 71
},
{
"epoch": 0.4260355029585799,
"grad_norm": 0.00021143256162758917,
"learning_rate": 7.284030952540937e-05,
"loss": 46.0,
"step": 72
},
{
"epoch": 0.4319526627218935,
"grad_norm": 0.0002473762142471969,
"learning_rate": 7.189749867199899e-05,
"loss": 46.0,
"step": 73
},
{
"epoch": 0.4378698224852071,
"grad_norm": 0.00019267069001216441,
"learning_rate": 7.094495349838092e-05,
"loss": 46.0,
"step": 74
},
{
"epoch": 0.4437869822485207,
"grad_norm": 0.0002110818022629246,
"learning_rate": 6.998309744925411e-05,
"loss": 46.0,
"step": 75
},
{
"epoch": 0.44970414201183434,
"grad_norm": 0.00015903066378086805,
"learning_rate": 6.901235810837669e-05,
"loss": 46.0,
"step": 76
},
{
"epoch": 0.4556213017751479,
"grad_norm": 0.0001764715852914378,
"learning_rate": 6.803316700848779e-05,
"loss": 46.0,
"step": 77
},
{
"epoch": 0.46153846153846156,
"grad_norm": 0.00025015155551955104,
"learning_rate": 6.704595943947385e-05,
"loss": 46.0,
"step": 78
},
{
"epoch": 0.46745562130177515,
"grad_norm": 0.00027083579334430397,
"learning_rate": 6.605117425486482e-05,
"loss": 46.0,
"step": 79
},
{
"epoch": 0.47337278106508873,
"grad_norm": 0.00024672580184414983,
"learning_rate": 6.504925367674594e-05,
"loss": 46.0,
"step": 80
},
{
"epoch": 0.47928994082840237,
"grad_norm": 0.0003929804952349514,
"learning_rate": 6.404064309917231e-05,
"loss": 46.0,
"step": 81
},
{
"epoch": 0.48520710059171596,
"grad_norm": 0.0004307347990106791,
"learning_rate": 6.302579089017327e-05,
"loss": 46.0,
"step": 82
},
{
"epoch": 0.4911242603550296,
"grad_norm": 0.00020378813496790826,
"learning_rate": 6.200514819243476e-05,
"loss": 46.0,
"step": 83
},
{
"epoch": 0.4970414201183432,
"grad_norm": 0.00033084748429246247,
"learning_rate": 6.097916872274815e-05,
"loss": 46.0,
"step": 84
},
{
"epoch": 0.5029585798816568,
"grad_norm": 0.0002085510641336441,
"learning_rate": 5.994830857031499e-05,
"loss": 46.0,
"step": 85
},
{
"epoch": 0.5088757396449705,
"grad_norm": 0.00018850974447559565,
"learning_rate": 5.891302599399685e-05,
"loss": 46.0,
"step": 86
},
{
"epoch": 0.514792899408284,
"grad_norm": 0.00025538477348163724,
"learning_rate": 5.78737812186009e-05,
"loss": 46.0,
"step": 87
},
{
"epoch": 0.5207100591715976,
"grad_norm": 0.00026542809791862965,
"learning_rate": 5.683103623029135e-05,
"loss": 46.0,
"step": 88
},
{
"epoch": 0.5266272189349113,
"grad_norm": 0.00019196125504095107,
"learning_rate": 5.578525457121807e-05,
"loss": 46.0,
"step": 89
},
{
"epoch": 0.5325443786982249,
"grad_norm": 0.00026723742485046387,
"learning_rate": 5.473690113345342e-05,
"loss": 46.0,
"step": 90
},
{
"epoch": 0.5384615384615384,
"grad_norm": 0.0002167681377613917,
"learning_rate": 5.368644195232896e-05,
"loss": 46.0,
"step": 91
},
{
"epoch": 0.5443786982248521,
"grad_norm": 0.00020223991305101663,
"learning_rate": 5.263434399926398e-05,
"loss": 46.0,
"step": 92
},
{
"epoch": 0.5502958579881657,
"grad_norm": 0.00031044651404954493,
"learning_rate": 5.158107497417795e-05,
"loss": 46.0,
"step": 93
},
{
"epoch": 0.5562130177514792,
"grad_norm": 0.00022696513042319566,
"learning_rate": 5.052710309757899e-05,
"loss": 46.0,
"step": 94
},
{
"epoch": 0.5621301775147929,
"grad_norm": 0.0003360291011631489,
"learning_rate": 4.947289690242102e-05,
"loss": 46.0,
"step": 95
},
{
"epoch": 0.5680473372781065,
"grad_norm": 0.0002260725013911724,
"learning_rate": 4.841892502582206e-05,
"loss": 46.0,
"step": 96
},
{
"epoch": 0.5739644970414202,
"grad_norm": 0.0002723989891819656,
"learning_rate": 4.736565600073602e-05,
"loss": 46.0,
"step": 97
},
{
"epoch": 0.5798816568047337,
"grad_norm": 0.00029556764638982713,
"learning_rate": 4.631355804767105e-05,
"loss": 46.0,
"step": 98
},
{
"epoch": 0.5857988165680473,
"grad_norm": 0.0002435845381114632,
"learning_rate": 4.5263098866546586e-05,
"loss": 46.0,
"step": 99
},
{
"epoch": 0.591715976331361,
"grad_norm": 0.0002843434049282223,
"learning_rate": 4.421474542878195e-05,
"loss": 46.0,
"step": 100
},
{
"epoch": 0.5976331360946746,
"grad_norm": 0.0002432662295177579,
"learning_rate": 4.316896376970866e-05,
"loss": 46.0,
"step": 101
},
{
"epoch": 0.6035502958579881,
"grad_norm": 0.0004484949167817831,
"learning_rate": 4.212621878139912e-05,
"loss": 46.0,
"step": 102
},
{
"epoch": 0.6094674556213018,
"grad_norm": 0.00032589331385679543,
"learning_rate": 4.108697400600316e-05,
"loss": 46.0,
"step": 103
},
{
"epoch": 0.6153846153846154,
"grad_norm": 0.0002852332836482674,
"learning_rate": 4.005169142968503e-05,
"loss": 46.0,
"step": 104
},
{
"epoch": 0.621301775147929,
"grad_norm": 0.0004301300796214491,
"learning_rate": 3.9020831277251863e-05,
"loss": 46.0,
"step": 105
},
{
"epoch": 0.6272189349112426,
"grad_norm": 0.0003021268348675221,
"learning_rate": 3.7994851807565254e-05,
"loss": 46.0,
"step": 106
},
{
"epoch": 0.6331360946745562,
"grad_norm": 0.0005913956556469202,
"learning_rate": 3.6974209109826726e-05,
"loss": 46.0,
"step": 107
},
{
"epoch": 0.6390532544378699,
"grad_norm": 0.0003400585555937141,
"learning_rate": 3.595935690082769e-05,
"loss": 46.0,
"step": 108
},
{
"epoch": 0.6449704142011834,
"grad_norm": 0.00034251168835908175,
"learning_rate": 3.495074632325407e-05,
"loss": 46.0,
"step": 109
},
{
"epoch": 0.650887573964497,
"grad_norm": 0.0003791009949054569,
"learning_rate": 3.394882574513519e-05,
"loss": 46.0,
"step": 110
},
{
"epoch": 0.6568047337278107,
"grad_norm": 0.00034353407681919634,
"learning_rate": 3.295404056052616e-05,
"loss": 46.0,
"step": 111
},
{
"epoch": 0.6627218934911243,
"grad_norm": 0.00026513394550420344,
"learning_rate": 3.196683299151223e-05,
"loss": 46.0,
"step": 112
},
{
"epoch": 0.6686390532544378,
"grad_norm": 0.00021634704899042845,
"learning_rate": 3.098764189162332e-05,
"loss": 46.0,
"step": 113
},
{
"epoch": 0.6745562130177515,
"grad_norm": 0.0004164211277384311,
"learning_rate": 3.0016902550745897e-05,
"loss": 46.0,
"step": 114
},
{
"epoch": 0.6804733727810651,
"grad_norm": 0.00032345083309337497,
"learning_rate": 2.905504650161909e-05,
"loss": 46.0,
"step": 115
},
{
"epoch": 0.6863905325443787,
"grad_norm": 0.00039014124195091426,
"learning_rate": 2.810250132800103e-05,
"loss": 46.0,
"step": 116
},
{
"epoch": 0.6923076923076923,
"grad_norm": 0.00032619069679640234,
"learning_rate": 2.715969047459066e-05,
"loss": 46.0,
"step": 117
},
{
"epoch": 0.6982248520710059,
"grad_norm": 0.0004408117674756795,
"learning_rate": 2.6227033058789408e-05,
"loss": 46.0,
"step": 118
},
{
"epoch": 0.7041420118343196,
"grad_norm": 0.0005531953065656126,
"learning_rate": 2.530494368438683e-05,
"loss": 46.0,
"step": 119
},
{
"epoch": 0.7100591715976331,
"grad_norm": 0.00048261991469189525,
"learning_rate": 2.4393832257252252e-05,
"loss": 46.0,
"step": 120
},
{
"epoch": 0.7159763313609467,
"grad_norm": 0.00041955066262744367,
"learning_rate": 2.349410380311532e-05,
"loss": 46.0,
"step": 121
},
{
"epoch": 0.7218934911242604,
"grad_norm": 0.0004092879535164684,
"learning_rate": 2.260615828751566e-05,
"loss": 46.0,
"step": 122
},
{
"epoch": 0.727810650887574,
"grad_norm": 0.0005536783719435334,
"learning_rate": 2.173039043800206e-05,
"loss": 46.0,
"step": 123
},
{
"epoch": 0.7337278106508875,
"grad_norm": 0.00047029706183820963,
"learning_rate": 2.086718956866024e-05,
"loss": 46.0,
"step": 124
},
{
"epoch": 0.7396449704142012,
"grad_norm": 0.00032897520577535033,
"learning_rate": 2.0016939407046987e-05,
"loss": 46.0,
"step": 125
},
{
"epoch": 0.7455621301775148,
"grad_norm": 0.00024629110703244805,
"learning_rate": 1.9180017923607886e-05,
"loss": 46.0,
"step": 126
},
{
"epoch": 0.7514792899408284,
"grad_norm": 0.00034898053854703903,
"learning_rate": 1.835679716365417e-05,
"loss": 46.0,
"step": 127
},
{
"epoch": 0.757396449704142,
"grad_norm": 0.00032217547413893044,
"learning_rate": 1.754764308197358e-05,
"loss": 46.0,
"step": 128
},
{
"epoch": 0.7633136094674556,
"grad_norm": 0.00029810157138854265,
"learning_rate": 1.675291538014877e-05,
"loss": 46.0,
"step": 129
},
{
"epoch": 0.7692307692307693,
"grad_norm": 0.000341045088134706,
"learning_rate": 1.5972967346655448e-05,
"loss": 46.0,
"step": 130
},
{
"epoch": 0.7751479289940828,
"grad_norm": 0.0003495199780445546,
"learning_rate": 1.5208145699811415e-05,
"loss": 46.0,
"step": 131
},
{
"epoch": 0.7810650887573964,
"grad_norm": 0.0003159924817737192,
"learning_rate": 1.4458790433646263e-05,
"loss": 46.0,
"step": 132
},
{
"epoch": 0.7869822485207101,
"grad_norm": 0.0002985662722494453,
"learning_rate": 1.3725234666760428e-05,
"loss": 46.0,
"step": 133
},
{
"epoch": 0.7928994082840237,
"grad_norm": 0.0003588286635931581,
"learning_rate": 1.3007804494240478e-05,
"loss": 46.0,
"step": 134
},
{
"epoch": 0.7988165680473372,
"grad_norm": 0.00040958283352665603,
"learning_rate": 1.2306818842696716e-05,
"loss": 46.0,
"step": 135
},
{
"epoch": 0.8047337278106509,
"grad_norm": 0.00039786347770132124,
"learning_rate": 1.1622589328487504e-05,
"loss": 46.0,
"step": 136
},
{
"epoch": 0.8106508875739645,
"grad_norm": 0.00025457568699494004,
"learning_rate": 1.0955420119193199e-05,
"loss": 46.0,
"step": 137
},
{
"epoch": 0.8165680473372781,
"grad_norm": 0.000526306452229619,
"learning_rate": 1.03056077984014e-05,
"loss": 46.0,
"step": 138
},
{
"epoch": 0.8224852071005917,
"grad_norm": 0.000289287039777264,
"learning_rate": 9.673441233863662e-06,
"loss": 46.0,
"step": 139
},
{
"epoch": 0.8284023668639053,
"grad_norm": 0.0002234600979136303,
"learning_rate": 9.059201449082045e-06,
"loss": 46.0,
"step": 140
},
{
"epoch": 0.834319526627219,
"grad_norm": 0.00036921919672749937,
"learning_rate": 8.463161498382948e-06,
"loss": 46.0,
"step": 141
},
{
"epoch": 0.8402366863905325,
"grad_norm": 0.00024386531731579453,
"learning_rate": 7.885586345533397e-06,
"loss": 46.0,
"step": 142
},
{
"epoch": 0.8461538461538461,
"grad_norm": 0.0003299729141872376,
"learning_rate": 7.3267327459540015e-06,
"loss": 46.0,
"step": 143
},
{
"epoch": 0.8520710059171598,
"grad_norm": 0.00021204911172389984,
"learning_rate": 6.786849132580842e-06,
"loss": 46.0,
"step": 144
},
{
"epoch": 0.8579881656804734,
"grad_norm": 0.00035832199500873685,
"learning_rate": 6.266175505426958e-06,
"loss": 46.0,
"step": 145
},
{
"epoch": 0.863905325443787,
"grad_norm": 0.0005066677113063633,
"learning_rate": 5.76494332489278e-06,
"loss": 46.0,
"step": 146
},
{
"epoch": 0.8698224852071006,
"grad_norm": 0.0002925771113950759,
"learning_rate": 5.283375408872537e-06,
"loss": 46.0,
"step": 147
},
{
"epoch": 0.8757396449704142,
"grad_norm": 0.00039165234193205833,
"learning_rate": 4.821685833702849e-06,
"loss": 46.0,
"step": 148
},
{
"epoch": 0.8816568047337278,
"grad_norm": 0.0003404826857149601,
"learning_rate": 4.380079838997086e-06,
"loss": 46.0,
"step": 149
},
{
"epoch": 0.8875739644970414,
"grad_norm": 0.0005336723406799138,
"learning_rate": 3.958753736408105e-06,
"loss": 46.0,
"step": 150
},
{
"epoch": 0.8875739644970414,
"eval_loss": 11.5,
"eval_runtime": 1.4273,
"eval_samples_per_second": 199.677,
"eval_steps_per_second": 50.445,
"step": 150
},
{
"epoch": 0.893491124260355,
"grad_norm": 0.0005075408262200654,
"learning_rate": 3.557894822359864e-06,
"loss": 46.0,
"step": 151
},
{
"epoch": 0.8994082840236687,
"grad_norm": 0.00039445646689273417,
"learning_rate": 3.1776812947865385e-06,
"loss": 46.0,
"step": 152
},
{
"epoch": 0.9053254437869822,
"grad_norm": 0.00035393863799981773,
"learning_rate": 2.8182821739164534e-06,
"loss": 46.0,
"step": 153
},
{
"epoch": 0.9112426035502958,
"grad_norm": 0.0005202327738516033,
"learning_rate": 2.4798572271356846e-06,
"loss": 46.0,
"step": 154
},
{
"epoch": 0.9171597633136095,
"grad_norm": 0.0002673721464816481,
"learning_rate": 2.1625568979651014e-06,
"loss": 46.0,
"step": 155
},
{
"epoch": 0.9230769230769231,
"grad_norm": 0.0005175816477276385,
"learning_rate": 1.8665222391821169e-06,
"loss": 46.0,
"step": 156
},
{
"epoch": 0.9289940828402367,
"grad_norm": 0.0004950053407810628,
"learning_rate": 1.5918848501170647e-06,
"loss": 46.0,
"step": 157
},
{
"epoch": 0.9349112426035503,
"grad_norm": 0.0003163626533932984,
"learning_rate": 1.338766818151982e-06,
"loss": 46.0,
"step": 158
},
{
"epoch": 0.9408284023668639,
"grad_norm": 0.0004616921942215413,
"learning_rate": 1.1072806644478739e-06,
"loss": 46.0,
"step": 159
},
{
"epoch": 0.9467455621301775,
"grad_norm": 0.0003856797411572188,
"learning_rate": 8.975292939244928e-07,
"loss": 46.0,
"step": 160
},
{
"epoch": 0.9526627218934911,
"grad_norm": 0.0004804205091204494,
"learning_rate": 7.096059495149854e-07,
"loss": 46.0,
"step": 161
},
{
"epoch": 0.9585798816568047,
"grad_norm": 0.0006341671105474234,
"learning_rate": 5.435941707156389e-07,
"loss": 46.0,
"step": 162
},
{
"epoch": 0.9644970414201184,
"grad_norm": 0.0005707453237846494,
"learning_rate": 3.9956775644920395e-07,
"loss": 46.0,
"step": 163
},
{
"epoch": 0.9704142011834319,
"grad_norm": 0.0003669565194286406,
"learning_rate": 2.77590732258326e-07,
"loss": 46.0,
"step": 164
},
{
"epoch": 0.9763313609467456,
"grad_norm": 0.0006033536046743393,
"learning_rate": 1.7771732184357904e-07,
"loss": 46.0,
"step": 165
},
{
"epoch": 0.9822485207100592,
"grad_norm": 0.00046527519589290023,
"learning_rate": 9.999192295886972e-08,
"loss": 46.0,
"step": 166
},
{
"epoch": 0.9881656804733728,
"grad_norm": 0.0005233949050307274,
"learning_rate": 4.4449087674847125e-08,
"loss": 46.0,
"step": 167
},
{
"epoch": 0.9940828402366864,
"grad_norm": 0.0007509227143600583,
"learning_rate": 1.111350701909486e-08,
"loss": 46.0,
"step": 168
},
{
"epoch": 1.0,
"grad_norm": 0.00042899660184048116,
"learning_rate": 0.0,
"loss": 46.0,
"step": 169
}
],
"logging_steps": 1,
"max_steps": 169,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 150,
"stateful_callbacks": {
"EarlyStoppingCallback": {
"args": {
"early_stopping_patience": 2,
"early_stopping_threshold": 0.0
},
"attributes": {
"early_stopping_patience_counter": 0
}
},
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 26545806065664.0,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}