Training in progress, step 200, checkpoint (commit dfff760)
{
"best_metric": 11.5,
"best_model_checkpoint": "miner_id_24/checkpoint-50",
"epoch": 0.007275637527738368,
"eval_steps": 50,
"global_step": 200,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 3.637818763869184e-05,
"grad_norm": 2.79417527053738e-05,
"learning_rate": 1.003e-05,
"loss": 23.0,
"step": 1
},
{
"epoch": 3.637818763869184e-05,
"eval_loss": 11.5,
"eval_runtime": 151.3914,
"eval_samples_per_second": 76.457,
"eval_steps_per_second": 19.116,
"step": 1
},
{
"epoch": 7.275637527738368e-05,
"grad_norm": 2.1153544366825372e-05,
"learning_rate": 2.006e-05,
"loss": 23.0,
"step": 2
},
{
"epoch": 0.00010913456291607552,
"grad_norm": 1.7088592358049937e-05,
"learning_rate": 3.0089999999999998e-05,
"loss": 23.0,
"step": 3
},
{
"epoch": 0.00014551275055476736,
"grad_norm": 2.0742239939863794e-05,
"learning_rate": 4.012e-05,
"loss": 23.0,
"step": 4
},
{
"epoch": 0.0001818909381934592,
"grad_norm": 2.039460014202632e-05,
"learning_rate": 5.015e-05,
"loss": 23.0,
"step": 5
},
{
"epoch": 0.00021826912583215103,
"grad_norm": 3.1619365472579375e-05,
"learning_rate": 6.0179999999999996e-05,
"loss": 23.0,
"step": 6
},
{
"epoch": 0.00025464731347084286,
"grad_norm": 2.9644716050825082e-05,
"learning_rate": 7.021e-05,
"loss": 23.0,
"step": 7
},
{
"epoch": 0.0002910255011095347,
"grad_norm": 1.7494063285994343e-05,
"learning_rate": 8.024e-05,
"loss": 23.0,
"step": 8
},
{
"epoch": 0.00032740368874822657,
"grad_norm": 3.234211544622667e-05,
"learning_rate": 9.027e-05,
"loss": 23.0,
"step": 9
},
{
"epoch": 0.0003637818763869184,
"grad_norm": 2.7539184884517454e-05,
"learning_rate": 0.0001003,
"loss": 23.0,
"step": 10
},
{
"epoch": 0.00040016006402561027,
"grad_norm": 1.9417160729062743e-05,
"learning_rate": 9.97721052631579e-05,
"loss": 23.0,
"step": 11
},
{
"epoch": 0.00043653825166430207,
"grad_norm": 2.843265792762395e-05,
"learning_rate": 9.924421052631578e-05,
"loss": 23.0,
"step": 12
},
{
"epoch": 0.0004729164393029939,
"grad_norm": 4.186210935586132e-05,
"learning_rate": 9.871631578947368e-05,
"loss": 23.0,
"step": 13
},
{
"epoch": 0.0005092946269416857,
"grad_norm": 4.8141664592549205e-05,
"learning_rate": 9.818842105263158e-05,
"loss": 23.0,
"step": 14
},
{
"epoch": 0.0005456728145803776,
"grad_norm": 3.392771031940356e-05,
"learning_rate": 9.766052631578948e-05,
"loss": 23.0,
"step": 15
},
{
"epoch": 0.0005820510022190694,
"grad_norm": 2.86590147879906e-05,
"learning_rate": 9.713263157894736e-05,
"loss": 23.0,
"step": 16
},
{
"epoch": 0.0006184291898577613,
"grad_norm": 4.220665869070217e-05,
"learning_rate": 9.660473684210526e-05,
"loss": 23.0,
"step": 17
},
{
"epoch": 0.0006548073774964531,
"grad_norm": 7.459171320078894e-05,
"learning_rate": 9.607684210526316e-05,
"loss": 23.0,
"step": 18
},
{
"epoch": 0.0006911855651351449,
"grad_norm": 6.791079067625105e-05,
"learning_rate": 9.554894736842104e-05,
"loss": 23.0,
"step": 19
},
{
"epoch": 0.0007275637527738368,
"grad_norm": 6.260742520680651e-05,
"learning_rate": 9.502105263157894e-05,
"loss": 23.0,
"step": 20
},
{
"epoch": 0.0007639419404125286,
"grad_norm": 4.905338573735207e-05,
"learning_rate": 9.449315789473684e-05,
"loss": 23.0,
"step": 21
},
{
"epoch": 0.0008003201280512205,
"grad_norm": 3.65267951565329e-05,
"learning_rate": 9.396526315789474e-05,
"loss": 23.0,
"step": 22
},
{
"epoch": 0.0008366983156899123,
"grad_norm": 4.681987775256857e-05,
"learning_rate": 9.343736842105264e-05,
"loss": 23.0,
"step": 23
},
{
"epoch": 0.0008730765033286041,
"grad_norm": 3.273553738836199e-05,
"learning_rate": 9.290947368421052e-05,
"loss": 23.0,
"step": 24
},
{
"epoch": 0.000909454690967296,
"grad_norm": 8.164726023096591e-05,
"learning_rate": 9.238157894736842e-05,
"loss": 23.0,
"step": 25
},
{
"epoch": 0.0009458328786059878,
"grad_norm": 6.306690920609981e-05,
"learning_rate": 9.18536842105263e-05,
"loss": 23.0,
"step": 26
},
{
"epoch": 0.0009822110662446796,
"grad_norm": 7.175507926149294e-05,
"learning_rate": 9.132578947368422e-05,
"loss": 23.0,
"step": 27
},
{
"epoch": 0.0010185892538833714,
"grad_norm": 5.047854210715741e-05,
"learning_rate": 9.07978947368421e-05,
"loss": 23.0,
"step": 28
},
{
"epoch": 0.0010549674415220635,
"grad_norm": 0.00011784725211327896,
"learning_rate": 9.027e-05,
"loss": 23.0,
"step": 29
},
{
"epoch": 0.0010913456291607553,
"grad_norm": 0.00014080016990192235,
"learning_rate": 8.97421052631579e-05,
"loss": 23.0,
"step": 30
},
{
"epoch": 0.001127723816799447,
"grad_norm": 8.020276436582208e-05,
"learning_rate": 8.921421052631578e-05,
"loss": 23.0,
"step": 31
},
{
"epoch": 0.0011641020044381389,
"grad_norm": 5.309961488819681e-05,
"learning_rate": 8.868631578947368e-05,
"loss": 23.0,
"step": 32
},
{
"epoch": 0.0012004801920768306,
"grad_norm": 0.00015320915554184467,
"learning_rate": 8.815842105263157e-05,
"loss": 23.0,
"step": 33
},
{
"epoch": 0.0012368583797155227,
"grad_norm": 8.746675302973017e-05,
"learning_rate": 8.763052631578948e-05,
"loss": 23.0,
"step": 34
},
{
"epoch": 0.0012732365673542145,
"grad_norm": 0.00010071782890008762,
"learning_rate": 8.710263157894737e-05,
"loss": 23.0,
"step": 35
},
{
"epoch": 0.0013096147549929063,
"grad_norm": 0.00013145706907380372,
"learning_rate": 8.657473684210526e-05,
"loss": 23.0,
"step": 36
},
{
"epoch": 0.001345992942631598,
"grad_norm": 0.00013049600238446146,
"learning_rate": 8.604684210526316e-05,
"loss": 23.0,
"step": 37
},
{
"epoch": 0.0013823711302702899,
"grad_norm": 7.936532347230241e-05,
"learning_rate": 8.551894736842105e-05,
"loss": 23.0,
"step": 38
},
{
"epoch": 0.0014187493179089819,
"grad_norm": 9.912149835145101e-05,
"learning_rate": 8.499105263157895e-05,
"loss": 23.0,
"step": 39
},
{
"epoch": 0.0014551275055476737,
"grad_norm": 0.00015672955487389117,
"learning_rate": 8.446315789473683e-05,
"loss": 23.0,
"step": 40
},
{
"epoch": 0.0014915056931863655,
"grad_norm": 0.00016603790572844446,
"learning_rate": 8.393526315789474e-05,
"loss": 23.0,
"step": 41
},
{
"epoch": 0.0015278838808250573,
"grad_norm": 0.00018017899128608406,
"learning_rate": 8.340736842105263e-05,
"loss": 23.0,
"step": 42
},
{
"epoch": 0.001564262068463749,
"grad_norm": 0.00017705631034914404,
"learning_rate": 8.287947368421053e-05,
"loss": 23.0,
"step": 43
},
{
"epoch": 0.001600640256102441,
"grad_norm": 0.0002334263699594885,
"learning_rate": 8.235157894736842e-05,
"loss": 23.0,
"step": 44
},
{
"epoch": 0.0016370184437411329,
"grad_norm": 0.00010936748731182888,
"learning_rate": 8.182368421052631e-05,
"loss": 23.0,
"step": 45
},
{
"epoch": 0.0016733966313798247,
"grad_norm": 0.0001802948536351323,
"learning_rate": 8.129578947368421e-05,
"loss": 23.0,
"step": 46
},
{
"epoch": 0.0017097748190185165,
"grad_norm": 0.00026060600066557527,
"learning_rate": 8.07678947368421e-05,
"loss": 23.0,
"step": 47
},
{
"epoch": 0.0017461530066572083,
"grad_norm": 0.00030714573222212493,
"learning_rate": 8.024e-05,
"loss": 23.0,
"step": 48
},
{
"epoch": 0.0017825311942959,
"grad_norm": 0.00010379219747846946,
"learning_rate": 7.97121052631579e-05,
"loss": 23.0,
"step": 49
},
{
"epoch": 0.001818909381934592,
"grad_norm": 0.0003222045779693872,
"learning_rate": 7.918421052631579e-05,
"loss": 23.0,
"step": 50
},
{
"epoch": 0.001818909381934592,
"eval_loss": 11.5,
"eval_runtime": 151.1621,
"eval_samples_per_second": 76.573,
"eval_steps_per_second": 19.145,
"step": 50
},
{
"epoch": 0.0018552875695732839,
"grad_norm": 7.084694516379386e-05,
"learning_rate": 7.865631578947369e-05,
"loss": 23.0,
"step": 51
},
{
"epoch": 0.0018916657572119757,
"grad_norm": 5.6916571338661015e-05,
"learning_rate": 7.812842105263157e-05,
"loss": 23.0,
"step": 52
},
{
"epoch": 0.0019280439448506675,
"grad_norm": 0.00013821788888890296,
"learning_rate": 7.760052631578947e-05,
"loss": 23.0,
"step": 53
},
{
"epoch": 0.0019644221324893593,
"grad_norm": 0.00010073825251311064,
"learning_rate": 7.707263157894737e-05,
"loss": 23.0,
"step": 54
},
{
"epoch": 0.0020008003201280513,
"grad_norm": 3.989057950093411e-05,
"learning_rate": 7.654473684210527e-05,
"loss": 23.0,
"step": 55
},
{
"epoch": 0.002037178507766743,
"grad_norm": 0.00011672514665406197,
"learning_rate": 7.601684210526316e-05,
"loss": 23.0,
"step": 56
},
{
"epoch": 0.002073556695405435,
"grad_norm": 0.00011147064651595429,
"learning_rate": 7.548894736842105e-05,
"loss": 23.0,
"step": 57
},
{
"epoch": 0.002109934883044127,
"grad_norm": 0.00014741995255462825,
"learning_rate": 7.496105263157895e-05,
"loss": 23.0,
"step": 58
},
{
"epoch": 0.0021463130706828185,
"grad_norm": 0.00019834299746435136,
"learning_rate": 7.443315789473683e-05,
"loss": 23.0,
"step": 59
},
{
"epoch": 0.0021826912583215105,
"grad_norm": 7.386082143057138e-05,
"learning_rate": 7.390526315789473e-05,
"loss": 23.0,
"step": 60
},
{
"epoch": 0.002219069445960202,
"grad_norm": 0.00012008604971924797,
"learning_rate": 7.337736842105263e-05,
"loss": 23.0,
"step": 61
},
{
"epoch": 0.002255447633598894,
"grad_norm": 0.00010102576925419271,
"learning_rate": 7.284947368421053e-05,
"loss": 23.0,
"step": 62
},
{
"epoch": 0.002291825821237586,
"grad_norm": 8.834318578010425e-05,
"learning_rate": 7.232157894736843e-05,
"loss": 23.0,
"step": 63
},
{
"epoch": 0.0023282040088762777,
"grad_norm": 0.00017303289496339858,
"learning_rate": 7.179368421052631e-05,
"loss": 23.0,
"step": 64
},
{
"epoch": 0.0023645821965149697,
"grad_norm": 0.00021577121515292674,
"learning_rate": 7.126578947368421e-05,
"loss": 23.0,
"step": 65
},
{
"epoch": 0.0024009603841536613,
"grad_norm": 0.00012508737563621253,
"learning_rate": 7.07378947368421e-05,
"loss": 23.0,
"step": 66
},
{
"epoch": 0.0024373385717923533,
"grad_norm": 0.00021092577662784606,
"learning_rate": 7.021e-05,
"loss": 23.0,
"step": 67
},
{
"epoch": 0.0024737167594310453,
"grad_norm": 9.400040289619938e-05,
"learning_rate": 6.968210526315789e-05,
"loss": 23.0,
"step": 68
},
{
"epoch": 0.002510094947069737,
"grad_norm": 0.00022102220100350678,
"learning_rate": 6.915421052631579e-05,
"loss": 23.0,
"step": 69
},
{
"epoch": 0.002546473134708429,
"grad_norm": 0.00022005224309396,
"learning_rate": 6.862631578947369e-05,
"loss": 23.0,
"step": 70
},
{
"epoch": 0.0025828513223471205,
"grad_norm": 0.0001787764485925436,
"learning_rate": 6.809842105263157e-05,
"loss": 23.0,
"step": 71
},
{
"epoch": 0.0026192295099858125,
"grad_norm": 0.00012445311585906893,
"learning_rate": 6.757052631578947e-05,
"loss": 23.0,
"step": 72
},
{
"epoch": 0.0026556076976245045,
"grad_norm": 0.00024370021128561348,
"learning_rate": 6.704263157894737e-05,
"loss": 23.0,
"step": 73
},
{
"epoch": 0.002691985885263196,
"grad_norm": 0.00029588048346340656,
"learning_rate": 6.651473684210526e-05,
"loss": 23.0,
"step": 74
},
{
"epoch": 0.002728364072901888,
"grad_norm": 0.00026139113469980657,
"learning_rate": 6.598684210526317e-05,
"loss": 23.0,
"step": 75
},
{
"epoch": 0.0027647422605405797,
"grad_norm": 9.695452899904922e-05,
"learning_rate": 6.545894736842105e-05,
"loss": 23.0,
"step": 76
},
{
"epoch": 0.0028011204481792717,
"grad_norm": 0.00018712144810706377,
"learning_rate": 6.493105263157895e-05,
"loss": 23.0,
"step": 77
},
{
"epoch": 0.0028374986358179637,
"grad_norm": 0.00016666650481056422,
"learning_rate": 6.440315789473684e-05,
"loss": 23.0,
"step": 78
},
{
"epoch": 0.0028738768234566553,
"grad_norm": 0.00029639736749231815,
"learning_rate": 6.387526315789473e-05,
"loss": 23.0,
"step": 79
},
{
"epoch": 0.0029102550110953473,
"grad_norm": 0.00020341298659332097,
"learning_rate": 6.334736842105263e-05,
"loss": 23.0,
"step": 80
},
{
"epoch": 0.002946633198734039,
"grad_norm": 0.0003866904880851507,
"learning_rate": 6.281947368421052e-05,
"loss": 23.0,
"step": 81
},
{
"epoch": 0.002983011386372731,
"grad_norm": 0.0003410225035622716,
"learning_rate": 6.229157894736843e-05,
"loss": 23.0,
"step": 82
},
{
"epoch": 0.003019389574011423,
"grad_norm": 0.0003634164750110358,
"learning_rate": 6.176368421052631e-05,
"loss": 23.0,
"step": 83
},
{
"epoch": 0.0030557677616501145,
"grad_norm": 0.00019395571143832058,
"learning_rate": 6.123578947368421e-05,
"loss": 23.0,
"step": 84
},
{
"epoch": 0.0030921459492888066,
"grad_norm": 0.00014450523303821683,
"learning_rate": 6.0707894736842105e-05,
"loss": 23.0,
"step": 85
},
{
"epoch": 0.003128524136927498,
"grad_norm": 0.0003423929156269878,
"learning_rate": 6.0179999999999996e-05,
"loss": 23.0,
"step": 86
},
{
"epoch": 0.00316490232456619,
"grad_norm": 0.00019756775873247534,
"learning_rate": 5.965210526315789e-05,
"loss": 23.0,
"step": 87
},
{
"epoch": 0.003201280512204882,
"grad_norm": 0.0003216055629309267,
"learning_rate": 5.912421052631578e-05,
"loss": 23.0,
"step": 88
},
{
"epoch": 0.0032376586998435737,
"grad_norm": 0.0005142286536283791,
"learning_rate": 5.8596315789473685e-05,
"loss": 23.0,
"step": 89
},
{
"epoch": 0.0032740368874822658,
"grad_norm": 0.00019845775386784226,
"learning_rate": 5.8068421052631583e-05,
"loss": 23.0,
"step": 90
},
{
"epoch": 0.0033104150751209573,
"grad_norm": 0.0002886882866732776,
"learning_rate": 5.7540526315789475e-05,
"loss": 23.0,
"step": 91
},
{
"epoch": 0.0033467932627596494,
"grad_norm": 0.0003776240337174386,
"learning_rate": 5.701263157894737e-05,
"loss": 23.0,
"step": 92
},
{
"epoch": 0.003383171450398341,
"grad_norm": 0.00038095013587735593,
"learning_rate": 5.648473684210526e-05,
"loss": 23.0,
"step": 93
},
{
"epoch": 0.003419549638037033,
"grad_norm": 0.0003583188517950475,
"learning_rate": 5.595684210526315e-05,
"loss": 23.0,
"step": 94
},
{
"epoch": 0.003455927825675725,
"grad_norm": 0.0004853124264627695,
"learning_rate": 5.5428947368421055e-05,
"loss": 23.0,
"step": 95
},
{
"epoch": 0.0034923060133144166,
"grad_norm": 0.00038572377525269985,
"learning_rate": 5.490105263157895e-05,
"loss": 23.0,
"step": 96
},
{
"epoch": 0.0035286842009531086,
"grad_norm": 0.0003467136702965945,
"learning_rate": 5.4373157894736846e-05,
"loss": 23.0,
"step": 97
},
{
"epoch": 0.0035650623885918,
"grad_norm": 0.0002495538501534611,
"learning_rate": 5.384526315789474e-05,
"loss": 23.0,
"step": 98
},
{
"epoch": 0.003601440576230492,
"grad_norm": 0.0007139640511013567,
"learning_rate": 5.331736842105263e-05,
"loss": 23.0,
"step": 99
},
{
"epoch": 0.003637818763869184,
"grad_norm": 0.0005742310895584524,
"learning_rate": 5.278947368421052e-05,
"loss": 23.0,
"step": 100
},
{
"epoch": 0.003637818763869184,
"eval_loss": 11.5,
"eval_runtime": 151.1117,
"eval_samples_per_second": 76.599,
"eval_steps_per_second": 19.151,
"step": 100
},
{
"epoch": 0.0036741969515078758,
"grad_norm": 7.461366476491094e-05,
"learning_rate": 5.226157894736842e-05,
"loss": 23.0,
"step": 101
},
{
"epoch": 0.0037105751391465678,
"grad_norm": 0.00017520117398817092,
"learning_rate": 5.173368421052632e-05,
"loss": 23.0,
"step": 102
},
{
"epoch": 0.0037469533267852594,
"grad_norm": 0.00017740805924404413,
"learning_rate": 5.1205789473684216e-05,
"loss": 23.0,
"step": 103
},
{
"epoch": 0.0037833315144239514,
"grad_norm": 0.00010646147711668164,
"learning_rate": 5.067789473684211e-05,
"loss": 23.0,
"step": 104
},
{
"epoch": 0.0038197097020626434,
"grad_norm": 0.0001494868629379198,
"learning_rate": 5.015e-05,
"loss": 23.0,
"step": 105
},
{
"epoch": 0.003856087889701335,
"grad_norm": 0.00012090586824342608,
"learning_rate": 4.962210526315789e-05,
"loss": 23.0,
"step": 106
},
{
"epoch": 0.003892466077340027,
"grad_norm": 0.00021911323710810393,
"learning_rate": 4.909421052631579e-05,
"loss": 23.0,
"step": 107
},
{
"epoch": 0.003928844264978719,
"grad_norm": 0.0003465862537268549,
"learning_rate": 4.856631578947368e-05,
"loss": 23.0,
"step": 108
},
{
"epoch": 0.003965222452617411,
"grad_norm": 0.0003293559711892158,
"learning_rate": 4.803842105263158e-05,
"loss": 23.0,
"step": 109
},
{
"epoch": 0.004001600640256103,
"grad_norm": 0.0002986920881085098,
"learning_rate": 4.751052631578947e-05,
"loss": 23.0,
"step": 110
},
{
"epoch": 0.004037978827894795,
"grad_norm": 0.00022487634851131588,
"learning_rate": 4.698263157894737e-05,
"loss": 23.0,
"step": 111
},
{
"epoch": 0.004074357015533486,
"grad_norm": 0.00037544663064181805,
"learning_rate": 4.645473684210526e-05,
"loss": 23.0,
"step": 112
},
{
"epoch": 0.004110735203172178,
"grad_norm": 0.0001820535253500566,
"learning_rate": 4.592684210526315e-05,
"loss": 23.0,
"step": 113
},
{
"epoch": 0.00414711339081087,
"grad_norm": 0.00031552044674754143,
"learning_rate": 4.539894736842105e-05,
"loss": 23.0,
"step": 114
},
{
"epoch": 0.004183491578449562,
"grad_norm": 8.148902998073027e-05,
"learning_rate": 4.487105263157895e-05,
"loss": 23.0,
"step": 115
},
{
"epoch": 0.004219869766088254,
"grad_norm": 0.00017857461352832615,
"learning_rate": 4.434315789473684e-05,
"loss": 23.0,
"step": 116
},
{
"epoch": 0.004256247953726945,
"grad_norm": 0.00020005616534035653,
"learning_rate": 4.381526315789474e-05,
"loss": 23.0,
"step": 117
},
{
"epoch": 0.004292626141365637,
"grad_norm": 0.00036300913779996336,
"learning_rate": 4.328736842105263e-05,
"loss": 23.0,
"step": 118
},
{
"epoch": 0.004329004329004329,
"grad_norm": 0.0003054387343581766,
"learning_rate": 4.2759473684210523e-05,
"loss": 23.0,
"step": 119
},
{
"epoch": 0.004365382516643021,
"grad_norm": 0.0002077149401884526,
"learning_rate": 4.2231578947368415e-05,
"loss": 23.0,
"step": 120
},
{
"epoch": 0.004401760704281713,
"grad_norm": 0.0002281182969454676,
"learning_rate": 4.1703684210526314e-05,
"loss": 23.0,
"step": 121
},
{
"epoch": 0.004438138891920404,
"grad_norm": 0.0005008115549571812,
"learning_rate": 4.117578947368421e-05,
"loss": 23.0,
"step": 122
},
{
"epoch": 0.004474517079559096,
"grad_norm": 0.00035556236980482936,
"learning_rate": 4.0647894736842104e-05,
"loss": 23.0,
"step": 123
},
{
"epoch": 0.004510895267197788,
"grad_norm": 0.0005603492027148604,
"learning_rate": 4.012e-05,
"loss": 23.0,
"step": 124
},
{
"epoch": 0.00454727345483648,
"grad_norm": 0.0004926612018607557,
"learning_rate": 3.9592105263157894e-05,
"loss": 23.0,
"step": 125
},
{
"epoch": 0.004583651642475172,
"grad_norm": 0.0004209111793898046,
"learning_rate": 3.9064210526315785e-05,
"loss": 23.0,
"step": 126
},
{
"epoch": 0.004620029830113863,
"grad_norm": 0.00046484937774948776,
"learning_rate": 3.8536315789473684e-05,
"loss": 23.0,
"step": 127
},
{
"epoch": 0.004656408017752555,
"grad_norm": 0.0005833171890117228,
"learning_rate": 3.800842105263158e-05,
"loss": 23.0,
"step": 128
},
{
"epoch": 0.004692786205391247,
"grad_norm": 0.0005073467036709189,
"learning_rate": 3.7480526315789474e-05,
"loss": 23.0,
"step": 129
},
{
"epoch": 0.0047291643930299394,
"grad_norm": 0.0002727651153691113,
"learning_rate": 3.6952631578947366e-05,
"loss": 23.0,
"step": 130
},
{
"epoch": 0.0047655425806686314,
"grad_norm": 0.000585235480684787,
"learning_rate": 3.6424736842105264e-05,
"loss": 23.0,
"step": 131
},
{
"epoch": 0.004801920768307323,
"grad_norm": 0.0002120441640727222,
"learning_rate": 3.5896842105263156e-05,
"loss": 23.0,
"step": 132
},
{
"epoch": 0.004838298955946015,
"grad_norm": 0.0004325111221987754,
"learning_rate": 3.536894736842105e-05,
"loss": 23.0,
"step": 133
},
{
"epoch": 0.004874677143584707,
"grad_norm": 0.00032356969313696027,
"learning_rate": 3.4841052631578946e-05,
"loss": 23.0,
"step": 134
},
{
"epoch": 0.004911055331223399,
"grad_norm": 0.000456592213595286,
"learning_rate": 3.4313157894736844e-05,
"loss": 23.0,
"step": 135
},
{
"epoch": 0.004947433518862091,
"grad_norm": 0.0007706891628913581,
"learning_rate": 3.3785263157894736e-05,
"loss": 23.0,
"step": 136
},
{
"epoch": 0.004983811706500782,
"grad_norm": 0.000517175649292767,
"learning_rate": 3.325736842105263e-05,
"loss": 23.0,
"step": 137
},
{
"epoch": 0.005020189894139474,
"grad_norm": 0.000617575307842344,
"learning_rate": 3.2729473684210526e-05,
"loss": 23.0,
"step": 138
},
{
"epoch": 0.005056568081778166,
"grad_norm": 0.0002448149025440216,
"learning_rate": 3.220157894736842e-05,
"loss": 23.0,
"step": 139
},
{
"epoch": 0.005092946269416858,
"grad_norm": 0.0004209695034660399,
"learning_rate": 3.1673684210526316e-05,
"loss": 23.0,
"step": 140
},
{
"epoch": 0.00512932445705555,
"grad_norm": 0.0003657886991277337,
"learning_rate": 3.1145789473684215e-05,
"loss": 23.0,
"step": 141
},
{
"epoch": 0.005165702644694241,
"grad_norm": 0.0008267536177299917,
"learning_rate": 3.0617894736842107e-05,
"loss": 23.0,
"step": 142
},
{
"epoch": 0.005202080832332933,
"grad_norm": 0.0008407800341956317,
"learning_rate": 3.0089999999999998e-05,
"loss": 23.0,
"step": 143
},
{
"epoch": 0.005238459019971625,
"grad_norm": 0.0005053975037299097,
"learning_rate": 2.956210526315789e-05,
"loss": 23.0,
"step": 144
},
{
"epoch": 0.005274837207610317,
"grad_norm": 0.00048378558130934834,
"learning_rate": 2.9034210526315792e-05,
"loss": 23.0,
"step": 145
},
{
"epoch": 0.005311215395249009,
"grad_norm": 0.0007292490336112678,
"learning_rate": 2.8506315789473683e-05,
"loss": 23.0,
"step": 146
},
{
"epoch": 0.0053475935828877,
"grad_norm": 0.000520595523994416,
"learning_rate": 2.7978421052631575e-05,
"loss": 23.0,
"step": 147
},
{
"epoch": 0.005383971770526392,
"grad_norm": 0.0005500400438904762,
"learning_rate": 2.7450526315789474e-05,
"loss": 23.0,
"step": 148
},
{
"epoch": 0.005420349958165084,
"grad_norm": 0.00027113532996736467,
"learning_rate": 2.692263157894737e-05,
"loss": 23.0,
"step": 149
},
{
"epoch": 0.005456728145803776,
"grad_norm": 0.00021608092356473207,
"learning_rate": 2.639473684210526e-05,
"loss": 23.0,
"step": 150
},
{
"epoch": 0.005456728145803776,
"eval_loss": 11.5,
"eval_runtime": 151.1109,
"eval_samples_per_second": 76.599,
"eval_steps_per_second": 19.152,
"step": 150
},
{
"epoch": 0.005493106333442468,
"grad_norm": 0.0003489772789180279,
"learning_rate": 2.586684210526316e-05,
"loss": 23.0,
"step": 151
},
{
"epoch": 0.005529484521081159,
"grad_norm": 0.00012043579044984654,
"learning_rate": 2.5338947368421054e-05,
"loss": 23.0,
"step": 152
},
{
"epoch": 0.0055658627087198514,
"grad_norm": 0.00014974933583289385,
"learning_rate": 2.4811052631578945e-05,
"loss": 23.0,
"step": 153
},
{
"epoch": 0.0056022408963585435,
"grad_norm": 0.00023572241479996592,
"learning_rate": 2.428315789473684e-05,
"loss": 23.0,
"step": 154
},
{
"epoch": 0.0056386190839972355,
"grad_norm": 0.00013939540076535195,
"learning_rate": 2.3755263157894736e-05,
"loss": 23.0,
"step": 155
},
{
"epoch": 0.0056749972716359275,
"grad_norm": 0.00021143161575309932,
"learning_rate": 2.322736842105263e-05,
"loss": 23.0,
"step": 156
},
{
"epoch": 0.005711375459274619,
"grad_norm": 0.00038103267434053123,
"learning_rate": 2.2699473684210526e-05,
"loss": 23.0,
"step": 157
},
{
"epoch": 0.005747753646913311,
"grad_norm": 0.000460915471194312,
"learning_rate": 2.217157894736842e-05,
"loss": 23.0,
"step": 158
},
{
"epoch": 0.005784131834552003,
"grad_norm": 0.0002898467646446079,
"learning_rate": 2.1643684210526316e-05,
"loss": 23.0,
"step": 159
},
{
"epoch": 0.005820510022190695,
"grad_norm": 0.0002288204850628972,
"learning_rate": 2.1115789473684208e-05,
"loss": 23.0,
"step": 160
},
{
"epoch": 0.005856888209829387,
"grad_norm": 0.0002909198228735477,
"learning_rate": 2.0587894736842106e-05,
"loss": 23.0,
"step": 161
},
{
"epoch": 0.005893266397468078,
"grad_norm": 0.0002346673427382484,
"learning_rate": 2.006e-05,
"loss": 23.0,
"step": 162
},
{
"epoch": 0.00592964458510677,
"grad_norm": 0.0008843714604154229,
"learning_rate": 1.9532105263157893e-05,
"loss": 23.0,
"step": 163
},
{
"epoch": 0.005966022772745462,
"grad_norm": 0.0004195628280285746,
"learning_rate": 1.900421052631579e-05,
"loss": 23.0,
"step": 164
},
{
"epoch": 0.006002400960384154,
"grad_norm": 0.00046790888882242143,
"learning_rate": 1.8476315789473683e-05,
"loss": 23.0,
"step": 165
},
{
"epoch": 0.006038779148022846,
"grad_norm": 0.00015601440099999309,
"learning_rate": 1.7948421052631578e-05,
"loss": 23.0,
"step": 166
},
{
"epoch": 0.006075157335661537,
"grad_norm": 0.0005221933824941516,
"learning_rate": 1.7420526315789473e-05,
"loss": 23.0,
"step": 167
},
{
"epoch": 0.006111535523300229,
"grad_norm": 0.0002946684544440359,
"learning_rate": 1.6892631578947368e-05,
"loss": 23.0,
"step": 168
},
{
"epoch": 0.006147913710938921,
"grad_norm": 0.00034045460051856935,
"learning_rate": 1.6364736842105263e-05,
"loss": 23.0,
"step": 169
},
{
"epoch": 0.006184291898577613,
"grad_norm": 0.00024054516688920557,
"learning_rate": 1.5836842105263158e-05,
"loss": 23.0,
"step": 170
},
{
"epoch": 0.006220670086216305,
"grad_norm": 0.0005520730628632009,
"learning_rate": 1.5308947368421053e-05,
"loss": 23.0,
"step": 171
},
{
"epoch": 0.006257048273854996,
"grad_norm": 0.00023751375556457788,
"learning_rate": 1.4781052631578945e-05,
"loss": 23.0,
"step": 172
},
{
"epoch": 0.006293426461493688,
"grad_norm": 0.0005580897559411824,
"learning_rate": 1.4253157894736842e-05,
"loss": 23.0,
"step": 173
},
{
"epoch": 0.00632980464913238,
"grad_norm": 0.00024056828988250345,
"learning_rate": 1.3725263157894737e-05,
"loss": 23.0,
"step": 174
},
{
"epoch": 0.006366182836771072,
"grad_norm": 0.0004827072552870959,
"learning_rate": 1.319736842105263e-05,
"loss": 23.0,
"step": 175
},
{
"epoch": 0.006402561024409764,
"grad_norm": 0.0004384911444503814,
"learning_rate": 1.2669473684210527e-05,
"loss": 23.0,
"step": 176
},
{
"epoch": 0.0064389392120484555,
"grad_norm": 0.0005333396256901324,
"learning_rate": 1.214157894736842e-05,
"loss": 23.0,
"step": 177
},
{
"epoch": 0.0064753173996871475,
"grad_norm": 0.00027803468401543796,
"learning_rate": 1.1613684210526315e-05,
"loss": 23.0,
"step": 178
},
{
"epoch": 0.0065116955873258395,
"grad_norm": 0.0005504011642187834,
"learning_rate": 1.108578947368421e-05,
"loss": 23.0,
"step": 179
},
{
"epoch": 0.0065480737749645315,
"grad_norm": 0.0006978894816711545,
"learning_rate": 1.0557894736842104e-05,
"loss": 23.0,
"step": 180
},
{
"epoch": 0.0065844519626032235,
"grad_norm": 0.0004980647936463356,
"learning_rate": 1.003e-05,
"loss": 23.0,
"step": 181
},
{
"epoch": 0.006620830150241915,
"grad_norm": 0.0008371528820134699,
"learning_rate": 9.502105263157896e-06,
"loss": 23.0,
"step": 182
},
{
"epoch": 0.006657208337880607,
"grad_norm": 0.00043287783046253026,
"learning_rate": 8.974210526315789e-06,
"loss": 23.0,
"step": 183
},
{
"epoch": 0.006693586525519299,
"grad_norm": 0.00033211580011993647,
"learning_rate": 8.446315789473684e-06,
"loss": 23.0,
"step": 184
},
{
"epoch": 0.006729964713157991,
"grad_norm": 0.0016474088188260794,
"learning_rate": 7.918421052631579e-06,
"loss": 23.0,
"step": 185
},
{
"epoch": 0.006766342900796682,
"grad_norm": 0.0008112106588669121,
"learning_rate": 7.3905263157894725e-06,
"loss": 23.0,
"step": 186
},
{
"epoch": 0.006802721088435374,
"grad_norm": 0.0006704179104417562,
"learning_rate": 6.862631578947368e-06,
"loss": 23.0,
"step": 187
},
{
"epoch": 0.006839099276074066,
"grad_norm": 0.0007978321518748999,
"learning_rate": 6.3347368421052634e-06,
"loss": 23.0,
"step": 188
},
{
"epoch": 0.006875477463712758,
"grad_norm": 0.0008913553901948035,
"learning_rate": 5.806842105263158e-06,
"loss": 23.0,
"step": 189
},
{
"epoch": 0.00691185565135145,
"grad_norm": 0.0006912974058650434,
"learning_rate": 5.278947368421052e-06,
"loss": 23.0,
"step": 190
},
{
"epoch": 0.006948233838990141,
"grad_norm": 0.0007087139529176056,
"learning_rate": 4.751052631578948e-06,
"loss": 23.0,
"step": 191
},
{
"epoch": 0.006984612026628833,
"grad_norm": 0.00047257327241823077,
"learning_rate": 4.223157894736842e-06,
"loss": 23.0,
"step": 192
},
{
"epoch": 0.007020990214267525,
"grad_norm": 0.00034283282002434134,
"learning_rate": 3.6952631578947362e-06,
"loss": 23.0,
"step": 193
},
{
"epoch": 0.007057368401906217,
"grad_norm": 0.0011271246476098895,
"learning_rate": 3.1673684210526317e-06,
"loss": 23.0,
"step": 194
},
{
"epoch": 0.007093746589544909,
"grad_norm": 0.0007555687916465104,
"learning_rate": 2.639473684210526e-06,
"loss": 23.0,
"step": 195
},
{
"epoch": 0.0071301247771836,
"grad_norm": 0.0008845161064527929,
"learning_rate": 2.111578947368421e-06,
"loss": 23.0,
"step": 196
},
{
"epoch": 0.007166502964822292,
"grad_norm": 0.00046997558092698455,
"learning_rate": 1.5836842105263159e-06,
"loss": 23.0,
"step": 197
},
{
"epoch": 0.007202881152460984,
"grad_norm": 0.00101653509773314,
"learning_rate": 1.0557894736842105e-06,
"loss": 23.0,
"step": 198
},
{
"epoch": 0.007239259340099676,
"grad_norm": 0.00044932126183994114,
"learning_rate": 5.278947368421053e-07,
"loss": 23.0,
"step": 199
},
{
"epoch": 0.007275637527738368,
"grad_norm": 0.0005571041256189346,
"learning_rate": 0.0,
"loss": 23.0,
"step": 200
},
{
"epoch": 0.007275637527738368,
"eval_loss": 11.5,
"eval_runtime": 151.1327,
"eval_samples_per_second": 76.588,
"eval_steps_per_second": 19.149,
"step": 200
}
],
"logging_steps": 1,
"max_steps": 200,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 50,
"stateful_callbacks": {
"EarlyStoppingCallback": {
"args": {
"early_stopping_patience": 5,
"early_stopping_threshold": 0.0
},
"attributes": {
"early_stopping_patience_counter": 3
}
},
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 8059714732032.0,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}
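
The state above is the JSON that a Hugging Face Transformers Trainer writes alongside each saved checkpoint. A minimal sketch of how one might inspect it is shown below, assuming the standard checkpoint layout (i.e. the file lives at checkpoint-200/trainer_state.json); the path and variable names here are illustrative, not part of the checkpoint itself.

import json

# Assumption: standard Trainer checkpoint layout, where this state is saved
# as trainer_state.json inside the checkpoint directory.
with open("checkpoint-200/trainer_state.json") as f:
    state = json.load(f)

# log_history mixes training-loss entries and eval entries; split them apart.
train_logs = [e for e in state["log_history"] if "loss" in e]
eval_logs = [e for e in state["log_history"] if "eval_loss" in e]

print(f"best_metric={state['best_metric']} at {state['best_model_checkpoint']}")
print(f"final train loss: {train_logs[-1]['loss']} (step {train_logs[-1]['step']})")
print("eval_loss by step:", [(e["step"], e["eval_loss"]) for e in eval_logs])

# Early-stopping bookkeeping lives under stateful_callbacks.
es = state["stateful_callbacks"]["EarlyStoppingCallback"]
print(f"patience counter: {es['attributes']['early_stopping_patience_counter']}"
      f" / {es['args']['early_stopping_patience']}")

Run against this checkpoint, the sketch would report the best metric recorded at checkpoint-50, the eval_loss logged every 50 steps, and the early-stopping counter (3 of 5) from the callback state above.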