{
"best_metric": 11.5,
"best_model_checkpoint": "miner_id_24/checkpoint-50",
"epoch": 0.007275637527738368,
"eval_steps": 50,
"global_step": 200,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 3.637818763869184e-05,
"grad_norm": 2.948543624370359e-05,
"learning_rate": 1.0060000000000002e-05,
"loss": 23.0,
"step": 1
},
{
"epoch": 3.637818763869184e-05,
"eval_loss": 11.5,
"eval_runtime": 151.3578,
"eval_samples_per_second": 76.474,
"eval_steps_per_second": 19.12,
"step": 1
},
{
"epoch": 7.275637527738368e-05,
"grad_norm": 2.2105969037511386e-05,
"learning_rate": 2.0120000000000004e-05,
"loss": 23.0,
"step": 2
},
{
"epoch": 0.00010913456291607552,
"grad_norm": 1.7058524463209324e-05,
"learning_rate": 3.018e-05,
"loss": 23.0,
"step": 3
},
{
"epoch": 0.00014551275055476736,
"grad_norm": 2.060344922938384e-05,
"learning_rate": 4.024000000000001e-05,
"loss": 23.0,
"step": 4
},
{
"epoch": 0.0001818909381934592,
"grad_norm": 2.0100153051316738e-05,
"learning_rate": 5.03e-05,
"loss": 23.0,
"step": 5
},
{
"epoch": 0.00021826912583215103,
"grad_norm": 3.034624933206942e-05,
"learning_rate": 6.036e-05,
"loss": 23.0,
"step": 6
},
{
"epoch": 0.00025464731347084286,
"grad_norm": 2.815291918523144e-05,
"learning_rate": 7.042e-05,
"loss": 23.0,
"step": 7
},
{
"epoch": 0.0002910255011095347,
"grad_norm": 1.748102658893913e-05,
"learning_rate": 8.048000000000002e-05,
"loss": 23.0,
"step": 8
},
{
"epoch": 0.00032740368874822657,
"grad_norm": 3.207884219591506e-05,
"learning_rate": 9.054000000000001e-05,
"loss": 23.0,
"step": 9
},
{
"epoch": 0.0003637818763869184,
"grad_norm": 2.7205978767597117e-05,
"learning_rate": 0.0001006,
"loss": 23.0,
"step": 10
},
{
"epoch": 0.00040016006402561027,
"grad_norm": 1.864074874902144e-05,
"learning_rate": 0.00010007052631578948,
"loss": 23.0,
"step": 11
},
{
"epoch": 0.00043653825166430207,
"grad_norm": 2.7616073566605337e-05,
"learning_rate": 9.954105263157895e-05,
"loss": 23.0,
"step": 12
},
{
"epoch": 0.0004729164393029939,
"grad_norm": 3.965870928368531e-05,
"learning_rate": 9.901157894736842e-05,
"loss": 23.0,
"step": 13
},
{
"epoch": 0.0005092946269416857,
"grad_norm": 4.447407991392538e-05,
"learning_rate": 9.84821052631579e-05,
"loss": 23.0,
"step": 14
},
{
"epoch": 0.0005456728145803776,
"grad_norm": 3.3188109227921814e-05,
"learning_rate": 9.795263157894737e-05,
"loss": 23.0,
"step": 15
},
{
"epoch": 0.0005820510022190694,
"grad_norm": 2.72152046818519e-05,
"learning_rate": 9.742315789473686e-05,
"loss": 23.0,
"step": 16
},
{
"epoch": 0.0006184291898577613,
"grad_norm": 4.269916098564863e-05,
"learning_rate": 9.689368421052633e-05,
"loss": 23.0,
"step": 17
},
{
"epoch": 0.0006548073774964531,
"grad_norm": 7.144509436329827e-05,
"learning_rate": 9.63642105263158e-05,
"loss": 23.0,
"step": 18
},
{
"epoch": 0.0006911855651351449,
"grad_norm": 5.8125104260398075e-05,
"learning_rate": 9.583473684210527e-05,
"loss": 23.0,
"step": 19
},
{
"epoch": 0.0007275637527738368,
"grad_norm": 5.5581836932105944e-05,
"learning_rate": 9.530526315789474e-05,
"loss": 23.0,
"step": 20
},
{
"epoch": 0.0007639419404125286,
"grad_norm": 4.992326284991577e-05,
"learning_rate": 9.477578947368422e-05,
"loss": 23.0,
"step": 21
},
{
"epoch": 0.0008003201280512205,
"grad_norm": 3.754639328690246e-05,
"learning_rate": 9.424631578947369e-05,
"loss": 23.0,
"step": 22
},
{
"epoch": 0.0008366983156899123,
"grad_norm": 4.4129676098236814e-05,
"learning_rate": 9.371684210526316e-05,
"loss": 23.0,
"step": 23
},
{
"epoch": 0.0008730765033286041,
"grad_norm": 3.2032086892286316e-05,
"learning_rate": 9.318736842105263e-05,
"loss": 23.0,
"step": 24
},
{
"epoch": 0.000909454690967296,
"grad_norm": 7.238514808705077e-05,
"learning_rate": 9.26578947368421e-05,
"loss": 23.0,
"step": 25
},
{
"epoch": 0.0009458328786059878,
"grad_norm": 5.7584187743486837e-05,
"learning_rate": 9.212842105263159e-05,
"loss": 23.0,
"step": 26
},
{
"epoch": 0.0009822110662446796,
"grad_norm": 7.019707845756784e-05,
"learning_rate": 9.159894736842107e-05,
"loss": 23.0,
"step": 27
},
{
"epoch": 0.0010185892538833714,
"grad_norm": 5.2452742238529027e-05,
"learning_rate": 9.106947368421054e-05,
"loss": 23.0,
"step": 28
},
{
"epoch": 0.0010549674415220635,
"grad_norm": 0.00011238550359848887,
"learning_rate": 9.054000000000001e-05,
"loss": 23.0,
"step": 29
},
{
"epoch": 0.0010913456291607553,
"grad_norm": 0.00013983648386783898,
"learning_rate": 9.001052631578948e-05,
"loss": 23.0,
"step": 30
},
{
"epoch": 0.001127723816799447,
"grad_norm": 6.872925587231293e-05,
"learning_rate": 8.948105263157895e-05,
"loss": 23.0,
"step": 31
},
{
"epoch": 0.0011641020044381389,
"grad_norm": 5.7285513321403414e-05,
"learning_rate": 8.895157894736842e-05,
"loss": 23.0,
"step": 32
},
{
"epoch": 0.0012004801920768306,
"grad_norm": 0.00013172421313356608,
"learning_rate": 8.842210526315789e-05,
"loss": 23.0,
"step": 33
},
{
"epoch": 0.0012368583797155227,
"grad_norm": 8.472959598293528e-05,
"learning_rate": 8.789263157894738e-05,
"loss": 23.0,
"step": 34
},
{
"epoch": 0.0012732365673542145,
"grad_norm": 9.260691149393097e-05,
"learning_rate": 8.736315789473685e-05,
"loss": 23.0,
"step": 35
},
{
"epoch": 0.0013096147549929063,
"grad_norm": 0.00011570382775971666,
"learning_rate": 8.683368421052632e-05,
"loss": 23.0,
"step": 36
},
{
"epoch": 0.001345992942631598,
"grad_norm": 0.00013264735753182322,
"learning_rate": 8.63042105263158e-05,
"loss": 23.0,
"step": 37
},
{
"epoch": 0.0013823711302702899,
"grad_norm": 7.428869866998866e-05,
"learning_rate": 8.577473684210527e-05,
"loss": 23.0,
"step": 38
},
{
"epoch": 0.0014187493179089819,
"grad_norm": 9.110313112614676e-05,
"learning_rate": 8.524526315789474e-05,
"loss": 23.0,
"step": 39
},
{
"epoch": 0.0014551275055476737,
"grad_norm": 0.00014309283869806677,
"learning_rate": 8.471578947368421e-05,
"loss": 23.0,
"step": 40
},
{
"epoch": 0.0014915056931863655,
"grad_norm": 0.00016003995551727712,
"learning_rate": 8.41863157894737e-05,
"loss": 23.0,
"step": 41
},
{
"epoch": 0.0015278838808250573,
"grad_norm": 0.00016799979493953288,
"learning_rate": 8.365684210526317e-05,
"loss": 23.0,
"step": 42
},
{
"epoch": 0.001564262068463749,
"grad_norm": 0.00016025554214138538,
"learning_rate": 8.312736842105264e-05,
"loss": 23.0,
"step": 43
},
{
"epoch": 0.001600640256102441,
"grad_norm": 0.000203637799131684,
"learning_rate": 8.259789473684211e-05,
"loss": 23.0,
"step": 44
},
{
"epoch": 0.0016370184437411329,
"grad_norm": 9.858429984888062e-05,
"learning_rate": 8.206842105263158e-05,
"loss": 23.0,
"step": 45
},
{
"epoch": 0.0016733966313798247,
"grad_norm": 0.00014126356109045446,
"learning_rate": 8.153894736842105e-05,
"loss": 23.0,
"step": 46
},
{
"epoch": 0.0017097748190185165,
"grad_norm": 0.00022886847727932036,
"learning_rate": 8.100947368421053e-05,
"loss": 23.0,
"step": 47
},
{
"epoch": 0.0017461530066572083,
"grad_norm": 0.00025842548348009586,
"learning_rate": 8.048000000000002e-05,
"loss": 23.0,
"step": 48
},
{
"epoch": 0.0017825311942959,
"grad_norm": 0.00010498856863705441,
"learning_rate": 7.995052631578949e-05,
"loss": 23.0,
"step": 49
},
{
"epoch": 0.001818909381934592,
"grad_norm": 0.0002834200859069824,
"learning_rate": 7.942105263157896e-05,
"loss": 23.0,
"step": 50
},
{
"epoch": 0.001818909381934592,
"eval_loss": 11.5,
"eval_runtime": 151.2778,
"eval_samples_per_second": 76.515,
"eval_steps_per_second": 19.13,
"step": 50
},
{
"epoch": 0.0018552875695732839,
"grad_norm": 6.455896800616756e-05,
"learning_rate": 7.889157894736843e-05,
"loss": 23.0,
"step": 51
},
{
"epoch": 0.0018916657572119757,
"grad_norm": 5.494441938935779e-05,
"learning_rate": 7.83621052631579e-05,
"loss": 23.0,
"step": 52
},
{
"epoch": 0.0019280439448506675,
"grad_norm": 0.00012172012066002935,
"learning_rate": 7.783263157894737e-05,
"loss": 23.0,
"step": 53
},
{
"epoch": 0.0019644221324893593,
"grad_norm": 8.499459363520145e-05,
"learning_rate": 7.730315789473684e-05,
"loss": 23.0,
"step": 54
},
{
"epoch": 0.0020008003201280513,
"grad_norm": 3.97942858398892e-05,
"learning_rate": 7.677368421052632e-05,
"loss": 23.0,
"step": 55
},
{
"epoch": 0.002037178507766743,
"grad_norm": 0.00011038257071049884,
"learning_rate": 7.624421052631579e-05,
"loss": 23.0,
"step": 56
},
{
"epoch": 0.002073556695405435,
"grad_norm": 9.370837506139651e-05,
"learning_rate": 7.571473684210526e-05,
"loss": 23.0,
"step": 57
},
{
"epoch": 0.002109934883044127,
"grad_norm": 0.00014087239105720073,
"learning_rate": 7.518526315789475e-05,
"loss": 23.0,
"step": 58
},
{
"epoch": 0.0021463130706828185,
"grad_norm": 0.0001720043655950576,
"learning_rate": 7.465578947368422e-05,
"loss": 23.0,
"step": 59
},
{
"epoch": 0.0021826912583215105,
"grad_norm": 6.865164323244244e-05,
"learning_rate": 7.412631578947369e-05,
"loss": 23.0,
"step": 60
},
{
"epoch": 0.002219069445960202,
"grad_norm": 0.00011147729674121365,
"learning_rate": 7.359684210526317e-05,
"loss": 23.0,
"step": 61
},
{
"epoch": 0.002255447633598894,
"grad_norm": 9.154774306807667e-05,
"learning_rate": 7.306736842105264e-05,
"loss": 23.0,
"step": 62
},
{
"epoch": 0.002291825821237586,
"grad_norm": 7.708803605055436e-05,
"learning_rate": 7.253789473684211e-05,
"loss": 23.0,
"step": 63
},
{
"epoch": 0.0023282040088762777,
"grad_norm": 0.00016180398233700544,
"learning_rate": 7.200842105263158e-05,
"loss": 23.0,
"step": 64
},
{
"epoch": 0.0023645821965149697,
"grad_norm": 0.00018443948647473007,
"learning_rate": 7.147894736842105e-05,
"loss": 23.0,
"step": 65
},
{
"epoch": 0.0024009603841536613,
"grad_norm": 0.0001334257103735581,
"learning_rate": 7.094947368421052e-05,
"loss": 23.0,
"step": 66
},
{
"epoch": 0.0024373385717923533,
"grad_norm": 0.00017161580035462976,
"learning_rate": 7.042e-05,
"loss": 23.0,
"step": 67
},
{
"epoch": 0.0024737167594310453,
"grad_norm": 7.314240065170452e-05,
"learning_rate": 6.989052631578948e-05,
"loss": 23.0,
"step": 68
},
{
"epoch": 0.002510094947069737,
"grad_norm": 0.00018164435459766537,
"learning_rate": 6.936105263157896e-05,
"loss": 23.0,
"step": 69
},
{
"epoch": 0.002546473134708429,
"grad_norm": 0.00018509691290091723,
"learning_rate": 6.883157894736843e-05,
"loss": 23.0,
"step": 70
},
{
"epoch": 0.0025828513223471205,
"grad_norm": 0.00014178521814756095,
"learning_rate": 6.83021052631579e-05,
"loss": 23.0,
"step": 71
},
{
"epoch": 0.0026192295099858125,
"grad_norm": 0.00012139921454945579,
"learning_rate": 6.777263157894737e-05,
"loss": 23.0,
"step": 72
},
{
"epoch": 0.0026556076976245045,
"grad_norm": 0.0001967524876818061,
"learning_rate": 6.724315789473684e-05,
"loss": 23.0,
"step": 73
},
{
"epoch": 0.002691985885263196,
"grad_norm": 0.0002260772162117064,
"learning_rate": 6.671368421052631e-05,
"loss": 23.0,
"step": 74
},
{
"epoch": 0.002728364072901888,
"grad_norm": 0.00024784033303149045,
"learning_rate": 6.61842105263158e-05,
"loss": 23.0,
"step": 75
},
{
"epoch": 0.0027647422605405797,
"grad_norm": 8.939324470702559e-05,
"learning_rate": 6.565473684210527e-05,
"loss": 23.0,
"step": 76
},
{
"epoch": 0.0028011204481792717,
"grad_norm": 0.00017008943541441113,
"learning_rate": 6.512526315789474e-05,
"loss": 23.0,
"step": 77
},
{
"epoch": 0.0028374986358179637,
"grad_norm": 0.00015797036758158356,
"learning_rate": 6.459578947368421e-05,
"loss": 23.0,
"step": 78
},
{
"epoch": 0.0028738768234566553,
"grad_norm": 0.00023262340982910246,
"learning_rate": 6.406631578947369e-05,
"loss": 23.0,
"step": 79
},
{
"epoch": 0.0029102550110953473,
"grad_norm": 0.0001813859707908705,
"learning_rate": 6.353684210526316e-05,
"loss": 23.0,
"step": 80
},
{
"epoch": 0.002946633198734039,
"grad_norm": 0.0003337754460517317,
"learning_rate": 6.300736842105263e-05,
"loss": 23.0,
"step": 81
},
{
"epoch": 0.002983011386372731,
"grad_norm": 0.0003210855938959867,
"learning_rate": 6.247789473684212e-05,
"loss": 23.0,
"step": 82
},
{
"epoch": 0.003019389574011423,
"grad_norm": 0.00035266485065221786,
"learning_rate": 6.194842105263159e-05,
"loss": 23.0,
"step": 83
},
{
"epoch": 0.0030557677616501145,
"grad_norm": 0.00018341882969252765,
"learning_rate": 6.141894736842106e-05,
"loss": 23.0,
"step": 84
},
{
"epoch": 0.0030921459492888066,
"grad_norm": 0.00015443051233887672,
"learning_rate": 6.088947368421053e-05,
"loss": 23.0,
"step": 85
},
{
"epoch": 0.003128524136927498,
"grad_norm": 0.00027659107581712306,
"learning_rate": 6.036e-05,
"loss": 23.0,
"step": 86
},
{
"epoch": 0.00316490232456619,
"grad_norm": 0.0001798662997316569,
"learning_rate": 5.9830526315789475e-05,
"loss": 23.0,
"step": 87
},
{
"epoch": 0.003201280512204882,
"grad_norm": 0.00030918928678147495,
"learning_rate": 5.9301052631578946e-05,
"loss": 23.0,
"step": 88
},
{
"epoch": 0.0032376586998435737,
"grad_norm": 0.00047841985360719264,
"learning_rate": 5.877157894736843e-05,
"loss": 23.0,
"step": 89
},
{
"epoch": 0.0032740368874822658,
"grad_norm": 0.0001805957726901397,
"learning_rate": 5.82421052631579e-05,
"loss": 23.0,
"step": 90
},
{
"epoch": 0.0033104150751209573,
"grad_norm": 0.00023698716540820897,
"learning_rate": 5.771263157894737e-05,
"loss": 23.0,
"step": 91
},
{
"epoch": 0.0033467932627596494,
"grad_norm": 0.00030717943445779383,
"learning_rate": 5.718315789473685e-05,
"loss": 23.0,
"step": 92
},
{
"epoch": 0.003383171450398341,
"grad_norm": 0.00028171727899461985,
"learning_rate": 5.665368421052632e-05,
"loss": 23.0,
"step": 93
},
{
"epoch": 0.003419549638037033,
"grad_norm": 0.0003316371876280755,
"learning_rate": 5.612421052631579e-05,
"loss": 23.0,
"step": 94
},
{
"epoch": 0.003455927825675725,
"grad_norm": 0.00039376236964017153,
"learning_rate": 5.559473684210527e-05,
"loss": 23.0,
"step": 95
},
{
"epoch": 0.0034923060133144166,
"grad_norm": 0.00033099291613325477,
"learning_rate": 5.506526315789474e-05,
"loss": 23.0,
"step": 96
},
{
"epoch": 0.0035286842009531086,
"grad_norm": 0.0002871527394745499,
"learning_rate": 5.453578947368421e-05,
"loss": 23.0,
"step": 97
},
{
"epoch": 0.0035650623885918,
"grad_norm": 0.00022404693299904466,
"learning_rate": 5.400631578947369e-05,
"loss": 23.0,
"step": 98
},
{
"epoch": 0.003601440576230492,
"grad_norm": 0.0005800426588393748,
"learning_rate": 5.347684210526316e-05,
"loss": 23.0,
"step": 99
},
{
"epoch": 0.003637818763869184,
"grad_norm": 0.00046914396807551384,
"learning_rate": 5.294736842105263e-05,
"loss": 23.0,
"step": 100
},
{
"epoch": 0.003637818763869184,
"eval_loss": 11.5,
"eval_runtime": 151.3111,
"eval_samples_per_second": 76.498,
"eval_steps_per_second": 19.126,
"step": 100
},
{
"epoch": 0.0036741969515078758,
"grad_norm": 5.8791010815184563e-05,
"learning_rate": 5.24178947368421e-05,
"loss": 23.0,
"step": 101
},
{
"epoch": 0.0037105751391465678,
"grad_norm": 0.00015836917737033218,
"learning_rate": 5.1888421052631585e-05,
"loss": 23.0,
"step": 102
},
{
"epoch": 0.0037469533267852594,
"grad_norm": 0.0001639866823097691,
"learning_rate": 5.135894736842106e-05,
"loss": 23.0,
"step": 103
},
{
"epoch": 0.0037833315144239514,
"grad_norm": 9.088047954719514e-05,
"learning_rate": 5.082947368421053e-05,
"loss": 23.0,
"step": 104
},
{
"epoch": 0.0038197097020626434,
"grad_norm": 0.00011946928862016648,
"learning_rate": 5.03e-05,
"loss": 23.0,
"step": 105
},
{
"epoch": 0.003856087889701335,
"grad_norm": 9.466482151765376e-05,
"learning_rate": 4.977052631578947e-05,
"loss": 23.0,
"step": 106
},
{
"epoch": 0.003892466077340027,
"grad_norm": 0.00016984842659439892,
"learning_rate": 4.924105263157895e-05,
"loss": 23.0,
"step": 107
},
{
"epoch": 0.003928844264978719,
"grad_norm": 0.00028872015536762774,
"learning_rate": 4.871157894736843e-05,
"loss": 23.0,
"step": 108
},
{
"epoch": 0.003965222452617411,
"grad_norm": 0.00028236303478479385,
"learning_rate": 4.81821052631579e-05,
"loss": 23.0,
"step": 109
},
{
"epoch": 0.004001600640256103,
"grad_norm": 0.00025114035815931857,
"learning_rate": 4.765263157894737e-05,
"loss": 23.0,
"step": 110
},
{
"epoch": 0.004037978827894795,
"grad_norm": 0.0002083306753775105,
"learning_rate": 4.7123157894736845e-05,
"loss": 23.0,
"step": 111
},
{
"epoch": 0.004074357015533486,
"grad_norm": 0.0003315545618534088,
"learning_rate": 4.6593684210526316e-05,
"loss": 23.0,
"step": 112
},
{
"epoch": 0.004110735203172178,
"grad_norm": 0.00013258626859169453,
"learning_rate": 4.606421052631579e-05,
"loss": 23.0,
"step": 113
},
{
"epoch": 0.00414711339081087,
"grad_norm": 0.0002853393671102822,
"learning_rate": 4.553473684210527e-05,
"loss": 23.0,
"step": 114
},
{
"epoch": 0.004183491578449562,
"grad_norm": 8.260518370661885e-05,
"learning_rate": 4.500526315789474e-05,
"loss": 23.0,
"step": 115
},
{
"epoch": 0.004219869766088254,
"grad_norm": 0.00017261721950490028,
"learning_rate": 4.447578947368421e-05,
"loss": 23.0,
"step": 116
},
{
"epoch": 0.004256247953726945,
"grad_norm": 0.0001484427193645388,
"learning_rate": 4.394631578947369e-05,
"loss": 23.0,
"step": 117
},
{
"epoch": 0.004292626141365637,
"grad_norm": 0.0003230143338441849,
"learning_rate": 4.341684210526316e-05,
"loss": 23.0,
"step": 118
},
{
"epoch": 0.004329004329004329,
"grad_norm": 0.00024061408475972712,
"learning_rate": 4.2887368421052636e-05,
"loss": 23.0,
"step": 119
},
{
"epoch": 0.004365382516643021,
"grad_norm": 0.00017947243759408593,
"learning_rate": 4.2357894736842106e-05,
"loss": 23.0,
"step": 120
},
{
"epoch": 0.004401760704281713,
"grad_norm": 0.00017323675274383277,
"learning_rate": 4.182842105263158e-05,
"loss": 23.0,
"step": 121
},
{
"epoch": 0.004438138891920404,
"grad_norm": 0.0004172915650997311,
"learning_rate": 4.1298947368421053e-05,
"loss": 23.0,
"step": 122
},
{
"epoch": 0.004474517079559096,
"grad_norm": 0.0003498275764286518,
"learning_rate": 4.0769473684210524e-05,
"loss": 23.0,
"step": 123
},
{
"epoch": 0.004510895267197788,
"grad_norm": 0.0005179645959287882,
"learning_rate": 4.024000000000001e-05,
"loss": 23.0,
"step": 124
},
{
"epoch": 0.00454727345483648,
"grad_norm": 0.0003826706379186362,
"learning_rate": 3.971052631578948e-05,
"loss": 23.0,
"step": 125
},
{
"epoch": 0.004583651642475172,
"grad_norm": 0.0003725514397956431,
"learning_rate": 3.918105263157895e-05,
"loss": 23.0,
"step": 126
},
{
"epoch": 0.004620029830113863,
"grad_norm": 0.0003671155427582562,
"learning_rate": 3.865157894736842e-05,
"loss": 23.0,
"step": 127
},
{
"epoch": 0.004656408017752555,
"grad_norm": 0.0005094852531328797,
"learning_rate": 3.8122105263157896e-05,
"loss": 23.0,
"step": 128
},
{
"epoch": 0.004692786205391247,
"grad_norm": 0.0004659521800931543,
"learning_rate": 3.759263157894737e-05,
"loss": 23.0,
"step": 129
},
{
"epoch": 0.0047291643930299394,
"grad_norm": 0.0002527102187741548,
"learning_rate": 3.7063157894736844e-05,
"loss": 23.0,
"step": 130
},
{
"epoch": 0.0047655425806686314,
"grad_norm": 0.0005124033777974546,
"learning_rate": 3.653368421052632e-05,
"loss": 23.0,
"step": 131
},
{
"epoch": 0.004801920768307323,
"grad_norm": 0.00018957318388856947,
"learning_rate": 3.600421052631579e-05,
"loss": 23.0,
"step": 132
},
{
"epoch": 0.004838298955946015,
"grad_norm": 0.00038366386434063315,
"learning_rate": 3.547473684210526e-05,
"loss": 23.0,
"step": 133
},
{
"epoch": 0.004874677143584707,
"grad_norm": 0.0002823302347678691,
"learning_rate": 3.494526315789474e-05,
"loss": 23.0,
"step": 134
},
{
"epoch": 0.004911055331223399,
"grad_norm": 0.00040033028926700354,
"learning_rate": 3.4415789473684216e-05,
"loss": 23.0,
"step": 135
},
{
"epoch": 0.004947433518862091,
"grad_norm": 0.0007199504761956632,
"learning_rate": 3.3886315789473686e-05,
"loss": 23.0,
"step": 136
},
{
"epoch": 0.004983811706500782,
"grad_norm": 0.0004205556761007756,
"learning_rate": 3.3356842105263156e-05,
"loss": 23.0,
"step": 137
},
{
"epoch": 0.005020189894139474,
"grad_norm": 0.00045116819092072546,
"learning_rate": 3.2827368421052634e-05,
"loss": 23.0,
"step": 138
},
{
"epoch": 0.005056568081778166,
"grad_norm": 0.00022940752387512475,
"learning_rate": 3.2297894736842104e-05,
"loss": 23.0,
"step": 139
},
{
"epoch": 0.005092946269416858,
"grad_norm": 0.0003795432858169079,
"learning_rate": 3.176842105263158e-05,
"loss": 23.0,
"step": 140
},
{
"epoch": 0.00512932445705555,
"grad_norm": 0.0003170387935824692,
"learning_rate": 3.123894736842106e-05,
"loss": 23.0,
"step": 141
},
{
"epoch": 0.005165702644694241,
"grad_norm": 0.0007250338094308972,
"learning_rate": 3.070947368421053e-05,
"loss": 23.0,
"step": 142
},
{
"epoch": 0.005202080832332933,
"grad_norm": 0.0007052431465126574,
"learning_rate": 3.018e-05,
"loss": 23.0,
"step": 143
},
{
"epoch": 0.005238459019971625,
"grad_norm": 0.0004865964874625206,
"learning_rate": 2.9650526315789473e-05,
"loss": 23.0,
"step": 144
},
{
"epoch": 0.005274837207610317,
"grad_norm": 0.0004168594314251095,
"learning_rate": 2.912105263157895e-05,
"loss": 23.0,
"step": 145
},
{
"epoch": 0.005311215395249009,
"grad_norm": 0.0005948792095296085,
"learning_rate": 2.8591578947368424e-05,
"loss": 23.0,
"step": 146
},
{
"epoch": 0.0053475935828877,
"grad_norm": 0.00040314634679816663,
"learning_rate": 2.8062105263157894e-05,
"loss": 23.0,
"step": 147
},
{
"epoch": 0.005383971770526392,
"grad_norm": 0.0005048158345744014,
"learning_rate": 2.753263157894737e-05,
"loss": 23.0,
"step": 148
},
{
"epoch": 0.005420349958165084,
"grad_norm": 0.00025671147159300745,
"learning_rate": 2.7003157894736845e-05,
"loss": 23.0,
"step": 149
},
{
"epoch": 0.005456728145803776,
"grad_norm": 0.00019615764904301614,
"learning_rate": 2.6473684210526315e-05,
"loss": 23.0,
"step": 150
},
{
"epoch": 0.005456728145803776,
"eval_loss": 11.5,
"eval_runtime": 151.2449,
"eval_samples_per_second": 76.531,
"eval_steps_per_second": 19.135,
"step": 150
},
{
"epoch": 0.005493106333442468,
"grad_norm": 0.00028963954537175596,
"learning_rate": 2.5944210526315793e-05,
"loss": 23.0,
"step": 151
},
{
"epoch": 0.005529484521081159,
"grad_norm": 0.00010640855907695368,
"learning_rate": 2.5414736842105266e-05,
"loss": 23.0,
"step": 152
},
{
"epoch": 0.0055658627087198514,
"grad_norm": 0.00012901003356091678,
"learning_rate": 2.4885263157894737e-05,
"loss": 23.0,
"step": 153
},
{
"epoch": 0.0056022408963585435,
"grad_norm": 0.00019396428251639009,
"learning_rate": 2.4355789473684214e-05,
"loss": 23.0,
"step": 154
},
{
"epoch": 0.0056386190839972355,
"grad_norm": 0.00010774251131806523,
"learning_rate": 2.3826315789473684e-05,
"loss": 23.0,
"step": 155
},
{
"epoch": 0.0056749972716359275,
"grad_norm": 0.0001899301423691213,
"learning_rate": 2.3296842105263158e-05,
"loss": 23.0,
"step": 156
},
{
"epoch": 0.005711375459274619,
"grad_norm": 0.0002964092418551445,
"learning_rate": 2.2767368421052635e-05,
"loss": 23.0,
"step": 157
},
{
"epoch": 0.005747753646913311,
"grad_norm": 0.0003504469932522625,
"learning_rate": 2.2237894736842105e-05,
"loss": 23.0,
"step": 158
},
{
"epoch": 0.005784131834552003,
"grad_norm": 0.000246691721258685,
"learning_rate": 2.170842105263158e-05,
"loss": 23.0,
"step": 159
},
{
"epoch": 0.005820510022190695,
"grad_norm": 0.00022581017401535064,
"learning_rate": 2.1178947368421053e-05,
"loss": 23.0,
"step": 160
},
{
"epoch": 0.005856888209829387,
"grad_norm": 0.000321000290568918,
"learning_rate": 2.0649473684210527e-05,
"loss": 23.0,
"step": 161
},
{
"epoch": 0.005893266397468078,
"grad_norm": 0.00020670663798227906,
"learning_rate": 2.0120000000000004e-05,
"loss": 23.0,
"step": 162
},
{
"epoch": 0.00592964458510677,
"grad_norm": 0.0008010378223843873,
"learning_rate": 1.9590526315789474e-05,
"loss": 23.0,
"step": 163
},
{
"epoch": 0.005966022772745462,
"grad_norm": 0.0004023460205644369,
"learning_rate": 1.9061052631578948e-05,
"loss": 23.0,
"step": 164
},
{
"epoch": 0.006002400960384154,
"grad_norm": 0.0003998402098659426,
"learning_rate": 1.8531578947368422e-05,
"loss": 23.0,
"step": 165
},
{
"epoch": 0.006038779148022846,
"grad_norm": 0.00017719727475196123,
"learning_rate": 1.8002105263157896e-05,
"loss": 23.0,
"step": 166
},
{
"epoch": 0.006075157335661537,
"grad_norm": 0.0005806902190670371,
"learning_rate": 1.747263157894737e-05,
"loss": 23.0,
"step": 167
},
{
"epoch": 0.006111535523300229,
"grad_norm": 0.0002449015446472913,
"learning_rate": 1.6943157894736843e-05,
"loss": 23.0,
"step": 168
},
{
"epoch": 0.006147913710938921,
"grad_norm": 0.00036219405592419207,
"learning_rate": 1.6413684210526317e-05,
"loss": 23.0,
"step": 169
},
{
"epoch": 0.006184291898577613,
"grad_norm": 0.00018645984528120607,
"learning_rate": 1.588421052631579e-05,
"loss": 23.0,
"step": 170
},
{
"epoch": 0.006220670086216305,
"grad_norm": 0.0004933659802190959,
"learning_rate": 1.5354736842105264e-05,
"loss": 23.0,
"step": 171
},
{
"epoch": 0.006257048273854996,
"grad_norm": 0.0002150293003069237,
"learning_rate": 1.4825263157894736e-05,
"loss": 23.0,
"step": 172
},
{
"epoch": 0.006293426461493688,
"grad_norm": 0.0005116685060784221,
"learning_rate": 1.4295789473684212e-05,
"loss": 23.0,
"step": 173
},
{
"epoch": 0.00632980464913238,
"grad_norm": 0.0002115621027769521,
"learning_rate": 1.3766315789473686e-05,
"loss": 23.0,
"step": 174
},
{
"epoch": 0.006366182836771072,
"grad_norm": 0.00042912649223580956,
"learning_rate": 1.3236842105263158e-05,
"loss": 23.0,
"step": 175
},
{
"epoch": 0.006402561024409764,
"grad_norm": 0.00039016309892758727,
"learning_rate": 1.2707368421052633e-05,
"loss": 23.0,
"step": 176
},
{
"epoch": 0.0064389392120484555,
"grad_norm": 0.0004473703447729349,
"learning_rate": 1.2177894736842107e-05,
"loss": 23.0,
"step": 177
},
{
"epoch": 0.0064753173996871475,
"grad_norm": 0.00025407978682778776,
"learning_rate": 1.1648421052631579e-05,
"loss": 23.0,
"step": 178
},
{
"epoch": 0.0065116955873258395,
"grad_norm": 0.0005332750151865184,
"learning_rate": 1.1118947368421053e-05,
"loss": 23.0,
"step": 179
},
{
"epoch": 0.0065480737749645315,
"grad_norm": 0.0004931697621941566,
"learning_rate": 1.0589473684210526e-05,
"loss": 23.0,
"step": 180
},
{
"epoch": 0.0065844519626032235,
"grad_norm": 0.0004956377088092268,
"learning_rate": 1.0060000000000002e-05,
"loss": 23.0,
"step": 181
},
{
"epoch": 0.006620830150241915,
"grad_norm": 0.0007806757930666208,
"learning_rate": 9.530526315789474e-06,
"loss": 23.0,
"step": 182
},
{
"epoch": 0.006657208337880607,
"grad_norm": 0.00038233300438150764,
"learning_rate": 9.001052631578948e-06,
"loss": 23.0,
"step": 183
},
{
"epoch": 0.006693586525519299,
"grad_norm": 0.00035164743894711137,
"learning_rate": 8.471578947368422e-06,
"loss": 23.0,
"step": 184
},
{
"epoch": 0.006729964713157991,
"grad_norm": 0.0013723446754738688,
"learning_rate": 7.942105263157895e-06,
"loss": 23.0,
"step": 185
},
{
"epoch": 0.006766342900796682,
"grad_norm": 0.0006331033073365688,
"learning_rate": 7.412631578947368e-06,
"loss": 23.0,
"step": 186
},
{
"epoch": 0.006802721088435374,
"grad_norm": 0.0006040234002284706,
"learning_rate": 6.883157894736843e-06,
"loss": 23.0,
"step": 187
},
{
"epoch": 0.006839099276074066,
"grad_norm": 0.0006602167850360274,
"learning_rate": 6.3536842105263166e-06,
"loss": 23.0,
"step": 188
},
{
"epoch": 0.006875477463712758,
"grad_norm": 0.000685389619320631,
"learning_rate": 5.8242105263157895e-06,
"loss": 23.0,
"step": 189
},
{
"epoch": 0.00691185565135145,
"grad_norm": 0.0005400713998824358,
"learning_rate": 5.294736842105263e-06,
"loss": 23.0,
"step": 190
},
{
"epoch": 0.006948233838990141,
"grad_norm": 0.0006456027622334659,
"learning_rate": 4.765263157894737e-06,
"loss": 23.0,
"step": 191
},
{
"epoch": 0.006984612026628833,
"grad_norm": 0.0004271976067684591,
"learning_rate": 4.235789473684211e-06,
"loss": 23.0,
"step": 192
},
{
"epoch": 0.007020990214267525,
"grad_norm": 0.00027371750911697745,
"learning_rate": 3.706315789473684e-06,
"loss": 23.0,
"step": 193
},
{
"epoch": 0.007057368401906217,
"grad_norm": 0.0009070454980246723,
"learning_rate": 3.1768421052631583e-06,
"loss": 23.0,
"step": 194
},
{
"epoch": 0.007093746589544909,
"grad_norm": 0.0005404745461419225,
"learning_rate": 2.6473684210526316e-06,
"loss": 23.0,
"step": 195
},
{
"epoch": 0.0071301247771836,
"grad_norm": 0.0008082005078904331,
"learning_rate": 2.1178947368421054e-06,
"loss": 23.0,
"step": 196
},
{
"epoch": 0.007166502964822292,
"grad_norm": 0.0005105668096803129,
"learning_rate": 1.5884210526315791e-06,
"loss": 23.0,
"step": 197
},
{
"epoch": 0.007202881152460984,
"grad_norm": 0.0008565490716136992,
"learning_rate": 1.0589473684210527e-06,
"loss": 23.0,
"step": 198
},
{
"epoch": 0.007239259340099676,
"grad_norm": 0.00041791878174990416,
"learning_rate": 5.294736842105263e-07,
"loss": 23.0,
"step": 199
},
{
"epoch": 0.007275637527738368,
"grad_norm": 0.00048213935224339366,
"learning_rate": 0.0,
"loss": 23.0,
"step": 200
},
{
"epoch": 0.007275637527738368,
"eval_loss": 11.5,
"eval_runtime": 151.3318,
"eval_samples_per_second": 76.488,
"eval_steps_per_second": 19.124,
"step": 200
}
],
"logging_steps": 1,
"max_steps": 200,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 50,
"stateful_callbacks": {
"EarlyStoppingCallback": {
"args": {
"early_stopping_patience": 5,
"early_stopping_threshold": 0.0
},
"attributes": {
"early_stopping_patience_counter": 3
}
},
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 8059714732032.0,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}
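
A minimal sketch (not part of the checkpoint itself), assuming the JSON above is saved as a Hugging Face Trainer `trainer_state.json`; the file path below is hypothetical and only illustrates how one might load the file and summarize its `log_history` with Python's standard `json` module:

    import json

    # Hypothetical path; adjust to wherever the checkpoint was saved.
    with open("checkpoint-200/trainer_state.json") as f:
        state = json.load(f)

    # Training entries carry "loss"; evaluation entries carry "eval_loss".
    train_logs = [e for e in state["log_history"] if "loss" in e]
    eval_logs = [e for e in state["log_history"] if "eval_loss" in e]

    print("best metric:", state["best_metric"])
    print("last train loss:", train_logs[-1]["loss"], "at step", train_logs[-1]["step"])
    print("eval_loss by step:", [(e["step"], e["eval_loss"]) for e in eval_logs])

    # Early-stopping state recorded by the Trainer when it halted at max_steps.
    es = state["stateful_callbacks"]["EarlyStoppingCallback"]
    print("early stopping patience counter:", es["attributes"]["early_stopping_patience_counter"])

For this checkpoint the script would report an unchanged eval_loss of 11.5 at steps 1, 50, 100, 150, and 200, with the early-stopping patience counter at 3 of 5 when training stopped at max_steps = 200.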