{
"best_metric": 0.7936509251594543,
"best_model_checkpoint": "miner_id_24/checkpoint-200",
"epoch": 0.5376344086021505,
"eval_steps": 50,
"global_step": 200,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.002688172043010753,
"grad_norm": 2.552518367767334,
"learning_rate": 1.008e-05,
"loss": 4.2159,
"step": 1
},
{
"epoch": 0.002688172043010753,
"eval_loss": 2.7429797649383545,
"eval_runtime": 20.0523,
"eval_samples_per_second": 7.83,
"eval_steps_per_second": 1.995,
"step": 1
},
{
"epoch": 0.005376344086021506,
"grad_norm": 4.294527053833008,
"learning_rate": 2.016e-05,
"loss": 5.2935,
"step": 2
},
{
"epoch": 0.008064516129032258,
"grad_norm": 3.8511745929718018,
"learning_rate": 3.024e-05,
"loss": 4.435,
"step": 3
},
{
"epoch": 0.010752688172043012,
"grad_norm": 4.876188278198242,
"learning_rate": 4.032e-05,
"loss": 5.0044,
"step": 4
},
{
"epoch": 0.013440860215053764,
"grad_norm": 4.7782392501831055,
"learning_rate": 5.04e-05,
"loss": 4.3481,
"step": 5
},
{
"epoch": 0.016129032258064516,
"grad_norm": 4.02572774887085,
"learning_rate": 6.048e-05,
"loss": 3.9645,
"step": 6
},
{
"epoch": 0.01881720430107527,
"grad_norm": 4.530522346496582,
"learning_rate": 7.055999999999999e-05,
"loss": 3.7968,
"step": 7
},
{
"epoch": 0.021505376344086023,
"grad_norm": 3.8177340030670166,
"learning_rate": 8.064e-05,
"loss": 3.9752,
"step": 8
},
{
"epoch": 0.024193548387096774,
"grad_norm": 3.42366361618042,
"learning_rate": 9.072e-05,
"loss": 3.7669,
"step": 9
},
{
"epoch": 0.026881720430107527,
"grad_norm": 3.45857572555542,
"learning_rate": 0.0001008,
"loss": 3.1495,
"step": 10
},
{
"epoch": 0.02956989247311828,
"grad_norm": 5.064671516418457,
"learning_rate": 0.00010026947368421052,
"loss": 2.8932,
"step": 11
},
{
"epoch": 0.03225806451612903,
"grad_norm": 7.331480979919434,
"learning_rate": 9.973894736842104e-05,
"loss": 2.1098,
"step": 12
},
{
"epoch": 0.03494623655913978,
"grad_norm": 6.972163677215576,
"learning_rate": 9.920842105263157e-05,
"loss": 3.326,
"step": 13
},
{
"epoch": 0.03763440860215054,
"grad_norm": 4.03656005859375,
"learning_rate": 9.86778947368421e-05,
"loss": 2.4259,
"step": 14
},
{
"epoch": 0.04032258064516129,
"grad_norm": 6.760096073150635,
"learning_rate": 9.814736842105264e-05,
"loss": 1.9487,
"step": 15
},
{
"epoch": 0.043010752688172046,
"grad_norm": 4.662262439727783,
"learning_rate": 9.761684210526316e-05,
"loss": 2.489,
"step": 16
},
{
"epoch": 0.0456989247311828,
"grad_norm": 5.427197456359863,
"learning_rate": 9.708631578947368e-05,
"loss": 2.9028,
"step": 17
},
{
"epoch": 0.04838709677419355,
"grad_norm": 6.550124168395996,
"learning_rate": 9.655578947368421e-05,
"loss": 2.5639,
"step": 18
},
{
"epoch": 0.051075268817204304,
"grad_norm": 5.742612361907959,
"learning_rate": 9.602526315789473e-05,
"loss": 1.5072,
"step": 19
},
{
"epoch": 0.053763440860215055,
"grad_norm": 4.321170330047607,
"learning_rate": 9.549473684210525e-05,
"loss": 1.1754,
"step": 20
},
{
"epoch": 0.056451612903225805,
"grad_norm": 3.095183849334717,
"learning_rate": 9.496421052631579e-05,
"loss": 1.5671,
"step": 21
},
{
"epoch": 0.05913978494623656,
"grad_norm": 2.853109121322632,
"learning_rate": 9.443368421052631e-05,
"loss": 1.2742,
"step": 22
},
{
"epoch": 0.06182795698924731,
"grad_norm": 3.149440050125122,
"learning_rate": 9.390315789473683e-05,
"loss": 2.0117,
"step": 23
},
{
"epoch": 0.06451612903225806,
"grad_norm": 3.0655312538146973,
"learning_rate": 9.337263157894737e-05,
"loss": 2.2624,
"step": 24
},
{
"epoch": 0.06720430107526881,
"grad_norm": 3.4676096439361572,
"learning_rate": 9.28421052631579e-05,
"loss": 2.3864,
"step": 25
},
{
"epoch": 0.06989247311827956,
"grad_norm": 3.4489779472351074,
"learning_rate": 9.231157894736842e-05,
"loss": 1.5537,
"step": 26
},
{
"epoch": 0.07258064516129033,
"grad_norm": 3.5075297355651855,
"learning_rate": 9.178105263157895e-05,
"loss": 1.6461,
"step": 27
},
{
"epoch": 0.07526881720430108,
"grad_norm": 2.4398374557495117,
"learning_rate": 9.125052631578948e-05,
"loss": 1.4466,
"step": 28
},
{
"epoch": 0.07795698924731183,
"grad_norm": 3.644092321395874,
"learning_rate": 9.072e-05,
"loss": 2.507,
"step": 29
},
{
"epoch": 0.08064516129032258,
"grad_norm": 3.263528823852539,
"learning_rate": 9.018947368421052e-05,
"loss": 1.2544,
"step": 30
},
{
"epoch": 0.08333333333333333,
"grad_norm": 3.7490763664245605,
"learning_rate": 8.965894736842104e-05,
"loss": 1.2655,
"step": 31
},
{
"epoch": 0.08602150537634409,
"grad_norm": 3.2782695293426514,
"learning_rate": 8.912842105263157e-05,
"loss": 1.7594,
"step": 32
},
{
"epoch": 0.08870967741935484,
"grad_norm": 2.6544880867004395,
"learning_rate": 8.85978947368421e-05,
"loss": 1.6763,
"step": 33
},
{
"epoch": 0.0913978494623656,
"grad_norm": 2.461487054824829,
"learning_rate": 8.806736842105264e-05,
"loss": 1.4153,
"step": 34
},
{
"epoch": 0.09408602150537634,
"grad_norm": 2.037763833999634,
"learning_rate": 8.753684210526316e-05,
"loss": 1.4954,
"step": 35
},
{
"epoch": 0.0967741935483871,
"grad_norm": 2.2501564025878906,
"learning_rate": 8.700631578947369e-05,
"loss": 1.8277,
"step": 36
},
{
"epoch": 0.09946236559139784,
"grad_norm": 2.666536331176758,
"learning_rate": 8.647578947368421e-05,
"loss": 2.2322,
"step": 37
},
{
"epoch": 0.10215053763440861,
"grad_norm": 3.9293577671051025,
"learning_rate": 8.594526315789473e-05,
"loss": 2.394,
"step": 38
},
{
"epoch": 0.10483870967741936,
"grad_norm": 5.047905921936035,
"learning_rate": 8.541473684210525e-05,
"loss": 2.2877,
"step": 39
},
{
"epoch": 0.10752688172043011,
"grad_norm": 3.5374960899353027,
"learning_rate": 8.488421052631578e-05,
"loss": 1.552,
"step": 40
},
{
"epoch": 0.11021505376344086,
"grad_norm": 4.972512722015381,
"learning_rate": 8.435368421052631e-05,
"loss": 2.5782,
"step": 41
},
{
"epoch": 0.11290322580645161,
"grad_norm": 4.151662349700928,
"learning_rate": 8.382315789473684e-05,
"loss": 1.3923,
"step": 42
},
{
"epoch": 0.11559139784946236,
"grad_norm": 3.7074525356292725,
"learning_rate": 8.329263157894737e-05,
"loss": 1.3753,
"step": 43
},
{
"epoch": 0.11827956989247312,
"grad_norm": 3.257704973220825,
"learning_rate": 8.27621052631579e-05,
"loss": 1.5227,
"step": 44
},
{
"epoch": 0.12096774193548387,
"grad_norm": 2.8200066089630127,
"learning_rate": 8.223157894736842e-05,
"loss": 1.0166,
"step": 45
},
{
"epoch": 0.12365591397849462,
"grad_norm": 9.245245933532715,
"learning_rate": 8.170105263157894e-05,
"loss": 3.0691,
"step": 46
},
{
"epoch": 0.12634408602150538,
"grad_norm": 6.105884075164795,
"learning_rate": 8.117052631578946e-05,
"loss": 2.7272,
"step": 47
},
{
"epoch": 0.12903225806451613,
"grad_norm": 9.19589900970459,
"learning_rate": 8.064e-05,
"loss": 2.4159,
"step": 48
},
{
"epoch": 0.13172043010752688,
"grad_norm": 6.029378890991211,
"learning_rate": 8.010947368421052e-05,
"loss": 1.4225,
"step": 49
},
{
"epoch": 0.13440860215053763,
"grad_norm": 4.892293930053711,
"learning_rate": 7.957894736842105e-05,
"loss": 0.7743,
"step": 50
},
{
"epoch": 0.13440860215053763,
"eval_loss": 1.1576098203659058,
"eval_runtime": 20.0295,
"eval_samples_per_second": 7.838,
"eval_steps_per_second": 1.997,
"step": 50
},
{
"epoch": 0.13709677419354838,
"grad_norm": 3.6493349075317383,
"learning_rate": 7.904842105263158e-05,
"loss": 3.4341,
"step": 51
},
{
"epoch": 0.13978494623655913,
"grad_norm": 3.946737051010132,
"learning_rate": 7.85178947368421e-05,
"loss": 4.1712,
"step": 52
},
{
"epoch": 0.1424731182795699,
"grad_norm": 3.0488572120666504,
"learning_rate": 7.798736842105263e-05,
"loss": 2.7678,
"step": 53
},
{
"epoch": 0.14516129032258066,
"grad_norm": 2.641152858734131,
"learning_rate": 7.745684210526315e-05,
"loss": 3.4299,
"step": 54
},
{
"epoch": 0.1478494623655914,
"grad_norm": 2.6064324378967285,
"learning_rate": 7.692631578947369e-05,
"loss": 2.8889,
"step": 55
},
{
"epoch": 0.15053763440860216,
"grad_norm": 2.853044271469116,
"learning_rate": 7.639578947368421e-05,
"loss": 3.9092,
"step": 56
},
{
"epoch": 0.1532258064516129,
"grad_norm": 3.7264606952667236,
"learning_rate": 7.586526315789473e-05,
"loss": 3.1663,
"step": 57
},
{
"epoch": 0.15591397849462366,
"grad_norm": 2.3590335845947266,
"learning_rate": 7.533473684210526e-05,
"loss": 2.5423,
"step": 58
},
{
"epoch": 0.1586021505376344,
"grad_norm": 3.6274800300598145,
"learning_rate": 7.480421052631578e-05,
"loss": 2.7857,
"step": 59
},
{
"epoch": 0.16129032258064516,
"grad_norm": 3.466926097869873,
"learning_rate": 7.427368421052632e-05,
"loss": 2.1407,
"step": 60
},
{
"epoch": 0.1639784946236559,
"grad_norm": 2.135305166244507,
"learning_rate": 7.374315789473685e-05,
"loss": 2.0501,
"step": 61
},
{
"epoch": 0.16666666666666666,
"grad_norm": 2.145434617996216,
"learning_rate": 7.321263157894737e-05,
"loss": 1.8671,
"step": 62
},
{
"epoch": 0.1693548387096774,
"grad_norm": 2.250318765640259,
"learning_rate": 7.26821052631579e-05,
"loss": 2.8158,
"step": 63
},
{
"epoch": 0.17204301075268819,
"grad_norm": 2.0222816467285156,
"learning_rate": 7.215157894736842e-05,
"loss": 1.9672,
"step": 64
},
{
"epoch": 0.17473118279569894,
"grad_norm": 2.0015830993652344,
"learning_rate": 7.162105263157894e-05,
"loss": 1.7105,
"step": 65
},
{
"epoch": 0.1774193548387097,
"grad_norm": 2.1358511447906494,
"learning_rate": 7.109052631578947e-05,
"loss": 2.0722,
"step": 66
},
{
"epoch": 0.18010752688172044,
"grad_norm": 2.1164755821228027,
"learning_rate": 7.055999999999999e-05,
"loss": 1.3778,
"step": 67
},
{
"epoch": 0.1827956989247312,
"grad_norm": 2.6545348167419434,
"learning_rate": 7.002947368421052e-05,
"loss": 1.5793,
"step": 68
},
{
"epoch": 0.18548387096774194,
"grad_norm": 2.278327703475952,
"learning_rate": 6.949894736842105e-05,
"loss": 1.3986,
"step": 69
},
{
"epoch": 0.1881720430107527,
"grad_norm": 2.1539969444274902,
"learning_rate": 6.896842105263158e-05,
"loss": 0.8514,
"step": 70
},
{
"epoch": 0.19086021505376344,
"grad_norm": 2.815715789794922,
"learning_rate": 6.843789473684211e-05,
"loss": 1.9194,
"step": 71
},
{
"epoch": 0.1935483870967742,
"grad_norm": 1.7789136171340942,
"learning_rate": 6.790736842105263e-05,
"loss": 1.1168,
"step": 72
},
{
"epoch": 0.19623655913978494,
"grad_norm": 2.7908670902252197,
"learning_rate": 6.737684210526315e-05,
"loss": 1.6579,
"step": 73
},
{
"epoch": 0.1989247311827957,
"grad_norm": 3.0436408519744873,
"learning_rate": 6.684631578947368e-05,
"loss": 2.1317,
"step": 74
},
{
"epoch": 0.20161290322580644,
"grad_norm": 2.6586692333221436,
"learning_rate": 6.631578947368421e-05,
"loss": 1.5078,
"step": 75
},
{
"epoch": 0.20430107526881722,
"grad_norm": 2.3957104682922363,
"learning_rate": 6.578526315789473e-05,
"loss": 1.4137,
"step": 76
},
{
"epoch": 0.20698924731182797,
"grad_norm": 2.467710256576538,
"learning_rate": 6.525473684210526e-05,
"loss": 1.5379,
"step": 77
},
{
"epoch": 0.20967741935483872,
"grad_norm": 2.3089401721954346,
"learning_rate": 6.47242105263158e-05,
"loss": 1.3105,
"step": 78
},
{
"epoch": 0.21236559139784947,
"grad_norm": 3.3448803424835205,
"learning_rate": 6.419368421052632e-05,
"loss": 2.9907,
"step": 79
},
{
"epoch": 0.21505376344086022,
"grad_norm": 1.918912649154663,
"learning_rate": 6.366315789473684e-05,
"loss": 1.0538,
"step": 80
},
{
"epoch": 0.21774193548387097,
"grad_norm": 2.1565945148468018,
"learning_rate": 6.313263157894736e-05,
"loss": 0.8427,
"step": 81
},
{
"epoch": 0.22043010752688172,
"grad_norm": 1.9118677377700806,
"learning_rate": 6.26021052631579e-05,
"loss": 0.8702,
"step": 82
},
{
"epoch": 0.22311827956989247,
"grad_norm": 2.161041498184204,
"learning_rate": 6.207157894736842e-05,
"loss": 1.5612,
"step": 83
},
{
"epoch": 0.22580645161290322,
"grad_norm": 1.8799899816513062,
"learning_rate": 6.154105263157894e-05,
"loss": 1.0472,
"step": 84
},
{
"epoch": 0.22849462365591397,
"grad_norm": 3.115447521209717,
"learning_rate": 6.1010526315789474e-05,
"loss": 1.3093,
"step": 85
},
{
"epoch": 0.23118279569892472,
"grad_norm": 2.5335817337036133,
"learning_rate": 6.048e-05,
"loss": 1.5528,
"step": 86
},
{
"epoch": 0.23387096774193547,
"grad_norm": 2.8771631717681885,
"learning_rate": 5.994947368421052e-05,
"loss": 1.6662,
"step": 87
},
{
"epoch": 0.23655913978494625,
"grad_norm": 3.9417519569396973,
"learning_rate": 5.941894736842104e-05,
"loss": 1.8012,
"step": 88
},
{
"epoch": 0.239247311827957,
"grad_norm": 3.2698731422424316,
"learning_rate": 5.888842105263158e-05,
"loss": 2.3075,
"step": 89
},
{
"epoch": 0.24193548387096775,
"grad_norm": 2.6435606479644775,
"learning_rate": 5.835789473684211e-05,
"loss": 1.3781,
"step": 90
},
{
"epoch": 0.2446236559139785,
"grad_norm": 3.099759817123413,
"learning_rate": 5.782736842105263e-05,
"loss": 2.1561,
"step": 91
},
{
"epoch": 0.24731182795698925,
"grad_norm": 3.145350217819214,
"learning_rate": 5.7296842105263154e-05,
"loss": 1.4506,
"step": 92
},
{
"epoch": 0.25,
"grad_norm": 4.324268341064453,
"learning_rate": 5.676631578947368e-05,
"loss": 0.9423,
"step": 93
},
{
"epoch": 0.25268817204301075,
"grad_norm": 2.9348642826080322,
"learning_rate": 5.623578947368421e-05,
"loss": 1.7886,
"step": 94
},
{
"epoch": 0.2553763440860215,
"grad_norm": 1.738901138305664,
"learning_rate": 5.570526315789474e-05,
"loss": 0.676,
"step": 95
},
{
"epoch": 0.25806451612903225,
"grad_norm": 2.8710925579071045,
"learning_rate": 5.5174736842105266e-05,
"loss": 0.9851,
"step": 96
},
{
"epoch": 0.260752688172043,
"grad_norm": 3.99045729637146,
"learning_rate": 5.464421052631579e-05,
"loss": 1.8844,
"step": 97
},
{
"epoch": 0.26344086021505375,
"grad_norm": 4.602737903594971,
"learning_rate": 5.411368421052631e-05,
"loss": 1.3643,
"step": 98
},
{
"epoch": 0.2661290322580645,
"grad_norm": 3.7166576385498047,
"learning_rate": 5.358315789473684e-05,
"loss": 0.6935,
"step": 99
},
{
"epoch": 0.26881720430107525,
"grad_norm": 3.1870436668395996,
"learning_rate": 5.3052631578947364e-05,
"loss": 1.2186,
"step": 100
},
{
"epoch": 0.26881720430107525,
"eval_loss": 0.9393484592437744,
"eval_runtime": 20.0614,
"eval_samples_per_second": 7.826,
"eval_steps_per_second": 1.994,
"step": 100
},
{
"epoch": 0.271505376344086,
"grad_norm": 2.3323299884796143,
"learning_rate": 5.252210526315789e-05,
"loss": 3.5211,
"step": 101
},
{
"epoch": 0.27419354838709675,
"grad_norm": 2.551894187927246,
"learning_rate": 5.199157894736842e-05,
"loss": 4.4608,
"step": 102
},
{
"epoch": 0.2768817204301075,
"grad_norm": 2.600679397583008,
"learning_rate": 5.1461052631578946e-05,
"loss": 4.0707,
"step": 103
},
{
"epoch": 0.27956989247311825,
"grad_norm": 1.9256941080093384,
"learning_rate": 5.0930526315789476e-05,
"loss": 2.119,
"step": 104
},
{
"epoch": 0.28225806451612906,
"grad_norm": 2.2405953407287598,
"learning_rate": 5.04e-05,
"loss": 3.1069,
"step": 105
},
{
"epoch": 0.2849462365591398,
"grad_norm": 1.9663243293762207,
"learning_rate": 4.986947368421052e-05,
"loss": 2.7355,
"step": 106
},
{
"epoch": 0.28763440860215056,
"grad_norm": 2.2502665519714355,
"learning_rate": 4.933894736842105e-05,
"loss": 3.8927,
"step": 107
},
{
"epoch": 0.2903225806451613,
"grad_norm": 2.449840784072876,
"learning_rate": 4.880842105263158e-05,
"loss": 2.5857,
"step": 108
},
{
"epoch": 0.29301075268817206,
"grad_norm": 2.5077898502349854,
"learning_rate": 4.8277894736842103e-05,
"loss": 2.0671,
"step": 109
},
{
"epoch": 0.2956989247311828,
"grad_norm": 2.9152989387512207,
"learning_rate": 4.7747368421052626e-05,
"loss": 2.771,
"step": 110
},
{
"epoch": 0.29838709677419356,
"grad_norm": 2.127004861831665,
"learning_rate": 4.7216842105263156e-05,
"loss": 2.009,
"step": 111
},
{
"epoch": 0.3010752688172043,
"grad_norm": 2.5167322158813477,
"learning_rate": 4.6686315789473686e-05,
"loss": 2.798,
"step": 112
},
{
"epoch": 0.30376344086021506,
"grad_norm": 1.6835650205612183,
"learning_rate": 4.615578947368421e-05,
"loss": 2.0009,
"step": 113
},
{
"epoch": 0.3064516129032258,
"grad_norm": 1.6886358261108398,
"learning_rate": 4.562526315789474e-05,
"loss": 1.8121,
"step": 114
},
{
"epoch": 0.30913978494623656,
"grad_norm": 2.1738483905792236,
"learning_rate": 4.509473684210526e-05,
"loss": 2.1961,
"step": 115
},
{
"epoch": 0.3118279569892473,
"grad_norm": 1.9415655136108398,
"learning_rate": 4.4564210526315784e-05,
"loss": 1.3629,
"step": 116
},
{
"epoch": 0.31451612903225806,
"grad_norm": 2.252319812774658,
"learning_rate": 4.403368421052632e-05,
"loss": 1.5139,
"step": 117
},
{
"epoch": 0.3172043010752688,
"grad_norm": 2.353317975997925,
"learning_rate": 4.350315789473684e-05,
"loss": 1.9432,
"step": 118
},
{
"epoch": 0.31989247311827956,
"grad_norm": 2.4038631916046143,
"learning_rate": 4.2972631578947366e-05,
"loss": 1.7128,
"step": 119
},
{
"epoch": 0.3225806451612903,
"grad_norm": 1.8164989948272705,
"learning_rate": 4.244210526315789e-05,
"loss": 1.0845,
"step": 120
},
{
"epoch": 0.32526881720430106,
"grad_norm": 1.1192851066589355,
"learning_rate": 4.191157894736842e-05,
"loss": 0.4165,
"step": 121
},
{
"epoch": 0.3279569892473118,
"grad_norm": 1.6266660690307617,
"learning_rate": 4.138105263157895e-05,
"loss": 1.181,
"step": 122
},
{
"epoch": 0.33064516129032256,
"grad_norm": 1.9494553804397583,
"learning_rate": 4.085052631578947e-05,
"loss": 1.3595,
"step": 123
},
{
"epoch": 0.3333333333333333,
"grad_norm": 2.290536403656006,
"learning_rate": 4.032e-05,
"loss": 2.1863,
"step": 124
},
{
"epoch": 0.33602150537634407,
"grad_norm": 1.9435406923294067,
"learning_rate": 3.978947368421052e-05,
"loss": 0.785,
"step": 125
},
{
"epoch": 0.3387096774193548,
"grad_norm": 1.6345041990280151,
"learning_rate": 3.925894736842105e-05,
"loss": 0.6604,
"step": 126
},
{
"epoch": 0.34139784946236557,
"grad_norm": 2.3190295696258545,
"learning_rate": 3.8728421052631575e-05,
"loss": 1.5344,
"step": 127
},
{
"epoch": 0.34408602150537637,
"grad_norm": 2.137402296066284,
"learning_rate": 3.8197894736842105e-05,
"loss": 1.1624,
"step": 128
},
{
"epoch": 0.3467741935483871,
"grad_norm": 3.097121238708496,
"learning_rate": 3.766736842105263e-05,
"loss": 1.1613,
"step": 129
},
{
"epoch": 0.34946236559139787,
"grad_norm": 1.4628736972808838,
"learning_rate": 3.713684210526316e-05,
"loss": 0.7341,
"step": 130
},
{
"epoch": 0.3521505376344086,
"grad_norm": 2.344586133956909,
"learning_rate": 3.660631578947369e-05,
"loss": 1.0426,
"step": 131
},
{
"epoch": 0.3548387096774194,
"grad_norm": 2.10186505317688,
"learning_rate": 3.607578947368421e-05,
"loss": 0.6319,
"step": 132
},
{
"epoch": 0.3575268817204301,
"grad_norm": 3.2302911281585693,
"learning_rate": 3.554526315789473e-05,
"loss": 1.7709,
"step": 133
},
{
"epoch": 0.3602150537634409,
"grad_norm": 2.4297268390655518,
"learning_rate": 3.501473684210526e-05,
"loss": 1.4642,
"step": 134
},
{
"epoch": 0.3629032258064516,
"grad_norm": 1.544806718826294,
"learning_rate": 3.448421052631579e-05,
"loss": 0.5347,
"step": 135
},
{
"epoch": 0.3655913978494624,
"grad_norm": 1.8195679187774658,
"learning_rate": 3.3953684210526315e-05,
"loss": 0.9418,
"step": 136
},
{
"epoch": 0.3682795698924731,
"grad_norm": 2.9363420009613037,
"learning_rate": 3.342315789473684e-05,
"loss": 1.5149,
"step": 137
},
{
"epoch": 0.3709677419354839,
"grad_norm": 1.6591293811798096,
"learning_rate": 3.289263157894737e-05,
"loss": 0.8025,
"step": 138
},
{
"epoch": 0.3736559139784946,
"grad_norm": 1.8703725337982178,
"learning_rate": 3.23621052631579e-05,
"loss": 0.9463,
"step": 139
},
{
"epoch": 0.3763440860215054,
"grad_norm": 1.9521825313568115,
"learning_rate": 3.183157894736842e-05,
"loss": 0.8371,
"step": 140
},
{
"epoch": 0.3790322580645161,
"grad_norm": 2.850534200668335,
"learning_rate": 3.130105263157895e-05,
"loss": 0.9155,
"step": 141
},
{
"epoch": 0.3817204301075269,
"grad_norm": 2.7304813861846924,
"learning_rate": 3.077052631578947e-05,
"loss": 1.3678,
"step": 142
},
{
"epoch": 0.3844086021505376,
"grad_norm": 5.073098659515381,
"learning_rate": 3.024e-05,
"loss": 1.4571,
"step": 143
},
{
"epoch": 0.3870967741935484,
"grad_norm": 4.6329498291015625,
"learning_rate": 2.970947368421052e-05,
"loss": 2.6228,
"step": 144
},
{
"epoch": 0.3897849462365591,
"grad_norm": 2.2699081897735596,
"learning_rate": 2.9178947368421054e-05,
"loss": 1.0457,
"step": 145
},
{
"epoch": 0.3924731182795699,
"grad_norm": 2.2033205032348633,
"learning_rate": 2.8648421052631577e-05,
"loss": 1.2035,
"step": 146
},
{
"epoch": 0.3951612903225806,
"grad_norm": 4.73455810546875,
"learning_rate": 2.8117894736842103e-05,
"loss": 1.7207,
"step": 147
},
{
"epoch": 0.3978494623655914,
"grad_norm": 3.5149385929107666,
"learning_rate": 2.7587368421052633e-05,
"loss": 0.7439,
"step": 148
},
{
"epoch": 0.40053763440860213,
"grad_norm": 2.338346004486084,
"learning_rate": 2.7056842105263156e-05,
"loss": 0.5313,
"step": 149
},
{
"epoch": 0.4032258064516129,
"grad_norm": 1.7660259008407593,
"learning_rate": 2.6526315789473682e-05,
"loss": 0.3979,
"step": 150
},
{
"epoch": 0.4032258064516129,
"eval_loss": 0.8605657815933228,
"eval_runtime": 20.0392,
"eval_samples_per_second": 7.835,
"eval_steps_per_second": 1.996,
"step": 150
},
{
"epoch": 0.40591397849462363,
"grad_norm": 2.3321237564086914,
"learning_rate": 2.599578947368421e-05,
"loss": 3.6574,
"step": 151
},
{
"epoch": 0.40860215053763443,
"grad_norm": 2.897630214691162,
"learning_rate": 2.5465263157894738e-05,
"loss": 2.9582,
"step": 152
},
{
"epoch": 0.4112903225806452,
"grad_norm": 2.3918817043304443,
"learning_rate": 2.493473684210526e-05,
"loss": 2.7901,
"step": 153
},
{
"epoch": 0.41397849462365593,
"grad_norm": 2.5373759269714355,
"learning_rate": 2.440421052631579e-05,
"loss": 3.1311,
"step": 154
},
{
"epoch": 0.4166666666666667,
"grad_norm": 2.5285072326660156,
"learning_rate": 2.3873684210526313e-05,
"loss": 2.5189,
"step": 155
},
{
"epoch": 0.41935483870967744,
"grad_norm": 2.504533052444458,
"learning_rate": 2.3343157894736843e-05,
"loss": 3.2569,
"step": 156
},
{
"epoch": 0.4220430107526882,
"grad_norm": 2.3029768466949463,
"learning_rate": 2.281263157894737e-05,
"loss": 2.6988,
"step": 157
},
{
"epoch": 0.42473118279569894,
"grad_norm": 2.2616817951202393,
"learning_rate": 2.2282105263157892e-05,
"loss": 2.3272,
"step": 158
},
{
"epoch": 0.4274193548387097,
"grad_norm": 2.2895076274871826,
"learning_rate": 2.175157894736842e-05,
"loss": 2.3862,
"step": 159
},
{
"epoch": 0.43010752688172044,
"grad_norm": 1.9649949073791504,
"learning_rate": 2.1221052631578944e-05,
"loss": 2.1478,
"step": 160
},
{
"epoch": 0.4327956989247312,
"grad_norm": 2.0496561527252197,
"learning_rate": 2.0690526315789474e-05,
"loss": 1.3665,
"step": 161
},
{
"epoch": 0.43548387096774194,
"grad_norm": 2.4721364974975586,
"learning_rate": 2.016e-05,
"loss": 3.1437,
"step": 162
},
{
"epoch": 0.4381720430107527,
"grad_norm": 2.5533034801483154,
"learning_rate": 1.9629473684210526e-05,
"loss": 2.2656,
"step": 163
},
{
"epoch": 0.44086021505376344,
"grad_norm": 1.7975753545761108,
"learning_rate": 1.9098947368421053e-05,
"loss": 1.4366,
"step": 164
},
{
"epoch": 0.4435483870967742,
"grad_norm": 2.068775177001953,
"learning_rate": 1.856842105263158e-05,
"loss": 1.3049,
"step": 165
},
{
"epoch": 0.44623655913978494,
"grad_norm": 2.0522570610046387,
"learning_rate": 1.8037894736842105e-05,
"loss": 1.8766,
"step": 166
},
{
"epoch": 0.4489247311827957,
"grad_norm": 1.8050427436828613,
"learning_rate": 1.750736842105263e-05,
"loss": 1.501,
"step": 167
},
{
"epoch": 0.45161290322580644,
"grad_norm": 2.184504508972168,
"learning_rate": 1.6976842105263157e-05,
"loss": 1.7882,
"step": 168
},
{
"epoch": 0.4543010752688172,
"grad_norm": 1.4424117803573608,
"learning_rate": 1.6446315789473684e-05,
"loss": 0.6933,
"step": 169
},
{
"epoch": 0.45698924731182794,
"grad_norm": 2.0629289150238037,
"learning_rate": 1.591578947368421e-05,
"loss": 1.1983,
"step": 170
},
{
"epoch": 0.4596774193548387,
"grad_norm": 2.0509307384490967,
"learning_rate": 1.5385263157894736e-05,
"loss": 1.709,
"step": 171
},
{
"epoch": 0.46236559139784944,
"grad_norm": 1.423912525177002,
"learning_rate": 1.485473684210526e-05,
"loss": 0.6537,
"step": 172
},
{
"epoch": 0.4650537634408602,
"grad_norm": 2.0477561950683594,
"learning_rate": 1.4324210526315789e-05,
"loss": 0.791,
"step": 173
},
{
"epoch": 0.46774193548387094,
"grad_norm": 2.9739744663238525,
"learning_rate": 1.3793684210526316e-05,
"loss": 1.7767,
"step": 174
},
{
"epoch": 0.47043010752688175,
"grad_norm": 2.3965930938720703,
"learning_rate": 1.3263157894736841e-05,
"loss": 1.8458,
"step": 175
},
{
"epoch": 0.4731182795698925,
"grad_norm": 2.0706305503845215,
"learning_rate": 1.2732631578947369e-05,
"loss": 0.8644,
"step": 176
},
{
"epoch": 0.47580645161290325,
"grad_norm": 1.389144778251648,
"learning_rate": 1.2202105263157895e-05,
"loss": 0.5712,
"step": 177
},
{
"epoch": 0.478494623655914,
"grad_norm": 2.4105353355407715,
"learning_rate": 1.1671578947368421e-05,
"loss": 1.7319,
"step": 178
},
{
"epoch": 0.48118279569892475,
"grad_norm": 1.4054794311523438,
"learning_rate": 1.1141052631578946e-05,
"loss": 0.594,
"step": 179
},
{
"epoch": 0.4838709677419355,
"grad_norm": 1.2603883743286133,
"learning_rate": 1.0610526315789472e-05,
"loss": 0.521,
"step": 180
},
{
"epoch": 0.48655913978494625,
"grad_norm": 2.3499302864074707,
"learning_rate": 1.008e-05,
"loss": 1.2467,
"step": 181
},
{
"epoch": 0.489247311827957,
"grad_norm": 1.8965387344360352,
"learning_rate": 9.549473684210526e-06,
"loss": 1.0413,
"step": 182
},
{
"epoch": 0.49193548387096775,
"grad_norm": 1.9322240352630615,
"learning_rate": 9.018947368421052e-06,
"loss": 1.1569,
"step": 183
},
{
"epoch": 0.4946236559139785,
"grad_norm": 2.103156089782715,
"learning_rate": 8.488421052631579e-06,
"loss": 1.1426,
"step": 184
},
{
"epoch": 0.49731182795698925,
"grad_norm": 1.795997142791748,
"learning_rate": 7.957894736842105e-06,
"loss": 1.1144,
"step": 185
},
{
"epoch": 0.5,
"grad_norm": 2.546483278274536,
"learning_rate": 7.42736842105263e-06,
"loss": 1.2147,
"step": 186
},
{
"epoch": 0.5026881720430108,
"grad_norm": 2.0869460105895996,
"learning_rate": 6.896842105263158e-06,
"loss": 1.4491,
"step": 187
},
{
"epoch": 0.5053763440860215,
"grad_norm": 2.946209192276001,
"learning_rate": 6.3663157894736845e-06,
"loss": 1.5029,
"step": 188
},
{
"epoch": 0.5080645161290323,
"grad_norm": 1.2845457792282104,
"learning_rate": 5.835789473684211e-06,
"loss": 0.5538,
"step": 189
},
{
"epoch": 0.510752688172043,
"grad_norm": 1.6108278036117554,
"learning_rate": 5.305263157894736e-06,
"loss": 0.5395,
"step": 190
},
{
"epoch": 0.5134408602150538,
"grad_norm": 2.4547786712646484,
"learning_rate": 4.774736842105263e-06,
"loss": 0.7314,
"step": 191
},
{
"epoch": 0.5161290322580645,
"grad_norm": 4.319262981414795,
"learning_rate": 4.244210526315789e-06,
"loss": 2.1648,
"step": 192
},
{
"epoch": 0.5188172043010753,
"grad_norm": 1.8788155317306519,
"learning_rate": 3.713684210526315e-06,
"loss": 0.9098,
"step": 193
},
{
"epoch": 0.521505376344086,
"grad_norm": 2.349867820739746,
"learning_rate": 3.1831578947368422e-06,
"loss": 1.2202,
"step": 194
},
{
"epoch": 0.5241935483870968,
"grad_norm": 2.8968505859375,
"learning_rate": 2.652631578947368e-06,
"loss": 1.8995,
"step": 195
},
{
"epoch": 0.5268817204301075,
"grad_norm": 4.167923927307129,
"learning_rate": 2.1221052631578947e-06,
"loss": 2.6088,
"step": 196
},
{
"epoch": 0.5295698924731183,
"grad_norm": 4.212091445922852,
"learning_rate": 1.5915789473684211e-06,
"loss": 1.8908,
"step": 197
},
{
"epoch": 0.532258064516129,
"grad_norm": 1.670013666152954,
"learning_rate": 1.0610526315789473e-06,
"loss": 0.529,
"step": 198
},
{
"epoch": 0.5349462365591398,
"grad_norm": 2.4547181129455566,
"learning_rate": 5.305263157894737e-07,
"loss": 0.7957,
"step": 199
},
{
"epoch": 0.5376344086021505,
"grad_norm": 2.100339651107788,
"learning_rate": 0.0,
"loss": 0.3624,
"step": 200
},
{
"epoch": 0.5376344086021505,
"eval_loss": 0.7936509251594543,
"eval_runtime": 20.0443,
"eval_samples_per_second": 7.833,
"eval_steps_per_second": 1.996,
"step": 200
}
],
"logging_steps": 1,
"max_steps": 200,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 50,
"stateful_callbacks": {
"EarlyStoppingCallback": {
"args": {
"early_stopping_patience": 5,
"early_stopping_threshold": 0.0
},
"attributes": {
"early_stopping_patience_counter": 0
}
},
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 6.65254329581568e+16,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}