{
"best_metric": 0.8191825747489929,
"best_model_checkpoint": "miner_id_24/checkpoint-500",
"epoch": 0.006646704397791965,
"eval_steps": 100,
"global_step": 500,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 1.329340879558393e-05,
"grad_norm": 1.8721857070922852,
"learning_rate": 5e-06,
"loss": 1.3474,
"step": 1
},
{
"epoch": 1.329340879558393e-05,
"eval_loss": 1.8458729982376099,
"eval_runtime": 6416.19,
"eval_samples_per_second": 19.746,
"eval_steps_per_second": 4.937,
"step": 1
},
{
"epoch": 2.658681759116786e-05,
"grad_norm": 2.6393752098083496,
"learning_rate": 1e-05,
"loss": 1.3589,
"step": 2
},
{
"epoch": 3.988022638675179e-05,
"grad_norm": 2.632097005844116,
"learning_rate": 1.5e-05,
"loss": 1.3262,
"step": 3
},
{
"epoch": 5.317363518233572e-05,
"grad_norm": 2.478116750717163,
"learning_rate": 2e-05,
"loss": 1.2122,
"step": 4
},
{
"epoch": 6.646704397791965e-05,
"grad_norm": 2.2656593322753906,
"learning_rate": 2.5e-05,
"loss": 1.2625,
"step": 5
},
{
"epoch": 7.976045277350358e-05,
"grad_norm": 2.969680070877075,
"learning_rate": 3e-05,
"loss": 1.1768,
"step": 6
},
{
"epoch": 9.305386156908751e-05,
"grad_norm": 2.586886405944824,
"learning_rate": 3.5e-05,
"loss": 1.2187,
"step": 7
},
{
"epoch": 0.00010634727036467144,
"grad_norm": 2.716963052749634,
"learning_rate": 4e-05,
"loss": 1.1879,
"step": 8
},
{
"epoch": 0.00011964067916025536,
"grad_norm": 2.8885955810546875,
"learning_rate": 4.5e-05,
"loss": 1.1258,
"step": 9
},
{
"epoch": 0.0001329340879558393,
"grad_norm": 3.392652988433838,
"learning_rate": 5e-05,
"loss": 1.1421,
"step": 10
},
{
"epoch": 0.00014622749675142322,
"grad_norm": 3.4042739868164062,
"learning_rate": 5.500000000000001e-05,
"loss": 1.0849,
"step": 11
},
{
"epoch": 0.00015952090554700716,
"grad_norm": 3.3209352493286133,
"learning_rate": 6e-05,
"loss": 1.1645,
"step": 12
},
{
"epoch": 0.00017281431434259108,
"grad_norm": 2.3565409183502197,
"learning_rate": 6.500000000000001e-05,
"loss": 1.0708,
"step": 13
},
{
"epoch": 0.00018610772313817502,
"grad_norm": 3.035846710205078,
"learning_rate": 7e-05,
"loss": 1.0408,
"step": 14
},
{
"epoch": 0.00019940113193375893,
"grad_norm": 2.8364081382751465,
"learning_rate": 7.500000000000001e-05,
"loss": 1.037,
"step": 15
},
{
"epoch": 0.00021269454072934287,
"grad_norm": 2.6129062175750732,
"learning_rate": 8e-05,
"loss": 1.011,
"step": 16
},
{
"epoch": 0.00022598794952492681,
"grad_norm": 2.415523052215576,
"learning_rate": 8.5e-05,
"loss": 1.0361,
"step": 17
},
{
"epoch": 0.00023928135832051073,
"grad_norm": 2.23931622505188,
"learning_rate": 9e-05,
"loss": 0.9874,
"step": 18
},
{
"epoch": 0.00025257476711609464,
"grad_norm": 2.3277835845947266,
"learning_rate": 9.5e-05,
"loss": 0.945,
"step": 19
},
{
"epoch": 0.0002658681759116786,
"grad_norm": 2.562563896179199,
"learning_rate": 0.0001,
"loss": 1.0356,
"step": 20
},
{
"epoch": 0.0002791615847072625,
"grad_norm": 2.91632342338562,
"learning_rate": 9.999892908320647e-05,
"loss": 0.9731,
"step": 21
},
{
"epoch": 0.00029245499350284644,
"grad_norm": 2.5617427825927734,
"learning_rate": 9.999571637870036e-05,
"loss": 0.886,
"step": 22
},
{
"epoch": 0.00030574840229843035,
"grad_norm": 2.8165314197540283,
"learning_rate": 9.999036202410325e-05,
"loss": 0.9657,
"step": 23
},
{
"epoch": 0.0003190418110940143,
"grad_norm": 3.312476396560669,
"learning_rate": 9.998286624877786e-05,
"loss": 0.9609,
"step": 24
},
{
"epoch": 0.00033233521988959824,
"grad_norm": 3.2963664531707764,
"learning_rate": 9.997322937381829e-05,
"loss": 0.9087,
"step": 25
},
{
"epoch": 0.00034562862868518215,
"grad_norm": 2.8535521030426025,
"learning_rate": 9.996145181203615e-05,
"loss": 1.1381,
"step": 26
},
{
"epoch": 0.0003589220374807661,
"grad_norm": 3.450657367706299,
"learning_rate": 9.994753406794301e-05,
"loss": 0.8032,
"step": 27
},
{
"epoch": 0.00037221544627635004,
"grad_norm": 3.046003818511963,
"learning_rate": 9.99314767377287e-05,
"loss": 0.9935,
"step": 28
},
{
"epoch": 0.00038550885507193395,
"grad_norm": 3.389086961746216,
"learning_rate": 9.991328050923581e-05,
"loss": 1.0387,
"step": 29
},
{
"epoch": 0.00039880226386751786,
"grad_norm": 4.072761535644531,
"learning_rate": 9.989294616193017e-05,
"loss": 0.9106,
"step": 30
},
{
"epoch": 0.00041209567266310183,
"grad_norm": 3.2475500106811523,
"learning_rate": 9.98704745668676e-05,
"loss": 1.074,
"step": 31
},
{
"epoch": 0.00042538908145868575,
"grad_norm": 3.8227083683013916,
"learning_rate": 9.98458666866564e-05,
"loss": 1.0989,
"step": 32
},
{
"epoch": 0.00043868249025426966,
"grad_norm": 4.062333583831787,
"learning_rate": 9.981912357541627e-05,
"loss": 0.9062,
"step": 33
},
{
"epoch": 0.00045197589904985363,
"grad_norm": 4.411200523376465,
"learning_rate": 9.97902463787331e-05,
"loss": 1.1829,
"step": 34
},
{
"epoch": 0.00046526930784543754,
"grad_norm": 3.290609121322632,
"learning_rate": 9.975923633360985e-05,
"loss": 1.119,
"step": 35
},
{
"epoch": 0.00047856271664102146,
"grad_norm": 3.268493175506592,
"learning_rate": 9.972609476841367e-05,
"loss": 1.1031,
"step": 36
},
{
"epoch": 0.0004918561254366054,
"grad_norm": 3.230178117752075,
"learning_rate": 9.969082310281891e-05,
"loss": 0.8486,
"step": 37
},
{
"epoch": 0.0005051495342321893,
"grad_norm": 3.4341812133789062,
"learning_rate": 9.965342284774632e-05,
"loss": 0.9245,
"step": 38
},
{
"epoch": 0.0005184429430277732,
"grad_norm": 3.4522907733917236,
"learning_rate": 9.961389560529836e-05,
"loss": 0.9227,
"step": 39
},
{
"epoch": 0.0005317363518233572,
"grad_norm": 3.976810932159424,
"learning_rate": 9.957224306869053e-05,
"loss": 1.0307,
"step": 40
},
{
"epoch": 0.0005450297606189411,
"grad_norm": 4.624007225036621,
"learning_rate": 9.952846702217886e-05,
"loss": 0.9322,
"step": 41
},
{
"epoch": 0.000558323169414525,
"grad_norm": 4.030016899108887,
"learning_rate": 9.948256934098352e-05,
"loss": 1.033,
"step": 42
},
{
"epoch": 0.000571616578210109,
"grad_norm": 3.9090261459350586,
"learning_rate": 9.943455199120837e-05,
"loss": 0.9218,
"step": 43
},
{
"epoch": 0.0005849099870056929,
"grad_norm": 4.588808536529541,
"learning_rate": 9.938441702975689e-05,
"loss": 1.163,
"step": 44
},
{
"epoch": 0.0005982033958012768,
"grad_norm": 4.292607307434082,
"learning_rate": 9.933216660424395e-05,
"loss": 0.9927,
"step": 45
},
{
"epoch": 0.0006114968045968607,
"grad_norm": 5.112330913543701,
"learning_rate": 9.927780295290389e-05,
"loss": 0.986,
"step": 46
},
{
"epoch": 0.0006247902133924447,
"grad_norm": 4.3529839515686035,
"learning_rate": 9.922132840449459e-05,
"loss": 0.9831,
"step": 47
},
{
"epoch": 0.0006380836221880286,
"grad_norm": 4.980841159820557,
"learning_rate": 9.916274537819775e-05,
"loss": 1.0926,
"step": 48
},
{
"epoch": 0.0006513770309836126,
"grad_norm": 6.681604862213135,
"learning_rate": 9.91020563835152e-05,
"loss": 1.1136,
"step": 49
},
{
"epoch": 0.0006646704397791965,
"grad_norm": 8.970100402832031,
"learning_rate": 9.903926402016153e-05,
"loss": 1.2344,
"step": 50
},
{
"epoch": 0.0006779638485747804,
"grad_norm": 3.5337533950805664,
"learning_rate": 9.897437097795257e-05,
"loss": 1.4309,
"step": 51
},
{
"epoch": 0.0006912572573703643,
"grad_norm": 2.607673406600952,
"learning_rate": 9.890738003669029e-05,
"loss": 1.2103,
"step": 52
},
{
"epoch": 0.0007045506661659482,
"grad_norm": 2.365112781524658,
"learning_rate": 9.883829406604363e-05,
"loss": 1.1847,
"step": 53
},
{
"epoch": 0.0007178440749615322,
"grad_norm": 1.980522871017456,
"learning_rate": 9.876711602542563e-05,
"loss": 0.94,
"step": 54
},
{
"epoch": 0.0007311374837571162,
"grad_norm": 1.7357509136199951,
"learning_rate": 9.869384896386668e-05,
"loss": 1.017,
"step": 55
},
{
"epoch": 0.0007444308925527001,
"grad_norm": 1.8685534000396729,
"learning_rate": 9.861849601988383e-05,
"loss": 0.977,
"step": 56
},
{
"epoch": 0.000757724301348284,
"grad_norm": 2.183997869491577,
"learning_rate": 9.854106042134641e-05,
"loss": 1.006,
"step": 57
},
{
"epoch": 0.0007710177101438679,
"grad_norm": 2.0316593647003174,
"learning_rate": 9.846154548533773e-05,
"loss": 1.2017,
"step": 58
},
{
"epoch": 0.0007843111189394518,
"grad_norm": 2.0671939849853516,
"learning_rate": 9.837995461801299e-05,
"loss": 0.9001,
"step": 59
},
{
"epoch": 0.0007976045277350357,
"grad_norm": 2.08467698097229,
"learning_rate": 9.829629131445342e-05,
"loss": 0.8321,
"step": 60
},
{
"epoch": 0.0008108979365306198,
"grad_norm": 1.6940298080444336,
"learning_rate": 9.821055915851647e-05,
"loss": 1.0874,
"step": 61
},
{
"epoch": 0.0008241913453262037,
"grad_norm": 2.179821491241455,
"learning_rate": 9.812276182268236e-05,
"loss": 1.1044,
"step": 62
},
{
"epoch": 0.0008374847541217876,
"grad_norm": 1.9811848402023315,
"learning_rate": 9.803290306789676e-05,
"loss": 0.9414,
"step": 63
},
{
"epoch": 0.0008507781629173715,
"grad_norm": 1.8735086917877197,
"learning_rate": 9.794098674340965e-05,
"loss": 0.997,
"step": 64
},
{
"epoch": 0.0008640715717129554,
"grad_norm": 2.031641960144043,
"learning_rate": 9.784701678661045e-05,
"loss": 1.0661,
"step": 65
},
{
"epoch": 0.0008773649805085393,
"grad_norm": 2.245903253555298,
"learning_rate": 9.775099722285935e-05,
"loss": 0.9864,
"step": 66
},
{
"epoch": 0.0008906583893041232,
"grad_norm": 2.0989179611206055,
"learning_rate": 9.765293216531486e-05,
"loss": 0.9457,
"step": 67
},
{
"epoch": 0.0009039517980997073,
"grad_norm": 2.0688443183898926,
"learning_rate": 9.755282581475769e-05,
"loss": 1.009,
"step": 68
},
{
"epoch": 0.0009172452068952912,
"grad_norm": 1.911279320716858,
"learning_rate": 9.74506824594107e-05,
"loss": 0.9685,
"step": 69
},
{
"epoch": 0.0009305386156908751,
"grad_norm": 2.2344343662261963,
"learning_rate": 9.73465064747553e-05,
"loss": 0.8613,
"step": 70
},
{
"epoch": 0.000943832024486459,
"grad_norm": 2.0513057708740234,
"learning_rate": 9.724030232334391e-05,
"loss": 0.9773,
"step": 71
},
{
"epoch": 0.0009571254332820429,
"grad_norm": 2.369529962539673,
"learning_rate": 9.713207455460894e-05,
"loss": 1.1409,
"step": 72
},
{
"epoch": 0.0009704188420776268,
"grad_norm": 2.4331586360931396,
"learning_rate": 9.702182780466775e-05,
"loss": 0.872,
"step": 73
},
{
"epoch": 0.0009837122508732107,
"grad_norm": 2.6117405891418457,
"learning_rate": 9.690956679612421e-05,
"loss": 0.8944,
"step": 74
},
{
"epoch": 0.0009970056596687947,
"grad_norm": 2.2126312255859375,
"learning_rate": 9.67952963378663e-05,
"loss": 1.0206,
"step": 75
},
{
"epoch": 0.0010102990684643786,
"grad_norm": 2.374542474746704,
"learning_rate": 9.667902132486009e-05,
"loss": 0.9574,
"step": 76
},
{
"epoch": 0.0010235924772599625,
"grad_norm": 3.5025453567504883,
"learning_rate": 9.656074673794018e-05,
"loss": 0.9695,
"step": 77
},
{
"epoch": 0.0010368858860555464,
"grad_norm": 2.540903091430664,
"learning_rate": 9.644047764359622e-05,
"loss": 0.8131,
"step": 78
},
{
"epoch": 0.0010501792948511305,
"grad_norm": 2.9146223068237305,
"learning_rate": 9.631821919375591e-05,
"loss": 0.9428,
"step": 79
},
{
"epoch": 0.0010634727036467144,
"grad_norm": 2.4570088386535645,
"learning_rate": 9.619397662556435e-05,
"loss": 0.7913,
"step": 80
},
{
"epoch": 0.0010767661124422984,
"grad_norm": 2.292844533920288,
"learning_rate": 9.606775526115963e-05,
"loss": 0.897,
"step": 81
},
{
"epoch": 0.0010900595212378823,
"grad_norm": 2.6344974040985107,
"learning_rate": 9.593956050744492e-05,
"loss": 0.8382,
"step": 82
},
{
"epoch": 0.0011033529300334662,
"grad_norm": 2.832695484161377,
"learning_rate": 9.580939785585681e-05,
"loss": 1.0191,
"step": 83
},
{
"epoch": 0.00111664633882905,
"grad_norm": 2.5540668964385986,
"learning_rate": 9.567727288213005e-05,
"loss": 0.913,
"step": 84
},
{
"epoch": 0.001129939747624634,
"grad_norm": 3.082251787185669,
"learning_rate": 9.554319124605879e-05,
"loss": 0.9144,
"step": 85
},
{
"epoch": 0.001143233156420218,
"grad_norm": 2.795367956161499,
"learning_rate": 9.540715869125407e-05,
"loss": 0.8055,
"step": 86
},
{
"epoch": 0.0011565265652158018,
"grad_norm": 2.8071813583374023,
"learning_rate": 9.526918104489777e-05,
"loss": 0.8654,
"step": 87
},
{
"epoch": 0.0011698199740113858,
"grad_norm": 2.827481508255005,
"learning_rate": 9.512926421749304e-05,
"loss": 0.7734,
"step": 88
},
{
"epoch": 0.0011831133828069697,
"grad_norm": 3.1861209869384766,
"learning_rate": 9.498741420261108e-05,
"loss": 0.8429,
"step": 89
},
{
"epoch": 0.0011964067916025536,
"grad_norm": 3.1388401985168457,
"learning_rate": 9.484363707663442e-05,
"loss": 0.7359,
"step": 90
},
{
"epoch": 0.0012097002003981375,
"grad_norm": 3.5609400272369385,
"learning_rate": 9.469793899849661e-05,
"loss": 0.9125,
"step": 91
},
{
"epoch": 0.0012229936091937214,
"grad_norm": 3.0074970722198486,
"learning_rate": 9.45503262094184e-05,
"loss": 0.7249,
"step": 92
},
{
"epoch": 0.0012362870179893056,
"grad_norm": 4.342213153839111,
"learning_rate": 9.440080503264037e-05,
"loss": 0.7186,
"step": 93
},
{
"epoch": 0.0012495804267848895,
"grad_norm": 5.011289596557617,
"learning_rate": 9.42493818731521e-05,
"loss": 0.8043,
"step": 94
},
{
"epoch": 0.0012628738355804734,
"grad_norm": 5.687901020050049,
"learning_rate": 9.409606321741775e-05,
"loss": 1.1269,
"step": 95
},
{
"epoch": 0.0012761672443760573,
"grad_norm": 4.859226703643799,
"learning_rate": 9.394085563309827e-05,
"loss": 1.0209,
"step": 96
},
{
"epoch": 0.0012894606531716412,
"grad_norm": 4.058477401733398,
"learning_rate": 9.378376576876999e-05,
"loss": 0.8054,
"step": 97
},
{
"epoch": 0.0013027540619672251,
"grad_norm": 4.398575782775879,
"learning_rate": 9.362480035363986e-05,
"loss": 0.8996,
"step": 98
},
{
"epoch": 0.001316047470762809,
"grad_norm": 5.308574199676514,
"learning_rate": 9.34639661972572e-05,
"loss": 0.9358,
"step": 99
},
{
"epoch": 0.001329340879558393,
"grad_norm": 6.339052200317383,
"learning_rate": 9.330127018922194e-05,
"loss": 0.9692,
"step": 100
},
{
"epoch": 0.001329340879558393,
"eval_loss": 1.1524839401245117,
"eval_runtime": 6446.603,
"eval_samples_per_second": 19.653,
"eval_steps_per_second": 4.913,
"step": 100
},
{
"epoch": 0.0013426342883539769,
"grad_norm": 3.103745222091675,
"learning_rate": 9.31367192988896e-05,
"loss": 1.3258,
"step": 101
},
{
"epoch": 0.0013559276971495608,
"grad_norm": 2.7128050327301025,
"learning_rate": 9.297032057507264e-05,
"loss": 1.2396,
"step": 102
},
{
"epoch": 0.0013692211059451447,
"grad_norm": 2.0733771324157715,
"learning_rate": 9.280208114573859e-05,
"loss": 1.094,
"step": 103
},
{
"epoch": 0.0013825145147407286,
"grad_norm": 1.806241750717163,
"learning_rate": 9.263200821770461e-05,
"loss": 1.1404,
"step": 104
},
{
"epoch": 0.0013958079235363125,
"grad_norm": 1.4612520933151245,
"learning_rate": 9.246010907632895e-05,
"loss": 1.0117,
"step": 105
},
{
"epoch": 0.0014091013323318964,
"grad_norm": 1.496860384941101,
"learning_rate": 9.228639108519868e-05,
"loss": 0.9669,
"step": 106
},
{
"epoch": 0.0014223947411274806,
"grad_norm": 1.775729775428772,
"learning_rate": 9.211086168581433e-05,
"loss": 0.9885,
"step": 107
},
{
"epoch": 0.0014356881499230645,
"grad_norm": 1.6379244327545166,
"learning_rate": 9.193352839727121e-05,
"loss": 1.1367,
"step": 108
},
{
"epoch": 0.0014489815587186484,
"grad_norm": 2.074195384979248,
"learning_rate": 9.175439881593716e-05,
"loss": 0.9692,
"step": 109
},
{
"epoch": 0.0014622749675142323,
"grad_norm": 2.178331136703491,
"learning_rate": 9.157348061512727e-05,
"loss": 0.9866,
"step": 110
},
{
"epoch": 0.0014755683763098162,
"grad_norm": 2.0639569759368896,
"learning_rate": 9.139078154477512e-05,
"loss": 0.8893,
"step": 111
},
{
"epoch": 0.0014888617851054001,
"grad_norm": 1.9862120151519775,
"learning_rate": 9.120630943110077e-05,
"loss": 0.8218,
"step": 112
},
{
"epoch": 0.001502155193900984,
"grad_norm": 2.1280570030212402,
"learning_rate": 9.102007217627568e-05,
"loss": 0.9186,
"step": 113
},
{
"epoch": 0.001515448602696568,
"grad_norm": 2.001906156539917,
"learning_rate": 9.083207775808396e-05,
"loss": 0.9463,
"step": 114
},
{
"epoch": 0.0015287420114921519,
"grad_norm": 1.7380023002624512,
"learning_rate": 9.064233422958077e-05,
"loss": 1.1009,
"step": 115
},
{
"epoch": 0.0015420354202877358,
"grad_norm": 2.008915901184082,
"learning_rate": 9.045084971874738e-05,
"loss": 0.9508,
"step": 116
},
{
"epoch": 0.0015553288290833197,
"grad_norm": 2.4702091217041016,
"learning_rate": 9.025763242814291e-05,
"loss": 0.9842,
"step": 117
},
{
"epoch": 0.0015686222378789036,
"grad_norm": 1.9039493799209595,
"learning_rate": 9.006269063455304e-05,
"loss": 0.8789,
"step": 118
},
{
"epoch": 0.0015819156466744875,
"grad_norm": 2.197869300842285,
"learning_rate": 8.986603268863536e-05,
"loss": 0.9725,
"step": 119
},
{
"epoch": 0.0015952090554700715,
"grad_norm": 2.327695369720459,
"learning_rate": 8.966766701456177e-05,
"loss": 0.8861,
"step": 120
},
{
"epoch": 0.0016085024642656556,
"grad_norm": 2.6188266277313232,
"learning_rate": 8.94676021096575e-05,
"loss": 0.8315,
"step": 121
},
{
"epoch": 0.0016217958730612395,
"grad_norm": 2.575977087020874,
"learning_rate": 8.926584654403724e-05,
"loss": 0.8774,
"step": 122
},
{
"epoch": 0.0016350892818568234,
"grad_norm": 2.5948164463043213,
"learning_rate": 8.906240896023794e-05,
"loss": 0.9388,
"step": 123
},
{
"epoch": 0.0016483826906524073,
"grad_norm": 2.5215580463409424,
"learning_rate": 8.885729807284856e-05,
"loss": 0.9225,
"step": 124
},
{
"epoch": 0.0016616760994479912,
"grad_norm": 2.7977840900421143,
"learning_rate": 8.865052266813685e-05,
"loss": 0.9,
"step": 125
},
{
"epoch": 0.0016749695082435752,
"grad_norm": 2.6751739978790283,
"learning_rate": 8.844209160367299e-05,
"loss": 0.9199,
"step": 126
},
{
"epoch": 0.001688262917039159,
"grad_norm": 2.8910608291625977,
"learning_rate": 8.823201380795001e-05,
"loss": 0.8998,
"step": 127
},
{
"epoch": 0.001701556325834743,
"grad_norm": 2.7337453365325928,
"learning_rate": 8.802029828000156e-05,
"loss": 0.8704,
"step": 128
},
{
"epoch": 0.001714849734630327,
"grad_norm": 2.6069486141204834,
"learning_rate": 8.780695408901613e-05,
"loss": 1.0066,
"step": 129
},
{
"epoch": 0.0017281431434259108,
"grad_norm": 2.8615970611572266,
"learning_rate": 8.759199037394887e-05,
"loss": 0.9266,
"step": 130
},
{
"epoch": 0.0017414365522214947,
"grad_norm": 2.9473369121551514,
"learning_rate": 8.737541634312985e-05,
"loss": 0.9654,
"step": 131
},
{
"epoch": 0.0017547299610170786,
"grad_norm": 2.752246141433716,
"learning_rate": 8.715724127386972e-05,
"loss": 0.9983,
"step": 132
},
{
"epoch": 0.0017680233698126626,
"grad_norm": 2.2985565662384033,
"learning_rate": 8.693747451206232e-05,
"loss": 0.9895,
"step": 133
},
{
"epoch": 0.0017813167786082465,
"grad_norm": 2.8219075202941895,
"learning_rate": 8.671612547178428e-05,
"loss": 1.0577,
"step": 134
},
{
"epoch": 0.0017946101874038306,
"grad_norm": 2.7868380546569824,
"learning_rate": 8.649320363489179e-05,
"loss": 0.8642,
"step": 135
},
{
"epoch": 0.0018079035961994145,
"grad_norm": 2.816678762435913,
"learning_rate": 8.626871855061438e-05,
"loss": 0.8492,
"step": 136
},
{
"epoch": 0.0018211970049949984,
"grad_norm": 3.1291842460632324,
"learning_rate": 8.604267983514594e-05,
"loss": 0.949,
"step": 137
},
{
"epoch": 0.0018344904137905823,
"grad_norm": 2.592275857925415,
"learning_rate": 8.581509717123273e-05,
"loss": 0.864,
"step": 138
},
{
"epoch": 0.0018477838225861663,
"grad_norm": 3.2860209941864014,
"learning_rate": 8.558598030775857e-05,
"loss": 1.0107,
"step": 139
},
{
"epoch": 0.0018610772313817502,
"grad_norm": 4.629628658294678,
"learning_rate": 8.535533905932738e-05,
"loss": 0.9317,
"step": 140
},
{
"epoch": 0.001874370640177334,
"grad_norm": 3.7329866886138916,
"learning_rate": 8.51231833058426e-05,
"loss": 0.9857,
"step": 141
},
{
"epoch": 0.001887664048972918,
"grad_norm": 2.920423746109009,
"learning_rate": 8.488952299208401e-05,
"loss": 0.8091,
"step": 142
},
{
"epoch": 0.001900957457768502,
"grad_norm": 3.3774452209472656,
"learning_rate": 8.46543681272818e-05,
"loss": 0.9237,
"step": 143
},
{
"epoch": 0.0019142508665640858,
"grad_norm": 3.698071002960205,
"learning_rate": 8.44177287846877e-05,
"loss": 0.9587,
"step": 144
},
{
"epoch": 0.0019275442753596697,
"grad_norm": 4.451910972595215,
"learning_rate": 8.417961510114356e-05,
"loss": 0.8888,
"step": 145
},
{
"epoch": 0.0019408376841552537,
"grad_norm": 4.134186267852783,
"learning_rate": 8.39400372766471e-05,
"loss": 0.8612,
"step": 146
},
{
"epoch": 0.0019541310929508376,
"grad_norm": 3.487985610961914,
"learning_rate": 8.36990055739149e-05,
"loss": 0.8982,
"step": 147
},
{
"epoch": 0.0019674245017464215,
"grad_norm": 4.111109733581543,
"learning_rate": 8.345653031794292e-05,
"loss": 0.901,
"step": 148
},
{
"epoch": 0.0019807179105420054,
"grad_norm": 6.2020344734191895,
"learning_rate": 8.321262189556409e-05,
"loss": 1.1266,
"step": 149
},
{
"epoch": 0.0019940113193375893,
"grad_norm": 7.866227149963379,
"learning_rate": 8.296729075500344e-05,
"loss": 0.8518,
"step": 150
},
{
"epoch": 0.0020073047281331732,
"grad_norm": 2.092473030090332,
"learning_rate": 8.272054740543052e-05,
"loss": 1.2409,
"step": 151
},
{
"epoch": 0.002020598136928757,
"grad_norm": 2.1192219257354736,
"learning_rate": 8.247240241650918e-05,
"loss": 1.0041,
"step": 152
},
{
"epoch": 0.002033891545724341,
"grad_norm": 1.6776955127716064,
"learning_rate": 8.222286641794488e-05,
"loss": 1.0113,
"step": 153
},
{
"epoch": 0.002047184954519925,
"grad_norm": 1.6508393287658691,
"learning_rate": 8.197195009902924e-05,
"loss": 0.9146,
"step": 154
},
{
"epoch": 0.002060478363315509,
"grad_norm": 1.587715744972229,
"learning_rate": 8.171966420818228e-05,
"loss": 0.9908,
"step": 155
},
{
"epoch": 0.002073771772111093,
"grad_norm": 1.6308236122131348,
"learning_rate": 8.146601955249188e-05,
"loss": 0.8901,
"step": 156
},
{
"epoch": 0.002087065180906677,
"grad_norm": 1.8169214725494385,
"learning_rate": 8.121102699725089e-05,
"loss": 0.9361,
"step": 157
},
{
"epoch": 0.002100358589702261,
"grad_norm": 2.603496551513672,
"learning_rate": 8.095469746549172e-05,
"loss": 0.9172,
"step": 158
},
{
"epoch": 0.002113651998497845,
"grad_norm": 1.6392323970794678,
"learning_rate": 8.069704193751832e-05,
"loss": 1.1013,
"step": 159
},
{
"epoch": 0.002126945407293429,
"grad_norm": 1.9545910358428955,
"learning_rate": 8.043807145043604e-05,
"loss": 1.0921,
"step": 160
},
{
"epoch": 0.002140238816089013,
"grad_norm": 2.238255023956299,
"learning_rate": 8.017779709767858e-05,
"loss": 1.0125,
"step": 161
},
{
"epoch": 0.0021535322248845967,
"grad_norm": 1.8243411779403687,
"learning_rate": 7.991623002853296e-05,
"loss": 0.8934,
"step": 162
},
{
"epoch": 0.0021668256336801806,
"grad_norm": 2.3958449363708496,
"learning_rate": 7.965338144766186e-05,
"loss": 0.965,
"step": 163
},
{
"epoch": 0.0021801190424757646,
"grad_norm": 1.8638349771499634,
"learning_rate": 7.938926261462366e-05,
"loss": 1.0105,
"step": 164
},
{
"epoch": 0.0021934124512713485,
"grad_norm": 1.7631226778030396,
"learning_rate": 7.912388484339012e-05,
"loss": 0.8695,
"step": 165
},
{
"epoch": 0.0022067058600669324,
"grad_norm": 2.349992036819458,
"learning_rate": 7.88572595018617e-05,
"loss": 0.892,
"step": 166
},
{
"epoch": 0.0022199992688625163,
"grad_norm": 2.006511926651001,
"learning_rate": 7.858939801138061e-05,
"loss": 1.0383,
"step": 167
},
{
"epoch": 0.0022332926776581,
"grad_norm": 1.9308192729949951,
"learning_rate": 7.832031184624164e-05,
"loss": 0.9448,
"step": 168
},
{
"epoch": 0.002246586086453684,
"grad_norm": 1.9999449253082275,
"learning_rate": 7.80500125332005e-05,
"loss": 1.0001,
"step": 169
},
{
"epoch": 0.002259879495249268,
"grad_norm": 1.9072273969650269,
"learning_rate": 7.777851165098012e-05,
"loss": 1.0205,
"step": 170
},
{
"epoch": 0.002273172904044852,
"grad_norm": 2.0177414417266846,
"learning_rate": 7.750582082977467e-05,
"loss": 0.9317,
"step": 171
},
{
"epoch": 0.002286466312840436,
"grad_norm": 2.2211759090423584,
"learning_rate": 7.723195175075136e-05,
"loss": 0.9795,
"step": 172
},
{
"epoch": 0.0022997597216360198,
"grad_norm": 2.068176746368408,
"learning_rate": 7.695691614555003e-05,
"loss": 0.8416,
"step": 173
},
{
"epoch": 0.0023130531304316037,
"grad_norm": 2.195427179336548,
"learning_rate": 7.668072579578058e-05,
"loss": 1.0732,
"step": 174
},
{
"epoch": 0.0023263465392271876,
"grad_norm": 1.893680453300476,
"learning_rate": 7.64033925325184e-05,
"loss": 0.7674,
"step": 175
},
{
"epoch": 0.0023396399480227715,
"grad_norm": 2.43972110748291,
"learning_rate": 7.612492823579745e-05,
"loss": 0.8503,
"step": 176
},
{
"epoch": 0.0023529333568183554,
"grad_norm": 2.2311770915985107,
"learning_rate": 7.584534483410137e-05,
"loss": 0.8512,
"step": 177
},
{
"epoch": 0.0023662267656139394,
"grad_norm": 1.9317328929901123,
"learning_rate": 7.55646543038526e-05,
"loss": 0.8084,
"step": 178
},
{
"epoch": 0.0023795201744095233,
"grad_norm": 2.440880298614502,
"learning_rate": 7.528286866889924e-05,
"loss": 0.8489,
"step": 179
},
{
"epoch": 0.002392813583205107,
"grad_norm": 2.4223392009735107,
"learning_rate": 7.500000000000001e-05,
"loss": 0.774,
"step": 180
},
{
"epoch": 0.002406106992000691,
"grad_norm": 2.648416519165039,
"learning_rate": 7.471606041430723e-05,
"loss": 0.8745,
"step": 181
},
{
"epoch": 0.002419400400796275,
"grad_norm": 2.996626138687134,
"learning_rate": 7.443106207484776e-05,
"loss": 0.9904,
"step": 182
},
{
"epoch": 0.002432693809591859,
"grad_norm": 2.850503921508789,
"learning_rate": 7.414501719000187e-05,
"loss": 0.7681,
"step": 183
},
{
"epoch": 0.002445987218387443,
"grad_norm": 2.932617664337158,
"learning_rate": 7.385793801298042e-05,
"loss": 0.9635,
"step": 184
},
{
"epoch": 0.002459280627183027,
"grad_norm": 3.0173890590667725,
"learning_rate": 7.35698368412999e-05,
"loss": 0.8415,
"step": 185
},
{
"epoch": 0.002472574035978611,
"grad_norm": 2.9359889030456543,
"learning_rate": 7.328072601625557e-05,
"loss": 0.9175,
"step": 186
},
{
"epoch": 0.002485867444774195,
"grad_norm": 3.4823861122131348,
"learning_rate": 7.2990617922393e-05,
"loss": 0.8688,
"step": 187
},
{
"epoch": 0.002499160853569779,
"grad_norm": 3.141000270843506,
"learning_rate": 7.269952498697734e-05,
"loss": 0.8778,
"step": 188
},
{
"epoch": 0.002512454262365363,
"grad_norm": 3.1278038024902344,
"learning_rate": 7.240745967946113e-05,
"loss": 0.9589,
"step": 189
},
{
"epoch": 0.0025257476711609468,
"grad_norm": 3.5584897994995117,
"learning_rate": 7.211443451095007e-05,
"loss": 0.9538,
"step": 190
},
{
"epoch": 0.0025390410799565307,
"grad_norm": 2.9297611713409424,
"learning_rate": 7.18204620336671e-05,
"loss": 0.6884,
"step": 191
},
{
"epoch": 0.0025523344887521146,
"grad_norm": 2.969492197036743,
"learning_rate": 7.152555484041476e-05,
"loss": 0.9301,
"step": 192
},
{
"epoch": 0.0025656278975476985,
"grad_norm": 3.1263134479522705,
"learning_rate": 7.122972556403567e-05,
"loss": 0.8869,
"step": 193
},
{
"epoch": 0.0025789213063432824,
"grad_norm": 3.8709805011749268,
"learning_rate": 7.09329868768714e-05,
"loss": 0.8329,
"step": 194
},
{
"epoch": 0.0025922147151388663,
"grad_norm": 3.882387399673462,
"learning_rate": 7.063535149021973e-05,
"loss": 0.8367,
"step": 195
},
{
"epoch": 0.0026055081239344502,
"grad_norm": 3.4667751789093018,
"learning_rate": 7.033683215379002e-05,
"loss": 0.9047,
"step": 196
},
{
"epoch": 0.002618801532730034,
"grad_norm": 3.365933895111084,
"learning_rate": 7.003744165515705e-05,
"loss": 0.7373,
"step": 197
},
{
"epoch": 0.002632094941525618,
"grad_norm": 3.5493037700653076,
"learning_rate": 6.973719281921335e-05,
"loss": 0.8867,
"step": 198
},
{
"epoch": 0.002645388350321202,
"grad_norm": 3.7933878898620605,
"learning_rate": 6.943609850761979e-05,
"loss": 0.902,
"step": 199
},
{
"epoch": 0.002658681759116786,
"grad_norm": 6.037397384643555,
"learning_rate": 6.91341716182545e-05,
"loss": 0.9443,
"step": 200
},
{
"epoch": 0.002658681759116786,
"eval_loss": 0.9358128905296326,
"eval_runtime": 6447.2498,
"eval_samples_per_second": 19.651,
"eval_steps_per_second": 4.913,
"step": 200
},
{
"epoch": 0.00267197516791237,
"grad_norm": 1.469771385192871,
"learning_rate": 6.883142508466054e-05,
"loss": 1.1812,
"step": 201
},
{
"epoch": 0.0026852685767079537,
"grad_norm": 1.9593838453292847,
"learning_rate": 6.852787187549182e-05,
"loss": 0.9595,
"step": 202
},
{
"epoch": 0.0026985619855035376,
"grad_norm": 1.881966471672058,
"learning_rate": 6.82235249939575e-05,
"loss": 1.0891,
"step": 203
},
{
"epoch": 0.0027118553942991216,
"grad_norm": 1.5613300800323486,
"learning_rate": 6.7918397477265e-05,
"loss": 1.1484,
"step": 204
},
{
"epoch": 0.0027251488030947055,
"grad_norm": 1.6164745092391968,
"learning_rate": 6.761250239606169e-05,
"loss": 0.8214,
"step": 205
},
{
"epoch": 0.0027384422118902894,
"grad_norm": 1.4947420358657837,
"learning_rate": 6.730585285387465e-05,
"loss": 0.9956,
"step": 206
},
{
"epoch": 0.0027517356206858733,
"grad_norm": 1.6902318000793457,
"learning_rate": 6.699846198654971e-05,
"loss": 0.8679,
"step": 207
},
{
"epoch": 0.0027650290294814572,
"grad_norm": 1.6245845556259155,
"learning_rate": 6.669034296168855e-05,
"loss": 0.9362,
"step": 208
},
{
"epoch": 0.002778322438277041,
"grad_norm": 1.687902569770813,
"learning_rate": 6.638150897808468e-05,
"loss": 0.9086,
"step": 209
},
{
"epoch": 0.002791615847072625,
"grad_norm": 1.6738238334655762,
"learning_rate": 6.607197326515808e-05,
"loss": 1.0715,
"step": 210
},
{
"epoch": 0.002804909255868209,
"grad_norm": 1.8981541395187378,
"learning_rate": 6.57617490823885e-05,
"loss": 1.0964,
"step": 211
},
{
"epoch": 0.002818202664663793,
"grad_norm": 1.994768500328064,
"learning_rate": 6.545084971874738e-05,
"loss": 1.0326,
"step": 212
},
{
"epoch": 0.002831496073459377,
"grad_norm": 1.953397274017334,
"learning_rate": 6.513928849212873e-05,
"loss": 0.9181,
"step": 213
},
{
"epoch": 0.002844789482254961,
"grad_norm": 1.5838758945465088,
"learning_rate": 6.482707874877854e-05,
"loss": 1.0109,
"step": 214
},
{
"epoch": 0.002858082891050545,
"grad_norm": 2.204489231109619,
"learning_rate": 6.451423386272312e-05,
"loss": 0.9583,
"step": 215
},
{
"epoch": 0.002871376299846129,
"grad_norm": 1.679416537284851,
"learning_rate": 6.420076723519614e-05,
"loss": 1.0152,
"step": 216
},
{
"epoch": 0.002884669708641713,
"grad_norm": 2.062034845352173,
"learning_rate": 6.388669229406462e-05,
"loss": 0.9058,
"step": 217
},
{
"epoch": 0.002897963117437297,
"grad_norm": 1.9006659984588623,
"learning_rate": 6.357202249325371e-05,
"loss": 0.8065,
"step": 218
},
{
"epoch": 0.0029112565262328807,
"grad_norm": 1.9091770648956299,
"learning_rate": 6.32567713121704e-05,
"loss": 0.8315,
"step": 219
},
{
"epoch": 0.0029245499350284646,
"grad_norm": 2.309170722961426,
"learning_rate": 6.294095225512603e-05,
"loss": 0.9633,
"step": 220
},
{
"epoch": 0.0029378433438240485,
"grad_norm": 2.036591053009033,
"learning_rate": 6.26245788507579e-05,
"loss": 0.9114,
"step": 221
},
{
"epoch": 0.0029511367526196325,
"grad_norm": 2.540407419204712,
"learning_rate": 6.230766465144967e-05,
"loss": 0.8928,
"step": 222
},
{
"epoch": 0.0029644301614152164,
"grad_norm": 2.447540044784546,
"learning_rate": 6.199022323275083e-05,
"loss": 0.8958,
"step": 223
},
{
"epoch": 0.0029777235702108003,
"grad_norm": 2.1421115398406982,
"learning_rate": 6.167226819279528e-05,
"loss": 0.9212,
"step": 224
},
{
"epoch": 0.002991016979006384,
"grad_norm": 2.2318172454833984,
"learning_rate": 6.135381315171867e-05,
"loss": 0.7849,
"step": 225
},
{
"epoch": 0.003004310387801968,
"grad_norm": 2.107386350631714,
"learning_rate": 6.103487175107507e-05,
"loss": 0.8173,
"step": 226
},
{
"epoch": 0.003017603796597552,
"grad_norm": 2.08105206489563,
"learning_rate": 6.071545765325254e-05,
"loss": 0.9439,
"step": 227
},
{
"epoch": 0.003030897205393136,
"grad_norm": 2.376014471054077,
"learning_rate": 6.0395584540887963e-05,
"loss": 0.9148,
"step": 228
},
{
"epoch": 0.00304419061418872,
"grad_norm": 2.1427810192108154,
"learning_rate": 6.007526611628086e-05,
"loss": 0.8044,
"step": 229
},
{
"epoch": 0.0030574840229843038,
"grad_norm": 2.6007277965545654,
"learning_rate": 5.9754516100806423e-05,
"loss": 0.8667,
"step": 230
},
{
"epoch": 0.0030707774317798877,
"grad_norm": 2.6047568321228027,
"learning_rate": 5.9433348234327765e-05,
"loss": 0.8761,
"step": 231
},
{
"epoch": 0.0030840708405754716,
"grad_norm": 2.6537301540374756,
"learning_rate": 5.911177627460739e-05,
"loss": 1.0008,
"step": 232
},
{
"epoch": 0.0030973642493710555,
"grad_norm": 2.5320510864257812,
"learning_rate": 5.8789813996717736e-05,
"loss": 0.9914,
"step": 233
},
{
"epoch": 0.0031106576581666394,
"grad_norm": 2.1913628578186035,
"learning_rate": 5.8467475192451226e-05,
"loss": 0.8323,
"step": 234
},
{
"epoch": 0.0031239510669622233,
"grad_norm": 2.716449737548828,
"learning_rate": 5.814477366972945e-05,
"loss": 0.885,
"step": 235
},
{
"epoch": 0.0031372444757578073,
"grad_norm": 3.720883846282959,
"learning_rate": 5.782172325201155e-05,
"loss": 1.0299,
"step": 236
},
{
"epoch": 0.003150537884553391,
"grad_norm": 2.6181509494781494,
"learning_rate": 5.749833777770225e-05,
"loss": 0.8591,
"step": 237
},
{
"epoch": 0.003163831293348975,
"grad_norm": 2.382664918899536,
"learning_rate": 5.717463109955896e-05,
"loss": 0.8851,
"step": 238
},
{
"epoch": 0.003177124702144559,
"grad_norm": 2.3294265270233154,
"learning_rate": 5.685061708409841e-05,
"loss": 0.7961,
"step": 239
},
{
"epoch": 0.003190418110940143,
"grad_norm": 3.5375571250915527,
"learning_rate": 5.6526309611002594e-05,
"loss": 0.9115,
"step": 240
},
{
"epoch": 0.003203711519735727,
"grad_norm": 2.9233639240264893,
"learning_rate": 5.6201722572524275e-05,
"loss": 0.7504,
"step": 241
},
{
"epoch": 0.003217004928531311,
"grad_norm": 3.2264790534973145,
"learning_rate": 5.587686987289189e-05,
"loss": 0.7414,
"step": 242
},
{
"epoch": 0.003230298337326895,
"grad_norm": 2.921107530593872,
"learning_rate": 5.5551765427713884e-05,
"loss": 0.5761,
"step": 243
},
{
"epoch": 0.003243591746122479,
"grad_norm": 3.3718862533569336,
"learning_rate": 5.522642316338268e-05,
"loss": 0.8256,
"step": 244
},
{
"epoch": 0.003256885154918063,
"grad_norm": 2.911158800125122,
"learning_rate": 5.490085701647805e-05,
"loss": 0.7299,
"step": 245
},
{
"epoch": 0.003270178563713647,
"grad_norm": 3.8199920654296875,
"learning_rate": 5.457508093317013e-05,
"loss": 0.7782,
"step": 246
},
{
"epoch": 0.0032834719725092307,
"grad_norm": 3.6611592769622803,
"learning_rate": 5.4249108868622086e-05,
"loss": 0.7346,
"step": 247
},
{
"epoch": 0.0032967653813048147,
"grad_norm": 3.3983826637268066,
"learning_rate": 5.392295478639225e-05,
"loss": 0.68,
"step": 248
},
{
"epoch": 0.0033100587901003986,
"grad_norm": 4.487311840057373,
"learning_rate": 5.359663265783598e-05,
"loss": 0.7677,
"step": 249
},
{
"epoch": 0.0033233521988959825,
"grad_norm": 5.948139667510986,
"learning_rate": 5.327015646150716e-05,
"loss": 0.7702,
"step": 250
},
{
"epoch": 0.0033366456076915664,
"grad_norm": 1.1044285297393799,
"learning_rate": 5.294354018255945e-05,
"loss": 0.9482,
"step": 251
},
{
"epoch": 0.0033499390164871503,
"grad_norm": 1.6621804237365723,
"learning_rate": 5.26167978121472e-05,
"loss": 0.8978,
"step": 252
},
{
"epoch": 0.0033632324252827342,
"grad_norm": 1.7893524169921875,
"learning_rate": 5.228994334682604e-05,
"loss": 0.9667,
"step": 253
},
{
"epoch": 0.003376525834078318,
"grad_norm": 1.6742503643035889,
"learning_rate": 5.196299078795344e-05,
"loss": 0.8508,
"step": 254
},
{
"epoch": 0.003389819242873902,
"grad_norm": 1.6336678266525269,
"learning_rate": 5.1635954141088813e-05,
"loss": 1.0183,
"step": 255
},
{
"epoch": 0.003403112651669486,
"grad_norm": 1.4634300470352173,
"learning_rate": 5.1308847415393666e-05,
"loss": 0.8325,
"step": 256
},
{
"epoch": 0.00341640606046507,
"grad_norm": 1.8752628564834595,
"learning_rate": 5.0981684623031415e-05,
"loss": 0.8682,
"step": 257
},
{
"epoch": 0.003429699469260654,
"grad_norm": 1.516921043395996,
"learning_rate": 5.0654479778567223e-05,
"loss": 0.87,
"step": 258
},
{
"epoch": 0.0034429928780562377,
"grad_norm": 1.953859567642212,
"learning_rate": 5.0327246898367597e-05,
"loss": 1.0406,
"step": 259
},
{
"epoch": 0.0034562862868518216,
"grad_norm": 1.692104697227478,
"learning_rate": 5e-05,
"loss": 1.0228,
"step": 260
},
{
"epoch": 0.0034695796956474055,
"grad_norm": 1.6310532093048096,
"learning_rate": 4.9672753101632415e-05,
"loss": 1.0342,
"step": 261
},
{
"epoch": 0.0034828731044429895,
"grad_norm": 1.8367486000061035,
"learning_rate": 4.934552022143279e-05,
"loss": 0.8968,
"step": 262
},
{
"epoch": 0.0034961665132385734,
"grad_norm": 1.6751683950424194,
"learning_rate": 4.901831537696859e-05,
"loss": 0.9189,
"step": 263
},
{
"epoch": 0.0035094599220341573,
"grad_norm": 1.8059558868408203,
"learning_rate": 4.869115258460635e-05,
"loss": 0.9239,
"step": 264
},
{
"epoch": 0.003522753330829741,
"grad_norm": 1.7002429962158203,
"learning_rate": 4.83640458589112e-05,
"loss": 0.8102,
"step": 265
},
{
"epoch": 0.003536046739625325,
"grad_norm": 1.738466501235962,
"learning_rate": 4.8037009212046586e-05,
"loss": 1.0022,
"step": 266
},
{
"epoch": 0.003549340148420909,
"grad_norm": 2.1397879123687744,
"learning_rate": 4.7710056653173976e-05,
"loss": 0.8882,
"step": 267
},
{
"epoch": 0.003562633557216493,
"grad_norm": 2.1561543941497803,
"learning_rate": 4.738320218785281e-05,
"loss": 0.9589,
"step": 268
},
{
"epoch": 0.003575926966012077,
"grad_norm": 2.116396427154541,
"learning_rate": 4.7056459817440544e-05,
"loss": 0.8827,
"step": 269
},
{
"epoch": 0.003589220374807661,
"grad_norm": 2.172356367111206,
"learning_rate": 4.6729843538492847e-05,
"loss": 0.9613,
"step": 270
},
{
"epoch": 0.003602513783603245,
"grad_norm": 1.7960728406906128,
"learning_rate": 4.640336734216403e-05,
"loss": 0.8781,
"step": 271
},
{
"epoch": 0.003615807192398829,
"grad_norm": 1.8511698246002197,
"learning_rate": 4.607704521360776e-05,
"loss": 0.8647,
"step": 272
},
{
"epoch": 0.003629100601194413,
"grad_norm": 2.0229597091674805,
"learning_rate": 4.575089113137792e-05,
"loss": 1.004,
"step": 273
},
{
"epoch": 0.003642394009989997,
"grad_norm": 2.3480939865112305,
"learning_rate": 4.542491906682989e-05,
"loss": 1.0119,
"step": 274
},
{
"epoch": 0.0036556874187855808,
"grad_norm": 1.9140769243240356,
"learning_rate": 4.509914298352197e-05,
"loss": 0.8089,
"step": 275
},
{
"epoch": 0.0036689808275811647,
"grad_norm": 1.9026778936386108,
"learning_rate": 4.477357683661734e-05,
"loss": 0.7569,
"step": 276
},
{
"epoch": 0.0036822742363767486,
"grad_norm": 2.2026915550231934,
"learning_rate": 4.444823457228612e-05,
"loss": 0.8855,
"step": 277
},
{
"epoch": 0.0036955676451723325,
"grad_norm": 2.2045536041259766,
"learning_rate": 4.412313012710813e-05,
"loss": 0.8306,
"step": 278
},
{
"epoch": 0.0037088610539679164,
"grad_norm": 2.3194525241851807,
"learning_rate": 4.379827742747575e-05,
"loss": 0.8154,
"step": 279
},
{
"epoch": 0.0037221544627635004,
"grad_norm": 2.6129322052001953,
"learning_rate": 4.347369038899744e-05,
"loss": 0.9187,
"step": 280
},
{
"epoch": 0.0037354478715590843,
"grad_norm": 2.2725565433502197,
"learning_rate": 4.3149382915901606e-05,
"loss": 0.793,
"step": 281
},
{
"epoch": 0.003748741280354668,
"grad_norm": 2.23557186126709,
"learning_rate": 4.282536890044104e-05,
"loss": 0.8578,
"step": 282
},
{
"epoch": 0.003762034689150252,
"grad_norm": 2.394350290298462,
"learning_rate": 4.250166222229774e-05,
"loss": 0.8555,
"step": 283
},
{
"epoch": 0.003775328097945836,
"grad_norm": 2.935340642929077,
"learning_rate": 4.2178276747988446e-05,
"loss": 0.852,
"step": 284
},
{
"epoch": 0.00378862150674142,
"grad_norm": 3.061005115509033,
"learning_rate": 4.185522633027057e-05,
"loss": 0.8782,
"step": 285
},
{
"epoch": 0.003801914915537004,
"grad_norm": 2.7278201580047607,
"learning_rate": 4.153252480754877e-05,
"loss": 0.8451,
"step": 286
},
{
"epoch": 0.0038152083243325878,
"grad_norm": 2.643934488296509,
"learning_rate": 4.1210186003282275e-05,
"loss": 0.7744,
"step": 287
},
{
"epoch": 0.0038285017331281717,
"grad_norm": 3.218041181564331,
"learning_rate": 4.088822372539263e-05,
"loss": 0.8261,
"step": 288
},
{
"epoch": 0.0038417951419237556,
"grad_norm": 2.650805950164795,
"learning_rate": 4.0566651765672246e-05,
"loss": 0.8542,
"step": 289
},
{
"epoch": 0.0038550885507193395,
"grad_norm": 2.679455041885376,
"learning_rate": 4.0245483899193595e-05,
"loss": 0.8374,
"step": 290
},
{
"epoch": 0.0038683819595149234,
"grad_norm": 3.0015034675598145,
"learning_rate": 3.992473388371915e-05,
"loss": 0.9829,
"step": 291
},
{
"epoch": 0.0038816753683105073,
"grad_norm": 3.549671173095703,
"learning_rate": 3.960441545911204e-05,
"loss": 0.8356,
"step": 292
},
{
"epoch": 0.0038949687771060912,
"grad_norm": 3.1017885208129883,
"learning_rate": 3.928454234674747e-05,
"loss": 0.8398,
"step": 293
},
{
"epoch": 0.003908262185901675,
"grad_norm": 3.0585556030273438,
"learning_rate": 3.896512824892495e-05,
"loss": 0.7647,
"step": 294
},
{
"epoch": 0.0039215555946972595,
"grad_norm": 3.385533571243286,
"learning_rate": 3.864618684828134e-05,
"loss": 0.6782,
"step": 295
},
{
"epoch": 0.003934849003492843,
"grad_norm": 5.169503211975098,
"learning_rate": 3.832773180720475e-05,
"loss": 0.7389,
"step": 296
},
{
"epoch": 0.003948142412288427,
"grad_norm": 2.9151158332824707,
"learning_rate": 3.800977676724919e-05,
"loss": 0.6878,
"step": 297
},
{
"epoch": 0.003961435821084011,
"grad_norm": 3.42378568649292,
"learning_rate": 3.769233534855035e-05,
"loss": 0.6473,
"step": 298
},
{
"epoch": 0.003974729229879595,
"grad_norm": 4.071504592895508,
"learning_rate": 3.73754211492421e-05,
"loss": 0.8104,
"step": 299
},
{
"epoch": 0.003988022638675179,
"grad_norm": 5.344113349914551,
"learning_rate": 3.705904774487396e-05,
"loss": 0.9578,
"step": 300
},
{
"epoch": 0.003988022638675179,
"eval_loss": 0.8697348237037659,
"eval_runtime": 6447.6628,
"eval_samples_per_second": 19.65,
"eval_steps_per_second": 4.912,
"step": 300
},
{
"epoch": 0.004001316047470763,
"grad_norm": 0.9838849902153015,
"learning_rate": 3.6743228687829595e-05,
"loss": 0.9883,
"step": 301
},
{
"epoch": 0.0040146094562663465,
"grad_norm": 1.8244836330413818,
"learning_rate": 3.642797750674629e-05,
"loss": 1.0104,
"step": 302
},
{
"epoch": 0.004027902865061931,
"grad_norm": 1.7432421445846558,
"learning_rate": 3.6113307705935396e-05,
"loss": 1.0969,
"step": 303
},
{
"epoch": 0.004041196273857514,
"grad_norm": 1.5802562236785889,
"learning_rate": 3.579923276480387e-05,
"loss": 0.9227,
"step": 304
},
{
"epoch": 0.004054489682653099,
"grad_norm": 1.689244270324707,
"learning_rate": 3.5485766137276894e-05,
"loss": 0.8085,
"step": 305
},
{
"epoch": 0.004067783091448682,
"grad_norm": 1.4541411399841309,
"learning_rate": 3.5172921251221455e-05,
"loss": 1.0265,
"step": 306
},
{
"epoch": 0.0040810765002442665,
"grad_norm": 1.967889666557312,
"learning_rate": 3.486071150787128e-05,
"loss": 0.9648,
"step": 307
},
{
"epoch": 0.00409436990903985,
"grad_norm": 1.8028714656829834,
"learning_rate": 3.4549150281252636e-05,
"loss": 1.1209,
"step": 308
},
{
"epoch": 0.004107663317835434,
"grad_norm": 2.3967385292053223,
"learning_rate": 3.423825091761153e-05,
"loss": 0.9468,
"step": 309
},
{
"epoch": 0.004120956726631018,
"grad_norm": 1.7051564455032349,
"learning_rate": 3.392802673484193e-05,
"loss": 0.8184,
"step": 310
},
{
"epoch": 0.004134250135426602,
"grad_norm": 1.4805026054382324,
"learning_rate": 3.361849102191533e-05,
"loss": 1.0213,
"step": 311
},
{
"epoch": 0.004147543544222186,
"grad_norm": 1.5888340473175049,
"learning_rate": 3.330965703831146e-05,
"loss": 0.9103,
"step": 312
},
{
"epoch": 0.00416083695301777,
"grad_norm": 1.7905009984970093,
"learning_rate": 3.300153801345028e-05,
"loss": 0.9134,
"step": 313
},
{
"epoch": 0.004174130361813354,
"grad_norm": 1.981239914894104,
"learning_rate": 3.2694147146125345e-05,
"loss": 0.9723,
"step": 314
},
{
"epoch": 0.004187423770608938,
"grad_norm": 1.7049620151519775,
"learning_rate": 3.2387497603938326e-05,
"loss": 1.0175,
"step": 315
},
{
"epoch": 0.004200717179404522,
"grad_norm": 1.575150966644287,
"learning_rate": 3.2081602522734986e-05,
"loss": 0.8881,
"step": 316
},
{
"epoch": 0.004214010588200106,
"grad_norm": 1.5370079278945923,
"learning_rate": 3.177647500604252e-05,
"loss": 0.9056,
"step": 317
},
{
"epoch": 0.00422730399699569,
"grad_norm": 1.9242894649505615,
"learning_rate": 3.147212812450819e-05,
"loss": 0.8755,
"step": 318
},
{
"epoch": 0.0042405974057912734,
"grad_norm": 2.297606945037842,
"learning_rate": 3.116857491533947e-05,
"loss": 0.9929,
"step": 319
},
{
"epoch": 0.004253890814586858,
"grad_norm": 1.8832775354385376,
"learning_rate": 3.086582838174551e-05,
"loss": 0.852,
"step": 320
},
{
"epoch": 0.004267184223382441,
"grad_norm": 1.8884977102279663,
"learning_rate": 3.056390149238022e-05,
"loss": 0.9385,
"step": 321
},
{
"epoch": 0.004280477632178026,
"grad_norm": 2.0682528018951416,
"learning_rate": 3.0262807180786647e-05,
"loss": 0.8438,
"step": 322
},
{
"epoch": 0.004293771040973609,
"grad_norm": 2.1671676635742188,
"learning_rate": 2.996255834484296e-05,
"loss": 0.7709,
"step": 323
},
{
"epoch": 0.0043070644497691934,
"grad_norm": 2.2052109241485596,
"learning_rate": 2.9663167846209998e-05,
"loss": 0.8592,
"step": 324
},
{
"epoch": 0.004320357858564777,
"grad_norm": 1.8438620567321777,
"learning_rate": 2.936464850978027e-05,
"loss": 0.752,
"step": 325
},
{
"epoch": 0.004333651267360361,
"grad_norm": 1.9005708694458008,
"learning_rate": 2.9067013123128613e-05,
"loss": 0.7647,
"step": 326
},
{
"epoch": 0.004346944676155945,
"grad_norm": 2.098482847213745,
"learning_rate": 2.8770274435964355e-05,
"loss": 0.8867,
"step": 327
},
{
"epoch": 0.004360238084951529,
"grad_norm": 2.2485973834991455,
"learning_rate": 2.8474445159585235e-05,
"loss": 0.701,
"step": 328
},
{
"epoch": 0.004373531493747113,
"grad_norm": 2.077705144882202,
"learning_rate": 2.8179537966332887e-05,
"loss": 0.896,
"step": 329
},
{
"epoch": 0.004386824902542697,
"grad_norm": 2.2783312797546387,
"learning_rate": 2.7885565489049946e-05,
"loss": 0.7303,
"step": 330
},
{
"epoch": 0.00440011831133828,
"grad_norm": 2.306527853012085,
"learning_rate": 2.759254032053888e-05,
"loss": 0.817,
"step": 331
},
{
"epoch": 0.004413411720133865,
"grad_norm": 2.3645269870758057,
"learning_rate": 2.7300475013022663e-05,
"loss": 0.8617,
"step": 332
},
{
"epoch": 0.004426705128929448,
"grad_norm": 2.2920470237731934,
"learning_rate": 2.700938207760701e-05,
"loss": 0.873,
"step": 333
},
{
"epoch": 0.004439998537725033,
"grad_norm": 2.666349172592163,
"learning_rate": 2.671927398374443e-05,
"loss": 0.9184,
"step": 334
},
{
"epoch": 0.004453291946520616,
"grad_norm": 2.1982336044311523,
"learning_rate": 2.6430163158700115e-05,
"loss": 0.9704,
"step": 335
},
{
"epoch": 0.0044665853553162,
"grad_norm": 2.6879024505615234,
"learning_rate": 2.6142061987019577e-05,
"loss": 0.6279,
"step": 336
},
{
"epoch": 0.004479878764111784,
"grad_norm": 2.241543769836426,
"learning_rate": 2.5854982809998153e-05,
"loss": 0.7901,
"step": 337
},
{
"epoch": 0.004493172172907368,
"grad_norm": 2.5128378868103027,
"learning_rate": 2.556893792515227e-05,
"loss": 0.7943,
"step": 338
},
{
"epoch": 0.004506465581702952,
"grad_norm": 2.971026659011841,
"learning_rate": 2.5283939585692783e-05,
"loss": 0.7436,
"step": 339
},
{
"epoch": 0.004519758990498536,
"grad_norm": 2.6545255184173584,
"learning_rate": 2.500000000000001e-05,
"loss": 0.8035,
"step": 340
},
{
"epoch": 0.00453305239929412,
"grad_norm": 3.0109236240386963,
"learning_rate": 2.471713133110078e-05,
"loss": 0.892,
"step": 341
},
{
"epoch": 0.004546345808089704,
"grad_norm": 2.4946107864379883,
"learning_rate": 2.4435345696147403e-05,
"loss": 0.6965,
"step": 342
},
{
"epoch": 0.004559639216885288,
"grad_norm": 3.106733798980713,
"learning_rate": 2.4154655165898627e-05,
"loss": 0.8695,
"step": 343
},
{
"epoch": 0.004572932625680872,
"grad_norm": 3.4859445095062256,
"learning_rate": 2.3875071764202563e-05,
"loss": 0.7741,
"step": 344
},
{
"epoch": 0.004586226034476456,
"grad_norm": 3.2647345066070557,
"learning_rate": 2.3596607467481603e-05,
"loss": 0.8903,
"step": 345
},
{
"epoch": 0.0045995194432720396,
"grad_norm": 3.4573628902435303,
"learning_rate": 2.3319274204219428e-05,
"loss": 0.8457,
"step": 346
},
{
"epoch": 0.004612812852067624,
"grad_norm": 4.146410942077637,
"learning_rate": 2.3043083854449988e-05,
"loss": 0.8782,
"step": 347
},
{
"epoch": 0.004626106260863207,
"grad_norm": 3.767638683319092,
"learning_rate": 2.2768048249248648e-05,
"loss": 1.0579,
"step": 348
},
{
"epoch": 0.004639399669658792,
"grad_norm": 4.626466751098633,
"learning_rate": 2.2494179170225333e-05,
"loss": 0.6208,
"step": 349
},
{
"epoch": 0.004652693078454375,
"grad_norm": 5.113692760467529,
"learning_rate": 2.2221488349019903e-05,
"loss": 0.8458,
"step": 350
},
{
"epoch": 0.00466598648724996,
"grad_norm": 1.0921778678894043,
"learning_rate": 2.194998746679952e-05,
"loss": 1.0162,
"step": 351
},
{
"epoch": 0.004679279896045543,
"grad_norm": 1.255760669708252,
"learning_rate": 2.167968815375837e-05,
"loss": 0.9845,
"step": 352
},
{
"epoch": 0.004692573304841127,
"grad_norm": 1.47697114944458,
"learning_rate": 2.1410601988619394e-05,
"loss": 1.0847,
"step": 353
},
{
"epoch": 0.004705866713636711,
"grad_norm": 1.4926693439483643,
"learning_rate": 2.1142740498138324e-05,
"loss": 0.89,
"step": 354
},
{
"epoch": 0.004719160122432295,
"grad_norm": 1.4988641738891602,
"learning_rate": 2.08761151566099e-05,
"loss": 0.9026,
"step": 355
},
{
"epoch": 0.004732453531227879,
"grad_norm": 1.8952800035476685,
"learning_rate": 2.061073738537635e-05,
"loss": 0.8742,
"step": 356
},
{
"epoch": 0.004745746940023463,
"grad_norm": 1.5465558767318726,
"learning_rate": 2.034661855233815e-05,
"loss": 0.9852,
"step": 357
},
{
"epoch": 0.0047590403488190465,
"grad_norm": 1.766453504562378,
"learning_rate": 2.008376997146705e-05,
"loss": 0.9855,
"step": 358
},
{
"epoch": 0.004772333757614631,
"grad_norm": 1.4662683010101318,
"learning_rate": 1.982220290232143e-05,
"loss": 1.0793,
"step": 359
},
{
"epoch": 0.004785627166410214,
"grad_norm": 1.4013056755065918,
"learning_rate": 1.9561928549563968e-05,
"loss": 0.9454,
"step": 360
},
{
"epoch": 0.004798920575205799,
"grad_norm": 1.6331448554992676,
"learning_rate": 1.9302958062481673e-05,
"loss": 0.8652,
"step": 361
},
{
"epoch": 0.004812213984001382,
"grad_norm": 1.8215956687927246,
"learning_rate": 1.9045302534508297e-05,
"loss": 0.8735,
"step": 362
},
{
"epoch": 0.0048255073927969665,
"grad_norm": 1.5076251029968262,
"learning_rate": 1.8788973002749112e-05,
"loss": 0.8257,
"step": 363
},
{
"epoch": 0.00483880080159255,
"grad_norm": 1.6483572721481323,
"learning_rate": 1.8533980447508137e-05,
"loss": 0.8008,
"step": 364
},
{
"epoch": 0.004852094210388134,
"grad_norm": 1.7253248691558838,
"learning_rate": 1.8280335791817733e-05,
"loss": 0.8845,
"step": 365
},
{
"epoch": 0.004865387619183718,
"grad_norm": 1.6659144163131714,
"learning_rate": 1.8028049900970767e-05,
"loss": 0.8973,
"step": 366
},
{
"epoch": 0.004878681027979302,
"grad_norm": 1.8108363151550293,
"learning_rate": 1.777713358205514e-05,
"loss": 0.8601,
"step": 367
},
{
"epoch": 0.004891974436774886,
"grad_norm": 1.6906031370162964,
"learning_rate": 1.7527597583490822e-05,
"loss": 0.8842,
"step": 368
},
{
"epoch": 0.00490526784557047,
"grad_norm": 1.9413644075393677,
"learning_rate": 1.7279452594569483e-05,
"loss": 0.8367,
"step": 369
},
{
"epoch": 0.004918561254366054,
"grad_norm": 1.9167553186416626,
"learning_rate": 1.703270924499656e-05,
"loss": 0.8102,
"step": 370
},
{
"epoch": 0.004931854663161638,
"grad_norm": 1.9133857488632202,
"learning_rate": 1.678737810443593e-05,
"loss": 0.9387,
"step": 371
},
{
"epoch": 0.004945148071957222,
"grad_norm": 1.723745584487915,
"learning_rate": 1.6543469682057106e-05,
"loss": 0.8633,
"step": 372
},
{
"epoch": 0.004958441480752806,
"grad_norm": 2.0419116020202637,
"learning_rate": 1.6300994426085103e-05,
"loss": 0.8323,
"step": 373
},
{
"epoch": 0.00497173488954839,
"grad_norm": 1.9794385433197021,
"learning_rate": 1.605996272335291e-05,
"loss": 0.8905,
"step": 374
},
{
"epoch": 0.0049850282983439735,
"grad_norm": 2.3014209270477295,
"learning_rate": 1.5820384898856434e-05,
"loss": 0.9045,
"step": 375
},
{
"epoch": 0.004998321707139558,
"grad_norm": 1.863885521888733,
"learning_rate": 1.5582271215312294e-05,
"loss": 0.7435,
"step": 376
},
{
"epoch": 0.005011615115935141,
"grad_norm": 2.2184691429138184,
"learning_rate": 1.5345631872718214e-05,
"loss": 0.8444,
"step": 377
},
{
"epoch": 0.005024908524730726,
"grad_norm": 2.174875497817993,
"learning_rate": 1.5110477007916001e-05,
"loss": 0.9467,
"step": 378
},
{
"epoch": 0.005038201933526309,
"grad_norm": 2.121537446975708,
"learning_rate": 1.4876816694157419e-05,
"loss": 0.9535,
"step": 379
},
{
"epoch": 0.0050514953423218935,
"grad_norm": 2.0780320167541504,
"learning_rate": 1.4644660940672627e-05,
"loss": 0.8615,
"step": 380
},
{
"epoch": 0.005064788751117477,
"grad_norm": 1.9616137742996216,
"learning_rate": 1.4414019692241437e-05,
"loss": 0.8279,
"step": 381
},
{
"epoch": 0.005078082159913061,
"grad_norm": 2.3762927055358887,
"learning_rate": 1.4184902828767287e-05,
"loss": 0.7334,
"step": 382
},
{
"epoch": 0.005091375568708645,
"grad_norm": 3.85578989982605,
"learning_rate": 1.3957320164854059e-05,
"loss": 0.9445,
"step": 383
},
{
"epoch": 0.005104668977504229,
"grad_norm": 2.578687906265259,
"learning_rate": 1.373128144938563e-05,
"loss": 0.7616,
"step": 384
},
{
"epoch": 0.005117962386299813,
"grad_norm": 2.467428684234619,
"learning_rate": 1.3506796365108232e-05,
"loss": 0.8458,
"step": 385
},
{
"epoch": 0.005131255795095397,
"grad_norm": 2.394502639770508,
"learning_rate": 1.3283874528215733e-05,
"loss": 0.7064,
"step": 386
},
{
"epoch": 0.0051445492038909805,
"grad_norm": 2.236889600753784,
"learning_rate": 1.3062525487937699e-05,
"loss": 0.8134,
"step": 387
},
{
"epoch": 0.005157842612686565,
"grad_norm": 2.4321963787078857,
"learning_rate": 1.2842758726130283e-05,
"loss": 0.7657,
"step": 388
},
{
"epoch": 0.005171136021482148,
"grad_norm": 2.509622097015381,
"learning_rate": 1.2624583656870154e-05,
"loss": 0.8433,
"step": 389
},
{
"epoch": 0.005184429430277733,
"grad_norm": 2.68182373046875,
"learning_rate": 1.2408009626051137e-05,
"loss": 0.7196,
"step": 390
},
{
"epoch": 0.005197722839073316,
"grad_norm": 2.600435495376587,
"learning_rate": 1.2193045910983863e-05,
"loss": 0.8124,
"step": 391
},
{
"epoch": 0.0052110162478689005,
"grad_norm": 3.4181008338928223,
"learning_rate": 1.1979701719998453e-05,
"loss": 0.8008,
"step": 392
},
{
"epoch": 0.005224309656664484,
"grad_norm": 2.620757579803467,
"learning_rate": 1.1767986192049984e-05,
"loss": 0.6854,
"step": 393
},
{
"epoch": 0.005237603065460068,
"grad_norm": 2.952314853668213,
"learning_rate": 1.1557908396327028e-05,
"loss": 0.8588,
"step": 394
},
{
"epoch": 0.005250896474255652,
"grad_norm": 3.42287015914917,
"learning_rate": 1.134947733186315e-05,
"loss": 0.7585,
"step": 395
},
{
"epoch": 0.005264189883051236,
"grad_norm": 4.159125328063965,
"learning_rate": 1.1142701927151456e-05,
"loss": 0.823,
"step": 396
},
{
"epoch": 0.00527748329184682,
"grad_norm": 2.9435524940490723,
"learning_rate": 1.0937591039762085e-05,
"loss": 0.6073,
"step": 397
},
{
"epoch": 0.005290776700642404,
"grad_norm": 3.4450318813323975,
"learning_rate": 1.0734153455962765e-05,
"loss": 0.8219,
"step": 398
},
{
"epoch": 0.005304070109437988,
"grad_norm": 4.6743621826171875,
"learning_rate": 1.0532397890342505e-05,
"loss": 0.8998,
"step": 399
},
{
"epoch": 0.005317363518233572,
"grad_norm": 6.201481819152832,
"learning_rate": 1.0332332985438248e-05,
"loss": 0.673,
"step": 400
},
{
"epoch": 0.005317363518233572,
"eval_loss": 0.8248791694641113,
"eval_runtime": 6450.9082,
"eval_samples_per_second": 19.64,
"eval_steps_per_second": 4.91,
"step": 400
},
{
"epoch": 0.005330656927029156,
"grad_norm": 1.0595228672027588,
"learning_rate": 1.013396731136465e-05,
"loss": 1.017,
"step": 401
},
{
"epoch": 0.00534395033582474,
"grad_norm": 1.4388854503631592,
"learning_rate": 9.937309365446973e-06,
"loss": 0.918,
"step": 402
},
{
"epoch": 0.005357243744620324,
"grad_norm": 1.3190706968307495,
"learning_rate": 9.742367571857091e-06,
"loss": 0.8082,
"step": 403
},
{
"epoch": 0.0053705371534159075,
"grad_norm": 1.449982762336731,
"learning_rate": 9.549150281252633e-06,
"loss": 0.6463,
"step": 404
},
{
"epoch": 0.005383830562211492,
"grad_norm": 1.861377477645874,
"learning_rate": 9.357665770419244e-06,
"loss": 0.8151,
"step": 405
},
{
"epoch": 0.005397123971007075,
"grad_norm": 1.519214391708374,
"learning_rate": 9.167922241916055e-06,
"loss": 0.7792,
"step": 406
},
{
"epoch": 0.00541041737980266,
"grad_norm": 1.5293023586273193,
"learning_rate": 8.97992782372432e-06,
"loss": 0.9019,
"step": 407
},
{
"epoch": 0.005423710788598243,
"grad_norm": 1.5244901180267334,
"learning_rate": 8.793690568899216e-06,
"loss": 0.9023,
"step": 408
},
{
"epoch": 0.0054370041973938275,
"grad_norm": 1.7790530920028687,
"learning_rate": 8.609218455224893e-06,
"loss": 0.9439,
"step": 409
},
{
"epoch": 0.005450297606189411,
"grad_norm": 1.5161616802215576,
"learning_rate": 8.426519384872733e-06,
"loss": 0.8852,
"step": 410
},
{
"epoch": 0.005463591014984995,
"grad_norm": 1.6171609163284302,
"learning_rate": 8.245601184062852e-06,
"loss": 0.8912,
"step": 411
},
{
"epoch": 0.005476884423780579,
"grad_norm": 1.899787425994873,
"learning_rate": 8.066471602728803e-06,
"loss": 1.0067,
"step": 412
},
{
"epoch": 0.005490177832576163,
"grad_norm": 1.5935102701187134,
"learning_rate": 7.889138314185678e-06,
"loss": 0.8857,
"step": 413
},
{
"epoch": 0.005503471241371747,
"grad_norm": 1.7188307046890259,
"learning_rate": 7.71360891480134e-06,
"loss": 1.0062,
"step": 414
},
{
"epoch": 0.005516764650167331,
"grad_norm": 1.8137848377227783,
"learning_rate": 7.539890923671062e-06,
"loss": 0.8886,
"step": 415
},
{
"epoch": 0.0055300580589629144,
"grad_norm": 1.8021348714828491,
"learning_rate": 7.367991782295391e-06,
"loss": 0.8139,
"step": 416
},
{
"epoch": 0.005543351467758499,
"grad_norm": 1.692603588104248,
"learning_rate": 7.197918854261432e-06,
"loss": 0.7903,
"step": 417
},
{
"epoch": 0.005556644876554082,
"grad_norm": 1.8016855716705322,
"learning_rate": 7.029679424927365e-06,
"loss": 0.8315,
"step": 418
},
{
"epoch": 0.005569938285349667,
"grad_norm": 1.8316609859466553,
"learning_rate": 6.863280701110408e-06,
"loss": 0.7959,
"step": 419
},
{
"epoch": 0.00558323169414525,
"grad_norm": 1.9248038530349731,
"learning_rate": 6.698729810778065e-06,
"loss": 0.9514,
"step": 420
},
{
"epoch": 0.0055965251029408344,
"grad_norm": 1.8567143678665161,
"learning_rate": 6.536033802742813e-06,
"loss": 0.8723,
"step": 421
},
{
"epoch": 0.005609818511736418,
"grad_norm": 1.8281995058059692,
"learning_rate": 6.375199646360142e-06,
"loss": 0.881,
"step": 422
},
{
"epoch": 0.005623111920532002,
"grad_norm": 2.1290082931518555,
"learning_rate": 6.216234231230012e-06,
"loss": 0.7979,
"step": 423
},
{
"epoch": 0.005636405329327586,
"grad_norm": 2.67155122756958,
"learning_rate": 6.059144366901736e-06,
"loss": 0.9295,
"step": 424
},
{
"epoch": 0.00564969873812317,
"grad_norm": 2.3634464740753174,
"learning_rate": 5.903936782582253e-06,
"loss": 0.9214,
"step": 425
},
{
"epoch": 0.005662992146918754,
"grad_norm": 2.1651251316070557,
"learning_rate": 5.750618126847912e-06,
"loss": 0.8525,
"step": 426
},
{
"epoch": 0.005676285555714338,
"grad_norm": 1.97309410572052,
"learning_rate": 5.599194967359639e-06,
"loss": 0.9601,
"step": 427
},
{
"epoch": 0.005689578964509922,
"grad_norm": 2.1008105278015137,
"learning_rate": 5.449673790581611e-06,
"loss": 0.7276,
"step": 428
},
{
"epoch": 0.005702872373305506,
"grad_norm": 2.0735974311828613,
"learning_rate": 5.302061001503394e-06,
"loss": 0.7779,
"step": 429
},
{
"epoch": 0.00571616578210109,
"grad_norm": 1.9534869194030762,
"learning_rate": 5.156362923365588e-06,
"loss": 0.7884,
"step": 430
},
{
"epoch": 0.005729459190896674,
"grad_norm": 2.201770544052124,
"learning_rate": 5.012585797388936e-06,
"loss": 0.7961,
"step": 431
},
{
"epoch": 0.005742752599692258,
"grad_norm": 2.4963033199310303,
"learning_rate": 4.87073578250698e-06,
"loss": 0.9411,
"step": 432
},
{
"epoch": 0.005756046008487841,
"grad_norm": 2.3531322479248047,
"learning_rate": 4.730818955102234e-06,
"loss": 0.8704,
"step": 433
},
{
"epoch": 0.005769339417283426,
"grad_norm": 2.5668766498565674,
"learning_rate": 4.592841308745932e-06,
"loss": 0.7668,
"step": 434
},
{
"epoch": 0.005782632826079009,
"grad_norm": 2.429396152496338,
"learning_rate": 4.456808753941205e-06,
"loss": 0.7915,
"step": 435
},
{
"epoch": 0.005795926234874594,
"grad_norm": 2.1826658248901367,
"learning_rate": 4.322727117869951e-06,
"loss": 0.8068,
"step": 436
},
{
"epoch": 0.005809219643670177,
"grad_norm": 2.3497233390808105,
"learning_rate": 4.190602144143207e-06,
"loss": 0.7689,
"step": 437
},
{
"epoch": 0.005822513052465761,
"grad_norm": 2.6154377460479736,
"learning_rate": 4.06043949255509e-06,
"loss": 0.6609,
"step": 438
},
{
"epoch": 0.005835806461261345,
"grad_norm": 2.4827382564544678,
"learning_rate": 3.932244738840379e-06,
"loss": 0.6727,
"step": 439
},
{
"epoch": 0.005849099870056929,
"grad_norm": 2.957174777984619,
"learning_rate": 3.8060233744356633e-06,
"loss": 0.7521,
"step": 440
},
{
"epoch": 0.005862393278852513,
"grad_norm": 2.6731929779052734,
"learning_rate": 3.681780806244095e-06,
"loss": 0.8004,
"step": 441
},
{
"epoch": 0.005875686687648097,
"grad_norm": 2.5539755821228027,
"learning_rate": 3.5595223564037884e-06,
"loss": 0.7855,
"step": 442
},
{
"epoch": 0.0058889800964436806,
"grad_norm": 2.6112401485443115,
"learning_rate": 3.4392532620598216e-06,
"loss": 0.6713,
"step": 443
},
{
"epoch": 0.005902273505239265,
"grad_norm": 3.1589066982269287,
"learning_rate": 3.3209786751399187e-06,
"loss": 0.8434,
"step": 444
},
{
"epoch": 0.005915566914034848,
"grad_norm": 3.0150818824768066,
"learning_rate": 3.2047036621337236e-06,
"loss": 0.6647,
"step": 445
},
{
"epoch": 0.005928860322830433,
"grad_norm": 2.906002998352051,
"learning_rate": 3.0904332038757977e-06,
"loss": 0.7664,
"step": 446
},
{
"epoch": 0.005942153731626016,
"grad_norm": 3.820185661315918,
"learning_rate": 2.978172195332263e-06,
"loss": 0.8387,
"step": 447
},
{
"epoch": 0.0059554471404216006,
"grad_norm": 3.4882965087890625,
"learning_rate": 2.8679254453910785e-06,
"loss": 0.9115,
"step": 448
},
{
"epoch": 0.005968740549217184,
"grad_norm": 4.0936174392700195,
"learning_rate": 2.759697676656098e-06,
"loss": 0.7122,
"step": 449
},
{
"epoch": 0.005982033958012768,
"grad_norm": 6.082996845245361,
"learning_rate": 2.653493525244721e-06,
"loss": 0.9206,
"step": 450
},
{
"epoch": 0.005995327366808352,
"grad_norm": 0.9247643351554871,
"learning_rate": 2.549317540589308e-06,
"loss": 0.9231,
"step": 451
},
{
"epoch": 0.006008620775603936,
"grad_norm": 1.2527554035186768,
"learning_rate": 2.4471741852423237e-06,
"loss": 0.9569,
"step": 452
},
{
"epoch": 0.00602191418439952,
"grad_norm": 1.998666763305664,
"learning_rate": 2.3470678346851518e-06,
"loss": 0.809,
"step": 453
},
{
"epoch": 0.006035207593195104,
"grad_norm": 1.6121776103973389,
"learning_rate": 2.2490027771406687e-06,
"loss": 1.0757,
"step": 454
},
{
"epoch": 0.006048501001990688,
"grad_norm": 1.6252521276474,
"learning_rate": 2.152983213389559e-06,
"loss": 1.0179,
"step": 455
},
{
"epoch": 0.006061794410786272,
"grad_norm": 1.5165375471115112,
"learning_rate": 2.0590132565903476e-06,
"loss": 0.9696,
"step": 456
},
{
"epoch": 0.006075087819581856,
"grad_norm": 1.8923349380493164,
"learning_rate": 1.9670969321032407e-06,
"loss": 0.7702,
"step": 457
},
{
"epoch": 0.00608838122837744,
"grad_norm": 1.4962996244430542,
"learning_rate": 1.8772381773176417e-06,
"loss": 1.0016,
"step": 458
},
{
"epoch": 0.006101674637173024,
"grad_norm": 1.6529314517974854,
"learning_rate": 1.7894408414835362e-06,
"loss": 0.7777,
"step": 459
},
{
"epoch": 0.0061149680459686075,
"grad_norm": 1.5695680379867554,
"learning_rate": 1.70370868554659e-06,
"loss": 0.9964,
"step": 460
},
{
"epoch": 0.006128261454764192,
"grad_norm": 1.4540382623672485,
"learning_rate": 1.620045381987012e-06,
"loss": 0.8244,
"step": 461
},
{
"epoch": 0.006141554863559775,
"grad_norm": 1.6551907062530518,
"learning_rate": 1.5384545146622852e-06,
"loss": 0.8744,
"step": 462
},
{
"epoch": 0.00615484827235536,
"grad_norm": 1.7763545513153076,
"learning_rate": 1.4589395786535953e-06,
"loss": 0.9266,
"step": 463
},
{
"epoch": 0.006168141681150943,
"grad_norm": 1.7592118978500366,
"learning_rate": 1.3815039801161721e-06,
"loss": 0.8165,
"step": 464
},
{
"epoch": 0.0061814350899465275,
"grad_norm": 1.6234304904937744,
"learning_rate": 1.3061510361333185e-06,
"loss": 0.9257,
"step": 465
},
{
"epoch": 0.006194728498742111,
"grad_norm": 1.7104973793029785,
"learning_rate": 1.232883974574367e-06,
"loss": 0.8413,
"step": 466
},
{
"epoch": 0.006208021907537695,
"grad_norm": 1.8358904123306274,
"learning_rate": 1.1617059339563807e-06,
"loss": 0.9262,
"step": 467
},
{
"epoch": 0.006221315316333279,
"grad_norm": 2.0369277000427246,
"learning_rate": 1.0926199633097157e-06,
"loss": 0.9344,
"step": 468
},
{
"epoch": 0.006234608725128863,
"grad_norm": 1.7839794158935547,
"learning_rate": 1.0256290220474307e-06,
"loss": 0.9029,
"step": 469
},
{
"epoch": 0.006247902133924447,
"grad_norm": 1.9375988245010376,
"learning_rate": 9.607359798384785e-07,
"loss": 0.7894,
"step": 470
},
{
"epoch": 0.006261195542720031,
"grad_norm": 1.928093671798706,
"learning_rate": 8.979436164848088e-07,
"loss": 0.7991,
"step": 471
},
{
"epoch": 0.0062744889515156145,
"grad_norm": 1.8025486469268799,
"learning_rate": 8.372546218022747e-07,
"loss": 0.7644,
"step": 472
},
{
"epoch": 0.006287782360311199,
"grad_norm": 2.0792739391326904,
"learning_rate": 7.786715955054203e-07,
"loss": 0.6734,
"step": 473
},
{
"epoch": 0.006301075769106782,
"grad_norm": 1.997489094734192,
"learning_rate": 7.221970470961125e-07,
"loss": 0.8299,
"step": 474
},
{
"epoch": 0.006314369177902367,
"grad_norm": 2.252878427505493,
"learning_rate": 6.678333957560512e-07,
"loss": 0.8351,
"step": 475
},
{
"epoch": 0.00632766258669795,
"grad_norm": 1.966784954071045,
"learning_rate": 6.15582970243117e-07,
"loss": 0.8078,
"step": 476
},
{
"epoch": 0.0063409559954935345,
"grad_norm": 1.9468001127243042,
"learning_rate": 5.654480087916303e-07,
"loss": 0.8398,
"step": 477
},
{
"epoch": 0.006354249404289118,
"grad_norm": 2.1014404296875,
"learning_rate": 5.174306590164879e-07,
"loss": 0.883,
"step": 478
},
{
"epoch": 0.006367542813084702,
"grad_norm": 1.949217677116394,
"learning_rate": 4.715329778211375e-07,
"loss": 0.7912,
"step": 479
},
{
"epoch": 0.006380836221880286,
"grad_norm": 2.206620931625366,
"learning_rate": 4.277569313094809e-07,
"loss": 0.9008,
"step": 480
},
{
"epoch": 0.00639412963067587,
"grad_norm": 2.2743523120880127,
"learning_rate": 3.8610439470164737e-07,
"loss": 0.7626,
"step": 481
},
{
"epoch": 0.006407423039471454,
"grad_norm": 2.57737135887146,
"learning_rate": 3.465771522536854e-07,
"loss": 0.9584,
"step": 482
},
{
"epoch": 0.006420716448267038,
"grad_norm": 2.0969834327697754,
"learning_rate": 3.09176897181096e-07,
"loss": 0.7054,
"step": 483
},
{
"epoch": 0.006434009857062622,
"grad_norm": 2.5706839561462402,
"learning_rate": 2.7390523158633554e-07,
"loss": 0.8623,
"step": 484
},
{
"epoch": 0.006447303265858206,
"grad_norm": 2.5481481552124023,
"learning_rate": 2.407636663901591e-07,
"loss": 0.8337,
"step": 485
},
{
"epoch": 0.00646059667465379,
"grad_norm": 2.2276320457458496,
"learning_rate": 2.0975362126691712e-07,
"loss": 0.8068,
"step": 486
},
{
"epoch": 0.006473890083449374,
"grad_norm": 2.349933624267578,
"learning_rate": 1.8087642458373134e-07,
"loss": 0.7385,
"step": 487
},
{
"epoch": 0.006487183492244958,
"grad_norm": 2.462743043899536,
"learning_rate": 1.5413331334360182e-07,
"loss": 0.892,
"step": 488
},
{
"epoch": 0.0065004769010405415,
"grad_norm": 2.3636093139648438,
"learning_rate": 1.2952543313240472e-07,
"loss": 0.8818,
"step": 489
},
{
"epoch": 0.006513770309836126,
"grad_norm": 2.944607734680176,
"learning_rate": 1.0705383806982606e-07,
"loss": 1.079,
"step": 490
},
{
"epoch": 0.006527063718631709,
"grad_norm": 2.7098374366760254,
"learning_rate": 8.671949076420882e-08,
"loss": 0.8077,
"step": 491
},
{
"epoch": 0.006540357127427294,
"grad_norm": 3.112506866455078,
"learning_rate": 6.852326227130834e-08,
"loss": 0.8186,
"step": 492
},
{
"epoch": 0.006553650536222877,
"grad_norm": 2.717796802520752,
"learning_rate": 5.246593205699424e-08,
"loss": 0.6142,
"step": 493
},
{
"epoch": 0.0065669439450184615,
"grad_norm": 3.020770788192749,
"learning_rate": 3.8548187963854956e-08,
"loss": 0.8441,
"step": 494
},
{
"epoch": 0.006580237353814045,
"grad_norm": 2.710313081741333,
"learning_rate": 2.6770626181715773e-08,
"loss": 0.7852,
"step": 495
},
{
"epoch": 0.006593530762609629,
"grad_norm": 3.541020154953003,
"learning_rate": 1.7133751222137007e-08,
"loss": 0.7705,
"step": 496
},
{
"epoch": 0.006606824171405213,
"grad_norm": 3.2380614280700684,
"learning_rate": 9.637975896759077e-09,
"loss": 0.9778,
"step": 497
},
{
"epoch": 0.006620117580200797,
"grad_norm": 3.6511924266815186,
"learning_rate": 4.2836212996499865e-09,
"loss": 0.8976,
"step": 498
},
{
"epoch": 0.006633410988996381,
"grad_norm": 4.02791690826416,
"learning_rate": 1.0709167935385455e-09,
"loss": 0.8247,
"step": 499
},
{
"epoch": 0.006646704397791965,
"grad_norm": 5.131423473358154,
"learning_rate": 0.0,
"loss": 0.7257,
"step": 500
},
{
"epoch": 0.006646704397791965,
"eval_loss": 0.8191825747489929,
"eval_runtime": 6452.7093,
"eval_samples_per_second": 19.635,
"eval_steps_per_second": 4.909,
"step": 500
}
],
"logging_steps": 1,
"max_steps": 500,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 100,
"stateful_callbacks": {
"EarlyStoppingCallback": {
"args": {
"early_stopping_patience": 5,
"early_stopping_threshold": 0.0
},
"attributes": {
"early_stopping_patience_counter": 0
}
},
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 3.133284548440228e+17,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}