{
"best_metric": 1.4534531831741333,
"best_model_checkpoint": "miner_id_24/checkpoint-200",
"epoch": 0.07390983000739099,
"eval_steps": 50,
"global_step": 200,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0003695491500369549,
"grad_norm": 2.435084104537964,
"learning_rate": 5e-06,
"loss": 2.2967,
"step": 1
},
{
"epoch": 0.0003695491500369549,
"eval_loss": 2.698305606842041,
"eval_runtime": 393.8196,
"eval_samples_per_second": 11.574,
"eval_steps_per_second": 5.787,
"step": 1
},
{
"epoch": 0.0007390983000739098,
"grad_norm": 3.861745595932007,
"learning_rate": 1e-05,
"loss": 3.3565,
"step": 2
},
{
"epoch": 0.0011086474501108647,
"grad_norm": 3.2393386363983154,
"learning_rate": 1.5e-05,
"loss": 2.8022,
"step": 3
},
{
"epoch": 0.0014781966001478197,
"grad_norm": 2.6122796535491943,
"learning_rate": 2e-05,
"loss": 2.364,
"step": 4
},
{
"epoch": 0.0018477457501847746,
"grad_norm": 2.7022292613983154,
"learning_rate": 2.5e-05,
"loss": 2.3463,
"step": 5
},
{
"epoch": 0.0022172949002217295,
"grad_norm": 2.860513687133789,
"learning_rate": 3e-05,
"loss": 2.5053,
"step": 6
},
{
"epoch": 0.0025868440502586844,
"grad_norm": 2.233898878097534,
"learning_rate": 3.5e-05,
"loss": 2.2625,
"step": 7
},
{
"epoch": 0.0029563932002956393,
"grad_norm": 1.9126187562942505,
"learning_rate": 4e-05,
"loss": 2.043,
"step": 8
},
{
"epoch": 0.0033259423503325942,
"grad_norm": 1.8079100847244263,
"learning_rate": 4.5e-05,
"loss": 2.0034,
"step": 9
},
{
"epoch": 0.003695491500369549,
"grad_norm": 1.8058292865753174,
"learning_rate": 5e-05,
"loss": 2.034,
"step": 10
},
{
"epoch": 0.0040650406504065045,
"grad_norm": 2.4231817722320557,
"learning_rate": 5.500000000000001e-05,
"loss": 1.967,
"step": 11
},
{
"epoch": 0.004434589800443459,
"grad_norm": 2.654071092605591,
"learning_rate": 6e-05,
"loss": 2.0163,
"step": 12
},
{
"epoch": 0.004804138950480414,
"grad_norm": 3.64855694770813,
"learning_rate": 6.500000000000001e-05,
"loss": 1.8921,
"step": 13
},
{
"epoch": 0.005173688100517369,
"grad_norm": 3.5109565258026123,
"learning_rate": 7e-05,
"loss": 1.71,
"step": 14
},
{
"epoch": 0.005543237250554324,
"grad_norm": 1.8318647146224976,
"learning_rate": 7.500000000000001e-05,
"loss": 1.5531,
"step": 15
},
{
"epoch": 0.005912786400591279,
"grad_norm": 1.9761453866958618,
"learning_rate": 8e-05,
"loss": 1.6301,
"step": 16
},
{
"epoch": 0.006282335550628234,
"grad_norm": 1.9594025611877441,
"learning_rate": 8.5e-05,
"loss": 1.6569,
"step": 17
},
{
"epoch": 0.0066518847006651885,
"grad_norm": 1.974549412727356,
"learning_rate": 9e-05,
"loss": 1.7811,
"step": 18
},
{
"epoch": 0.007021433850702144,
"grad_norm": 2.240577220916748,
"learning_rate": 9.5e-05,
"loss": 1.4757,
"step": 19
},
{
"epoch": 0.007390983000739098,
"grad_norm": 2.0301589965820312,
"learning_rate": 0.0001,
"loss": 1.7444,
"step": 20
},
{
"epoch": 0.007760532150776054,
"grad_norm": 1.7381553649902344,
"learning_rate": 9.999238475781957e-05,
"loss": 1.6246,
"step": 21
},
{
"epoch": 0.008130081300813009,
"grad_norm": 1.4828835725784302,
"learning_rate": 9.99695413509548e-05,
"loss": 1.6195,
"step": 22
},
{
"epoch": 0.008499630450849963,
"grad_norm": 1.4680097103118896,
"learning_rate": 9.99314767377287e-05,
"loss": 1.6627,
"step": 23
},
{
"epoch": 0.008869179600886918,
"grad_norm": 1.4647470712661743,
"learning_rate": 9.987820251299122e-05,
"loss": 1.6129,
"step": 24
},
{
"epoch": 0.009238728750923873,
"grad_norm": 1.3801729679107666,
"learning_rate": 9.980973490458728e-05,
"loss": 1.5455,
"step": 25
},
{
"epoch": 0.009608277900960829,
"grad_norm": 1.451375126838684,
"learning_rate": 9.972609476841367e-05,
"loss": 1.5873,
"step": 26
},
{
"epoch": 0.009977827050997782,
"grad_norm": 1.2962108850479126,
"learning_rate": 9.962730758206611e-05,
"loss": 1.4516,
"step": 27
},
{
"epoch": 0.010347376201034738,
"grad_norm": 1.3444093465805054,
"learning_rate": 9.951340343707852e-05,
"loss": 1.5001,
"step": 28
},
{
"epoch": 0.010716925351071693,
"grad_norm": 1.4385007619857788,
"learning_rate": 9.938441702975689e-05,
"loss": 1.4878,
"step": 29
},
{
"epoch": 0.011086474501108648,
"grad_norm": 1.4599041938781738,
"learning_rate": 9.924038765061042e-05,
"loss": 1.585,
"step": 30
},
{
"epoch": 0.011456023651145602,
"grad_norm": 1.5100942850112915,
"learning_rate": 9.908135917238321e-05,
"loss": 1.7067,
"step": 31
},
{
"epoch": 0.011825572801182557,
"grad_norm": 1.512452244758606,
"learning_rate": 9.890738003669029e-05,
"loss": 1.419,
"step": 32
},
{
"epoch": 0.012195121951219513,
"grad_norm": 1.3634135723114014,
"learning_rate": 9.871850323926177e-05,
"loss": 1.3955,
"step": 33
},
{
"epoch": 0.012564671101256468,
"grad_norm": 1.35453200340271,
"learning_rate": 9.851478631379982e-05,
"loss": 1.448,
"step": 34
},
{
"epoch": 0.012934220251293422,
"grad_norm": 1.4038221836090088,
"learning_rate": 9.829629131445342e-05,
"loss": 1.4719,
"step": 35
},
{
"epoch": 0.013303769401330377,
"grad_norm": 1.558927059173584,
"learning_rate": 9.806308479691595e-05,
"loss": 1.6463,
"step": 36
},
{
"epoch": 0.013673318551367332,
"grad_norm": 1.4051181077957153,
"learning_rate": 9.781523779815179e-05,
"loss": 1.5087,
"step": 37
},
{
"epoch": 0.014042867701404288,
"grad_norm": 1.3565030097961426,
"learning_rate": 9.755282581475769e-05,
"loss": 1.5591,
"step": 38
},
{
"epoch": 0.014412416851441241,
"grad_norm": 1.3451634645462036,
"learning_rate": 9.727592877996585e-05,
"loss": 1.5179,
"step": 39
},
{
"epoch": 0.014781966001478197,
"grad_norm": 1.3223435878753662,
"learning_rate": 9.698463103929542e-05,
"loss": 1.5824,
"step": 40
},
{
"epoch": 0.015151515151515152,
"grad_norm": 1.373567819595337,
"learning_rate": 9.667902132486009e-05,
"loss": 1.5436,
"step": 41
},
{
"epoch": 0.015521064301552107,
"grad_norm": 1.2605198621749878,
"learning_rate": 9.635919272833938e-05,
"loss": 1.5411,
"step": 42
},
{
"epoch": 0.015890613451589063,
"grad_norm": 1.4660719633102417,
"learning_rate": 9.602524267262203e-05,
"loss": 1.6148,
"step": 43
},
{
"epoch": 0.016260162601626018,
"grad_norm": 1.2017172574996948,
"learning_rate": 9.567727288213005e-05,
"loss": 1.3578,
"step": 44
},
{
"epoch": 0.01662971175166297,
"grad_norm": 1.1809042692184448,
"learning_rate": 9.53153893518325e-05,
"loss": 1.3976,
"step": 45
},
{
"epoch": 0.016999260901699925,
"grad_norm": 1.4069963693618774,
"learning_rate": 9.493970231495835e-05,
"loss": 1.3873,
"step": 46
},
{
"epoch": 0.01736881005173688,
"grad_norm": 1.2957801818847656,
"learning_rate": 9.45503262094184e-05,
"loss": 1.3814,
"step": 47
},
{
"epoch": 0.017738359201773836,
"grad_norm": 1.3341565132141113,
"learning_rate": 9.414737964294636e-05,
"loss": 1.6013,
"step": 48
},
{
"epoch": 0.01810790835181079,
"grad_norm": 1.5730022192001343,
"learning_rate": 9.373098535696979e-05,
"loss": 1.5124,
"step": 49
},
{
"epoch": 0.018477457501847747,
"grad_norm": 1.496046781539917,
"learning_rate": 9.330127018922194e-05,
"loss": 1.6368,
"step": 50
},
{
"epoch": 0.018477457501847747,
"eval_loss": 1.5613301992416382,
"eval_runtime": 395.5851,
"eval_samples_per_second": 11.522,
"eval_steps_per_second": 5.761,
"step": 50
},
{
"epoch": 0.018847006651884702,
"grad_norm": 1.0652014017105103,
"learning_rate": 9.285836503510562e-05,
"loss": 1.4934,
"step": 51
},
{
"epoch": 0.019216555801921657,
"grad_norm": 1.494120717048645,
"learning_rate": 9.24024048078213e-05,
"loss": 2.0637,
"step": 52
},
{
"epoch": 0.01958610495195861,
"grad_norm": 1.5903559923171997,
"learning_rate": 9.193352839727121e-05,
"loss": 2.062,
"step": 53
},
{
"epoch": 0.019955654101995565,
"grad_norm": 1.381100058555603,
"learning_rate": 9.145187862775209e-05,
"loss": 1.708,
"step": 54
},
{
"epoch": 0.02032520325203252,
"grad_norm": 1.1597305536270142,
"learning_rate": 9.09576022144496e-05,
"loss": 1.4551,
"step": 55
},
{
"epoch": 0.020694752402069475,
"grad_norm": 1.1079175472259521,
"learning_rate": 9.045084971874738e-05,
"loss": 1.3862,
"step": 56
},
{
"epoch": 0.02106430155210643,
"grad_norm": 1.207403540611267,
"learning_rate": 8.993177550236464e-05,
"loss": 1.4801,
"step": 57
},
{
"epoch": 0.021433850702143386,
"grad_norm": 1.1541577577590942,
"learning_rate": 8.940053768033609e-05,
"loss": 1.5247,
"step": 58
},
{
"epoch": 0.02180339985218034,
"grad_norm": 1.146065592765808,
"learning_rate": 8.885729807284856e-05,
"loss": 1.5926,
"step": 59
},
{
"epoch": 0.022172949002217297,
"grad_norm": 1.3924938440322876,
"learning_rate": 8.83022221559489e-05,
"loss": 1.7747,
"step": 60
},
{
"epoch": 0.02254249815225425,
"grad_norm": 1.1475406885147095,
"learning_rate": 8.773547901113862e-05,
"loss": 1.455,
"step": 61
},
{
"epoch": 0.022912047302291204,
"grad_norm": 1.1560256481170654,
"learning_rate": 8.715724127386972e-05,
"loss": 1.5225,
"step": 62
},
{
"epoch": 0.02328159645232816,
"grad_norm": 1.2148773670196533,
"learning_rate": 8.656768508095853e-05,
"loss": 1.5871,
"step": 63
},
{
"epoch": 0.023651145602365115,
"grad_norm": 1.1244220733642578,
"learning_rate": 8.596699001693255e-05,
"loss": 1.4763,
"step": 64
},
{
"epoch": 0.02402069475240207,
"grad_norm": 1.316160798072815,
"learning_rate": 8.535533905932738e-05,
"loss": 1.4992,
"step": 65
},
{
"epoch": 0.024390243902439025,
"grad_norm": 1.1677945852279663,
"learning_rate": 8.473291852294987e-05,
"loss": 1.4681,
"step": 66
},
{
"epoch": 0.02475979305247598,
"grad_norm": 1.2005647420883179,
"learning_rate": 8.409991800312493e-05,
"loss": 1.51,
"step": 67
},
{
"epoch": 0.025129342202512936,
"grad_norm": 1.2157069444656372,
"learning_rate": 8.345653031794292e-05,
"loss": 1.3752,
"step": 68
},
{
"epoch": 0.025498891352549888,
"grad_norm": 1.3100887537002563,
"learning_rate": 8.280295144952536e-05,
"loss": 1.5807,
"step": 69
},
{
"epoch": 0.025868440502586843,
"grad_norm": 1.228576421737671,
"learning_rate": 8.213938048432697e-05,
"loss": 1.4003,
"step": 70
},
{
"epoch": 0.0262379896526238,
"grad_norm": 1.1663343906402588,
"learning_rate": 8.146601955249188e-05,
"loss": 1.4567,
"step": 71
},
{
"epoch": 0.026607538802660754,
"grad_norm": 1.1122714281082153,
"learning_rate": 8.07830737662829e-05,
"loss": 1.2368,
"step": 72
},
{
"epoch": 0.02697708795269771,
"grad_norm": 1.2412680387496948,
"learning_rate": 8.009075115760243e-05,
"loss": 1.4419,
"step": 73
},
{
"epoch": 0.027346637102734665,
"grad_norm": 1.3064277172088623,
"learning_rate": 7.938926261462366e-05,
"loss": 1.5736,
"step": 74
},
{
"epoch": 0.02771618625277162,
"grad_norm": 1.2390624284744263,
"learning_rate": 7.86788218175523e-05,
"loss": 1.4572,
"step": 75
},
{
"epoch": 0.028085735402808575,
"grad_norm": 1.1637743711471558,
"learning_rate": 7.795964517353735e-05,
"loss": 1.4894,
"step": 76
},
{
"epoch": 0.028455284552845527,
"grad_norm": 1.2793664932250977,
"learning_rate": 7.723195175075136e-05,
"loss": 1.5662,
"step": 77
},
{
"epoch": 0.028824833702882482,
"grad_norm": 1.136055827140808,
"learning_rate": 7.649596321166024e-05,
"loss": 1.3833,
"step": 78
},
{
"epoch": 0.029194382852919438,
"grad_norm": 1.0858932733535767,
"learning_rate": 7.575190374550272e-05,
"loss": 1.4407,
"step": 79
},
{
"epoch": 0.029563932002956393,
"grad_norm": 1.3306987285614014,
"learning_rate": 7.500000000000001e-05,
"loss": 1.6093,
"step": 80
},
{
"epoch": 0.02993348115299335,
"grad_norm": 1.116605281829834,
"learning_rate": 7.424048101231686e-05,
"loss": 1.3547,
"step": 81
},
{
"epoch": 0.030303030303030304,
"grad_norm": 1.1665490865707397,
"learning_rate": 7.347357813929454e-05,
"loss": 1.4528,
"step": 82
},
{
"epoch": 0.03067257945306726,
"grad_norm": 1.3046156167984009,
"learning_rate": 7.269952498697734e-05,
"loss": 1.5422,
"step": 83
},
{
"epoch": 0.031042128603104215,
"grad_norm": 1.3918095827102661,
"learning_rate": 7.191855733945387e-05,
"loss": 1.4168,
"step": 84
},
{
"epoch": 0.031411677753141166,
"grad_norm": 1.232464075088501,
"learning_rate": 7.113091308703498e-05,
"loss": 1.521,
"step": 85
},
{
"epoch": 0.031781226903178125,
"grad_norm": 1.1840020418167114,
"learning_rate": 7.033683215379002e-05,
"loss": 1.4731,
"step": 86
},
{
"epoch": 0.03215077605321508,
"grad_norm": 1.1614052057266235,
"learning_rate": 6.953655642446368e-05,
"loss": 1.3321,
"step": 87
},
{
"epoch": 0.032520325203252036,
"grad_norm": 1.2166821956634521,
"learning_rate": 6.873032967079561e-05,
"loss": 1.5267,
"step": 88
},
{
"epoch": 0.03288987435328899,
"grad_norm": 1.1253515481948853,
"learning_rate": 6.7918397477265e-05,
"loss": 1.4261,
"step": 89
},
{
"epoch": 0.03325942350332594,
"grad_norm": 1.150708556175232,
"learning_rate": 6.710100716628344e-05,
"loss": 1.4948,
"step": 90
},
{
"epoch": 0.0336289726533629,
"grad_norm": 1.3309208154678345,
"learning_rate": 6.627840772285784e-05,
"loss": 1.5697,
"step": 91
},
{
"epoch": 0.03399852180339985,
"grad_norm": 1.2694978713989258,
"learning_rate": 6.545084971874738e-05,
"loss": 1.4129,
"step": 92
},
{
"epoch": 0.03436807095343681,
"grad_norm": 1.2754215002059937,
"learning_rate": 6.461858523613684e-05,
"loss": 1.4501,
"step": 93
},
{
"epoch": 0.03473762010347376,
"grad_norm": 1.2334794998168945,
"learning_rate": 6.378186779084995e-05,
"loss": 1.5684,
"step": 94
},
{
"epoch": 0.03510716925351072,
"grad_norm": 1.2836718559265137,
"learning_rate": 6.294095225512603e-05,
"loss": 1.5449,
"step": 95
},
{
"epoch": 0.03547671840354767,
"grad_norm": 1.1815903186798096,
"learning_rate": 6.209609477998338e-05,
"loss": 1.5793,
"step": 96
},
{
"epoch": 0.035846267553584624,
"grad_norm": 1.1766142845153809,
"learning_rate": 6.124755271719325e-05,
"loss": 1.3747,
"step": 97
},
{
"epoch": 0.03621581670362158,
"grad_norm": 1.248277187347412,
"learning_rate": 6.0395584540887963e-05,
"loss": 1.4013,
"step": 98
},
{
"epoch": 0.036585365853658534,
"grad_norm": 1.2688381671905518,
"learning_rate": 5.9540449768827246e-05,
"loss": 1.4545,
"step": 99
},
{
"epoch": 0.03695491500369549,
"grad_norm": 1.3674864768981934,
"learning_rate": 5.868240888334653e-05,
"loss": 1.3982,
"step": 100
},
{
"epoch": 0.03695491500369549,
"eval_loss": 1.4993144273757935,
"eval_runtime": 396.2892,
"eval_samples_per_second": 11.502,
"eval_steps_per_second": 5.751,
"step": 100
},
{
"epoch": 0.037324464153732445,
"grad_norm": 1.27482008934021,
"learning_rate": 5.782172325201155e-05,
"loss": 2.1648,
"step": 101
},
{
"epoch": 0.037694013303769404,
"grad_norm": 1.306258201599121,
"learning_rate": 5.695865504800327e-05,
"loss": 1.7843,
"step": 102
},
{
"epoch": 0.038063562453806356,
"grad_norm": 1.2670446634292603,
"learning_rate": 5.6093467170257374e-05,
"loss": 1.7235,
"step": 103
},
{
"epoch": 0.038433111603843315,
"grad_norm": 1.327169418334961,
"learning_rate": 5.522642316338268e-05,
"loss": 1.7302,
"step": 104
},
{
"epoch": 0.038802660753880266,
"grad_norm": 1.2265286445617676,
"learning_rate": 5.435778713738292e-05,
"loss": 1.7124,
"step": 105
},
{
"epoch": 0.03917220990391722,
"grad_norm": 1.0715103149414062,
"learning_rate": 5.348782368720626e-05,
"loss": 1.4115,
"step": 106
},
{
"epoch": 0.03954175905395418,
"grad_norm": 1.0157265663146973,
"learning_rate": 5.26167978121472e-05,
"loss": 1.4177,
"step": 107
},
{
"epoch": 0.03991130820399113,
"grad_norm": 1.0146989822387695,
"learning_rate": 5.174497483512506e-05,
"loss": 1.189,
"step": 108
},
{
"epoch": 0.04028085735402809,
"grad_norm": 0.9786179065704346,
"learning_rate": 5.0872620321864185e-05,
"loss": 1.2692,
"step": 109
},
{
"epoch": 0.04065040650406504,
"grad_norm": 1.1345218420028687,
"learning_rate": 5e-05,
"loss": 1.4911,
"step": 110
},
{
"epoch": 0.041019955654102,
"grad_norm": 1.0837470293045044,
"learning_rate": 4.912737967813583e-05,
"loss": 1.4171,
"step": 111
},
{
"epoch": 0.04138950480413895,
"grad_norm": 1.1276988983154297,
"learning_rate": 4.825502516487497e-05,
"loss": 1.4271,
"step": 112
},
{
"epoch": 0.0417590539541759,
"grad_norm": 1.1881543397903442,
"learning_rate": 4.738320218785281e-05,
"loss": 1.3651,
"step": 113
},
{
"epoch": 0.04212860310421286,
"grad_norm": 1.1217621564865112,
"learning_rate": 4.6512176312793736e-05,
"loss": 1.3373,
"step": 114
},
{
"epoch": 0.04249815225424981,
"grad_norm": 1.0728836059570312,
"learning_rate": 4.564221286261709e-05,
"loss": 1.3906,
"step": 115
},
{
"epoch": 0.04286770140428677,
"grad_norm": 1.0626592636108398,
"learning_rate": 4.477357683661734e-05,
"loss": 1.4145,
"step": 116
},
{
"epoch": 0.043237250554323724,
"grad_norm": 1.2668044567108154,
"learning_rate": 4.390653282974264e-05,
"loss": 1.4449,
"step": 117
},
{
"epoch": 0.04360679970436068,
"grad_norm": 1.1660826206207275,
"learning_rate": 4.3041344951996746e-05,
"loss": 1.4418,
"step": 118
},
{
"epoch": 0.043976348854397634,
"grad_norm": 1.2007546424865723,
"learning_rate": 4.2178276747988446e-05,
"loss": 1.2703,
"step": 119
},
{
"epoch": 0.04434589800443459,
"grad_norm": 1.1209444999694824,
"learning_rate": 4.131759111665349e-05,
"loss": 1.3185,
"step": 120
},
{
"epoch": 0.044715447154471545,
"grad_norm": 1.2639321088790894,
"learning_rate": 4.045955023117276e-05,
"loss": 1.5634,
"step": 121
},
{
"epoch": 0.0450849963045085,
"grad_norm": 1.2275253534317017,
"learning_rate": 3.960441545911204e-05,
"loss": 1.5015,
"step": 122
},
{
"epoch": 0.045454545454545456,
"grad_norm": 1.2644561529159546,
"learning_rate": 3.875244728280676e-05,
"loss": 1.2945,
"step": 123
},
{
"epoch": 0.04582409460458241,
"grad_norm": 1.2975744009017944,
"learning_rate": 3.790390522001662e-05,
"loss": 1.4723,
"step": 124
},
{
"epoch": 0.046193643754619367,
"grad_norm": 1.1532069444656372,
"learning_rate": 3.705904774487396e-05,
"loss": 1.3747,
"step": 125
},
{
"epoch": 0.04656319290465632,
"grad_norm": 1.2729253768920898,
"learning_rate": 3.6218132209150045e-05,
"loss": 1.5121,
"step": 126
},
{
"epoch": 0.04693274205469328,
"grad_norm": 0.9181888103485107,
"learning_rate": 3.5381414763863166e-05,
"loss": 1.2013,
"step": 127
},
{
"epoch": 0.04730229120473023,
"grad_norm": 1.1294840574264526,
"learning_rate": 3.4549150281252636e-05,
"loss": 1.4928,
"step": 128
},
{
"epoch": 0.04767184035476718,
"grad_norm": 1.1482758522033691,
"learning_rate": 3.372159227714218e-05,
"loss": 1.427,
"step": 129
},
{
"epoch": 0.04804138950480414,
"grad_norm": 1.2264043092727661,
"learning_rate": 3.289899283371657e-05,
"loss": 1.465,
"step": 130
},
{
"epoch": 0.04841093865484109,
"grad_norm": 1.075649380683899,
"learning_rate": 3.2081602522734986e-05,
"loss": 1.275,
"step": 131
},
{
"epoch": 0.04878048780487805,
"grad_norm": 1.1205912828445435,
"learning_rate": 3.12696703292044e-05,
"loss": 1.4231,
"step": 132
},
{
"epoch": 0.049150036954915,
"grad_norm": 1.2409650087356567,
"learning_rate": 3.046344357553632e-05,
"loss": 1.4638,
"step": 133
},
{
"epoch": 0.04951958610495196,
"grad_norm": 1.162580966949463,
"learning_rate": 2.9663167846209998e-05,
"loss": 1.4062,
"step": 134
},
{
"epoch": 0.04988913525498891,
"grad_norm": 1.081363558769226,
"learning_rate": 2.886908691296504e-05,
"loss": 1.5605,
"step": 135
},
{
"epoch": 0.05025868440502587,
"grad_norm": 1.1237491369247437,
"learning_rate": 2.8081442660546125e-05,
"loss": 1.3619,
"step": 136
},
{
"epoch": 0.050628233555062824,
"grad_norm": 1.189873456954956,
"learning_rate": 2.7300475013022663e-05,
"loss": 1.5284,
"step": 137
},
{
"epoch": 0.050997782705099776,
"grad_norm": 1.0780954360961914,
"learning_rate": 2.6526421860705473e-05,
"loss": 1.4271,
"step": 138
},
{
"epoch": 0.051367331855136734,
"grad_norm": 1.110445499420166,
"learning_rate": 2.575951898768315e-05,
"loss": 1.4199,
"step": 139
},
{
"epoch": 0.051736881005173686,
"grad_norm": 1.1516411304473877,
"learning_rate": 2.500000000000001e-05,
"loss": 1.5769,
"step": 140
},
{
"epoch": 0.052106430155210645,
"grad_norm": 1.0880444049835205,
"learning_rate": 2.4248096254497288e-05,
"loss": 1.4509,
"step": 141
},
{
"epoch": 0.0524759793052476,
"grad_norm": 1.373824119567871,
"learning_rate": 2.350403678833976e-05,
"loss": 1.5812,
"step": 142
},
{
"epoch": 0.052845528455284556,
"grad_norm": 1.1445316076278687,
"learning_rate": 2.2768048249248648e-05,
"loss": 1.3559,
"step": 143
},
{
"epoch": 0.05321507760532151,
"grad_norm": 1.160222053527832,
"learning_rate": 2.2040354826462668e-05,
"loss": 1.5171,
"step": 144
},
{
"epoch": 0.05358462675535846,
"grad_norm": 1.098719596862793,
"learning_rate": 2.132117818244771e-05,
"loss": 1.4258,
"step": 145
},
{
"epoch": 0.05395417590539542,
"grad_norm": 1.310326337814331,
"learning_rate": 2.061073738537635e-05,
"loss": 1.4759,
"step": 146
},
{
"epoch": 0.05432372505543237,
"grad_norm": 1.1144723892211914,
"learning_rate": 1.9909248842397584e-05,
"loss": 1.4351,
"step": 147
},
{
"epoch": 0.05469327420546933,
"grad_norm": 1.3094505071640015,
"learning_rate": 1.9216926233717085e-05,
"loss": 1.4766,
"step": 148
},
{
"epoch": 0.05506282335550628,
"grad_norm": 1.0953831672668457,
"learning_rate": 1.8533980447508137e-05,
"loss": 1.2377,
"step": 149
},
{
"epoch": 0.05543237250554324,
"grad_norm": 1.3045984506607056,
"learning_rate": 1.7860619515673033e-05,
"loss": 1.2559,
"step": 150
},
{
"epoch": 0.05543237250554324,
"eval_loss": 1.4672820568084717,
"eval_runtime": 395.8151,
"eval_samples_per_second": 11.515,
"eval_steps_per_second": 5.758,
"step": 150
},
{
"epoch": 0.05580192165558019,
"grad_norm": 1.876737117767334,
"learning_rate": 1.7197048550474643e-05,
"loss": 1.8515,
"step": 151
},
{
"epoch": 0.05617147080561715,
"grad_norm": 1.6920479536056519,
"learning_rate": 1.6543469682057106e-05,
"loss": 1.9976,
"step": 152
},
{
"epoch": 0.0565410199556541,
"grad_norm": 1.5965746641159058,
"learning_rate": 1.5900081996875083e-05,
"loss": 1.7058,
"step": 153
},
{
"epoch": 0.056910569105691054,
"grad_norm": 1.0820908546447754,
"learning_rate": 1.526708147705013e-05,
"loss": 1.6261,
"step": 154
},
{
"epoch": 0.05728011825572801,
"grad_norm": 1.1893142461776733,
"learning_rate": 1.4644660940672627e-05,
"loss": 1.5562,
"step": 155
},
{
"epoch": 0.057649667405764965,
"grad_norm": 1.0844910144805908,
"learning_rate": 1.4033009983067452e-05,
"loss": 1.4603,
"step": 156
},
{
"epoch": 0.058019216555801924,
"grad_norm": 1.0669087171554565,
"learning_rate": 1.3432314919041478e-05,
"loss": 1.4977,
"step": 157
},
{
"epoch": 0.058388765705838876,
"grad_norm": 0.9656410217285156,
"learning_rate": 1.2842758726130283e-05,
"loss": 1.3458,
"step": 158
},
{
"epoch": 0.058758314855875834,
"grad_norm": 0.9274796843528748,
"learning_rate": 1.22645209888614e-05,
"loss": 1.1323,
"step": 159
},
{
"epoch": 0.059127864005912786,
"grad_norm": 1.142857551574707,
"learning_rate": 1.1697777844051105e-05,
"loss": 1.2885,
"step": 160
},
{
"epoch": 0.05949741315594974,
"grad_norm": 1.0732998847961426,
"learning_rate": 1.1142701927151456e-05,
"loss": 1.424,
"step": 161
},
{
"epoch": 0.0598669623059867,
"grad_norm": 0.9644544124603271,
"learning_rate": 1.0599462319663905e-05,
"loss": 1.1609,
"step": 162
},
{
"epoch": 0.06023651145602365,
"grad_norm": 0.9977406859397888,
"learning_rate": 1.006822449763537e-05,
"loss": 1.379,
"step": 163
},
{
"epoch": 0.06060606060606061,
"grad_norm": 0.9859596490859985,
"learning_rate": 9.549150281252633e-06,
"loss": 1.3819,
"step": 164
},
{
"epoch": 0.06097560975609756,
"grad_norm": 1.5418035984039307,
"learning_rate": 9.042397785550405e-06,
"loss": 1.4524,
"step": 165
},
{
"epoch": 0.06134515890613452,
"grad_norm": 1.2203693389892578,
"learning_rate": 8.548121372247918e-06,
"loss": 1.4579,
"step": 166
},
{
"epoch": 0.06171470805617147,
"grad_norm": 1.2629313468933105,
"learning_rate": 8.066471602728803e-06,
"loss": 1.7015,
"step": 167
},
{
"epoch": 0.06208425720620843,
"grad_norm": 1.1078131198883057,
"learning_rate": 7.597595192178702e-06,
"loss": 1.3415,
"step": 168
},
{
"epoch": 0.06245380635624538,
"grad_norm": 1.163627028465271,
"learning_rate": 7.1416349648943894e-06,
"loss": 1.4821,
"step": 169
},
{
"epoch": 0.06282335550628233,
"grad_norm": 1.1494909524917603,
"learning_rate": 6.698729810778065e-06,
"loss": 1.4676,
"step": 170
},
{
"epoch": 0.06319290465631928,
"grad_norm": 1.1875578165054321,
"learning_rate": 6.269014643030213e-06,
"loss": 1.5624,
"step": 171
},
{
"epoch": 0.06356245380635625,
"grad_norm": 0.978162407875061,
"learning_rate": 5.852620357053651e-06,
"loss": 1.3452,
"step": 172
},
{
"epoch": 0.0639320029563932,
"grad_norm": 1.0990476608276367,
"learning_rate": 5.449673790581611e-06,
"loss": 1.416,
"step": 173
},
{
"epoch": 0.06430155210643015,
"grad_norm": 1.1077399253845215,
"learning_rate": 5.060297685041659e-06,
"loss": 1.4135,
"step": 174
},
{
"epoch": 0.0646711012564671,
"grad_norm": 1.1183117628097534,
"learning_rate": 4.684610648167503e-06,
"loss": 1.5033,
"step": 175
},
{
"epoch": 0.06504065040650407,
"grad_norm": 1.047569990158081,
"learning_rate": 4.322727117869951e-06,
"loss": 1.3875,
"step": 176
},
{
"epoch": 0.06541019955654102,
"grad_norm": 1.253718614578247,
"learning_rate": 3.974757327377981e-06,
"loss": 1.6587,
"step": 177
},
{
"epoch": 0.06577974870657798,
"grad_norm": 1.2737456560134888,
"learning_rate": 3.6408072716606346e-06,
"loss": 1.5391,
"step": 178
},
{
"epoch": 0.06614929785661493,
"grad_norm": 1.211133599281311,
"learning_rate": 3.3209786751399187e-06,
"loss": 1.4662,
"step": 179
},
{
"epoch": 0.06651884700665188,
"grad_norm": 1.1832826137542725,
"learning_rate": 3.0153689607045845e-06,
"loss": 1.4876,
"step": 180
},
{
"epoch": 0.06688839615668885,
"grad_norm": 1.135176420211792,
"learning_rate": 2.724071220034158e-06,
"loss": 1.4133,
"step": 181
},
{
"epoch": 0.0672579453067258,
"grad_norm": 1.2256841659545898,
"learning_rate": 2.4471741852423237e-06,
"loss": 1.5201,
"step": 182
},
{
"epoch": 0.06762749445676275,
"grad_norm": 1.1192513704299927,
"learning_rate": 2.1847622018482283e-06,
"loss": 1.3683,
"step": 183
},
{
"epoch": 0.0679970436067997,
"grad_norm": 1.085292100906372,
"learning_rate": 1.9369152030840556e-06,
"loss": 1.3517,
"step": 184
},
{
"epoch": 0.06836659275683665,
"grad_norm": 1.1791199445724487,
"learning_rate": 1.70370868554659e-06,
"loss": 1.397,
"step": 185
},
{
"epoch": 0.06873614190687362,
"grad_norm": 1.0781949758529663,
"learning_rate": 1.4852136862001764e-06,
"loss": 1.2846,
"step": 186
},
{
"epoch": 0.06910569105691057,
"grad_norm": 1.319664716720581,
"learning_rate": 1.2814967607382432e-06,
"loss": 1.5363,
"step": 187
},
{
"epoch": 0.06947524020694752,
"grad_norm": 1.156130075454712,
"learning_rate": 1.0926199633097157e-06,
"loss": 1.5176,
"step": 188
},
{
"epoch": 0.06984478935698447,
"grad_norm": 1.2623462677001953,
"learning_rate": 9.186408276168013e-07,
"loss": 1.4803,
"step": 189
},
{
"epoch": 0.07021433850702144,
"grad_norm": 1.1374601125717163,
"learning_rate": 7.596123493895991e-07,
"loss": 1.5936,
"step": 190
},
{
"epoch": 0.07058388765705839,
"grad_norm": 1.0878794193267822,
"learning_rate": 6.15582970243117e-07,
"loss": 1.2696,
"step": 191
},
{
"epoch": 0.07095343680709534,
"grad_norm": 1.151364803314209,
"learning_rate": 4.865965629214819e-07,
"loss": 1.5469,
"step": 192
},
{
"epoch": 0.0713229859571323,
"grad_norm": 1.1073623895645142,
"learning_rate": 3.7269241793390085e-07,
"loss": 1.4087,
"step": 193
},
{
"epoch": 0.07169253510716925,
"grad_norm": 1.0697261095046997,
"learning_rate": 2.7390523158633554e-07,
"loss": 1.3369,
"step": 194
},
{
"epoch": 0.07206208425720621,
"grad_norm": 1.2926465272903442,
"learning_rate": 1.9026509541272275e-07,
"loss": 1.4226,
"step": 195
},
{
"epoch": 0.07243163340724317,
"grad_norm": 1.2745354175567627,
"learning_rate": 1.2179748700879012e-07,
"loss": 1.4758,
"step": 196
},
{
"epoch": 0.07280118255728012,
"grad_norm": 1.2054541110992432,
"learning_rate": 6.852326227130834e-08,
"loss": 1.365,
"step": 197
},
{
"epoch": 0.07317073170731707,
"grad_norm": 1.1675442457199097,
"learning_rate": 3.04586490452119e-08,
"loss": 1.3514,
"step": 198
},
{
"epoch": 0.07354028085735403,
"grad_norm": 1.187726616859436,
"learning_rate": 7.615242180436522e-09,
"loss": 1.4101,
"step": 199
},
{
"epoch": 0.07390983000739099,
"grad_norm": 1.2909642457962036,
"learning_rate": 0.0,
"loss": 1.477,
"step": 200
},
{
"epoch": 0.07390983000739099,
"eval_loss": 1.4534531831741333,
"eval_runtime": 396.0218,
"eval_samples_per_second": 11.509,
"eval_steps_per_second": 5.755,
"step": 200
}
],
"logging_steps": 1,
"max_steps": 200,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 50,
"stateful_callbacks": {
"EarlyStoppingCallback": {
"args": {
"early_stopping_patience": 5,
"early_stopping_threshold": 0.0
},
"attributes": {
"early_stopping_patience_counter": 0
}
},
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 3.171649799313162e+17,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}