{
"best_metric": 1.6925917863845825,
"best_model_checkpoint": "miner_id_24/checkpoint-150",
"epoch": 1.9138755980861244,
"eval_steps": 50,
"global_step": 200,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.009569377990430622,
"grad_norm": 0.38082700967788696,
"learning_rate": 8.000000000000001e-06,
"loss": 1.6292,
"step": 1
},
{
"epoch": 0.009569377990430622,
"eval_loss": 1.831426739692688,
"eval_runtime": 5.8102,
"eval_samples_per_second": 30.464,
"eval_steps_per_second": 7.745,
"step": 1
},
{
"epoch": 0.019138755980861243,
"grad_norm": 0.3949052691459656,
"learning_rate": 1.6000000000000003e-05,
"loss": 1.5218,
"step": 2
},
{
"epoch": 0.028708133971291867,
"grad_norm": 0.4035767614841461,
"learning_rate": 2.4e-05,
"loss": 1.5025,
"step": 3
},
{
"epoch": 0.03827751196172249,
"grad_norm": 0.41593310236930847,
"learning_rate": 3.2000000000000005e-05,
"loss": 1.5595,
"step": 4
},
{
"epoch": 0.04784688995215311,
"grad_norm": 0.40817272663116455,
"learning_rate": 4e-05,
"loss": 1.4547,
"step": 5
},
{
"epoch": 0.05741626794258373,
"grad_norm": 0.42838597297668457,
"learning_rate": 4.8e-05,
"loss": 1.6265,
"step": 6
},
{
"epoch": 0.06698564593301436,
"grad_norm": 0.45393508672714233,
"learning_rate": 5.6e-05,
"loss": 1.6049,
"step": 7
},
{
"epoch": 0.07655502392344497,
"grad_norm": 0.43397629261016846,
"learning_rate": 6.400000000000001e-05,
"loss": 1.5147,
"step": 8
},
{
"epoch": 0.0861244019138756,
"grad_norm": 0.45276832580566406,
"learning_rate": 7.2e-05,
"loss": 1.5187,
"step": 9
},
{
"epoch": 0.09569377990430622,
"grad_norm": 0.4829266369342804,
"learning_rate": 8e-05,
"loss": 1.5023,
"step": 10
},
{
"epoch": 0.10526315789473684,
"grad_norm": 0.5055172443389893,
"learning_rate": 7.999453219969877e-05,
"loss": 1.5217,
"step": 11
},
{
"epoch": 0.11483253588516747,
"grad_norm": 0.5195921659469604,
"learning_rate": 7.997813029363704e-05,
"loss": 1.4627,
"step": 12
},
{
"epoch": 0.12440191387559808,
"grad_norm": 0.5627723932266235,
"learning_rate": 7.99507987659322e-05,
"loss": 1.5943,
"step": 13
},
{
"epoch": 0.1339712918660287,
"grad_norm": 0.561619758605957,
"learning_rate": 7.991254508875098e-05,
"loss": 1.633,
"step": 14
},
{
"epoch": 0.14354066985645933,
"grad_norm": 0.5991867780685425,
"learning_rate": 7.98633797202668e-05,
"loss": 1.7317,
"step": 15
},
{
"epoch": 0.15311004784688995,
"grad_norm": 0.6589527130126953,
"learning_rate": 7.980331610180046e-05,
"loss": 1.7673,
"step": 16
},
{
"epoch": 0.16267942583732056,
"grad_norm": 0.6777735948562622,
"learning_rate": 7.973237065414553e-05,
"loss": 1.7703,
"step": 17
},
{
"epoch": 0.1722488038277512,
"grad_norm": 0.7253565192222595,
"learning_rate": 7.965056277307902e-05,
"loss": 1.887,
"step": 18
},
{
"epoch": 0.18181818181818182,
"grad_norm": 0.7007018327713013,
"learning_rate": 7.955791482405875e-05,
"loss": 1.8466,
"step": 19
},
{
"epoch": 0.19138755980861244,
"grad_norm": 0.7932828068733215,
"learning_rate": 7.94544521361089e-05,
"loss": 1.9501,
"step": 20
},
{
"epoch": 0.20095693779904306,
"grad_norm": 0.8723483085632324,
"learning_rate": 7.93402029948953e-05,
"loss": 1.8275,
"step": 21
},
{
"epoch": 0.21052631578947367,
"grad_norm": 0.9875766038894653,
"learning_rate": 7.921519863499239e-05,
"loss": 1.9756,
"step": 22
},
{
"epoch": 0.22009569377990432,
"grad_norm": 1.1991780996322632,
"learning_rate": 7.907947323134398e-05,
"loss": 2.0454,
"step": 23
},
{
"epoch": 0.22966507177033493,
"grad_norm": 1.3916829824447632,
"learning_rate": 7.893306388992023e-05,
"loss": 2.1839,
"step": 24
},
{
"epoch": 0.23923444976076555,
"grad_norm": 2.0087339878082275,
"learning_rate": 7.877601063757323e-05,
"loss": 1.8768,
"step": 25
},
{
"epoch": 0.24880382775119617,
"grad_norm": 3.617227554321289,
"learning_rate": 7.860835641109395e-05,
"loss": 2.2618,
"step": 26
},
{
"epoch": 0.2583732057416268,
"grad_norm": 0.3412221074104309,
"learning_rate": 7.843014704547393e-05,
"loss": 1.3234,
"step": 27
},
{
"epoch": 0.2679425837320574,
"grad_norm": 0.42941951751708984,
"learning_rate": 7.824143126137431e-05,
"loss": 1.4562,
"step": 28
},
{
"epoch": 0.27751196172248804,
"grad_norm": 0.44896551966667175,
"learning_rate": 7.804226065180615e-05,
"loss": 1.6191,
"step": 29
},
{
"epoch": 0.28708133971291866,
"grad_norm": 0.4686514437198639,
"learning_rate": 7.783268966802539e-05,
"loss": 1.5335,
"step": 30
},
{
"epoch": 0.2966507177033493,
"grad_norm": 0.41287559270858765,
"learning_rate": 7.761277560464645e-05,
"loss": 1.5145,
"step": 31
},
{
"epoch": 0.3062200956937799,
"grad_norm": 0.3899138271808624,
"learning_rate": 7.738257858397844e-05,
"loss": 1.5435,
"step": 32
},
{
"epoch": 0.3157894736842105,
"grad_norm": 0.38428395986557007,
"learning_rate": 7.71421615395883e-05,
"loss": 1.4655,
"step": 33
},
{
"epoch": 0.3253588516746411,
"grad_norm": 0.4115948975086212,
"learning_rate": 7.68915901990954e-05,
"loss": 1.4021,
"step": 34
},
{
"epoch": 0.3349282296650718,
"grad_norm": 0.428444504737854,
"learning_rate": 7.663093306620231e-05,
"loss": 1.5792,
"step": 35
},
{
"epoch": 0.3444976076555024,
"grad_norm": 0.4278987646102905,
"learning_rate": 7.636026140196651e-05,
"loss": 1.4603,
"step": 36
},
{
"epoch": 0.35406698564593303,
"grad_norm": 0.45422014594078064,
"learning_rate": 7.607964920531837e-05,
"loss": 1.5074,
"step": 37
},
{
"epoch": 0.36363636363636365,
"grad_norm": 0.4825778007507324,
"learning_rate": 7.578917319283055e-05,
"loss": 1.6137,
"step": 38
},
{
"epoch": 0.37320574162679426,
"grad_norm": 0.49149173498153687,
"learning_rate": 7.548891277774448e-05,
"loss": 1.5871,
"step": 39
},
{
"epoch": 0.3827751196172249,
"grad_norm": 0.5421543121337891,
"learning_rate": 7.517895004825956e-05,
"loss": 1.6206,
"step": 40
},
{
"epoch": 0.3923444976076555,
"grad_norm": 0.5325545072555542,
"learning_rate": 7.48593697450911e-05,
"loss": 1.5887,
"step": 41
},
{
"epoch": 0.4019138755980861,
"grad_norm": 0.583401620388031,
"learning_rate": 7.453025923830296e-05,
"loss": 1.8583,
"step": 42
},
{
"epoch": 0.41148325358851673,
"grad_norm": 0.618493378162384,
"learning_rate": 7.419170850342156e-05,
"loss": 1.7723,
"step": 43
},
{
"epoch": 0.42105263157894735,
"grad_norm": 0.6902540922164917,
"learning_rate": 7.384381009683742e-05,
"loss": 1.8642,
"step": 44
},
{
"epoch": 0.430622009569378,
"grad_norm": 0.6965833902359009,
"learning_rate": 7.348665913050115e-05,
"loss": 1.8881,
"step": 45
},
{
"epoch": 0.44019138755980863,
"grad_norm": 0.7248061299324036,
"learning_rate": 7.312035324592081e-05,
"loss": 1.8599,
"step": 46
},
{
"epoch": 0.44976076555023925,
"grad_norm": 0.7599021196365356,
"learning_rate": 7.274499258746771e-05,
"loss": 1.919,
"step": 47
},
{
"epoch": 0.45933014354066987,
"grad_norm": 0.8597122430801392,
"learning_rate": 7.236067977499791e-05,
"loss": 1.8464,
"step": 48
},
{
"epoch": 0.4688995215311005,
"grad_norm": 0.9372475147247314,
"learning_rate": 7.196751987579699e-05,
"loss": 1.8919,
"step": 49
},
{
"epoch": 0.4784688995215311,
"grad_norm": 1.1424167156219482,
"learning_rate": 7.156562037585576e-05,
"loss": 1.875,
"step": 50
},
{
"epoch": 0.4784688995215311,
"eval_loss": 1.7351869344711304,
"eval_runtime": 5.7379,
"eval_samples_per_second": 30.848,
"eval_steps_per_second": 7.843,
"step": 50
},
{
"epoch": 0.4880382775119617,
"grad_norm": 1.656938076019287,
"learning_rate": 7.11550911504845e-05,
"loss": 1.9099,
"step": 51
},
{
"epoch": 0.49760765550239233,
"grad_norm": 2.4753801822662354,
"learning_rate": 7.073604443427437e-05,
"loss": 1.6779,
"step": 52
},
{
"epoch": 0.507177033492823,
"grad_norm": 0.29961133003234863,
"learning_rate": 7.03085947904134e-05,
"loss": 1.4577,
"step": 53
},
{
"epoch": 0.5167464114832536,
"grad_norm": 0.3351958394050598,
"learning_rate": 6.987285907936617e-05,
"loss": 1.5448,
"step": 54
},
{
"epoch": 0.5263157894736842,
"grad_norm": 0.3630099296569824,
"learning_rate": 6.942895642692527e-05,
"loss": 1.425,
"step": 55
},
{
"epoch": 0.5358851674641149,
"grad_norm": 0.3872605860233307,
"learning_rate": 6.897700819164357e-05,
"loss": 1.5064,
"step": 56
},
{
"epoch": 0.5454545454545454,
"grad_norm": 0.3808247148990631,
"learning_rate": 6.851713793165589e-05,
"loss": 1.5932,
"step": 57
},
{
"epoch": 0.5550239234449761,
"grad_norm": 0.39085471630096436,
"learning_rate": 6.804947137089955e-05,
"loss": 1.4699,
"step": 58
},
{
"epoch": 0.5645933014354066,
"grad_norm": 0.38267335295677185,
"learning_rate": 6.757413636474263e-05,
"loss": 1.4573,
"step": 59
},
{
"epoch": 0.5741626794258373,
"grad_norm": 0.3885556757450104,
"learning_rate": 6.709126286502965e-05,
"loss": 1.477,
"step": 60
},
{
"epoch": 0.583732057416268,
"grad_norm": 0.40890273451805115,
"learning_rate": 6.660098288455393e-05,
"loss": 1.4652,
"step": 61
},
{
"epoch": 0.5933014354066986,
"grad_norm": 0.4209669828414917,
"learning_rate": 6.610343046096674e-05,
"loss": 1.5038,
"step": 62
},
{
"epoch": 0.6028708133971292,
"grad_norm": 0.420719176530838,
"learning_rate": 6.559874162013267e-05,
"loss": 1.5592,
"step": 63
},
{
"epoch": 0.6124401913875598,
"grad_norm": 0.4601193070411682,
"learning_rate": 6.508705433894149e-05,
"loss": 1.6004,
"step": 64
},
{
"epoch": 0.6220095693779905,
"grad_norm": 0.4896854758262634,
"learning_rate": 6.456850850758673e-05,
"loss": 1.6569,
"step": 65
},
{
"epoch": 0.631578947368421,
"grad_norm": 0.4936942756175995,
"learning_rate": 6.404324589132101e-05,
"loss": 1.6673,
"step": 66
},
{
"epoch": 0.6411483253588517,
"grad_norm": 0.5432229042053223,
"learning_rate": 6.351141009169893e-05,
"loss": 1.6781,
"step": 67
},
{
"epoch": 0.6507177033492823,
"grad_norm": 0.5662530064582825,
"learning_rate": 6.297314650731775e-05,
"loss": 1.7307,
"step": 68
},
{
"epoch": 0.6602870813397129,
"grad_norm": 0.6015027761459351,
"learning_rate": 6.242860229406692e-05,
"loss": 1.7204,
"step": 69
},
{
"epoch": 0.6698564593301436,
"grad_norm": 0.632143497467041,
"learning_rate": 6.18779263248971e-05,
"loss": 1.7661,
"step": 70
},
{
"epoch": 0.6794258373205742,
"grad_norm": 0.6799759268760681,
"learning_rate": 6.132126914911976e-05,
"loss": 1.8595,
"step": 71
},
{
"epoch": 0.6889952153110048,
"grad_norm": 0.7476505041122437,
"learning_rate": 6.075878295124861e-05,
"loss": 1.7854,
"step": 72
},
{
"epoch": 0.6985645933014354,
"grad_norm": 0.8099026083946228,
"learning_rate": 6.019062150939376e-05,
"loss": 1.8917,
"step": 73
},
{
"epoch": 0.7081339712918661,
"grad_norm": 0.8964723348617554,
"learning_rate": 5.9616940153220336e-05,
"loss": 1.9968,
"step": 74
},
{
"epoch": 0.7177033492822966,
"grad_norm": 0.9372945427894592,
"learning_rate": 5.903789572148295e-05,
"loss": 2.0208,
"step": 75
},
{
"epoch": 0.7272727272727273,
"grad_norm": 1.2482417821884155,
"learning_rate": 5.845364651914752e-05,
"loss": 2.1952,
"step": 76
},
{
"epoch": 0.7368421052631579,
"grad_norm": 1.5300464630126953,
"learning_rate": 5.786435227411227e-05,
"loss": 1.7757,
"step": 77
},
{
"epoch": 0.7464114832535885,
"grad_norm": 2.1026394367218018,
"learning_rate": 5.727017409353971e-05,
"loss": 1.6011,
"step": 78
},
{
"epoch": 0.7559808612440191,
"grad_norm": 0.2756762206554413,
"learning_rate": 5.667127441981162e-05,
"loss": 1.4248,
"step": 79
},
{
"epoch": 0.7655502392344498,
"grad_norm": 0.3412489891052246,
"learning_rate": 5.606781698611879e-05,
"loss": 1.5389,
"step": 80
},
{
"epoch": 0.7751196172248804,
"grad_norm": 0.3573220670223236,
"learning_rate": 5.5459966771698096e-05,
"loss": 1.506,
"step": 81
},
{
"epoch": 0.784688995215311,
"grad_norm": 0.40763863921165466,
"learning_rate": 5.4847889956728834e-05,
"loss": 1.5875,
"step": 82
},
{
"epoch": 0.7942583732057417,
"grad_norm": 0.40273529291152954,
"learning_rate": 5.423175387690067e-05,
"loss": 1.4032,
"step": 83
},
{
"epoch": 0.8038277511961722,
"grad_norm": 0.41646698117256165,
"learning_rate": 5.361172697766573e-05,
"loss": 1.6233,
"step": 84
},
{
"epoch": 0.8133971291866029,
"grad_norm": 0.40466055274009705,
"learning_rate": 5.298797876818735e-05,
"loss": 1.4895,
"step": 85
},
{
"epoch": 0.8229665071770335,
"grad_norm": 0.42662107944488525,
"learning_rate": 5.23606797749979e-05,
"loss": 1.482,
"step": 86
},
{
"epoch": 0.8325358851674641,
"grad_norm": 0.44496554136276245,
"learning_rate": 5.17300014953786e-05,
"loss": 1.5595,
"step": 87
},
{
"epoch": 0.8421052631578947,
"grad_norm": 0.41382166743278503,
"learning_rate": 5.109611635047379e-05,
"loss": 1.3534,
"step": 88
},
{
"epoch": 0.8516746411483254,
"grad_norm": 0.44714534282684326,
"learning_rate": 5.04591976381528e-05,
"loss": 1.4973,
"step": 89
},
{
"epoch": 0.861244019138756,
"grad_norm": 0.4594970643520355,
"learning_rate": 4.981941948563197e-05,
"loss": 1.7519,
"step": 90
},
{
"epoch": 0.8708133971291866,
"grad_norm": 0.4741780161857605,
"learning_rate": 4.9176956801870065e-05,
"loss": 1.5083,
"step": 91
},
{
"epoch": 0.8803827751196173,
"grad_norm": 0.49775177240371704,
"learning_rate": 4.853198522974988e-05,
"loss": 1.6772,
"step": 92
},
{
"epoch": 0.8899521531100478,
"grad_norm": 0.5491089820861816,
"learning_rate": 4.788468109805921e-05,
"loss": 1.7706,
"step": 93
},
{
"epoch": 0.8995215311004785,
"grad_norm": 0.5795297622680664,
"learning_rate": 4.7235221373284407e-05,
"loss": 1.7992,
"step": 94
},
{
"epoch": 0.9090909090909091,
"grad_norm": 0.5943451523780823,
"learning_rate": 4.658378361122936e-05,
"loss": 1.7066,
"step": 95
},
{
"epoch": 0.9186602870813397,
"grad_norm": 0.6468049883842468,
"learning_rate": 4.593054590847368e-05,
"loss": 1.8196,
"step": 96
},
{
"epoch": 0.9282296650717703,
"grad_norm": 0.7615320086479187,
"learning_rate": 4.5275686853682765e-05,
"loss": 2.0393,
"step": 97
},
{
"epoch": 0.937799043062201,
"grad_norm": 0.7791320085525513,
"learning_rate": 4.4619385478783456e-05,
"loss": 1.9799,
"step": 98
},
{
"epoch": 0.9473684210526315,
"grad_norm": 0.870513379573822,
"learning_rate": 4.396182121001852e-05,
"loss": 2.058,
"step": 99
},
{
"epoch": 0.9569377990430622,
"grad_norm": 0.9276971220970154,
"learning_rate": 4.33031738188933e-05,
"loss": 1.8242,
"step": 100
},
{
"epoch": 0.9569377990430622,
"eval_loss": 1.6937828063964844,
"eval_runtime": 5.7937,
"eval_samples_per_second": 30.551,
"eval_steps_per_second": 7.767,
"step": 100
},
{
"epoch": 0.9665071770334929,
"grad_norm": 1.0401110649108887,
"learning_rate": 4.264362337302798e-05,
"loss": 1.9696,
"step": 101
},
{
"epoch": 0.9760765550239234,
"grad_norm": 1.2967274188995361,
"learning_rate": 4.1983350186928894e-05,
"loss": 2.0238,
"step": 102
},
{
"epoch": 0.9856459330143541,
"grad_norm": 1.6171687841415405,
"learning_rate": 4.132253477269233e-05,
"loss": 1.7819,
"step": 103
},
{
"epoch": 0.9952153110047847,
"grad_norm": 2.1571907997131348,
"learning_rate": 4.0661357790654345e-05,
"loss": 1.4887,
"step": 104
},
{
"epoch": 1.0047846889952152,
"grad_norm": 0.6212150454521179,
"learning_rate": 4e-05,
"loss": 2.2897,
"step": 105
},
{
"epoch": 1.014354066985646,
"grad_norm": 0.2727871239185333,
"learning_rate": 3.933864220934566e-05,
"loss": 1.3308,
"step": 106
},
{
"epoch": 1.0239234449760766,
"grad_norm": 0.3046855330467224,
"learning_rate": 3.8677465227307676e-05,
"loss": 1.4615,
"step": 107
},
{
"epoch": 1.0334928229665072,
"grad_norm": 0.31469711661338806,
"learning_rate": 3.8016649813071106e-05,
"loss": 1.3841,
"step": 108
},
{
"epoch": 1.0430622009569377,
"grad_norm": 0.32190945744514465,
"learning_rate": 3.735637662697203e-05,
"loss": 1.3789,
"step": 109
},
{
"epoch": 1.0526315789473684,
"grad_norm": 0.3380140960216522,
"learning_rate": 3.669682618110671e-05,
"loss": 1.437,
"step": 110
},
{
"epoch": 1.062200956937799,
"grad_norm": 0.3436225652694702,
"learning_rate": 3.6038178789981494e-05,
"loss": 1.2847,
"step": 111
},
{
"epoch": 1.0717703349282297,
"grad_norm": 0.36676591634750366,
"learning_rate": 3.538061452121656e-05,
"loss": 1.297,
"step": 112
},
{
"epoch": 1.0813397129186604,
"grad_norm": 0.39319589734077454,
"learning_rate": 3.472431314631724e-05,
"loss": 1.489,
"step": 113
},
{
"epoch": 1.0909090909090908,
"grad_norm": 0.38813480734825134,
"learning_rate": 3.406945409152632e-05,
"loss": 1.4377,
"step": 114
},
{
"epoch": 1.1004784688995215,
"grad_norm": 0.41558539867401123,
"learning_rate": 3.341621638877064e-05,
"loss": 1.4287,
"step": 115
},
{
"epoch": 1.1100478468899522,
"grad_norm": 0.40909650921821594,
"learning_rate": 3.276477862671562e-05,
"loss": 1.441,
"step": 116
},
{
"epoch": 1.1196172248803828,
"grad_norm": 0.45959237217903137,
"learning_rate": 3.21153189019408e-05,
"loss": 1.6774,
"step": 117
},
{
"epoch": 1.1291866028708135,
"grad_norm": 0.46516451239585876,
"learning_rate": 3.146801477025013e-05,
"loss": 1.4372,
"step": 118
},
{
"epoch": 1.138755980861244,
"grad_norm": 0.5069877505302429,
"learning_rate": 3.082304319812994e-05,
"loss": 1.642,
"step": 119
},
{
"epoch": 1.1483253588516746,
"grad_norm": 0.5341264009475708,
"learning_rate": 3.0180580514368037e-05,
"loss": 1.6585,
"step": 120
},
{
"epoch": 1.1578947368421053,
"grad_norm": 0.56055748462677,
"learning_rate": 2.9540802361847212e-05,
"loss": 1.6557,
"step": 121
},
{
"epoch": 1.167464114832536,
"grad_norm": 0.5781103372573853,
"learning_rate": 2.890388364952623e-05,
"loss": 1.6671,
"step": 122
},
{
"epoch": 1.1770334928229664,
"grad_norm": 0.6097198724746704,
"learning_rate": 2.8269998504621416e-05,
"loss": 1.6451,
"step": 123
},
{
"epoch": 1.186602870813397,
"grad_norm": 0.6518456339836121,
"learning_rate": 2.7639320225002108e-05,
"loss": 1.6557,
"step": 124
},
{
"epoch": 1.1961722488038278,
"grad_norm": 0.6572650671005249,
"learning_rate": 2.7012021231812666e-05,
"loss": 1.4739,
"step": 125
},
{
"epoch": 1.2057416267942584,
"grad_norm": 0.783046543598175,
"learning_rate": 2.638827302233428e-05,
"loss": 1.8914,
"step": 126
},
{
"epoch": 1.215311004784689,
"grad_norm": 0.808749794960022,
"learning_rate": 2.576824612309934e-05,
"loss": 1.6836,
"step": 127
},
{
"epoch": 1.2248803827751196,
"grad_norm": 0.9802379012107849,
"learning_rate": 2.5152110043271166e-05,
"loss": 1.6596,
"step": 128
},
{
"epoch": 1.2344497607655502,
"grad_norm": 1.2099603414535522,
"learning_rate": 2.454003322830192e-05,
"loss": 1.5137,
"step": 129
},
{
"epoch": 1.244019138755981,
"grad_norm": 1.4862251281738281,
"learning_rate": 2.393218301388123e-05,
"loss": 1.1418,
"step": 130
},
{
"epoch": 1.2535885167464116,
"grad_norm": 1.0337613821029663,
"learning_rate": 2.3328725580188395e-05,
"loss": 1.2111,
"step": 131
},
{
"epoch": 1.263157894736842,
"grad_norm": 0.33979716897010803,
"learning_rate": 2.272982590646029e-05,
"loss": 1.4224,
"step": 132
},
{
"epoch": 1.2727272727272727,
"grad_norm": 0.37942126393318176,
"learning_rate": 2.2135647725887744e-05,
"loss": 1.4692,
"step": 133
},
{
"epoch": 1.2822966507177034,
"grad_norm": 0.3820892572402954,
"learning_rate": 2.1546353480852495e-05,
"loss": 1.3997,
"step": 134
},
{
"epoch": 1.291866028708134,
"grad_norm": 0.37589502334594727,
"learning_rate": 2.096210427851706e-05,
"loss": 1.44,
"step": 135
},
{
"epoch": 1.3014354066985647,
"grad_norm": 0.39906907081604004,
"learning_rate": 2.038305984677969e-05,
"loss": 1.4383,
"step": 136
},
{
"epoch": 1.3110047846889952,
"grad_norm": 0.3934566378593445,
"learning_rate": 1.9809378490606264e-05,
"loss": 1.3633,
"step": 137
},
{
"epoch": 1.3205741626794258,
"grad_norm": 0.4008135497570038,
"learning_rate": 1.9241217048751406e-05,
"loss": 1.4147,
"step": 138
},
{
"epoch": 1.3301435406698565,
"grad_norm": 0.4096076488494873,
"learning_rate": 1.867873085088026e-05,
"loss": 1.3079,
"step": 139
},
{
"epoch": 1.339712918660287,
"grad_norm": 0.4406437873840332,
"learning_rate": 1.8122073675102935e-05,
"loss": 1.4559,
"step": 140
},
{
"epoch": 1.3492822966507176,
"grad_norm": 0.42370909452438354,
"learning_rate": 1.75713977059331e-05,
"loss": 1.3705,
"step": 141
},
{
"epoch": 1.3588516746411483,
"grad_norm": 0.4583252966403961,
"learning_rate": 1.702685349268226e-05,
"loss": 1.4909,
"step": 142
},
{
"epoch": 1.368421052631579,
"grad_norm": 0.44213107228279114,
"learning_rate": 1.648858990830108e-05,
"loss": 1.3054,
"step": 143
},
{
"epoch": 1.3779904306220097,
"grad_norm": 0.46722662448883057,
"learning_rate": 1.5956754108678996e-05,
"loss": 1.443,
"step": 144
},
{
"epoch": 1.38755980861244,
"grad_norm": 0.48926499485969543,
"learning_rate": 1.5431491492413288e-05,
"loss": 1.4217,
"step": 145
},
{
"epoch": 1.3971291866028708,
"grad_norm": 0.5186731219291687,
"learning_rate": 1.491294566105852e-05,
"loss": 1.567,
"step": 146
},
{
"epoch": 1.4066985645933014,
"grad_norm": 0.5692178010940552,
"learning_rate": 1.4401258379867335e-05,
"loss": 1.6171,
"step": 147
},
{
"epoch": 1.4162679425837321,
"grad_norm": 0.5609941482543945,
"learning_rate": 1.3896569539033253e-05,
"loss": 1.5476,
"step": 148
},
{
"epoch": 1.4258373205741628,
"grad_norm": 0.6115127801895142,
"learning_rate": 1.3399017115446067e-05,
"loss": 1.5292,
"step": 149
},
{
"epoch": 1.4354066985645932,
"grad_norm": 0.6445640325546265,
"learning_rate": 1.2908737134970367e-05,
"loss": 1.6103,
"step": 150
},
{
"epoch": 1.4354066985645932,
"eval_loss": 1.6925917863845825,
"eval_runtime": 5.7417,
"eval_samples_per_second": 30.827,
"eval_steps_per_second": 7.837,
"step": 150
},
{
"epoch": 1.444976076555024,
"grad_norm": 0.6974514722824097,
"learning_rate": 1.242586363525737e-05,
"loss": 1.7167,
"step": 151
},
{
"epoch": 1.4545454545454546,
"grad_norm": 0.7227963805198669,
"learning_rate": 1.1950528629100457e-05,
"loss": 1.6717,
"step": 152
},
{
"epoch": 1.464114832535885,
"grad_norm": 0.8344351053237915,
"learning_rate": 1.1482862068344121e-05,
"loss": 1.686,
"step": 153
},
{
"epoch": 1.4736842105263157,
"grad_norm": 0.9203869104385376,
"learning_rate": 1.1022991808356442e-05,
"loss": 1.5159,
"step": 154
},
{
"epoch": 1.4832535885167464,
"grad_norm": 1.1346211433410645,
"learning_rate": 1.0571043573074737e-05,
"loss": 1.5358,
"step": 155
},
{
"epoch": 1.492822966507177,
"grad_norm": 1.6541485786437988,
"learning_rate": 1.0127140920633857e-05,
"loss": 1.1705,
"step": 156
},
{
"epoch": 1.5023923444976077,
"grad_norm": 1.685089349746704,
"learning_rate": 9.69140520958662e-06,
"loss": 1.1268,
"step": 157
},
{
"epoch": 1.5119617224880382,
"grad_norm": 0.30736255645751953,
"learning_rate": 9.263955565725648e-06,
"loss": 1.3637,
"step": 158
},
{
"epoch": 1.5215311004784688,
"grad_norm": 0.3349371552467346,
"learning_rate": 8.844908849515509e-06,
"loss": 1.474,
"step": 159
},
{
"epoch": 1.5311004784688995,
"grad_norm": 0.35800015926361084,
"learning_rate": 8.434379624144261e-06,
"loss": 1.5093,
"step": 160
},
{
"epoch": 1.5406698564593302,
"grad_norm": 0.37739279866218567,
"learning_rate": 8.032480124203013e-06,
"loss": 1.4268,
"step": 161
},
{
"epoch": 1.5502392344497609,
"grad_norm": 0.40975770354270935,
"learning_rate": 7.639320225002106e-06,
"loss": 1.5343,
"step": 162
},
{
"epoch": 1.5598086124401913,
"grad_norm": 0.3935057520866394,
"learning_rate": 7.255007412532307e-06,
"loss": 1.4853,
"step": 163
},
{
"epoch": 1.569377990430622,
"grad_norm": 0.4058459401130676,
"learning_rate": 6.8796467540791986e-06,
"loss": 1.3923,
"step": 164
},
{
"epoch": 1.5789473684210527,
"grad_norm": 0.4203774631023407,
"learning_rate": 6.513340869498859e-06,
"loss": 1.3381,
"step": 165
},
{
"epoch": 1.588516746411483,
"grad_norm": 0.42906367778778076,
"learning_rate": 6.1561899031625794e-06,
"loss": 1.4494,
"step": 166
},
{
"epoch": 1.598086124401914,
"grad_norm": 0.43578189611434937,
"learning_rate": 5.808291496578435e-06,
"loss": 1.2433,
"step": 167
},
{
"epoch": 1.6076555023923444,
"grad_norm": 0.4657846987247467,
"learning_rate": 5.469740761697044e-06,
"loss": 1.5992,
"step": 168
},
{
"epoch": 1.6172248803827751,
"grad_norm": 0.49452725052833557,
"learning_rate": 5.140630254908905e-06,
"loss": 1.392,
"step": 169
},
{
"epoch": 1.6267942583732058,
"grad_norm": 0.5063107013702393,
"learning_rate": 4.821049951740442e-06,
"loss": 1.6072,
"step": 170
},
{
"epoch": 1.6363636363636362,
"grad_norm": 0.5106216073036194,
"learning_rate": 4.511087222255528e-06,
"loss": 1.3298,
"step": 171
},
{
"epoch": 1.6459330143540671,
"grad_norm": 0.5595565438270569,
"learning_rate": 4.2108268071694616e-06,
"loss": 1.5702,
"step": 172
},
{
"epoch": 1.6555023923444976,
"grad_norm": 0.5791851282119751,
"learning_rate": 3.9203507946816445e-06,
"loss": 1.505,
"step": 173
},
{
"epoch": 1.6650717703349283,
"grad_norm": 0.6414560079574585,
"learning_rate": 3.6397385980335e-06,
"loss": 1.6376,
"step": 174
},
{
"epoch": 1.674641148325359,
"grad_norm": 0.6914544105529785,
"learning_rate": 3.3690669337977e-06,
"loss": 1.7964,
"step": 175
},
{
"epoch": 1.6842105263157894,
"grad_norm": 0.7928903698921204,
"learning_rate": 3.1084098009046106e-06,
"loss": 1.8557,
"step": 176
},
{
"epoch": 1.69377990430622,
"grad_norm": 0.8021854162216187,
"learning_rate": 2.8578384604117217e-06,
"loss": 1.6362,
"step": 177
},
{
"epoch": 1.7033492822966507,
"grad_norm": 0.7553149461746216,
"learning_rate": 2.6174214160215704e-06,
"loss": 1.342,
"step": 178
},
{
"epoch": 1.7129186602870812,
"grad_norm": 0.9696720242500305,
"learning_rate": 2.3872243953535535e-06,
"loss": 1.7634,
"step": 179
},
{
"epoch": 1.722488038277512,
"grad_norm": 1.1475272178649902,
"learning_rate": 2.1673103319746146e-06,
"loss": 1.7356,
"step": 180
},
{
"epoch": 1.7320574162679425,
"grad_norm": 1.3815126419067383,
"learning_rate": 1.957739348193859e-06,
"loss": 1.6326,
"step": 181
},
{
"epoch": 1.7416267942583732,
"grad_norm": 2.003146171569824,
"learning_rate": 1.7585687386256944e-06,
"loss": 1.5472,
"step": 182
},
{
"epoch": 1.7511961722488039,
"grad_norm": 1.2215324640274048,
"learning_rate": 1.5698529545260744e-06,
"loss": 1.1225,
"step": 183
},
{
"epoch": 1.7607655502392343,
"grad_norm": 0.32720524072647095,
"learning_rate": 1.3916435889060575e-06,
"loss": 1.4342,
"step": 184
},
{
"epoch": 1.7703349282296652,
"grad_norm": 0.34399574995040894,
"learning_rate": 1.2239893624267852e-06,
"loss": 1.512,
"step": 185
},
{
"epoch": 1.7799043062200957,
"grad_norm": 0.3406524956226349,
"learning_rate": 1.0669361100797704e-06,
"loss": 1.2191,
"step": 186
},
{
"epoch": 1.7894736842105263,
"grad_norm": 0.3624950349330902,
"learning_rate": 9.205267686560293e-07,
"loss": 1.4136,
"step": 187
},
{
"epoch": 1.799043062200957,
"grad_norm": 0.4109777808189392,
"learning_rate": 7.848013650076258e-07,
"loss": 1.5427,
"step": 188
},
{
"epoch": 1.8086124401913874,
"grad_norm": 0.39658841490745544,
"learning_rate": 6.597970051047053e-07,
"loss": 1.3736,
"step": 189
},
{
"epoch": 1.8181818181818183,
"grad_norm": 0.41146281361579895,
"learning_rate": 5.455478638911071e-07,
"loss": 1.445,
"step": 190
},
{
"epoch": 1.8277511961722488,
"grad_norm": 0.410113126039505,
"learning_rate": 4.420851759412603e-07,
"loss": 1.2919,
"step": 191
},
{
"epoch": 1.8373205741626795,
"grad_norm": 0.43137213587760925,
"learning_rate": 3.4943722692099224e-07,
"loss": 1.3197,
"step": 192
},
{
"epoch": 1.8468899521531101,
"grad_norm": 0.43355488777160645,
"learning_rate": 2.676293458544743e-07,
"loss": 1.3578,
"step": 193
},
{
"epoch": 1.8564593301435406,
"grad_norm": 0.4816869795322418,
"learning_rate": 1.9668389819954338e-07,
"loss": 1.4214,
"step": 194
},
{
"epoch": 1.8660287081339713,
"grad_norm": 0.48422351479530334,
"learning_rate": 1.3662027973320614e-07,
"loss": 1.3847,
"step": 195
},
{
"epoch": 1.875598086124402,
"grad_norm": 0.522202730178833,
"learning_rate": 8.745491124901861e-08,
"loss": 1.5238,
"step": 196
},
{
"epoch": 1.8851674641148324,
"grad_norm": 0.5350428223609924,
"learning_rate": 4.920123406781052e-08,
"loss": 1.5597,
"step": 197
},
{
"epoch": 1.8947368421052633,
"grad_norm": 0.5734149813652039,
"learning_rate": 2.1869706362958044e-08,
"loss": 1.5932,
"step": 198
},
{
"epoch": 1.9043062200956937,
"grad_norm": 0.6013189554214478,
"learning_rate": 5.467800301239834e-09,
"loss": 1.6291,
"step": 199
},
{
"epoch": 1.9138755980861244,
"grad_norm": 0.6234821677207947,
"learning_rate": 0.0,
"loss": 1.4807,
"step": 200
},
{
"epoch": 1.9138755980861244,
"eval_loss": 1.7030987739562988,
"eval_runtime": 5.7485,
"eval_samples_per_second": 30.79,
"eval_steps_per_second": 7.828,
"step": 200
}
],
"logging_steps": 1,
"max_steps": 200,
"num_input_tokens_seen": 0,
"num_train_epochs": 2,
"save_steps": 50,
"stateful_callbacks": {
"EarlyStoppingCallback": {
"args": {
"early_stopping_patience": 4,
"early_stopping_threshold": 0.0
},
"attributes": {
"early_stopping_patience_counter": 1
}
},
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 5.57896697905152e+16,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}