{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.994236311239193,
"eval_steps": 20,
"global_step": 346,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.011527377521613832,
"grad_norm": 0.0,
"learning_rate": 0.0,
"loss": 0.7662,
"step": 2
},
{
"epoch": 0.023054755043227664,
"grad_norm": 0.0,
"learning_rate": 0.0,
"loss": 0.7773,
"step": 4
},
{
"epoch": 0.0345821325648415,
"grad_norm": 0.0,
"learning_rate": 0.0,
"loss": 0.7593,
"step": 6
},
{
"epoch": 0.04610951008645533,
"grad_norm": 0.0,
"learning_rate": 0.0,
"loss": 0.795,
"step": 8
},
{
"epoch": 0.05763688760806916,
"grad_norm": 0.0,
"learning_rate": 0.0,
"loss": 0.7765,
"step": 10
},
{
"epoch": 0.069164265129683,
"grad_norm": 0.0,
"learning_rate": 0.0,
"loss": 0.7819,
"step": 12
},
{
"epoch": 0.08069164265129683,
"grad_norm": 1.0932377576828003,
"learning_rate": 2.5e-06,
"loss": 0.7426,
"step": 14
},
{
"epoch": 0.09221902017291066,
"grad_norm": 1.0143316984176636,
"learning_rate": 7.5e-06,
"loss": 0.7667,
"step": 16
},
{
"epoch": 0.1037463976945245,
"grad_norm": 0.5817523002624512,
"learning_rate": 1.25e-05,
"loss": 0.6819,
"step": 18
},
{
"epoch": 0.11527377521613832,
"grad_norm": 0.7819337248802185,
"learning_rate": 1.5e-05,
"loss": 0.6171,
"step": 20
},
{
"epoch": 0.11527377521613832,
"eval_loss": 0.5882565379142761,
"eval_runtime": 219.1309,
"eval_samples_per_second": 21.325,
"eval_steps_per_second": 0.169,
"step": 20
},
{
"epoch": 0.12680115273775217,
"grad_norm": 0.4540252089500427,
"learning_rate": 2e-05,
"loss": 0.5573,
"step": 22
},
{
"epoch": 0.138328530259366,
"grad_norm": 0.31643402576446533,
"learning_rate": 2.5e-05,
"loss": 0.5615,
"step": 24
},
{
"epoch": 0.14985590778097982,
"grad_norm": 0.2575995922088623,
"learning_rate": 3e-05,
"loss": 0.5225,
"step": 26
},
{
"epoch": 0.16138328530259366,
"grad_norm": 0.1953422576189041,
"learning_rate": 3.5e-05,
"loss": 0.5141,
"step": 28
},
{
"epoch": 0.1729106628242075,
"grad_norm": 0.17974254488945007,
"learning_rate": 4e-05,
"loss": 0.508,
"step": 30
},
{
"epoch": 0.1844380403458213,
"grad_norm": 0.16816851496696472,
"learning_rate": 4.5e-05,
"loss": 0.4763,
"step": 32
},
{
"epoch": 0.19596541786743515,
"grad_norm": 0.16267944872379303,
"learning_rate": 5e-05,
"loss": 0.4757,
"step": 34
},
{
"epoch": 0.207492795389049,
"grad_norm": 0.1356152594089508,
"learning_rate": 4.9995821084255035e-05,
"loss": 0.4714,
"step": 36
},
{
"epoch": 0.21902017291066284,
"grad_norm": 0.12293241173028946,
"learning_rate": 4.998328588931674e-05,
"loss": 0.4777,
"step": 38
},
{
"epoch": 0.23054755043227665,
"grad_norm": 0.12494528293609619,
"learning_rate": 4.996239907149833e-05,
"loss": 0.4649,
"step": 40
},
{
"epoch": 0.23054755043227665,
"eval_loss": 0.45945191383361816,
"eval_runtime": 163.1604,
"eval_samples_per_second": 28.641,
"eval_steps_per_second": 0.227,
"step": 40
},
{
"epoch": 0.2420749279538905,
"grad_norm": 0.11425463855266571,
"learning_rate": 4.993316838939995e-05,
"loss": 0.4508,
"step": 42
},
{
"epoch": 0.25360230547550433,
"grad_norm": 0.11498431116342545,
"learning_rate": 4.9895604701026735e-05,
"loss": 0.4433,
"step": 44
},
{
"epoch": 0.26512968299711814,
"grad_norm": 0.12802694737911224,
"learning_rate": 4.9849721959755464e-05,
"loss": 0.451,
"step": 46
},
{
"epoch": 0.276657060518732,
"grad_norm": 0.11723160743713379,
"learning_rate": 4.979553720915145e-05,
"loss": 0.4344,
"step": 48
},
{
"epoch": 0.2881844380403458,
"grad_norm": 0.11519418656826019,
"learning_rate": 4.9733070576637606e-05,
"loss": 0.4464,
"step": 50
},
{
"epoch": 0.29971181556195964,
"grad_norm": 0.1040327250957489,
"learning_rate": 4.9662345266017836e-05,
"loss": 0.4367,
"step": 52
},
{
"epoch": 0.3112391930835735,
"grad_norm": 0.10918751358985901,
"learning_rate": 4.9583387548857836e-05,
"loss": 0.4411,
"step": 54
},
{
"epoch": 0.3227665706051873,
"grad_norm": 0.11520207673311234,
"learning_rate": 4.949622675472627e-05,
"loss": 0.4119,
"step": 56
},
{
"epoch": 0.33429394812680113,
"grad_norm": 0.11275804787874222,
"learning_rate": 4.940089526030003e-05,
"loss": 0.4387,
"step": 58
},
{
"epoch": 0.345821325648415,
"grad_norm": 0.11030508577823639,
"learning_rate": 4.92974284773376e-05,
"loss": 0.4265,
"step": 60
},
{
"epoch": 0.345821325648415,
"eval_loss": 0.4273378252983093,
"eval_runtime": 163.6226,
"eval_samples_per_second": 28.56,
"eval_steps_per_second": 0.226,
"step": 60
},
{
"epoch": 0.3573487031700288,
"grad_norm": 0.1050853505730629,
"learning_rate": 4.9185864839525065e-05,
"loss": 0.4217,
"step": 62
},
{
"epoch": 0.3688760806916426,
"grad_norm": 0.1106187105178833,
"learning_rate": 4.906624578819954e-05,
"loss": 0.4244,
"step": 64
},
{
"epoch": 0.3804034582132565,
"grad_norm": 0.1203250139951706,
"learning_rate": 4.893861575695544e-05,
"loss": 0.4161,
"step": 66
},
{
"epoch": 0.3919308357348703,
"grad_norm": 0.13240748643875122,
"learning_rate": 4.880302215513915e-05,
"loss": 0.4205,
"step": 68
},
{
"epoch": 0.4034582132564842,
"grad_norm": 0.11961999535560608,
"learning_rate": 4.865951535023847e-05,
"loss": 0.4152,
"step": 70
},
{
"epoch": 0.414985590778098,
"grad_norm": 0.10617737472057343,
"learning_rate": 4.850814864917309e-05,
"loss": 0.4151,
"step": 72
},
{
"epoch": 0.4265129682997118,
"grad_norm": 0.1053657978773117,
"learning_rate": 4.834897827849325e-05,
"loss": 0.4286,
"step": 74
},
{
"epoch": 0.43804034582132567,
"grad_norm": 0.1263289898633957,
"learning_rate": 4.818206336349391e-05,
"loss": 0.4206,
"step": 76
},
{
"epoch": 0.4495677233429395,
"grad_norm": 0.11088111996650696,
"learning_rate": 4.8007465906252065e-05,
"loss": 0.4127,
"step": 78
},
{
"epoch": 0.4610951008645533,
"grad_norm": 0.10416624695062637,
"learning_rate": 4.782525076259556e-05,
"loss": 0.3924,
"step": 80
},
{
"epoch": 0.4610951008645533,
"eval_loss": 0.41243475675582886,
"eval_runtime": 163.0434,
"eval_samples_per_second": 28.661,
"eval_steps_per_second": 0.227,
"step": 80
},
{
"epoch": 0.47262247838616717,
"grad_norm": 0.109275221824646,
"learning_rate": 4.763548561801175e-05,
"loss": 0.418,
"step": 82
},
{
"epoch": 0.484149855907781,
"grad_norm": 0.11440867185592651,
"learning_rate": 4.743824096250513e-05,
"loss": 0.4054,
"step": 84
},
{
"epoch": 0.4956772334293948,
"grad_norm": 0.11359245330095291,
"learning_rate": 4.723359006441316e-05,
"loss": 0.3969,
"step": 86
},
{
"epoch": 0.5072046109510087,
"grad_norm": 0.11708330363035202,
"learning_rate": 4.7021608943190105e-05,
"loss": 0.4052,
"step": 88
},
{
"epoch": 0.5187319884726225,
"grad_norm": 0.1086309477686882,
"learning_rate": 4.680237634116884e-05,
"loss": 0.4055,
"step": 90
},
{
"epoch": 0.5302593659942363,
"grad_norm": 0.1133558601140976,
"learning_rate": 4.6575973694311364e-05,
"loss": 0.4016,
"step": 92
},
{
"epoch": 0.5417867435158501,
"grad_norm": 0.11118675768375397,
"learning_rate": 4.6342485101958634e-05,
"loss": 0.4137,
"step": 94
},
{
"epoch": 0.553314121037464,
"grad_norm": 0.1130242571234703,
"learning_rate": 4.610199729559106e-05,
"loss": 0.4125,
"step": 96
},
{
"epoch": 0.5648414985590778,
"grad_norm": 0.10623504966497421,
"learning_rate": 4.585459960661137e-05,
"loss": 0.3989,
"step": 98
},
{
"epoch": 0.5763688760806917,
"grad_norm": 0.10171157866716385,
"learning_rate": 4.5600383933161586e-05,
"loss": 0.4099,
"step": 100
},
{
"epoch": 0.5763688760806917,
"eval_loss": 0.40298977494239807,
"eval_runtime": 163.0144,
"eval_samples_per_second": 28.666,
"eval_steps_per_second": 0.227,
"step": 100
},
{
"epoch": 0.5878962536023055,
"grad_norm": 0.1252756267786026,
"learning_rate": 4.5339444705986636e-05,
"loss": 0.4047,
"step": 102
},
{
"epoch": 0.5994236311239193,
"grad_norm": 0.12425834685564041,
"learning_rate": 4.507187885335716e-05,
"loss": 0.4044,
"step": 104
},
{
"epoch": 0.6109510086455331,
"grad_norm": 0.12188662588596344,
"learning_rate": 4.479778576506467e-05,
"loss": 0.3985,
"step": 106
},
{
"epoch": 0.622478386167147,
"grad_norm": 0.1297188252210617,
"learning_rate": 4.451726725550226e-05,
"loss": 0.4105,
"step": 108
},
{
"epoch": 0.6340057636887608,
"grad_norm": 0.1405544877052307,
"learning_rate": 4.423042752584471e-05,
"loss": 0.3943,
"step": 110
},
{
"epoch": 0.6455331412103746,
"grad_norm": 0.1427697390317917,
"learning_rate": 4.393737312534203e-05,
"loss": 0.3929,
"step": 112
},
{
"epoch": 0.6570605187319885,
"grad_norm": 0.12234554439783096,
"learning_rate": 4.363821291174073e-05,
"loss": 0.385,
"step": 114
},
{
"epoch": 0.6685878962536023,
"grad_norm": 0.11913135647773743,
"learning_rate": 4.3333058010847665e-05,
"loss": 0.3944,
"step": 116
},
{
"epoch": 0.6801152737752162,
"grad_norm": 0.10236409306526184,
"learning_rate": 4.302202177525126e-05,
"loss": 0.4088,
"step": 118
},
{
"epoch": 0.69164265129683,
"grad_norm": 0.1148187592625618,
"learning_rate": 4.270521974221572e-05,
"loss": 0.3925,
"step": 120
},
{
"epoch": 0.69164265129683,
"eval_loss": 0.39593446254730225,
"eval_runtime": 163.197,
"eval_samples_per_second": 28.634,
"eval_steps_per_second": 0.227,
"step": 120
},
{
"epoch": 0.7031700288184438,
"grad_norm": 0.1020105630159378,
"learning_rate": 4.238276959076362e-05,
"loss": 0.3959,
"step": 122
},
{
"epoch": 0.7146974063400576,
"grad_norm": 0.09900052100419998,
"learning_rate": 4.205479109796293e-05,
"loss": 0.3938,
"step": 124
},
{
"epoch": 0.7262247838616714,
"grad_norm": 0.11457400768995285,
"learning_rate": 4.172140609443478e-05,
"loss": 0.3941,
"step": 126
},
{
"epoch": 0.7377521613832853,
"grad_norm": 0.12308034300804138,
"learning_rate": 4.138273841909831e-05,
"loss": 0.3773,
"step": 128
},
{
"epoch": 0.7492795389048992,
"grad_norm": 0.11765991151332855,
"learning_rate": 4.1038913873169606e-05,
"loss": 0.3908,
"step": 130
},
{
"epoch": 0.760806916426513,
"grad_norm": 0.11604655534029007,
"learning_rate": 4.069006017343161e-05,
"loss": 0.4024,
"step": 132
},
{
"epoch": 0.7723342939481268,
"grad_norm": 0.10507753491401672,
"learning_rate": 4.033630690479266e-05,
"loss": 0.3874,
"step": 134
},
{
"epoch": 0.7838616714697406,
"grad_norm": 0.11194345355033875,
"learning_rate": 3.99777854721509e-05,
"loss": 0.3902,
"step": 136
},
{
"epoch": 0.7953890489913544,
"grad_norm": 0.10498375445604324,
"learning_rate": 3.961462905158275e-05,
"loss": 0.3922,
"step": 138
},
{
"epoch": 0.8069164265129684,
"grad_norm": 0.10028143227100372,
"learning_rate": 3.924697254087344e-05,
"loss": 0.3884,
"step": 140
},
{
"epoch": 0.8069164265129684,
"eval_loss": 0.3904818892478943,
"eval_runtime": 165.7335,
"eval_samples_per_second": 28.196,
"eval_steps_per_second": 0.223,
"step": 140
},
{
"epoch": 0.8184438040345822,
"grad_norm": 0.10579356551170349,
"learning_rate": 3.887495250940798e-05,
"loss": 0.3945,
"step": 142
},
{
"epoch": 0.829971181556196,
"grad_norm": 0.10865680873394012,
"learning_rate": 3.8498707147441186e-05,
"loss": 0.396,
"step": 144
},
{
"epoch": 0.8414985590778098,
"grad_norm": 0.10962694138288498,
"learning_rate": 3.8118376214765724e-05,
"loss": 0.3925,
"step": 146
},
{
"epoch": 0.8530259365994236,
"grad_norm": 0.14297883212566376,
"learning_rate": 3.773410098879694e-05,
"loss": 0.3862,
"step": 148
},
{
"epoch": 0.8645533141210374,
"grad_norm": 0.12033689022064209,
"learning_rate": 3.734602421209414e-05,
"loss": 0.3923,
"step": 150
},
{
"epoch": 0.8760806916426513,
"grad_norm": 0.11992188543081284,
"learning_rate": 3.6954290039337544e-05,
"loss": 0.3864,
"step": 152
},
{
"epoch": 0.8876080691642652,
"grad_norm": 0.1202910915017128,
"learning_rate": 3.6559043983780695e-05,
"loss": 0.3965,
"step": 154
},
{
"epoch": 0.899135446685879,
"grad_norm": 0.1139882281422615,
"learning_rate": 3.6160432863198227e-05,
"loss": 0.3867,
"step": 156
},
{
"epoch": 0.9106628242074928,
"grad_norm": 0.10837512463331223,
"learning_rate": 3.575860474534907e-05,
"loss": 0.3734,
"step": 158
},
{
"epoch": 0.9221902017291066,
"grad_norm": 0.11608846485614777,
"learning_rate": 3.535370889297532e-05,
"loss": 0.3856,
"step": 160
},
{
"epoch": 0.9221902017291066,
"eval_loss": 0.3864249289035797,
"eval_runtime": 162.9187,
"eval_samples_per_second": 28.683,
"eval_steps_per_second": 0.227,
"step": 160
},
{
"epoch": 0.9337175792507204,
"grad_norm": 0.11061898618936539,
"learning_rate": 3.494589570835719e-05,
"loss": 0.3874,
"step": 162
},
{
"epoch": 0.9452449567723343,
"grad_norm": 0.10494677722454071,
"learning_rate": 3.4535316677444745e-05,
"loss": 0.3862,
"step": 164
},
{
"epoch": 0.9567723342939481,
"grad_norm": 0.11414439976215363,
"learning_rate": 3.412212431358704e-05,
"loss": 0.382,
"step": 166
},
{
"epoch": 0.968299711815562,
"grad_norm": 0.12404631823301315,
"learning_rate": 3.3706472100879635e-05,
"loss": 0.3893,
"step": 168
},
{
"epoch": 0.9798270893371758,
"grad_norm": 0.11317316442728043,
"learning_rate": 3.3288514437151505e-05,
"loss": 0.379,
"step": 170
},
{
"epoch": 0.9913544668587896,
"grad_norm": 0.11200971901416779,
"learning_rate": 3.286840657661259e-05,
"loss": 0.3829,
"step": 172
},
{
"epoch": 1.0028818443804035,
"grad_norm": 0.11575999855995178,
"learning_rate": 3.2446304572183155e-05,
"loss": 0.3757,
"step": 174
},
{
"epoch": 1.0144092219020173,
"grad_norm": 0.1118895411491394,
"learning_rate": 3.2022365217526515e-05,
"loss": 0.3487,
"step": 176
},
{
"epoch": 1.0259365994236311,
"grad_norm": 0.11941341310739517,
"learning_rate": 3.159674598880658e-05,
"loss": 0.3611,
"step": 178
},
{
"epoch": 1.037463976945245,
"grad_norm": 0.12434028834104538,
"learning_rate": 3.116960498619191e-05,
"loss": 0.3527,
"step": 180
},
{
"epoch": 1.037463976945245,
"eval_loss": 0.38433459401130676,
"eval_runtime": 167.462,
"eval_samples_per_second": 27.905,
"eval_steps_per_second": 0.221,
"step": 180
},
{
"epoch": 1.0489913544668588,
"grad_norm": 0.1257391721010208,
"learning_rate": 3.0741100875127956e-05,
"loss": 0.346,
"step": 182
},
{
"epoch": 1.0605187319884726,
"grad_norm": 0.1072888970375061,
"learning_rate": 3.0311392827399266e-05,
"loss": 0.351,
"step": 184
},
{
"epoch": 1.0720461095100864,
"grad_norm": 0.11550486832857132,
"learning_rate": 2.9880640462003766e-05,
"loss": 0.3584,
"step": 186
},
{
"epoch": 1.0835734870317002,
"grad_norm": 0.11017844080924988,
"learning_rate": 2.944900378586073e-05,
"loss": 0.3528,
"step": 188
},
{
"epoch": 1.0951008645533142,
"grad_norm": 0.11351864784955978,
"learning_rate": 2.901664313437478e-05,
"loss": 0.3548,
"step": 190
},
{
"epoch": 1.106628242074928,
"grad_norm": 0.11179786920547485,
"learning_rate": 2.8583719111877844e-05,
"loss": 0.3433,
"step": 192
},
{
"epoch": 1.1181556195965419,
"grad_norm": 0.10140883177518845,
"learning_rate": 2.8150392531971137e-05,
"loss": 0.3524,
"step": 194
},
{
"epoch": 1.1296829971181557,
"grad_norm": 0.10281901806592941,
"learning_rate": 2.7716824357789567e-05,
"loss": 0.3527,
"step": 196
},
{
"epoch": 1.1412103746397695,
"grad_norm": 0.09435153007507324,
"learning_rate": 2.728317564221044e-05,
"loss": 0.3521,
"step": 198
},
{
"epoch": 1.1527377521613833,
"grad_norm": 0.10692735016345978,
"learning_rate": 2.684960746802887e-05,
"loss": 0.3635,
"step": 200
},
{
"epoch": 1.1527377521613833,
"eval_loss": 0.3818141520023346,
"eval_runtime": 163.2711,
"eval_samples_per_second": 28.621,
"eval_steps_per_second": 0.227,
"step": 200
},
{
"epoch": 1.1642651296829971,
"grad_norm": 0.10303134471178055,
"learning_rate": 2.6416280888122165e-05,
"loss": 0.341,
"step": 202
},
{
"epoch": 1.175792507204611,
"grad_norm": 0.1047578901052475,
"learning_rate": 2.5983356865625224e-05,
"loss": 0.3483,
"step": 204
},
{
"epoch": 1.1873198847262247,
"grad_norm": 0.09823092073202133,
"learning_rate": 2.5550996214139283e-05,
"loss": 0.3469,
"step": 206
},
{
"epoch": 1.1988472622478386,
"grad_norm": 0.10456107556819916,
"learning_rate": 2.511935953799624e-05,
"loss": 0.3498,
"step": 208
},
{
"epoch": 1.2103746397694524,
"grad_norm": 0.10154879838228226,
"learning_rate": 2.4688607172600742e-05,
"loss": 0.3494,
"step": 210
},
{
"epoch": 1.2219020172910664,
"grad_norm": 0.10301995277404785,
"learning_rate": 2.4258899124872063e-05,
"loss": 0.344,
"step": 212
},
{
"epoch": 1.23342939481268,
"grad_norm": 0.10198619216680527,
"learning_rate": 2.3830395013808088e-05,
"loss": 0.3453,
"step": 214
},
{
"epoch": 1.244956772334294,
"grad_norm": 0.10618147999048233,
"learning_rate": 2.3403254011193433e-05,
"loss": 0.3492,
"step": 216
},
{
"epoch": 1.2564841498559078,
"grad_norm": 0.10644455999135971,
"learning_rate": 2.2977634782473493e-05,
"loss": 0.3365,
"step": 218
},
{
"epoch": 1.2680115273775217,
"grad_norm": 0.10750509053468704,
"learning_rate": 2.2553695427816847e-05,
"loss": 0.3527,
"step": 220
},
{
"epoch": 1.2680115273775217,
"eval_loss": 0.37958407402038574,
"eval_runtime": 162.0488,
"eval_samples_per_second": 28.837,
"eval_steps_per_second": 0.228,
"step": 220
},
{
"epoch": 1.2795389048991355,
"grad_norm": 0.10969573259353638,
"learning_rate": 2.2131593423387414e-05,
"loss": 0.3388,
"step": 222
},
{
"epoch": 1.2910662824207493,
"grad_norm": 0.11101813614368439,
"learning_rate": 2.1711485562848504e-05,
"loss": 0.3417,
"step": 224
},
{
"epoch": 1.302593659942363,
"grad_norm": 0.10727708041667938,
"learning_rate": 2.1293527899120374e-05,
"loss": 0.3478,
"step": 226
},
{
"epoch": 1.314121037463977,
"grad_norm": 0.11611964553594589,
"learning_rate": 2.0877875686412966e-05,
"loss": 0.34,
"step": 228
},
{
"epoch": 1.3256484149855907,
"grad_norm": 0.11851979792118073,
"learning_rate": 2.0464683322555267e-05,
"loss": 0.3413,
"step": 230
},
{
"epoch": 1.3371757925072045,
"grad_norm": 0.11880774050951004,
"learning_rate": 2.0054104291642818e-05,
"loss": 0.3479,
"step": 232
},
{
"epoch": 1.3487031700288186,
"grad_norm": 0.10287457704544067,
"learning_rate": 1.964629110702469e-05,
"loss": 0.3434,
"step": 234
},
{
"epoch": 1.3602305475504322,
"grad_norm": 0.10474801808595657,
"learning_rate": 1.924139525465093e-05,
"loss": 0.338,
"step": 236
},
{
"epoch": 1.3717579250720462,
"grad_norm": 0.10204192996025085,
"learning_rate": 1.8839567136801772e-05,
"loss": 0.3369,
"step": 238
},
{
"epoch": 1.38328530259366,
"grad_norm": 0.1050105094909668,
"learning_rate": 1.8440956016219314e-05,
"loss": 0.3407,
"step": 240
},
{
"epoch": 1.38328530259366,
"eval_loss": 0.3772360384464264,
"eval_runtime": 161.7846,
"eval_samples_per_second": 28.884,
"eval_steps_per_second": 0.229,
"step": 240
},
{
"epoch": 1.3948126801152738,
"grad_norm": 0.10698170959949493,
"learning_rate": 1.8045709960662465e-05,
"loss": 0.355,
"step": 242
},
{
"epoch": 1.4063400576368876,
"grad_norm": 0.09979532659053802,
"learning_rate": 1.765397578790587e-05,
"loss": 0.3419,
"step": 244
},
{
"epoch": 1.4178674351585014,
"grad_norm": 0.09883076697587967,
"learning_rate": 1.7265899011203065e-05,
"loss": 0.3415,
"step": 246
},
{
"epoch": 1.4293948126801153,
"grad_norm": 0.10558874905109406,
"learning_rate": 1.6881623785234285e-05,
"loss": 0.353,
"step": 248
},
{
"epoch": 1.440922190201729,
"grad_norm": 0.1028595045208931,
"learning_rate": 1.650129285255882e-05,
"loss": 0.3537,
"step": 250
},
{
"epoch": 1.4524495677233429,
"grad_norm": 0.10255461186170578,
"learning_rate": 1.6125047490592034e-05,
"loss": 0.3393,
"step": 252
},
{
"epoch": 1.4639769452449567,
"grad_norm": 0.10520190000534058,
"learning_rate": 1.5753027459126566e-05,
"loss": 0.3469,
"step": 254
},
{
"epoch": 1.4755043227665707,
"grad_norm": 0.10457232594490051,
"learning_rate": 1.5385370948417256e-05,
"loss": 0.3548,
"step": 256
},
{
"epoch": 1.4870317002881843,
"grad_norm": 0.10024940967559814,
"learning_rate": 1.5022214527849105e-05,
"loss": 0.3403,
"step": 258
},
{
"epoch": 1.4985590778097984,
"grad_norm": 0.09729419648647308,
"learning_rate": 1.4663693095207343e-05,
"loss": 0.3372,
"step": 260
},
{
"epoch": 1.4985590778097984,
"eval_loss": 0.3750576674938202,
"eval_runtime": 161.7883,
"eval_samples_per_second": 28.883,
"eval_steps_per_second": 0.229,
"step": 260
},
{
"epoch": 1.510086455331412,
"grad_norm": 0.09922486543655396,
"learning_rate": 1.43099398265684e-05,
"loss": 0.329,
"step": 262
},
{
"epoch": 1.521613832853026,
"grad_norm": 0.10505367070436478,
"learning_rate": 1.3961086126830403e-05,
"loss": 0.3404,
"step": 264
},
{
"epoch": 1.5331412103746398,
"grad_norm": 0.10513550788164139,
"learning_rate": 1.3617261580901691e-05,
"loss": 0.3436,
"step": 266
},
{
"epoch": 1.5446685878962536,
"grad_norm": 0.10580945760011673,
"learning_rate": 1.3278593905565224e-05,
"loss": 0.3448,
"step": 268
},
{
"epoch": 1.5561959654178674,
"grad_norm": 0.0965629443526268,
"learning_rate": 1.2945208902037071e-05,
"loss": 0.3323,
"step": 270
},
{
"epoch": 1.5677233429394812,
"grad_norm": 0.1028643250465393,
"learning_rate": 1.261723040923638e-05,
"loss": 0.341,
"step": 272
},
{
"epoch": 1.579250720461095,
"grad_norm": 0.10219055414199829,
"learning_rate": 1.2294780257784277e-05,
"loss": 0.3337,
"step": 274
},
{
"epoch": 1.5907780979827089,
"grad_norm": 0.10359715670347214,
"learning_rate": 1.1977978224748735e-05,
"loss": 0.3388,
"step": 276
},
{
"epoch": 1.602305475504323,
"grad_norm": 0.09491585195064545,
"learning_rate": 1.166694198915234e-05,
"loss": 0.3404,
"step": 278
},
{
"epoch": 1.6138328530259365,
"grad_norm": 0.10194993764162064,
"learning_rate": 1.1361787088259273e-05,
"loss": 0.3453,
"step": 280
},
{
"epoch": 1.6138328530259365,
"eval_loss": 0.3727053105831146,
"eval_runtime": 161.5061,
"eval_samples_per_second": 28.934,
"eval_steps_per_second": 0.229,
"step": 280
},
{
"epoch": 1.6253602305475505,
"grad_norm": 0.09746123105287552,
"learning_rate": 1.1062626874657977e-05,
"loss": 0.3372,
"step": 282
},
{
"epoch": 1.6368876080691641,
"grad_norm": 0.1048843190073967,
"learning_rate": 1.0769572474155296e-05,
"loss": 0.3415,
"step": 284
},
{
"epoch": 1.6484149855907781,
"grad_norm": 0.10008594393730164,
"learning_rate": 1.0482732744497742e-05,
"loss": 0.3332,
"step": 286
},
{
"epoch": 1.659942363112392,
"grad_norm": 0.09605535864830017,
"learning_rate": 1.020221423493533e-05,
"loss": 0.3355,
"step": 288
},
{
"epoch": 1.6714697406340058,
"grad_norm": 0.0957765281200409,
"learning_rate": 9.928121146642841e-06,
"loss": 0.3511,
"step": 290
},
{
"epoch": 1.6829971181556196,
"grad_norm": 0.10064635425806046,
"learning_rate": 9.660555294013373e-06,
"loss": 0.3462,
"step": 292
},
{
"epoch": 1.6945244956772334,
"grad_norm": 0.09759914875030518,
"learning_rate": 9.399616066838415e-06,
"loss": 0.3476,
"step": 294
},
{
"epoch": 1.7060518731988472,
"grad_norm": 0.1018439456820488,
"learning_rate": 9.145400393388629e-06,
"loss": 0.3542,
"step": 296
},
{
"epoch": 1.717579250720461,
"grad_norm": 0.10098982602357864,
"learning_rate": 8.898002704408944e-06,
"loss": 0.3474,
"step": 298
},
{
"epoch": 1.729106628242075,
"grad_norm": 0.09507515281438828,
"learning_rate": 8.657514898041381e-06,
"loss": 0.3408,
"step": 300
},
{
"epoch": 1.729106628242075,
"eval_loss": 0.37131258845329285,
"eval_runtime": 161.848,
"eval_samples_per_second": 28.873,
"eval_steps_per_second": 0.229,
"step": 300
},
{
"epoch": 1.7406340057636887,
"grad_norm": 0.09314768761396408,
"learning_rate": 8.424026305688643e-06,
"loss": 0.341,
"step": 302
},
{
"epoch": 1.7521613832853027,
"grad_norm": 0.09930098056793213,
"learning_rate": 8.197623658831165e-06,
"loss": 0.3397,
"step": 304
},
{
"epoch": 1.7636887608069163,
"grad_norm": 0.10165336728096008,
"learning_rate": 7.978391056809904e-06,
"loss": 0.3419,
"step": 306
},
{
"epoch": 1.7752161383285303,
"grad_norm": 0.09817034006118774,
"learning_rate": 7.766409935586837e-06,
"loss": 0.3456,
"step": 308
},
{
"epoch": 1.7867435158501441,
"grad_norm": 0.10100841522216797,
"learning_rate": 7.5617590374948745e-06,
"loss": 0.3464,
"step": 310
},
{
"epoch": 1.798270893371758,
"grad_norm": 0.10015368461608887,
"learning_rate": 7.364514381988254e-06,
"loss": 0.3402,
"step": 312
},
{
"epoch": 1.8097982708933718,
"grad_norm": 0.10384070128202438,
"learning_rate": 7.174749237404444e-06,
"loss": 0.3312,
"step": 314
},
{
"epoch": 1.8213256484149856,
"grad_norm": 0.10439052432775497,
"learning_rate": 6.992534093747942e-06,
"loss": 0.3333,
"step": 316
},
{
"epoch": 1.8328530259365994,
"grad_norm": 0.09687253832817078,
"learning_rate": 6.8179366365061e-06,
"loss": 0.3358,
"step": 318
},
{
"epoch": 1.8443804034582132,
"grad_norm": 0.10308189690113068,
"learning_rate": 6.651021721506756e-06,
"loss": 0.3342,
"step": 320
},
{
"epoch": 1.8443804034582132,
"eval_loss": 0.3700437545776367,
"eval_runtime": 161.117,
"eval_samples_per_second": 29.004,
"eval_steps_per_second": 0.23,
"step": 320
},
{
"epoch": 1.8559077809798272,
"grad_norm": 0.09917671233415604,
"learning_rate": 6.491851350826915e-06,
"loss": 0.35,
"step": 322
},
{
"epoch": 1.8674351585014408,
"grad_norm": 0.09794910252094269,
"learning_rate": 6.340484649761529e-06,
"loss": 0.3382,
"step": 324
},
{
"epoch": 1.8789625360230549,
"grad_norm": 0.10062103718519211,
"learning_rate": 6.196977844860848e-06,
"loss": 0.3313,
"step": 326
},
{
"epoch": 1.8904899135446684,
"grad_norm": 0.09653456509113312,
"learning_rate": 6.061384243044564e-06,
"loss": 0.3364,
"step": 328
},
{
"epoch": 1.9020172910662825,
"grad_norm": 0.09764689952135086,
"learning_rate": 5.933754211800459e-06,
"loss": 0.332,
"step": 330
},
{
"epoch": 1.9135446685878963,
"grad_norm": 0.09577332437038422,
"learning_rate": 5.814135160474937e-06,
"loss": 0.339,
"step": 332
},
{
"epoch": 1.92507204610951,
"grad_norm": 0.0969650149345398,
"learning_rate": 5.7025715226624036e-06,
"loss": 0.3301,
"step": 334
},
{
"epoch": 1.936599423631124,
"grad_norm": 0.09393922984600067,
"learning_rate": 5.5991047396999735e-06,
"loss": 0.3374,
"step": 336
},
{
"epoch": 1.9481268011527377,
"grad_norm": 0.11683030426502228,
"learning_rate": 5.503773245273734e-06,
"loss": 0.3424,
"step": 338
},
{
"epoch": 1.9596541786743515,
"grad_norm": 0.09428226202726364,
"learning_rate": 5.41661245114217e-06,
"loss": 0.332,
"step": 340
},
{
"epoch": 1.9596541786743515,
"eval_loss": 0.36898180842399597,
"eval_runtime": 161.0051,
"eval_samples_per_second": 29.024,
"eval_steps_per_second": 0.23,
"step": 340
},
{
"epoch": 1.9711815561959654,
"grad_norm": 0.09828232228755951,
"learning_rate": 5.337654733982173e-06,
"loss": 0.3386,
"step": 342
},
{
"epoch": 1.9827089337175794,
"grad_norm": 0.09727545082569122,
"learning_rate": 5.2669294233624e-06,
"loss": 0.331,
"step": 344
},
{
"epoch": 1.994236311239193,
"grad_norm": 0.09713173657655716,
"learning_rate": 5.204462790848549e-06,
"loss": 0.3478,
"step": 346
},
{
"epoch": 1.994236311239193,
"step": 346,
"total_flos": 388981471576064.0,
"train_loss": 0.3994986457976303,
"train_runtime": 42730.5756,
"train_samples_per_second": 4.155,
"train_steps_per_second": 0.008
}
],
"logging_steps": 2,
"max_steps": 346,
"num_input_tokens_seen": 0,
"num_train_epochs": 2,
"save_steps": 1000,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 388981471576064.0,
"train_batch_size": 16,
"trial_name": null,
"trial_params": null
}