{
"best_metric": 1.7265242338180542,
"best_model_checkpoint": "miner_id_24/checkpoint-200",
"epoch": 0.04020706639191838,
"eval_steps": 50,
"global_step": 200,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0002010353319595919,
"grad_norm": 0.7243201732635498,
"learning_rate": 1e-05,
"loss": 1.9001,
"step": 1
},
{
"epoch": 0.0002010353319595919,
"eval_loss": 2.11344051361084,
"eval_runtime": 171.9074,
"eval_samples_per_second": 48.736,
"eval_steps_per_second": 12.187,
"step": 1
},
{
"epoch": 0.0004020706639191838,
"grad_norm": 0.9003947377204895,
"learning_rate": 2e-05,
"loss": 1.8396,
"step": 2
},
{
"epoch": 0.0006031059958787757,
"grad_norm": 0.9272552728652954,
"learning_rate": 3e-05,
"loss": 1.8755,
"step": 3
},
{
"epoch": 0.0008041413278383676,
"grad_norm": 1.0478748083114624,
"learning_rate": 4e-05,
"loss": 1.7778,
"step": 4
},
{
"epoch": 0.0010051766597979595,
"grad_norm": 0.9338437914848328,
"learning_rate": 5e-05,
"loss": 1.8533,
"step": 5
},
{
"epoch": 0.0012062119917575513,
"grad_norm": 0.9951044321060181,
"learning_rate": 6e-05,
"loss": 1.8048,
"step": 6
},
{
"epoch": 0.0014072473237171433,
"grad_norm": 0.8657851219177246,
"learning_rate": 7e-05,
"loss": 1.8549,
"step": 7
},
{
"epoch": 0.0016082826556767353,
"grad_norm": 0.7345765233039856,
"learning_rate": 8e-05,
"loss": 1.8697,
"step": 8
},
{
"epoch": 0.001809317987636327,
"grad_norm": 0.9524029493331909,
"learning_rate": 9e-05,
"loss": 1.8195,
"step": 9
},
{
"epoch": 0.002010353319595919,
"grad_norm": 0.9118521809577942,
"learning_rate": 0.0001,
"loss": 1.8523,
"step": 10
},
{
"epoch": 0.002211388651555511,
"grad_norm": 0.8495756387710571,
"learning_rate": 9.999316524962345e-05,
"loss": 1.816,
"step": 11
},
{
"epoch": 0.0024124239835151026,
"grad_norm": 0.7486765384674072,
"learning_rate": 9.997266286704631e-05,
"loss": 1.7435,
"step": 12
},
{
"epoch": 0.002613459315474695,
"grad_norm": 0.871289849281311,
"learning_rate": 9.993849845741524e-05,
"loss": 1.8233,
"step": 13
},
{
"epoch": 0.0028144946474342866,
"grad_norm": 0.7088490724563599,
"learning_rate": 9.989068136093873e-05,
"loss": 1.7216,
"step": 14
},
{
"epoch": 0.0030155299793938784,
"grad_norm": 0.8408116698265076,
"learning_rate": 9.98292246503335e-05,
"loss": 1.8027,
"step": 15
},
{
"epoch": 0.0032165653113534706,
"grad_norm": 0.7949049472808838,
"learning_rate": 9.975414512725057e-05,
"loss": 1.835,
"step": 16
},
{
"epoch": 0.0034176006433130623,
"grad_norm": 0.9442782402038574,
"learning_rate": 9.966546331768191e-05,
"loss": 1.9566,
"step": 17
},
{
"epoch": 0.003618635975272654,
"grad_norm": 0.7910245656967163,
"learning_rate": 9.956320346634876e-05,
"loss": 1.7318,
"step": 18
},
{
"epoch": 0.003819671307232246,
"grad_norm": 0.8084377646446228,
"learning_rate": 9.944739353007344e-05,
"loss": 1.7165,
"step": 19
},
{
"epoch": 0.004020706639191838,
"grad_norm": 0.8121070861816406,
"learning_rate": 9.931806517013612e-05,
"loss": 1.9654,
"step": 20
},
{
"epoch": 0.0042217419711514294,
"grad_norm": 0.9022719264030457,
"learning_rate": 9.917525374361912e-05,
"loss": 1.8507,
"step": 21
},
{
"epoch": 0.004422777303111022,
"grad_norm": 0.7618189454078674,
"learning_rate": 9.901899829374047e-05,
"loss": 1.7183,
"step": 22
},
{
"epoch": 0.004623812635070614,
"grad_norm": 0.6915072202682495,
"learning_rate": 9.884934153917997e-05,
"loss": 1.8579,
"step": 23
},
{
"epoch": 0.004824847967030205,
"grad_norm": 0.7315471768379211,
"learning_rate": 9.86663298624003e-05,
"loss": 1.8386,
"step": 24
},
{
"epoch": 0.005025883298989797,
"grad_norm": 0.7275452017784119,
"learning_rate": 9.847001329696653e-05,
"loss": 1.772,
"step": 25
},
{
"epoch": 0.00522691863094939,
"grad_norm": 0.7768784761428833,
"learning_rate": 9.826044551386744e-05,
"loss": 1.7954,
"step": 26
},
{
"epoch": 0.005427953962908981,
"grad_norm": 0.8576830625534058,
"learning_rate": 9.803768380684242e-05,
"loss": 1.8411,
"step": 27
},
{
"epoch": 0.005628989294868573,
"grad_norm": 0.7678402066230774,
"learning_rate": 9.780178907671789e-05,
"loss": 1.769,
"step": 28
},
{
"epoch": 0.005830024626828165,
"grad_norm": 1.0145021677017212,
"learning_rate": 9.755282581475769e-05,
"loss": 2.0439,
"step": 29
},
{
"epoch": 0.006031059958787757,
"grad_norm": 0.9616433382034302,
"learning_rate": 9.729086208503174e-05,
"loss": 1.8919,
"step": 30
},
{
"epoch": 0.006232095290747349,
"grad_norm": 0.7848714590072632,
"learning_rate": 9.701596950580806e-05,
"loss": 1.8079,
"step": 31
},
{
"epoch": 0.006433130622706941,
"grad_norm": 0.8140174150466919,
"learning_rate": 9.672822322997305e-05,
"loss": 1.7669,
"step": 32
},
{
"epoch": 0.0066341659546665325,
"grad_norm": 0.890622615814209,
"learning_rate": 9.642770192448536e-05,
"loss": 1.6712,
"step": 33
},
{
"epoch": 0.006835201286626125,
"grad_norm": 0.872776210308075,
"learning_rate": 9.611448774886924e-05,
"loss": 1.8183,
"step": 34
},
{
"epoch": 0.007036236618585716,
"grad_norm": 0.9921485781669617,
"learning_rate": 9.578866633275288e-05,
"loss": 1.8694,
"step": 35
},
{
"epoch": 0.007237271950545308,
"grad_norm": 1.0803183317184448,
"learning_rate": 9.545032675245813e-05,
"loss": 1.9187,
"step": 36
},
{
"epoch": 0.0074383072825049005,
"grad_norm": 0.9387599229812622,
"learning_rate": 9.509956150664796e-05,
"loss": 1.9963,
"step": 37
},
{
"epoch": 0.007639342614464492,
"grad_norm": 1.0187156200408936,
"learning_rate": 9.473646649103818e-05,
"loss": 1.7119,
"step": 38
},
{
"epoch": 0.007840377946424084,
"grad_norm": 1.048580527305603,
"learning_rate": 9.43611409721806e-05,
"loss": 1.7683,
"step": 39
},
{
"epoch": 0.008041413278383676,
"grad_norm": 0.9831770658493042,
"learning_rate": 9.397368756032445e-05,
"loss": 1.7932,
"step": 40
},
{
"epoch": 0.008242448610343268,
"grad_norm": 1.0285784006118774,
"learning_rate": 9.357421218136386e-05,
"loss": 1.8248,
"step": 41
},
{
"epoch": 0.008443483942302859,
"grad_norm": 1.1290544271469116,
"learning_rate": 9.316282404787871e-05,
"loss": 1.9735,
"step": 42
},
{
"epoch": 0.008644519274262451,
"grad_norm": 1.0321779251098633,
"learning_rate": 9.273963562927695e-05,
"loss": 1.783,
"step": 43
},
{
"epoch": 0.008845554606222043,
"grad_norm": 1.100907802581787,
"learning_rate": 9.230476262104677e-05,
"loss": 1.7801,
"step": 44
},
{
"epoch": 0.009046589938181636,
"grad_norm": 1.203137993812561,
"learning_rate": 9.185832391312644e-05,
"loss": 1.9669,
"step": 45
},
{
"epoch": 0.009247625270141228,
"grad_norm": 1.23589289188385,
"learning_rate": 9.140044155740101e-05,
"loss": 1.6836,
"step": 46
},
{
"epoch": 0.00944866060210082,
"grad_norm": 1.3507293462753296,
"learning_rate": 9.093124073433463e-05,
"loss": 1.6699,
"step": 47
},
{
"epoch": 0.00964969593406041,
"grad_norm": 1.4106907844543457,
"learning_rate": 9.045084971874738e-05,
"loss": 1.7651,
"step": 48
},
{
"epoch": 0.009850731266020003,
"grad_norm": 1.957142949104309,
"learning_rate": 8.995939984474624e-05,
"loss": 1.8771,
"step": 49
},
{
"epoch": 0.010051766597979595,
"grad_norm": 2.773338556289673,
"learning_rate": 8.945702546981969e-05,
"loss": 2.0025,
"step": 50
},
{
"epoch": 0.010051766597979595,
"eval_loss": 1.9373520612716675,
"eval_runtime": 172.4358,
"eval_samples_per_second": 48.586,
"eval_steps_per_second": 12.149,
"step": 50
},
{
"epoch": 0.010252801929939187,
"grad_norm": 1.9833588600158691,
"learning_rate": 8.894386393810563e-05,
"loss": 1.8957,
"step": 51
},
{
"epoch": 0.01045383726189878,
"grad_norm": 1.938567876815796,
"learning_rate": 8.842005554284296e-05,
"loss": 1.9646,
"step": 52
},
{
"epoch": 0.010654872593858371,
"grad_norm": 0.9525935649871826,
"learning_rate": 8.788574348801675e-05,
"loss": 1.67,
"step": 53
},
{
"epoch": 0.010855907925817962,
"grad_norm": 1.0226001739501953,
"learning_rate": 8.73410738492077e-05,
"loss": 1.8589,
"step": 54
},
{
"epoch": 0.011056943257777554,
"grad_norm": 0.9939088821411133,
"learning_rate": 8.678619553365659e-05,
"loss": 1.7025,
"step": 55
},
{
"epoch": 0.011257978589737146,
"grad_norm": 0.672572135925293,
"learning_rate": 8.622126023955446e-05,
"loss": 1.7452,
"step": 56
},
{
"epoch": 0.011459013921696739,
"grad_norm": 0.7350982427597046,
"learning_rate": 8.564642241456986e-05,
"loss": 1.7313,
"step": 57
},
{
"epoch": 0.01166004925365633,
"grad_norm": 0.6969625949859619,
"learning_rate": 8.506183921362443e-05,
"loss": 1.7895,
"step": 58
},
{
"epoch": 0.011861084585615921,
"grad_norm": 0.813930094242096,
"learning_rate": 8.44676704559283e-05,
"loss": 1.7949,
"step": 59
},
{
"epoch": 0.012062119917575513,
"grad_norm": 0.6331385374069214,
"learning_rate": 8.386407858128706e-05,
"loss": 1.6823,
"step": 60
},
{
"epoch": 0.012263155249535106,
"grad_norm": 0.6768167018890381,
"learning_rate": 8.32512286056924e-05,
"loss": 1.5996,
"step": 61
},
{
"epoch": 0.012464190581494698,
"grad_norm": 0.7389916181564331,
"learning_rate": 8.262928807620843e-05,
"loss": 1.724,
"step": 62
},
{
"epoch": 0.01266522591345429,
"grad_norm": 0.7186094522476196,
"learning_rate": 8.199842702516583e-05,
"loss": 1.7208,
"step": 63
},
{
"epoch": 0.012866261245413882,
"grad_norm": 0.6613526344299316,
"learning_rate": 8.135881792367686e-05,
"loss": 1.7751,
"step": 64
},
{
"epoch": 0.013067296577373473,
"grad_norm": 0.7740375399589539,
"learning_rate": 8.07106356344834e-05,
"loss": 1.7576,
"step": 65
},
{
"epoch": 0.013268331909333065,
"grad_norm": 0.6247599124908447,
"learning_rate": 8.005405736415126e-05,
"loss": 1.7399,
"step": 66
},
{
"epoch": 0.013469367241292657,
"grad_norm": 0.7394496202468872,
"learning_rate": 7.938926261462366e-05,
"loss": 1.7569,
"step": 67
},
{
"epoch": 0.01367040257325225,
"grad_norm": 0.7518988847732544,
"learning_rate": 7.871643313414718e-05,
"loss": 1.8087,
"step": 68
},
{
"epoch": 0.013871437905211842,
"grad_norm": 0.768896222114563,
"learning_rate": 7.803575286758364e-05,
"loss": 1.7186,
"step": 69
},
{
"epoch": 0.014072473237171432,
"grad_norm": 0.6294169425964355,
"learning_rate": 7.734740790612136e-05,
"loss": 1.6522,
"step": 70
},
{
"epoch": 0.014273508569131024,
"grad_norm": 0.6437097787857056,
"learning_rate": 7.66515864363997e-05,
"loss": 1.7515,
"step": 71
},
{
"epoch": 0.014474543901090616,
"grad_norm": 0.7687767744064331,
"learning_rate": 7.594847868906076e-05,
"loss": 1.922,
"step": 72
},
{
"epoch": 0.014675579233050209,
"grad_norm": 0.8429214954376221,
"learning_rate": 7.52382768867422e-05,
"loss": 1.8244,
"step": 73
},
{
"epoch": 0.014876614565009801,
"grad_norm": 0.7580492496490479,
"learning_rate": 7.452117519152542e-05,
"loss": 1.7377,
"step": 74
},
{
"epoch": 0.015077649896969393,
"grad_norm": 0.7352554798126221,
"learning_rate": 7.379736965185368e-05,
"loss": 1.6991,
"step": 75
},
{
"epoch": 0.015278685228928984,
"grad_norm": 0.7524585723876953,
"learning_rate": 7.30670581489344e-05,
"loss": 1.6867,
"step": 76
},
{
"epoch": 0.015479720560888576,
"grad_norm": 0.870159387588501,
"learning_rate": 7.233044034264034e-05,
"loss": 1.684,
"step": 77
},
{
"epoch": 0.015680755892848168,
"grad_norm": 0.7836048603057861,
"learning_rate": 7.158771761692464e-05,
"loss": 1.7913,
"step": 78
},
{
"epoch": 0.01588179122480776,
"grad_norm": 0.7694597840309143,
"learning_rate": 7.083909302476453e-05,
"loss": 1.8434,
"step": 79
},
{
"epoch": 0.016082826556767352,
"grad_norm": 0.8581750392913818,
"learning_rate": 7.008477123264848e-05,
"loss": 1.7675,
"step": 80
},
{
"epoch": 0.016283861888726943,
"grad_norm": 0.8228153586387634,
"learning_rate": 6.932495846462261e-05,
"loss": 1.9144,
"step": 81
},
{
"epoch": 0.016484897220686537,
"grad_norm": 0.9851665496826172,
"learning_rate": 6.855986244591104e-05,
"loss": 1.7136,
"step": 82
},
{
"epoch": 0.016685932552646127,
"grad_norm": 1.0223902463912964,
"learning_rate": 6.778969234612584e-05,
"loss": 1.8572,
"step": 83
},
{
"epoch": 0.016886967884605718,
"grad_norm": 0.8929843902587891,
"learning_rate": 6.701465872208216e-05,
"loss": 1.6725,
"step": 84
},
{
"epoch": 0.017088003216565312,
"grad_norm": 0.8399107456207275,
"learning_rate": 6.623497346023418e-05,
"loss": 1.6636,
"step": 85
},
{
"epoch": 0.017289038548524902,
"grad_norm": 0.7982478737831116,
"learning_rate": 6.545084971874738e-05,
"loss": 1.7159,
"step": 86
},
{
"epoch": 0.017490073880484496,
"grad_norm": 0.9400783181190491,
"learning_rate": 6.466250186922325e-05,
"loss": 1.8073,
"step": 87
},
{
"epoch": 0.017691109212444087,
"grad_norm": 0.9305055141448975,
"learning_rate": 6.387014543809223e-05,
"loss": 1.7618,
"step": 88
},
{
"epoch": 0.01789214454440368,
"grad_norm": 0.8941248059272766,
"learning_rate": 6.307399704769099e-05,
"loss": 1.7527,
"step": 89
},
{
"epoch": 0.01809317987636327,
"grad_norm": 1.0266920328140259,
"learning_rate": 6.227427435703997e-05,
"loss": 1.6766,
"step": 90
},
{
"epoch": 0.01829421520832286,
"grad_norm": 1.0415492057800293,
"learning_rate": 6.147119600233758e-05,
"loss": 1.818,
"step": 91
},
{
"epoch": 0.018495250540282455,
"grad_norm": 1.000577449798584,
"learning_rate": 6.066498153718735e-05,
"loss": 1.8711,
"step": 92
},
{
"epoch": 0.018696285872242046,
"grad_norm": 1.1230899095535278,
"learning_rate": 5.985585137257401e-05,
"loss": 1.9138,
"step": 93
},
{
"epoch": 0.01889732120420164,
"grad_norm": 1.0591013431549072,
"learning_rate": 5.90440267166055e-05,
"loss": 1.7242,
"step": 94
},
{
"epoch": 0.01909835653616123,
"grad_norm": 1.121505618095398,
"learning_rate": 5.8229729514036705e-05,
"loss": 1.7318,
"step": 95
},
{
"epoch": 0.01929939186812082,
"grad_norm": 1.2818670272827148,
"learning_rate": 5.74131823855921e-05,
"loss": 1.8267,
"step": 96
},
{
"epoch": 0.019500427200080415,
"grad_norm": 1.2918139696121216,
"learning_rate": 5.6594608567103456e-05,
"loss": 2.0023,
"step": 97
},
{
"epoch": 0.019701462532040005,
"grad_norm": 1.3619199991226196,
"learning_rate": 5.577423184847932e-05,
"loss": 1.6922,
"step": 98
},
{
"epoch": 0.0199024978639996,
"grad_norm": 1.9346954822540283,
"learning_rate": 5.495227651252315e-05,
"loss": 1.7747,
"step": 99
},
{
"epoch": 0.02010353319595919,
"grad_norm": 2.650932550430298,
"learning_rate": 5.4128967273616625e-05,
"loss": 1.7894,
"step": 100
},
{
"epoch": 0.02010353319595919,
"eval_loss": 1.814341425895691,
"eval_runtime": 172.5177,
"eval_samples_per_second": 48.563,
"eval_steps_per_second": 12.144,
"step": 100
},
{
"epoch": 0.02030456852791878,
"grad_norm": 0.9297945499420166,
"learning_rate": 5.330452921628497e-05,
"loss": 1.8183,
"step": 101
},
{
"epoch": 0.020505603859878374,
"grad_norm": 0.939454197883606,
"learning_rate": 5.247918773366112e-05,
"loss": 1.8074,
"step": 102
},
{
"epoch": 0.020706639191837965,
"grad_norm": 1.0974127054214478,
"learning_rate": 5.165316846586541e-05,
"loss": 1.8272,
"step": 103
},
{
"epoch": 0.02090767452379756,
"grad_norm": 0.8772820234298706,
"learning_rate": 5.0826697238317935e-05,
"loss": 1.808,
"step": 104
},
{
"epoch": 0.02110870985575715,
"grad_norm": 0.8370904326438904,
"learning_rate": 5e-05,
"loss": 1.7561,
"step": 105
},
{
"epoch": 0.021309745187716743,
"grad_norm": 0.619301974773407,
"learning_rate": 4.917330276168208e-05,
"loss": 1.5667,
"step": 106
},
{
"epoch": 0.021510780519676333,
"grad_norm": 0.6837291717529297,
"learning_rate": 4.834683153413459e-05,
"loss": 1.6399,
"step": 107
},
{
"epoch": 0.021711815851635924,
"grad_norm": 0.5568907260894775,
"learning_rate": 4.7520812266338885e-05,
"loss": 1.5673,
"step": 108
},
{
"epoch": 0.021912851183595518,
"grad_norm": 0.5481559038162231,
"learning_rate": 4.669547078371504e-05,
"loss": 1.6878,
"step": 109
},
{
"epoch": 0.02211388651555511,
"grad_norm": 0.5510746240615845,
"learning_rate": 4.5871032726383386e-05,
"loss": 1.7258,
"step": 110
},
{
"epoch": 0.022314921847514702,
"grad_norm": 0.6050065159797668,
"learning_rate": 4.504772348747687e-05,
"loss": 1.7121,
"step": 111
},
{
"epoch": 0.022515957179474293,
"grad_norm": 0.6477292776107788,
"learning_rate": 4.4225768151520694e-05,
"loss": 1.624,
"step": 112
},
{
"epoch": 0.022716992511433883,
"grad_norm": 0.6289533972740173,
"learning_rate": 4.3405391432896555e-05,
"loss": 1.754,
"step": 113
},
{
"epoch": 0.022918027843393477,
"grad_norm": 0.654630184173584,
"learning_rate": 4.2586817614407895e-05,
"loss": 1.6769,
"step": 114
},
{
"epoch": 0.023119063175353068,
"grad_norm": 0.6162823438644409,
"learning_rate": 4.17702704859633e-05,
"loss": 1.5744,
"step": 115
},
{
"epoch": 0.02332009850731266,
"grad_norm": 0.6766645908355713,
"learning_rate": 4.095597328339452e-05,
"loss": 1.7471,
"step": 116
},
{
"epoch": 0.023521133839272252,
"grad_norm": 0.6917820572853088,
"learning_rate": 4.0144148627425993e-05,
"loss": 1.5096,
"step": 117
},
{
"epoch": 0.023722169171231842,
"grad_norm": 0.6780633330345154,
"learning_rate": 3.933501846281267e-05,
"loss": 1.7633,
"step": 118
},
{
"epoch": 0.023923204503191436,
"grad_norm": 0.675075888633728,
"learning_rate": 3.852880399766243e-05,
"loss": 1.7795,
"step": 119
},
{
"epoch": 0.024124239835151027,
"grad_norm": 0.642783522605896,
"learning_rate": 3.772572564296005e-05,
"loss": 1.7796,
"step": 120
},
{
"epoch": 0.02432527516711062,
"grad_norm": 0.7685930728912354,
"learning_rate": 3.6926002952309016e-05,
"loss": 1.7594,
"step": 121
},
{
"epoch": 0.02452631049907021,
"grad_norm": 0.641385555267334,
"learning_rate": 3.612985456190778e-05,
"loss": 1.7159,
"step": 122
},
{
"epoch": 0.024727345831029802,
"grad_norm": 0.6726094484329224,
"learning_rate": 3.533749813077677e-05,
"loss": 1.6944,
"step": 123
},
{
"epoch": 0.024928381162989396,
"grad_norm": 0.625813901424408,
"learning_rate": 3.4549150281252636e-05,
"loss": 1.6797,
"step": 124
},
{
"epoch": 0.025129416494948986,
"grad_norm": 0.7406529188156128,
"learning_rate": 3.3765026539765834e-05,
"loss": 1.5463,
"step": 125
},
{
"epoch": 0.02533045182690858,
"grad_norm": 0.7634907364845276,
"learning_rate": 3.298534127791785e-05,
"loss": 1.654,
"step": 126
},
{
"epoch": 0.02553148715886817,
"grad_norm": 0.7529230713844299,
"learning_rate": 3.221030765387417e-05,
"loss": 1.7993,
"step": 127
},
{
"epoch": 0.025732522490827765,
"grad_norm": 0.843103289604187,
"learning_rate": 3.144013755408895e-05,
"loss": 1.7806,
"step": 128
},
{
"epoch": 0.025933557822787355,
"grad_norm": 0.8046737909317017,
"learning_rate": 3.0675041535377405e-05,
"loss": 1.645,
"step": 129
},
{
"epoch": 0.026134593154746946,
"grad_norm": 0.6955248713493347,
"learning_rate": 2.991522876735154e-05,
"loss": 1.7116,
"step": 130
},
{
"epoch": 0.02633562848670654,
"grad_norm": 0.6817566156387329,
"learning_rate": 2.916090697523549e-05,
"loss": 1.8035,
"step": 131
},
{
"epoch": 0.02653666381866613,
"grad_norm": 0.709463357925415,
"learning_rate": 2.8412282383075363e-05,
"loss": 1.5567,
"step": 132
},
{
"epoch": 0.026737699150625724,
"grad_norm": 0.8133819103240967,
"learning_rate": 2.766955965735968e-05,
"loss": 1.6997,
"step": 133
},
{
"epoch": 0.026938734482585314,
"grad_norm": 0.7387455105781555,
"learning_rate": 2.693294185106562e-05,
"loss": 1.6745,
"step": 134
},
{
"epoch": 0.027139769814544905,
"grad_norm": 0.7375905513763428,
"learning_rate": 2.6202630348146324e-05,
"loss": 1.8471,
"step": 135
},
{
"epoch": 0.0273408051465045,
"grad_norm": 0.7787903547286987,
"learning_rate": 2.547882480847461e-05,
"loss": 1.7531,
"step": 136
},
{
"epoch": 0.02754184047846409,
"grad_norm": 0.8154503107070923,
"learning_rate": 2.476172311325783e-05,
"loss": 1.7624,
"step": 137
},
{
"epoch": 0.027742875810423683,
"grad_norm": 0.903148889541626,
"learning_rate": 2.405152131093926e-05,
"loss": 1.7726,
"step": 138
},
{
"epoch": 0.027943911142383274,
"grad_norm": 0.8761181235313416,
"learning_rate": 2.3348413563600325e-05,
"loss": 1.722,
"step": 139
},
{
"epoch": 0.028144946474342864,
"grad_norm": 1.0418727397918701,
"learning_rate": 2.2652592093878666e-05,
"loss": 1.6932,
"step": 140
},
{
"epoch": 0.028345981806302458,
"grad_norm": 0.9624373316764832,
"learning_rate": 2.196424713241637e-05,
"loss": 1.7963,
"step": 141
},
{
"epoch": 0.02854701713826205,
"grad_norm": 0.9782199859619141,
"learning_rate": 2.128356686585282e-05,
"loss": 1.9029,
"step": 142
},
{
"epoch": 0.028748052470221643,
"grad_norm": 1.1240954399108887,
"learning_rate": 2.061073738537635e-05,
"loss": 1.8169,
"step": 143
},
{
"epoch": 0.028949087802181233,
"grad_norm": 1.2213478088378906,
"learning_rate": 1.9945942635848748e-05,
"loss": 1.8265,
"step": 144
},
{
"epoch": 0.029150123134140827,
"grad_norm": 1.1300071477890015,
"learning_rate": 1.928936436551661e-05,
"loss": 1.8799,
"step": 145
},
{
"epoch": 0.029351158466100417,
"grad_norm": 1.1099785566329956,
"learning_rate": 1.8641182076323148e-05,
"loss": 1.8078,
"step": 146
},
{
"epoch": 0.029552193798060008,
"grad_norm": 1.209250807762146,
"learning_rate": 1.800157297483417e-05,
"loss": 1.7618,
"step": 147
},
{
"epoch": 0.029753229130019602,
"grad_norm": 1.5932096242904663,
"learning_rate": 1.7370711923791567e-05,
"loss": 1.9006,
"step": 148
},
{
"epoch": 0.029954264461979192,
"grad_norm": 1.4094241857528687,
"learning_rate": 1.6748771394307585e-05,
"loss": 1.4656,
"step": 149
},
{
"epoch": 0.030155299793938786,
"grad_norm": 1.903329610824585,
"learning_rate": 1.6135921418712956e-05,
"loss": 1.8197,
"step": 150
},
{
"epoch": 0.030155299793938786,
"eval_loss": 1.7307214736938477,
"eval_runtime": 172.1064,
"eval_samples_per_second": 48.679,
"eval_steps_per_second": 12.173,
"step": 150
},
{
"epoch": 0.030356335125898377,
"grad_norm": 0.5426835417747498,
"learning_rate": 1.553232954407171e-05,
"loss": 1.882,
"step": 151
},
{
"epoch": 0.030557370457857967,
"grad_norm": 0.6195335984230042,
"learning_rate": 1.4938160786375572e-05,
"loss": 1.7597,
"step": 152
},
{
"epoch": 0.03075840578981756,
"grad_norm": 0.6173190474510193,
"learning_rate": 1.435357758543015e-05,
"loss": 1.8735,
"step": 153
},
{
"epoch": 0.03095944112177715,
"grad_norm": 0.585121750831604,
"learning_rate": 1.3778739760445552e-05,
"loss": 1.847,
"step": 154
},
{
"epoch": 0.031160476453736746,
"grad_norm": 0.671636164188385,
"learning_rate": 1.3213804466343421e-05,
"loss": 1.7611,
"step": 155
},
{
"epoch": 0.031361511785696336,
"grad_norm": 0.5902316570281982,
"learning_rate": 1.2658926150792322e-05,
"loss": 1.7677,
"step": 156
},
{
"epoch": 0.03156254711765593,
"grad_norm": 0.7377857565879822,
"learning_rate": 1.2114256511983274e-05,
"loss": 1.791,
"step": 157
},
{
"epoch": 0.03176358244961552,
"grad_norm": 0.5817550420761108,
"learning_rate": 1.157994445715706e-05,
"loss": 1.7041,
"step": 158
},
{
"epoch": 0.03196461778157511,
"grad_norm": 0.5559086799621582,
"learning_rate": 1.1056136061894384e-05,
"loss": 1.6919,
"step": 159
},
{
"epoch": 0.032165653113534705,
"grad_norm": 0.6213694214820862,
"learning_rate": 1.0542974530180327e-05,
"loss": 1.7895,
"step": 160
},
{
"epoch": 0.0323666884454943,
"grad_norm": 0.6221588850021362,
"learning_rate": 1.0040600155253765e-05,
"loss": 1.656,
"step": 161
},
{
"epoch": 0.032567723777453886,
"grad_norm": 0.6950201988220215,
"learning_rate": 9.549150281252633e-06,
"loss": 1.7278,
"step": 162
},
{
"epoch": 0.03276875910941348,
"grad_norm": 0.6161099672317505,
"learning_rate": 9.068759265665384e-06,
"loss": 1.7036,
"step": 163
},
{
"epoch": 0.032969794441373074,
"grad_norm": 0.6655071377754211,
"learning_rate": 8.599558442598998e-06,
"loss": 1.6563,
"step": 164
},
{
"epoch": 0.03317082977333266,
"grad_norm": 0.604499340057373,
"learning_rate": 8.141676086873572e-06,
"loss": 1.7613,
"step": 165
},
{
"epoch": 0.033371865105292255,
"grad_norm": 0.6050909757614136,
"learning_rate": 7.695237378953223e-06,
"loss": 1.7498,
"step": 166
},
{
"epoch": 0.03357290043725185,
"grad_norm": 0.6911782622337341,
"learning_rate": 7.260364370723044e-06,
"loss": 1.6402,
"step": 167
},
{
"epoch": 0.033773935769211436,
"grad_norm": 0.594603955745697,
"learning_rate": 6.837175952121306e-06,
"loss": 1.7153,
"step": 168
},
{
"epoch": 0.03397497110117103,
"grad_norm": 0.666424036026001,
"learning_rate": 6.425787818636131e-06,
"loss": 1.5678,
"step": 169
},
{
"epoch": 0.034176006433130623,
"grad_norm": 0.6311551928520203,
"learning_rate": 6.026312439675552e-06,
"loss": 1.6372,
"step": 170
},
{
"epoch": 0.03437704176509022,
"grad_norm": 0.7468748688697815,
"learning_rate": 5.6388590278194096e-06,
"loss": 1.6793,
"step": 171
},
{
"epoch": 0.034578077097049804,
"grad_norm": 0.6775615215301514,
"learning_rate": 5.263533508961827e-06,
"loss": 1.6193,
"step": 172
},
{
"epoch": 0.0347791124290094,
"grad_norm": 0.7296246290206909,
"learning_rate": 4.900438493352055e-06,
"loss": 1.7235,
"step": 173
},
{
"epoch": 0.03498014776096899,
"grad_norm": 0.679608166217804,
"learning_rate": 4.549673247541875e-06,
"loss": 1.6635,
"step": 174
},
{
"epoch": 0.03518118309292858,
"grad_norm": 0.7250195741653442,
"learning_rate": 4.2113336672471245e-06,
"loss": 1.7259,
"step": 175
},
{
"epoch": 0.03538221842488817,
"grad_norm": 0.7076656818389893,
"learning_rate": 3.885512251130763e-06,
"loss": 1.6594,
"step": 176
},
{
"epoch": 0.03558325375684777,
"grad_norm": 0.7382538914680481,
"learning_rate": 3.5722980755146517e-06,
"loss": 1.7468,
"step": 177
},
{
"epoch": 0.03578428908880736,
"grad_norm": 0.7084540724754333,
"learning_rate": 3.271776770026963e-06,
"loss": 1.654,
"step": 178
},
{
"epoch": 0.03598532442076695,
"grad_norm": 0.6778978109359741,
"learning_rate": 2.9840304941919415e-06,
"loss": 1.7026,
"step": 179
},
{
"epoch": 0.03618635975272654,
"grad_norm": 0.7010304927825928,
"learning_rate": 2.7091379149682685e-06,
"loss": 1.7371,
"step": 180
},
{
"epoch": 0.036387395084686136,
"grad_norm": 0.715516984462738,
"learning_rate": 2.4471741852423237e-06,
"loss": 1.6457,
"step": 181
},
{
"epoch": 0.03658843041664572,
"grad_norm": 0.7306588292121887,
"learning_rate": 2.1982109232821178e-06,
"loss": 1.6073,
"step": 182
},
{
"epoch": 0.03678946574860532,
"grad_norm": 0.8096541166305542,
"learning_rate": 1.962316193157593e-06,
"loss": 1.6594,
"step": 183
},
{
"epoch": 0.03699050108056491,
"grad_norm": 0.7755445241928101,
"learning_rate": 1.7395544861325718e-06,
"loss": 1.6198,
"step": 184
},
{
"epoch": 0.0371915364125245,
"grad_norm": 0.8347325325012207,
"learning_rate": 1.5299867030334814e-06,
"loss": 1.7142,
"step": 185
},
{
"epoch": 0.03739257174448409,
"grad_norm": 0.8304764032363892,
"learning_rate": 1.333670137599713e-06,
"loss": 1.7268,
"step": 186
},
{
"epoch": 0.037593607076443686,
"grad_norm": 0.8034808039665222,
"learning_rate": 1.1506584608200367e-06,
"loss": 1.7494,
"step": 187
},
{
"epoch": 0.03779464240840328,
"grad_norm": 0.8983921408653259,
"learning_rate": 9.810017062595322e-07,
"loss": 1.9552,
"step": 188
},
{
"epoch": 0.03799567774036287,
"grad_norm": 0.8856582045555115,
"learning_rate": 8.247462563808817e-07,
"loss": 1.7872,
"step": 189
},
{
"epoch": 0.03819671307232246,
"grad_norm": 0.9080292582511902,
"learning_rate": 6.819348298638839e-07,
"loss": 1.7907,
"step": 190
},
{
"epoch": 0.038397748404282055,
"grad_norm": 0.8408676385879517,
"learning_rate": 5.526064699265753e-07,
"loss": 1.8401,
"step": 191
},
{
"epoch": 0.03859878373624164,
"grad_norm": 0.9034141302108765,
"learning_rate": 4.367965336512403e-07,
"loss": 1.7922,
"step": 192
},
{
"epoch": 0.038799819068201236,
"grad_norm": 1.0305664539337158,
"learning_rate": 3.3453668231809286e-07,
"loss": 1.8243,
"step": 193
},
{
"epoch": 0.03900085440016083,
"grad_norm": 1.0020116567611694,
"learning_rate": 2.458548727494292e-07,
"loss": 1.6539,
"step": 194
},
{
"epoch": 0.039201889732120423,
"grad_norm": 1.2176718711853027,
"learning_rate": 1.7077534966650766e-07,
"loss": 1.806,
"step": 195
},
{
"epoch": 0.03940292506408001,
"grad_norm": 1.1750303506851196,
"learning_rate": 1.0931863906127327e-07,
"loss": 1.8205,
"step": 196
},
{
"epoch": 0.039603960396039604,
"grad_norm": 1.0933748483657837,
"learning_rate": 6.150154258476315e-08,
"loss": 1.6813,
"step": 197
},
{
"epoch": 0.0398049957279992,
"grad_norm": 1.3606339693069458,
"learning_rate": 2.7337132953697554e-08,
"loss": 1.7703,
"step": 198
},
{
"epoch": 0.040006031059958785,
"grad_norm": 1.5506101846694946,
"learning_rate": 6.834750376549792e-09,
"loss": 1.7552,
"step": 199
},
{
"epoch": 0.04020706639191838,
"grad_norm": 2.0531558990478516,
"learning_rate": 0.0,
"loss": 1.6624,
"step": 200
},
{
"epoch": 0.04020706639191838,
"eval_loss": 1.7265242338180542,
"eval_runtime": 172.5701,
"eval_samples_per_second": 48.548,
"eval_steps_per_second": 12.14,
"step": 200
}
],
"logging_steps": 1,
"max_steps": 200,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 50,
"stateful_callbacks": {
"EarlyStoppingCallback": {
"args": {
"early_stopping_patience": 5,
"early_stopping_threshold": 0.0
},
"attributes": {
"early_stopping_patience_counter": 0
}
},
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 4.324957270887629e+16,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}