{
"best_metric": 1.0406328439712524,
"best_model_checkpoint": "miner_id_24/checkpoint-200",
"epoch": 0.26972353337828725,
"eval_steps": 50,
"global_step": 200,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0013486176668914363,
"grad_norm": 0.21627512574195862,
"learning_rate": 1.0100000000000002e-05,
"loss": 1.4367,
"step": 1
},
{
"epoch": 0.0013486176668914363,
"eval_loss": 1.471815586090088,
"eval_runtime": 53.4341,
"eval_samples_per_second": 186.959,
"eval_steps_per_second": 5.858,
"step": 1
},
{
"epoch": 0.0026972353337828725,
"grad_norm": 0.2622290849685669,
"learning_rate": 2.0200000000000003e-05,
"loss": 1.3156,
"step": 2
},
{
"epoch": 0.004045853000674309,
"grad_norm": 0.35485848784446716,
"learning_rate": 3.0299999999999998e-05,
"loss": 1.1943,
"step": 3
},
{
"epoch": 0.005394470667565745,
"grad_norm": 0.4785490930080414,
"learning_rate": 4.0400000000000006e-05,
"loss": 1.2155,
"step": 4
},
{
"epoch": 0.006743088334457181,
"grad_norm": 0.6672747135162354,
"learning_rate": 5.05e-05,
"loss": 1.4539,
"step": 5
},
{
"epoch": 0.008091706001348618,
"grad_norm": 0.9162051677703857,
"learning_rate": 6.0599999999999996e-05,
"loss": 1.7231,
"step": 6
},
{
"epoch": 0.009440323668240054,
"grad_norm": 0.18377305567264557,
"learning_rate": 7.07e-05,
"loss": 1.3547,
"step": 7
},
{
"epoch": 0.01078894133513149,
"grad_norm": 0.2544221878051758,
"learning_rate": 8.080000000000001e-05,
"loss": 1.3042,
"step": 8
},
{
"epoch": 0.012137559002022926,
"grad_norm": 0.19323669373989105,
"learning_rate": 9.09e-05,
"loss": 1.1225,
"step": 9
},
{
"epoch": 0.013486176668914362,
"grad_norm": 0.1609629988670349,
"learning_rate": 0.000101,
"loss": 1.0866,
"step": 10
},
{
"epoch": 0.014834794335805798,
"grad_norm": 0.15839025378227234,
"learning_rate": 0.00010046842105263158,
"loss": 1.14,
"step": 11
},
{
"epoch": 0.016183412002697236,
"grad_norm": 0.2729446291923523,
"learning_rate": 9.993684210526315e-05,
"loss": 1.2721,
"step": 12
},
{
"epoch": 0.01753202966958867,
"grad_norm": 0.1378893405199051,
"learning_rate": 9.940526315789473e-05,
"loss": 1.4004,
"step": 13
},
{
"epoch": 0.018880647336480108,
"grad_norm": 0.10929320007562637,
"learning_rate": 9.887368421052632e-05,
"loss": 1.2109,
"step": 14
},
{
"epoch": 0.020229265003371546,
"grad_norm": 0.10563603043556213,
"learning_rate": 9.83421052631579e-05,
"loss": 1.104,
"step": 15
},
{
"epoch": 0.02157788267026298,
"grad_norm": 0.12585799396038055,
"learning_rate": 9.781052631578948e-05,
"loss": 1.0363,
"step": 16
},
{
"epoch": 0.022926500337154418,
"grad_norm": 0.18216858804225922,
"learning_rate": 9.727894736842106e-05,
"loss": 1.0688,
"step": 17
},
{
"epoch": 0.024275118004045852,
"grad_norm": 0.23790468275547028,
"learning_rate": 9.674736842105263e-05,
"loss": 1.2079,
"step": 18
},
{
"epoch": 0.02562373567093729,
"grad_norm": 0.2768643796443939,
"learning_rate": 9.621578947368421e-05,
"loss": 1.3613,
"step": 19
},
{
"epoch": 0.026972353337828724,
"grad_norm": 0.0732409730553627,
"learning_rate": 9.568421052631578e-05,
"loss": 1.2757,
"step": 20
},
{
"epoch": 0.028320971004720162,
"grad_norm": 0.09180562943220139,
"learning_rate": 9.515263157894737e-05,
"loss": 1.1038,
"step": 21
},
{
"epoch": 0.029669588671611596,
"grad_norm": 0.10616059601306915,
"learning_rate": 9.462105263157895e-05,
"loss": 0.9891,
"step": 22
},
{
"epoch": 0.031018206338503034,
"grad_norm": 0.10895205289125443,
"learning_rate": 9.408947368421054e-05,
"loss": 1.014,
"step": 23
},
{
"epoch": 0.03236682400539447,
"grad_norm": 0.1348455399274826,
"learning_rate": 9.355789473684211e-05,
"loss": 1.1653,
"step": 24
},
{
"epoch": 0.033715441672285906,
"grad_norm": 0.21156223118305206,
"learning_rate": 9.302631578947369e-05,
"loss": 1.2391,
"step": 25
},
{
"epoch": 0.03506405933917734,
"grad_norm": 0.06843415647745132,
"learning_rate": 9.249473684210526e-05,
"loss": 1.2372,
"step": 26
},
{
"epoch": 0.03641267700606878,
"grad_norm": 0.08744656294584274,
"learning_rate": 9.196315789473685e-05,
"loss": 1.0868,
"step": 27
},
{
"epoch": 0.037761294672960216,
"grad_norm": 0.09496009349822998,
"learning_rate": 9.143157894736843e-05,
"loss": 0.9572,
"step": 28
},
{
"epoch": 0.03910991233985165,
"grad_norm": 0.10309787094593048,
"learning_rate": 9.09e-05,
"loss": 0.9295,
"step": 29
},
{
"epoch": 0.04045853000674309,
"grad_norm": 0.11773687601089478,
"learning_rate": 9.036842105263158e-05,
"loss": 1.0578,
"step": 30
},
{
"epoch": 0.041807147673634526,
"grad_norm": 0.1371300369501114,
"learning_rate": 8.983684210526316e-05,
"loss": 1.1868,
"step": 31
},
{
"epoch": 0.04315576534052596,
"grad_norm": 0.06277325749397278,
"learning_rate": 8.930526315789474e-05,
"loss": 1.2414,
"step": 32
},
{
"epoch": 0.044504383007417395,
"grad_norm": 0.07237385958433151,
"learning_rate": 8.877368421052632e-05,
"loss": 1.1902,
"step": 33
},
{
"epoch": 0.045853000674308836,
"grad_norm": 0.06515516340732574,
"learning_rate": 8.82421052631579e-05,
"loss": 1.0562,
"step": 34
},
{
"epoch": 0.04720161834120027,
"grad_norm": 0.06620181351900101,
"learning_rate": 8.771052631578948e-05,
"loss": 1.0002,
"step": 35
},
{
"epoch": 0.048550236008091704,
"grad_norm": 0.08385398983955383,
"learning_rate": 8.717894736842105e-05,
"loss": 0.9889,
"step": 36
},
{
"epoch": 0.049898853674983146,
"grad_norm": 0.11940331012010574,
"learning_rate": 8.664736842105263e-05,
"loss": 1.141,
"step": 37
},
{
"epoch": 0.05124747134187458,
"grad_norm": 0.07352506369352341,
"learning_rate": 8.61157894736842e-05,
"loss": 1.1912,
"step": 38
},
{
"epoch": 0.052596089008766014,
"grad_norm": 0.04435489699244499,
"learning_rate": 8.55842105263158e-05,
"loss": 1.173,
"step": 39
},
{
"epoch": 0.05394470667565745,
"grad_norm": 0.05147892236709595,
"learning_rate": 8.505263157894737e-05,
"loss": 1.0617,
"step": 40
},
{
"epoch": 0.05529332434254889,
"grad_norm": 0.05309430509805679,
"learning_rate": 8.452105263157896e-05,
"loss": 1.0096,
"step": 41
},
{
"epoch": 0.056641942009440324,
"grad_norm": 0.07032801955938339,
"learning_rate": 8.398947368421053e-05,
"loss": 1.0089,
"step": 42
},
{
"epoch": 0.05799055967633176,
"grad_norm": 0.11153686046600342,
"learning_rate": 8.345789473684211e-05,
"loss": 1.0679,
"step": 43
},
{
"epoch": 0.05933917734322319,
"grad_norm": 0.13126641511917114,
"learning_rate": 8.292631578947368e-05,
"loss": 1.1456,
"step": 44
},
{
"epoch": 0.060687795010114634,
"grad_norm": 0.04901754483580589,
"learning_rate": 8.239473684210526e-05,
"loss": 1.2107,
"step": 45
},
{
"epoch": 0.06203641267700607,
"grad_norm": 0.05341381952166557,
"learning_rate": 8.186315789473683e-05,
"loss": 1.063,
"step": 46
},
{
"epoch": 0.0633850303438975,
"grad_norm": 0.04969592019915581,
"learning_rate": 8.133157894736842e-05,
"loss": 0.9465,
"step": 47
},
{
"epoch": 0.06473364801078894,
"grad_norm": 0.06398074328899384,
"learning_rate": 8.080000000000001e-05,
"loss": 0.9775,
"step": 48
},
{
"epoch": 0.06608226567768037,
"grad_norm": 0.10114479809999466,
"learning_rate": 8.026842105263159e-05,
"loss": 1.102,
"step": 49
},
{
"epoch": 0.06743088334457181,
"grad_norm": 0.17890770733356476,
"learning_rate": 7.973684210526316e-05,
"loss": 1.1313,
"step": 50
},
{
"epoch": 0.06743088334457181,
"eval_loss": 1.0796537399291992,
"eval_runtime": 54.0867,
"eval_samples_per_second": 184.703,
"eval_steps_per_second": 5.787,
"step": 50
},
{
"epoch": 0.06877950101146325,
"grad_norm": 0.048019178211688995,
"learning_rate": 7.920526315789474e-05,
"loss": 1.191,
"step": 51
},
{
"epoch": 0.07012811867835468,
"grad_norm": 0.060450755059719086,
"learning_rate": 7.867368421052631e-05,
"loss": 1.1022,
"step": 52
},
{
"epoch": 0.07147673634524612,
"grad_norm": 0.05912817269563675,
"learning_rate": 7.814210526315789e-05,
"loss": 1.0136,
"step": 53
},
{
"epoch": 0.07282535401213756,
"grad_norm": 0.06376615911722183,
"learning_rate": 7.761052631578946e-05,
"loss": 1.0121,
"step": 54
},
{
"epoch": 0.07417397167902899,
"grad_norm": 0.08349180966615677,
"learning_rate": 7.707894736842105e-05,
"loss": 1.027,
"step": 55
},
{
"epoch": 0.07552258934592043,
"grad_norm": 0.11868201196193695,
"learning_rate": 7.654736842105264e-05,
"loss": 1.1483,
"step": 56
},
{
"epoch": 0.07687120701281187,
"grad_norm": 0.05987722799181938,
"learning_rate": 7.601578947368422e-05,
"loss": 1.224,
"step": 57
},
{
"epoch": 0.0782198246797033,
"grad_norm": 0.05628606677055359,
"learning_rate": 7.548421052631579e-05,
"loss": 1.1976,
"step": 58
},
{
"epoch": 0.07956844234659474,
"grad_norm": 0.05528495833277702,
"learning_rate": 7.495263157894737e-05,
"loss": 1.0255,
"step": 59
},
{
"epoch": 0.08091706001348618,
"grad_norm": 0.05953006073832512,
"learning_rate": 7.442105263157894e-05,
"loss": 0.9483,
"step": 60
},
{
"epoch": 0.08226567768037761,
"grad_norm": 0.08074503391981125,
"learning_rate": 7.388947368421053e-05,
"loss": 1.0486,
"step": 61
},
{
"epoch": 0.08361429534726905,
"grad_norm": 0.12140597403049469,
"learning_rate": 7.335789473684211e-05,
"loss": 1.073,
"step": 62
},
{
"epoch": 0.08496291301416048,
"grad_norm": 0.0714363381266594,
"learning_rate": 7.282631578947368e-05,
"loss": 1.1274,
"step": 63
},
{
"epoch": 0.08631153068105192,
"grad_norm": 0.045137494802474976,
"learning_rate": 7.229473684210527e-05,
"loss": 1.1541,
"step": 64
},
{
"epoch": 0.08766014834794336,
"grad_norm": 0.04826880246400833,
"learning_rate": 7.176315789473685e-05,
"loss": 1.0013,
"step": 65
},
{
"epoch": 0.08900876601483479,
"grad_norm": 0.04936565086245537,
"learning_rate": 7.123157894736842e-05,
"loss": 0.9097,
"step": 66
},
{
"epoch": 0.09035738368172623,
"grad_norm": 0.06744600087404251,
"learning_rate": 7.07e-05,
"loss": 1.0108,
"step": 67
},
{
"epoch": 0.09170600134861767,
"grad_norm": 0.09931527823209763,
"learning_rate": 7.016842105263159e-05,
"loss": 1.1002,
"step": 68
},
{
"epoch": 0.0930546190155091,
"grad_norm": 0.1660183072090149,
"learning_rate": 6.963684210526316e-05,
"loss": 1.2398,
"step": 69
},
{
"epoch": 0.09440323668240054,
"grad_norm": 0.043010879307985306,
"learning_rate": 6.910526315789474e-05,
"loss": 1.2157,
"step": 70
},
{
"epoch": 0.09575185434929198,
"grad_norm": 0.05472362041473389,
"learning_rate": 6.857368421052631e-05,
"loss": 1.0815,
"step": 71
},
{
"epoch": 0.09710047201618341,
"grad_norm": 0.05692880228161812,
"learning_rate": 6.80421052631579e-05,
"loss": 0.9715,
"step": 72
},
{
"epoch": 0.09844908968307485,
"grad_norm": 0.06206513196229935,
"learning_rate": 6.751052631578948e-05,
"loss": 0.9359,
"step": 73
},
{
"epoch": 0.09979770734996629,
"grad_norm": 0.08632377535104752,
"learning_rate": 6.697894736842105e-05,
"loss": 1.0308,
"step": 74
},
{
"epoch": 0.10114632501685772,
"grad_norm": 0.15457108616828918,
"learning_rate": 6.644736842105264e-05,
"loss": 1.0636,
"step": 75
},
{
"epoch": 0.10249494268374916,
"grad_norm": 0.04528992623090744,
"learning_rate": 6.591578947368422e-05,
"loss": 1.2468,
"step": 76
},
{
"epoch": 0.10384356035064059,
"grad_norm": 0.056389302015304565,
"learning_rate": 6.538421052631579e-05,
"loss": 1.1375,
"step": 77
},
{
"epoch": 0.10519217801753203,
"grad_norm": 0.05551513656973839,
"learning_rate": 6.485263157894737e-05,
"loss": 0.9668,
"step": 78
},
{
"epoch": 0.10654079568442347,
"grad_norm": 0.06396491080522537,
"learning_rate": 6.432105263157894e-05,
"loss": 1.0258,
"step": 79
},
{
"epoch": 0.1078894133513149,
"grad_norm": 0.082362100481987,
"learning_rate": 6.378947368421053e-05,
"loss": 1.018,
"step": 80
},
{
"epoch": 0.10923803101820634,
"grad_norm": 0.12386704236268997,
"learning_rate": 6.32578947368421e-05,
"loss": 1.0616,
"step": 81
},
{
"epoch": 0.11058664868509778,
"grad_norm": 0.05446777492761612,
"learning_rate": 6.27263157894737e-05,
"loss": 1.2725,
"step": 82
},
{
"epoch": 0.11193526635198921,
"grad_norm": 0.05156746134161949,
"learning_rate": 6.219473684210527e-05,
"loss": 1.1709,
"step": 83
},
{
"epoch": 0.11328388401888065,
"grad_norm": 0.052307192236185074,
"learning_rate": 6.166315789473685e-05,
"loss": 1.0124,
"step": 84
},
{
"epoch": 0.11463250168577209,
"grad_norm": 0.05781957507133484,
"learning_rate": 6.113157894736842e-05,
"loss": 0.9626,
"step": 85
},
{
"epoch": 0.11598111935266352,
"grad_norm": 0.07445007562637329,
"learning_rate": 6.0599999999999996e-05,
"loss": 0.9916,
"step": 86
},
{
"epoch": 0.11732973701955496,
"grad_norm": 0.10682767629623413,
"learning_rate": 6.006842105263158e-05,
"loss": 1.0941,
"step": 87
},
{
"epoch": 0.11867835468644639,
"grad_norm": 0.06501512974500656,
"learning_rate": 5.953684210526315e-05,
"loss": 1.2743,
"step": 88
},
{
"epoch": 0.12002697235333783,
"grad_norm": 0.04342595487833023,
"learning_rate": 5.900526315789474e-05,
"loss": 1.2165,
"step": 89
},
{
"epoch": 0.12137559002022927,
"grad_norm": 0.050615645945072174,
"learning_rate": 5.847368421052632e-05,
"loss": 1.075,
"step": 90
},
{
"epoch": 0.1227242076871207,
"grad_norm": 0.04985492676496506,
"learning_rate": 5.79421052631579e-05,
"loss": 1.0211,
"step": 91
},
{
"epoch": 0.12407282535401214,
"grad_norm": 0.06781808286905289,
"learning_rate": 5.7410526315789475e-05,
"loss": 1.0205,
"step": 92
},
{
"epoch": 0.12542144302090358,
"grad_norm": 0.10249257832765579,
"learning_rate": 5.687894736842105e-05,
"loss": 1.0659,
"step": 93
},
{
"epoch": 0.126770060687795,
"grad_norm": 0.11543702334165573,
"learning_rate": 5.6347368421052625e-05,
"loss": 1.2193,
"step": 94
},
{
"epoch": 0.12811867835468643,
"grad_norm": 0.04040480777621269,
"learning_rate": 5.5815789473684214e-05,
"loss": 1.1971,
"step": 95
},
{
"epoch": 0.1294672960215779,
"grad_norm": 0.05076085031032562,
"learning_rate": 5.5284210526315796e-05,
"loss": 1.0762,
"step": 96
},
{
"epoch": 0.13081591368846932,
"grad_norm": 0.05482475459575653,
"learning_rate": 5.475263157894737e-05,
"loss": 0.9416,
"step": 97
},
{
"epoch": 0.13216453135536074,
"grad_norm": 0.06342153996229172,
"learning_rate": 5.422105263157895e-05,
"loss": 0.9427,
"step": 98
},
{
"epoch": 0.1335131490222522,
"grad_norm": 0.08709488809108734,
"learning_rate": 5.368947368421053e-05,
"loss": 1.0427,
"step": 99
},
{
"epoch": 0.13486176668914363,
"grad_norm": 0.16469943523406982,
"learning_rate": 5.3157894736842104e-05,
"loss": 1.0271,
"step": 100
},
{
"epoch": 0.13486176668914363,
"eval_loss": 1.050193190574646,
"eval_runtime": 53.6302,
"eval_samples_per_second": 186.276,
"eval_steps_per_second": 5.836,
"step": 100
},
{
"epoch": 0.13621038435603505,
"grad_norm": 0.04247555881738663,
"learning_rate": 5.262631578947368e-05,
"loss": 1.2646,
"step": 101
},
{
"epoch": 0.1375590020229265,
"grad_norm": 0.05166122689843178,
"learning_rate": 5.209473684210527e-05,
"loss": 1.0923,
"step": 102
},
{
"epoch": 0.13890761968981793,
"grad_norm": 0.05303310602903366,
"learning_rate": 5.1563157894736844e-05,
"loss": 0.976,
"step": 103
},
{
"epoch": 0.14025623735670936,
"grad_norm": 0.06052260845899582,
"learning_rate": 5.1031578947368426e-05,
"loss": 0.9538,
"step": 104
},
{
"epoch": 0.14160485502360082,
"grad_norm": 0.08307327330112457,
"learning_rate": 5.05e-05,
"loss": 1.0063,
"step": 105
},
{
"epoch": 0.14295347269049224,
"grad_norm": 0.12679146230220795,
"learning_rate": 4.9968421052631576e-05,
"loss": 1.0593,
"step": 106
},
{
"epoch": 0.14430209035738367,
"grad_norm": 0.04843525588512421,
"learning_rate": 4.943684210526316e-05,
"loss": 1.1775,
"step": 107
},
{
"epoch": 0.14565070802427513,
"grad_norm": 0.05228450894355774,
"learning_rate": 4.890526315789474e-05,
"loss": 1.1158,
"step": 108
},
{
"epoch": 0.14699932569116655,
"grad_norm": 0.04866788908839226,
"learning_rate": 4.8373684210526316e-05,
"loss": 0.9987,
"step": 109
},
{
"epoch": 0.14834794335805798,
"grad_norm": 0.058781273663043976,
"learning_rate": 4.784210526315789e-05,
"loss": 0.9326,
"step": 110
},
{
"epoch": 0.14969656102494944,
"grad_norm": 0.07556451857089996,
"learning_rate": 4.731052631578947e-05,
"loss": 0.9832,
"step": 111
},
{
"epoch": 0.15104517869184086,
"grad_norm": 0.10884501785039902,
"learning_rate": 4.6778947368421055e-05,
"loss": 1.0545,
"step": 112
},
{
"epoch": 0.1523937963587323,
"grad_norm": 0.06518316268920898,
"learning_rate": 4.624736842105263e-05,
"loss": 1.2283,
"step": 113
},
{
"epoch": 0.15374241402562375,
"grad_norm": 0.04359838366508484,
"learning_rate": 4.571578947368421e-05,
"loss": 1.212,
"step": 114
},
{
"epoch": 0.15509103169251517,
"grad_norm": 0.0501161552965641,
"learning_rate": 4.518421052631579e-05,
"loss": 1.0282,
"step": 115
},
{
"epoch": 0.1564396493594066,
"grad_norm": 0.0502970926463604,
"learning_rate": 4.465263157894737e-05,
"loss": 0.9603,
"step": 116
},
{
"epoch": 0.15778826702629806,
"grad_norm": 0.07286638021469116,
"learning_rate": 4.412105263157895e-05,
"loss": 0.9425,
"step": 117
},
{
"epoch": 0.15913688469318948,
"grad_norm": 0.10421205312013626,
"learning_rate": 4.358947368421053e-05,
"loss": 1.133,
"step": 118
},
{
"epoch": 0.1604855023600809,
"grad_norm": 0.14437124133110046,
"learning_rate": 4.30578947368421e-05,
"loss": 1.2071,
"step": 119
},
{
"epoch": 0.16183412002697237,
"grad_norm": 0.042188599705696106,
"learning_rate": 4.2526315789473685e-05,
"loss": 1.1616,
"step": 120
},
{
"epoch": 0.1631827376938638,
"grad_norm": 0.05063289403915405,
"learning_rate": 4.199473684210527e-05,
"loss": 1.0771,
"step": 121
},
{
"epoch": 0.16453135536075522,
"grad_norm": 0.05022171884775162,
"learning_rate": 4.146315789473684e-05,
"loss": 0.9249,
"step": 122
},
{
"epoch": 0.16587997302764665,
"grad_norm": 0.06187805160880089,
"learning_rate": 4.093157894736842e-05,
"loss": 0.8931,
"step": 123
},
{
"epoch": 0.1672285906945381,
"grad_norm": 0.08338179439306259,
"learning_rate": 4.0400000000000006e-05,
"loss": 1.0437,
"step": 124
},
{
"epoch": 0.16857720836142953,
"grad_norm": 0.14240223169326782,
"learning_rate": 3.986842105263158e-05,
"loss": 1.0804,
"step": 125
},
{
"epoch": 0.16992582602832096,
"grad_norm": 0.04588592052459717,
"learning_rate": 3.933684210526316e-05,
"loss": 1.2134,
"step": 126
},
{
"epoch": 0.1712744436952124,
"grad_norm": 0.05231839790940285,
"learning_rate": 3.880526315789473e-05,
"loss": 1.1285,
"step": 127
},
{
"epoch": 0.17262306136210384,
"grad_norm": 0.05098731443285942,
"learning_rate": 3.827368421052632e-05,
"loss": 0.9549,
"step": 128
},
{
"epoch": 0.17397167902899527,
"grad_norm": 0.06304141134023666,
"learning_rate": 3.7742105263157896e-05,
"loss": 0.9422,
"step": 129
},
{
"epoch": 0.17532029669588672,
"grad_norm": 0.08062802255153656,
"learning_rate": 3.721052631578947e-05,
"loss": 0.979,
"step": 130
},
{
"epoch": 0.17666891436277815,
"grad_norm": 0.1293242871761322,
"learning_rate": 3.6678947368421054e-05,
"loss": 1.0799,
"step": 131
},
{
"epoch": 0.17801753202966958,
"grad_norm": 0.05265602096915245,
"learning_rate": 3.6147368421052636e-05,
"loss": 1.163,
"step": 132
},
{
"epoch": 0.17936614969656103,
"grad_norm": 0.05232784524559975,
"learning_rate": 3.561578947368421e-05,
"loss": 1.2166,
"step": 133
},
{
"epoch": 0.18071476736345246,
"grad_norm": 0.05148407816886902,
"learning_rate": 3.508421052631579e-05,
"loss": 0.9896,
"step": 134
},
{
"epoch": 0.1820633850303439,
"grad_norm": 0.05892392247915268,
"learning_rate": 3.455263157894737e-05,
"loss": 0.9502,
"step": 135
},
{
"epoch": 0.18341200269723534,
"grad_norm": 0.07530000805854797,
"learning_rate": 3.402105263157895e-05,
"loss": 0.9775,
"step": 136
},
{
"epoch": 0.18476062036412677,
"grad_norm": 0.11473643779754639,
"learning_rate": 3.3489473684210526e-05,
"loss": 1.0452,
"step": 137
},
{
"epoch": 0.1861092380310182,
"grad_norm": 0.0745122954249382,
"learning_rate": 3.295789473684211e-05,
"loss": 1.2439,
"step": 138
},
{
"epoch": 0.18745785569790965,
"grad_norm": 0.04255448654294014,
"learning_rate": 3.242631578947368e-05,
"loss": 1.256,
"step": 139
},
{
"epoch": 0.18880647336480108,
"grad_norm": 0.052311863750219345,
"learning_rate": 3.1894736842105265e-05,
"loss": 1.0707,
"step": 140
},
{
"epoch": 0.1901550910316925,
"grad_norm": 0.05190080404281616,
"learning_rate": 3.136315789473685e-05,
"loss": 0.9218,
"step": 141
},
{
"epoch": 0.19150370869858396,
"grad_norm": 0.06837556511163712,
"learning_rate": 3.083157894736842e-05,
"loss": 1.0064,
"step": 142
},
{
"epoch": 0.1928523263654754,
"grad_norm": 0.10460904985666275,
"learning_rate": 3.0299999999999998e-05,
"loss": 1.0641,
"step": 143
},
{
"epoch": 0.19420094403236682,
"grad_norm": 0.12943609058856964,
"learning_rate": 2.9768421052631577e-05,
"loss": 1.0647,
"step": 144
},
{
"epoch": 0.19554956169925827,
"grad_norm": 0.039514608681201935,
"learning_rate": 2.923684210526316e-05,
"loss": 1.1794,
"step": 145
},
{
"epoch": 0.1968981793661497,
"grad_norm": 0.05353543907403946,
"learning_rate": 2.8705263157894737e-05,
"loss": 1.0881,
"step": 146
},
{
"epoch": 0.19824679703304113,
"grad_norm": 0.05306176468729973,
"learning_rate": 2.8173684210526313e-05,
"loss": 0.9148,
"step": 147
},
{
"epoch": 0.19959541469993258,
"grad_norm": 0.06411965191364288,
"learning_rate": 2.7642105263157898e-05,
"loss": 0.9264,
"step": 148
},
{
"epoch": 0.200944032366824,
"grad_norm": 0.09908481687307358,
"learning_rate": 2.7110526315789473e-05,
"loss": 0.993,
"step": 149
},
{
"epoch": 0.20229265003371544,
"grad_norm": 0.15593570470809937,
"learning_rate": 2.6578947368421052e-05,
"loss": 1.0838,
"step": 150
},
{
"epoch": 0.20229265003371544,
"eval_loss": 1.0475801229476929,
"eval_runtime": 53.1979,
"eval_samples_per_second": 187.789,
"eval_steps_per_second": 5.884,
"step": 150
},
{
"epoch": 0.20364126770060686,
"grad_norm": 0.04346940666437149,
"learning_rate": 2.6047368421052634e-05,
"loss": 1.2374,
"step": 151
},
{
"epoch": 0.20498988536749832,
"grad_norm": 0.05166761204600334,
"learning_rate": 2.5515789473684213e-05,
"loss": 1.122,
"step": 152
},
{
"epoch": 0.20633850303438975,
"grad_norm": 0.050422292202711105,
"learning_rate": 2.4984210526315788e-05,
"loss": 0.9183,
"step": 153
},
{
"epoch": 0.20768712070128117,
"grad_norm": 0.064626045525074,
"learning_rate": 2.445263157894737e-05,
"loss": 0.9469,
"step": 154
},
{
"epoch": 0.20903573836817263,
"grad_norm": 0.08242257684469223,
"learning_rate": 2.3921052631578946e-05,
"loss": 0.9811,
"step": 155
},
{
"epoch": 0.21038435603506406,
"grad_norm": 0.12588949501514435,
"learning_rate": 2.3389473684210528e-05,
"loss": 1.0624,
"step": 156
},
{
"epoch": 0.21173297370195548,
"grad_norm": 0.049975764006376266,
"learning_rate": 2.2857894736842106e-05,
"loss": 1.2219,
"step": 157
},
{
"epoch": 0.21308159136884694,
"grad_norm": 0.05141577497124672,
"learning_rate": 2.2326315789473685e-05,
"loss": 1.2365,
"step": 158
},
{
"epoch": 0.21443020903573837,
"grad_norm": 0.05125826224684715,
"learning_rate": 2.1794736842105264e-05,
"loss": 1.0173,
"step": 159
},
{
"epoch": 0.2157788267026298,
"grad_norm": 0.057574521750211716,
"learning_rate": 2.1263157894736842e-05,
"loss": 0.9291,
"step": 160
},
{
"epoch": 0.21712744436952125,
"grad_norm": 0.0720609799027443,
"learning_rate": 2.073157894736842e-05,
"loss": 0.9638,
"step": 161
},
{
"epoch": 0.21847606203641268,
"grad_norm": 0.10732440650463104,
"learning_rate": 2.0200000000000003e-05,
"loss": 1.0801,
"step": 162
},
{
"epoch": 0.2198246797033041,
"grad_norm": 0.06644244492053986,
"learning_rate": 1.966842105263158e-05,
"loss": 1.1543,
"step": 163
},
{
"epoch": 0.22117329737019556,
"grad_norm": 0.045897357165813446,
"learning_rate": 1.913684210526316e-05,
"loss": 1.1867,
"step": 164
},
{
"epoch": 0.222521915037087,
"grad_norm": 0.05033602565526962,
"learning_rate": 1.8605263157894736e-05,
"loss": 1.0082,
"step": 165
},
{
"epoch": 0.22387053270397841,
"grad_norm": 0.05498496815562248,
"learning_rate": 1.8073684210526318e-05,
"loss": 0.9098,
"step": 166
},
{
"epoch": 0.22521915037086987,
"grad_norm": 0.07342254370450974,
"learning_rate": 1.7542105263157897e-05,
"loss": 0.962,
"step": 167
},
{
"epoch": 0.2265677680377613,
"grad_norm": 0.1001623272895813,
"learning_rate": 1.7010526315789475e-05,
"loss": 1.0317,
"step": 168
},
{
"epoch": 0.22791638570465272,
"grad_norm": 0.11887964606285095,
"learning_rate": 1.6478947368421054e-05,
"loss": 1.2236,
"step": 169
},
{
"epoch": 0.22926500337154418,
"grad_norm": 0.04175285995006561,
"learning_rate": 1.5947368421052633e-05,
"loss": 1.1866,
"step": 170
},
{
"epoch": 0.2306136210384356,
"grad_norm": 0.05170948803424835,
"learning_rate": 1.541578947368421e-05,
"loss": 1.0739,
"step": 171
},
{
"epoch": 0.23196223870532703,
"grad_norm": 0.05022229626774788,
"learning_rate": 1.4884210526315788e-05,
"loss": 0.9708,
"step": 172
},
{
"epoch": 0.2333108563722185,
"grad_norm": 0.0649142786860466,
"learning_rate": 1.4352631578947369e-05,
"loss": 0.9386,
"step": 173
},
{
"epoch": 0.23465947403910992,
"grad_norm": 0.09284715354442596,
"learning_rate": 1.3821052631578949e-05,
"loss": 1.016,
"step": 174
},
{
"epoch": 0.23600809170600134,
"grad_norm": 0.18021555244922638,
"learning_rate": 1.3289473684210526e-05,
"loss": 1.0997,
"step": 175
},
{
"epoch": 0.23735670937289277,
"grad_norm": 0.04347499459981918,
"learning_rate": 1.2757894736842106e-05,
"loss": 1.2749,
"step": 176
},
{
"epoch": 0.23870532703978423,
"grad_norm": 0.05659535899758339,
"learning_rate": 1.2226315789473685e-05,
"loss": 1.1002,
"step": 177
},
{
"epoch": 0.24005394470667565,
"grad_norm": 0.052577510476112366,
"learning_rate": 1.1694736842105264e-05,
"loss": 0.9994,
"step": 178
},
{
"epoch": 0.24140256237356708,
"grad_norm": 0.057779863476753235,
"learning_rate": 1.1163157894736842e-05,
"loss": 0.909,
"step": 179
},
{
"epoch": 0.24275118004045854,
"grad_norm": 0.08827288448810577,
"learning_rate": 1.0631578947368421e-05,
"loss": 1.0058,
"step": 180
},
{
"epoch": 0.24409979770734996,
"grad_norm": 0.13255777955055237,
"learning_rate": 1.0100000000000002e-05,
"loss": 1.0172,
"step": 181
},
{
"epoch": 0.2454484153742414,
"grad_norm": 0.04913058876991272,
"learning_rate": 9.56842105263158e-06,
"loss": 1.2372,
"step": 182
},
{
"epoch": 0.24679703304113285,
"grad_norm": 0.04706710949540138,
"learning_rate": 9.036842105263159e-06,
"loss": 1.2301,
"step": 183
},
{
"epoch": 0.24814565070802427,
"grad_norm": 0.054957691580057144,
"learning_rate": 8.505263157894738e-06,
"loss": 1.0688,
"step": 184
},
{
"epoch": 0.2494942683749157,
"grad_norm": 0.05530289560556412,
"learning_rate": 7.973684210526316e-06,
"loss": 0.8931,
"step": 185
},
{
"epoch": 0.25084288604180716,
"grad_norm": 0.0766233429312706,
"learning_rate": 7.442105263157894e-06,
"loss": 0.9363,
"step": 186
},
{
"epoch": 0.2521915037086986,
"grad_norm": 0.11903451383113861,
"learning_rate": 6.9105263157894745e-06,
"loss": 1.0512,
"step": 187
},
{
"epoch": 0.25354012137559,
"grad_norm": 0.060021765530109406,
"learning_rate": 6.378947368421053e-06,
"loss": 1.2052,
"step": 188
},
{
"epoch": 0.25488873904248144,
"grad_norm": 0.04856010153889656,
"learning_rate": 5.847368421052632e-06,
"loss": 1.1913,
"step": 189
},
{
"epoch": 0.25623735670937287,
"grad_norm": 0.05699603632092476,
"learning_rate": 5.315789473684211e-06,
"loss": 1.0949,
"step": 190
},
{
"epoch": 0.25758597437626435,
"grad_norm": 0.05736822634935379,
"learning_rate": 4.78421052631579e-06,
"loss": 0.928,
"step": 191
},
{
"epoch": 0.2589345920431558,
"grad_norm": 0.07478796690702438,
"learning_rate": 4.252631578947369e-06,
"loss": 0.9499,
"step": 192
},
{
"epoch": 0.2602832097100472,
"grad_norm": 0.10775664448738098,
"learning_rate": 3.721052631578947e-06,
"loss": 1.0327,
"step": 193
},
{
"epoch": 0.26163182737693863,
"grad_norm": 0.12051226943731308,
"learning_rate": 3.1894736842105266e-06,
"loss": 1.1059,
"step": 194
},
{
"epoch": 0.26298044504383006,
"grad_norm": 0.042873069643974304,
"learning_rate": 2.6578947368421053e-06,
"loss": 1.2271,
"step": 195
},
{
"epoch": 0.2643290627107215,
"grad_norm": 0.053670722991228104,
"learning_rate": 2.1263157894736844e-06,
"loss": 1.0726,
"step": 196
},
{
"epoch": 0.26567768037761297,
"grad_norm": 0.056742120534181595,
"learning_rate": 1.5947368421052633e-06,
"loss": 0.9458,
"step": 197
},
{
"epoch": 0.2670262980445044,
"grad_norm": 0.06713538616895676,
"learning_rate": 1.0631578947368422e-06,
"loss": 0.939,
"step": 198
},
{
"epoch": 0.2683749157113958,
"grad_norm": 0.09788156300783157,
"learning_rate": 5.315789473684211e-07,
"loss": 1.0481,
"step": 199
},
{
"epoch": 0.26972353337828725,
"grad_norm": 0.16827541589736938,
"learning_rate": 0.0,
"loss": 1.1147,
"step": 200
},
{
"epoch": 0.26972353337828725,
"eval_loss": 1.0406328439712524,
"eval_runtime": 53.1358,
"eval_samples_per_second": 188.009,
"eval_steps_per_second": 5.891,
"step": 200
}
],
"logging_steps": 1,
"max_steps": 200,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 50,
"stateful_callbacks": {
"EarlyStoppingCallback": {
"args": {
"early_stopping_patience": 5,
"early_stopping_threshold": 0.0
},
"attributes": {
"early_stopping_patience_counter": 0
}
},
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 5.526924234200187e+17,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}