{
  "best_metric": 0.44632911682128906,
  "best_model_checkpoint": "miner_id_24/checkpoint-50",
  "epoch": 0.0821186614658181,
  "eval_steps": 50,
  "global_step": 100,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.000821186614658181,
      "grad_norm": 2.0305542945861816,
      "learning_rate": 1e-05,
      "loss": 0.7261,
      "step": 1
    },
    {
      "epoch": 0.000821186614658181,
      "eval_loss": 1.465772271156311,
      "eval_runtime": 288.2262,
      "eval_samples_per_second": 7.116,
      "eval_steps_per_second": 1.78,
      "step": 1
    },
    {
      "epoch": 0.001642373229316362,
      "grad_norm": 2.114654541015625,
      "learning_rate": 2e-05,
      "loss": 0.7726,
      "step": 2
    },
    {
      "epoch": 0.002463559843974543,
      "grad_norm": 2.2381458282470703,
      "learning_rate": 3e-05,
      "loss": 0.8371,
      "step": 3
    },
    {
      "epoch": 0.003284746458632724,
      "grad_norm": 1.9415267705917358,
      "learning_rate": 4e-05,
      "loss": 0.7885,
      "step": 4
    },
    {
      "epoch": 0.0041059330732909054,
      "grad_norm": 1.484900712966919,
      "learning_rate": 5e-05,
      "loss": 0.7522,
      "step": 5
    },
    {
      "epoch": 0.004927119687949086,
      "grad_norm": 1.259929895401001,
      "learning_rate": 6e-05,
      "loss": 0.7248,
      "step": 6
    },
    {
      "epoch": 0.005748306302607267,
      "grad_norm": 0.9762012958526611,
      "learning_rate": 7e-05,
      "loss": 0.5922,
      "step": 7
    },
    {
      "epoch": 0.006569492917265448,
      "grad_norm": 1.0204635858535767,
      "learning_rate": 8e-05,
      "loss": 0.5452,
      "step": 8
    },
    {
      "epoch": 0.00739067953192363,
      "grad_norm": 0.8520223498344421,
      "learning_rate": 9e-05,
      "loss": 0.4675,
      "step": 9
    },
    {
      "epoch": 0.008211866146581811,
      "grad_norm": 1.470017433166504,
      "learning_rate": 0.0001,
      "loss": 0.4164,
      "step": 10
    },
    {
      "epoch": 0.009033052761239991,
      "grad_norm": 0.8858131766319275,
      "learning_rate": 9.99983777858264e-05,
      "loss": 0.3847,
      "step": 11
    },
    {
      "epoch": 0.009854239375898173,
      "grad_norm": 1.0039604902267456,
      "learning_rate": 9.999351124856874e-05,
      "loss": 0.3836,
      "step": 12
    },
    {
      "epoch": 0.010675425990556354,
      "grad_norm": 0.6317289471626282,
      "learning_rate": 9.998540070400966e-05,
      "loss": 0.3325,
      "step": 13
    },
    {
      "epoch": 0.011496612605214535,
      "grad_norm": 0.680807888507843,
      "learning_rate": 9.997404667843075e-05,
      "loss": 0.3493,
      "step": 14
    },
    {
      "epoch": 0.012317799219872716,
      "grad_norm": 0.6124119758605957,
      "learning_rate": 9.995944990857849e-05,
      "loss": 0.3204,
      "step": 15
    },
    {
      "epoch": 0.013138985834530896,
      "grad_norm": 0.6300580501556396,
      "learning_rate": 9.994161134161634e-05,
      "loss": 0.3341,
      "step": 16
    },
    {
      "epoch": 0.013960172449189078,
      "grad_norm": 0.608344554901123,
      "learning_rate": 9.992053213506334e-05,
      "loss": 0.3386,
      "step": 17
    },
    {
      "epoch": 0.01478135906384726,
      "grad_norm": 0.5785073041915894,
      "learning_rate": 9.989621365671902e-05,
      "loss": 0.3035,
      "step": 18
    },
    {
      "epoch": 0.01560254567850544,
      "grad_norm": 0.6188136339187622,
      "learning_rate": 9.986865748457457e-05,
      "loss": 0.3162,
      "step": 19
    },
    {
      "epoch": 0.016423732293163622,
      "grad_norm": 0.5587109923362732,
      "learning_rate": 9.983786540671051e-05,
      "loss": 0.273,
      "step": 20
    },
    {
      "epoch": 0.017244918907821802,
      "grad_norm": 0.5472216606140137,
      "learning_rate": 9.980383942118066e-05,
      "loss": 0.3215,
      "step": 21
    },
    {
      "epoch": 0.018066105522479982,
      "grad_norm": 0.4810258150100708,
      "learning_rate": 9.976658173588244e-05,
      "loss": 0.307,
      "step": 22
    },
    {
      "epoch": 0.018887292137138165,
      "grad_norm": 0.5424895882606506,
      "learning_rate": 9.972609476841367e-05,
      "loss": 0.3065,
      "step": 23
    },
    {
      "epoch": 0.019708478751796345,
      "grad_norm": 0.5074129700660706,
      "learning_rate": 9.968238114591566e-05,
      "loss": 0.2774,
      "step": 24
    },
    {
      "epoch": 0.020529665366454525,
      "grad_norm": 0.5599948167800903,
      "learning_rate": 9.96354437049027e-05,
      "loss": 0.2691,
      "step": 25
    },
    {
      "epoch": 0.02135085198111271,
      "grad_norm": 0.6742061972618103,
      "learning_rate": 9.95852854910781e-05,
      "loss": 0.2898,
      "step": 26
    },
    {
      "epoch": 0.02217203859577089,
      "grad_norm": 0.5526089072227478,
      "learning_rate": 9.953190975913647e-05,
      "loss": 0.3071,
      "step": 27
    },
    {
      "epoch": 0.02299322521042907,
      "grad_norm": 0.5390534996986389,
      "learning_rate": 9.947531997255256e-05,
      "loss": 0.2906,
      "step": 28
    },
    {
      "epoch": 0.023814411825087253,
      "grad_norm": 0.504539430141449,
      "learning_rate": 9.941551980335652e-05,
      "loss": 0.2688,
      "step": 29
    },
    {
      "epoch": 0.024635598439745433,
      "grad_norm": 0.4898300766944885,
      "learning_rate": 9.935251313189564e-05,
      "loss": 0.2845,
      "step": 30
    },
    {
      "epoch": 0.025456785054403613,
      "grad_norm": 0.49974775314331055,
      "learning_rate": 9.928630404658255e-05,
      "loss": 0.2702,
      "step": 31
    },
    {
      "epoch": 0.026277971669061793,
      "grad_norm": 0.5819835662841797,
      "learning_rate": 9.921689684362989e-05,
      "loss": 0.2699,
      "step": 32
    },
    {
      "epoch": 0.027099158283719976,
      "grad_norm": 0.6171815395355225,
      "learning_rate": 9.914429602677162e-05,
      "loss": 0.3477,
      "step": 33
    },
    {
      "epoch": 0.027920344898378156,
      "grad_norm": 0.6020647883415222,
      "learning_rate": 9.906850630697068e-05,
      "loss": 0.2901,
      "step": 34
    },
    {
      "epoch": 0.028741531513036336,
      "grad_norm": 0.7458943724632263,
      "learning_rate": 9.898953260211338e-05,
      "loss": 0.2498,
      "step": 35
    },
    {
      "epoch": 0.02956271812769452,
      "grad_norm": 0.6588975787162781,
      "learning_rate": 9.890738003669029e-05,
      "loss": 0.2599,
      "step": 36
    },
    {
      "epoch": 0.0303839047423527,
      "grad_norm": 0.6839740872383118,
      "learning_rate": 9.882205394146361e-05,
      "loss": 0.337,
      "step": 37
    },
    {
      "epoch": 0.03120509135701088,
      "grad_norm": 0.6753020882606506,
      "learning_rate": 9.87335598531214e-05,
      "loss": 0.3069,
      "step": 38
    },
    {
      "epoch": 0.032026277971669063,
      "grad_norm": 0.8709951043128967,
      "learning_rate": 9.864190351391822e-05,
      "loss": 0.3396,
      "step": 39
    },
    {
      "epoch": 0.032847464586327244,
      "grad_norm": 0.6819374561309814,
      "learning_rate": 9.85470908713026e-05,
      "loss": 0.2963,
      "step": 40
    },
    {
      "epoch": 0.033668651200985424,
      "grad_norm": 0.856820821762085,
      "learning_rate": 9.844912807753104e-05,
      "loss": 0.289,
      "step": 41
    },
    {
      "epoch": 0.034489837815643604,
      "grad_norm": 0.7196516990661621,
      "learning_rate": 9.834802148926882e-05,
      "loss": 0.2858,
      "step": 42
    },
    {
      "epoch": 0.035311024430301784,
      "grad_norm": 0.7757695913314819,
      "learning_rate": 9.824377766717759e-05,
      "loss": 0.2882,
      "step": 43
    },
    {
      "epoch": 0.036132211044959964,
      "grad_norm": 0.7751405835151672,
      "learning_rate": 9.813640337548954e-05,
      "loss": 0.3174,
      "step": 44
    },
    {
      "epoch": 0.03695339765961815,
      "grad_norm": 0.8659068942070007,
      "learning_rate": 9.802590558156862e-05,
      "loss": 0.2818,
      "step": 45
    },
    {
      "epoch": 0.03777458427427633,
      "grad_norm": 1.336848258972168,
      "learning_rate": 9.791229145545831e-05,
      "loss": 0.3209,
      "step": 46
    },
    {
      "epoch": 0.03859577088893451,
      "grad_norm": 0.9144354462623596,
      "learning_rate": 9.779556836941645e-05,
      "loss": 0.28,
      "step": 47
    },
    {
      "epoch": 0.03941695750359269,
      "grad_norm": 0.8315229415893555,
      "learning_rate": 9.767574389743682e-05,
      "loss": 0.276,
      "step": 48
    },
    {
      "epoch": 0.04023814411825087,
      "grad_norm": 0.967755913734436,
      "learning_rate": 9.755282581475769e-05,
      "loss": 0.3298,
      "step": 49
    },
    {
      "epoch": 0.04105933073290905,
      "grad_norm": 1.0207139253616333,
      "learning_rate": 9.742682209735727e-05,
      "loss": 0.3058,
      "step": 50
    },
    {
      "epoch": 0.04105933073290905,
      "eval_loss": 0.44632911682128906,
      "eval_runtime": 291.2748,
      "eval_samples_per_second": 7.041,
      "eval_steps_per_second": 1.761,
      "step": 50
    },
    {
      "epoch": 0.04188051734756724,
      "grad_norm": 1.5864020586013794,
      "learning_rate": 9.729774092143627e-05,
      "loss": 0.4838,
      "step": 51
    },
    {
      "epoch": 0.04270170396222542,
      "grad_norm": 1.2497475147247314,
      "learning_rate": 9.716559066288715e-05,
      "loss": 0.4411,
      "step": 52
    },
    {
      "epoch": 0.0435228905768836,
      "grad_norm": 0.9487363696098328,
      "learning_rate": 9.703037989675087e-05,
      "loss": 0.3657,
      "step": 53
    },
    {
      "epoch": 0.04434407719154178,
      "grad_norm": 0.33357444405555725,
      "learning_rate": 9.689211739666023e-05,
      "loss": 0.2727,
      "step": 54
    },
    {
      "epoch": 0.04516526380619996,
      "grad_norm": 0.4681488573551178,
      "learning_rate": 9.675081213427076e-05,
      "loss": 0.315,
      "step": 55
    },
    {
      "epoch": 0.04598645042085814,
      "grad_norm": 0.4412896931171417,
      "learning_rate": 9.66064732786784e-05,
      "loss": 0.3051,
      "step": 56
    },
    {
      "epoch": 0.04680763703551632,
      "grad_norm": 0.42806002497673035,
      "learning_rate": 9.645911019582467e-05,
      "loss": 0.296,
      "step": 57
    },
    {
      "epoch": 0.047628823650174505,
      "grad_norm": 0.3795586824417114,
      "learning_rate": 9.630873244788883e-05,
      "loss": 0.3184,
      "step": 58
    },
    {
      "epoch": 0.048450010264832685,
      "grad_norm": 0.34351328015327454,
      "learning_rate": 9.615534979266745e-05,
      "loss": 0.2832,
      "step": 59
    },
    {
      "epoch": 0.049271196879490865,
      "grad_norm": 0.35215750336647034,
      "learning_rate": 9.599897218294122e-05,
      "loss": 0.3083,
      "step": 60
    },
    {
      "epoch": 0.050092383494149045,
      "grad_norm": 0.3156924843788147,
      "learning_rate": 9.583960976582913e-05,
      "loss": 0.2577,
      "step": 61
    },
    {
      "epoch": 0.050913570108807225,
      "grad_norm": 0.4058922231197357,
      "learning_rate": 9.567727288213005e-05,
      "loss": 0.3156,
      "step": 62
    },
    {
      "epoch": 0.051734756723465405,
      "grad_norm": 0.3673064410686493,
      "learning_rate": 9.551197206565173e-05,
      "loss": 0.2932,
      "step": 63
    },
    {
      "epoch": 0.052555943338123585,
      "grad_norm": 0.3226417601108551,
      "learning_rate": 9.534371804252728e-05,
      "loss": 0.2404,
      "step": 64
    },
    {
      "epoch": 0.05337712995278177,
      "grad_norm": 0.3365015685558319,
      "learning_rate": 9.517252173051911e-05,
      "loss": 0.263,
      "step": 65
    },
    {
      "epoch": 0.05419831656743995,
      "grad_norm": 0.38001781702041626,
      "learning_rate": 9.49983942383106e-05,
      "loss": 0.2973,
      "step": 66
    },
    {
      "epoch": 0.05501950318209813,
      "grad_norm": 0.5203686356544495,
      "learning_rate": 9.482134686478519e-05,
      "loss": 0.2693,
      "step": 67
    },
    {
      "epoch": 0.05584068979675631,
      "grad_norm": 0.32921281456947327,
      "learning_rate": 9.464139109829321e-05,
      "loss": 0.2742,
      "step": 68
    },
    {
      "epoch": 0.05666187641141449,
      "grad_norm": 0.43800121545791626,
      "learning_rate": 9.445853861590647e-05,
      "loss": 0.2915,
      "step": 69
    },
    {
      "epoch": 0.05748306302607267,
      "grad_norm": 0.32359007000923157,
      "learning_rate": 9.42728012826605e-05,
      "loss": 0.218,
      "step": 70
    },
    {
      "epoch": 0.05830424964073085,
      "grad_norm": 0.39324089884757996,
      "learning_rate": 9.408419115078471e-05,
      "loss": 0.2635,
      "step": 71
    },
    {
      "epoch": 0.05912543625538904,
      "grad_norm": 0.45099887251853943,
      "learning_rate": 9.389272045892024e-05,
      "loss": 0.2402,
      "step": 72
    },
    {
      "epoch": 0.05994662287004722,
      "grad_norm": 0.4052051603794098,
      "learning_rate": 9.36984016313259e-05,
      "loss": 0.2775,
      "step": 73
    },
    {
      "epoch": 0.0607678094847054,
      "grad_norm": 0.41129499673843384,
      "learning_rate": 9.350124727707197e-05,
      "loss": 0.2564,
      "step": 74
    },
    {
      "epoch": 0.06158899609936358,
      "grad_norm": 0.3723108470439911,
      "learning_rate": 9.330127018922194e-05,
      "loss": 0.2614,
      "step": 75
    },
    {
      "epoch": 0.06241018271402176,
      "grad_norm": 0.39635589718818665,
      "learning_rate": 9.309848334400246e-05,
      "loss": 0.2825,
      "step": 76
    },
    {
      "epoch": 0.06323136932867994,
      "grad_norm": 0.4818005859851837,
      "learning_rate": 9.289289989996133e-05,
      "loss": 0.2539,
      "step": 77
    },
    {
      "epoch": 0.06405255594333813,
      "grad_norm": 0.4484040439128876,
      "learning_rate": 9.268453319711363e-05,
      "loss": 0.2857,
      "step": 78
    },
    {
      "epoch": 0.0648737425579963,
      "grad_norm": 0.46060118079185486,
      "learning_rate": 9.247339675607605e-05,
      "loss": 0.2745,
      "step": 79
    },
    {
      "epoch": 0.06569492917265449,
      "grad_norm": 0.3952708840370178,
      "learning_rate": 9.225950427718975e-05,
      "loss": 0.2954,
      "step": 80
    },
    {
      "epoch": 0.06651611578731266,
      "grad_norm": 0.4594082534313202,
      "learning_rate": 9.204286963963111e-05,
      "loss": 0.2647,
      "step": 81
    },
    {
      "epoch": 0.06733730240197085,
      "grad_norm": 0.498677134513855,
      "learning_rate": 9.182350690051133e-05,
      "loss": 0.2741,
      "step": 82
    },
    {
      "epoch": 0.06815848901662903,
      "grad_norm": 0.48790332674980164,
      "learning_rate": 9.160143029396422e-05,
      "loss": 0.217,
      "step": 83
    },
    {
      "epoch": 0.06897967563128721,
      "grad_norm": 0.4500594437122345,
      "learning_rate": 9.13766542302225e-05,
      "loss": 0.2478,
      "step": 84
    },
    {
      "epoch": 0.0698008622459454,
      "grad_norm": 0.477200984954834,
      "learning_rate": 9.114919329468282e-05,
      "loss": 0.276,
      "step": 85
    },
    {
      "epoch": 0.07062204886060357,
      "grad_norm": 0.5421808362007141,
      "learning_rate": 9.091906224695935e-05,
      "loss": 0.3018,
      "step": 86
    },
    {
      "epoch": 0.07144323547526175,
      "grad_norm": 0.473712295293808,
      "learning_rate": 9.068627601992598e-05,
      "loss": 0.2687,
      "step": 87
    },
    {
      "epoch": 0.07226442208991993,
      "grad_norm": 0.5594393610954285,
      "learning_rate": 9.045084971874738e-05,
      "loss": 0.2515,
      "step": 88
    },
    {
      "epoch": 0.07308560870457811,
      "grad_norm": 0.6386433839797974,
      "learning_rate": 9.021279861989885e-05,
      "loss": 0.2529,
      "step": 89
    },
    {
      "epoch": 0.0739067953192363,
      "grad_norm": 0.5319457650184631,
      "learning_rate": 8.997213817017507e-05,
      "loss": 0.2646,
      "step": 90
    },
    {
      "epoch": 0.07472798193389447,
      "grad_norm": 0.595516562461853,
      "learning_rate": 8.972888398568772e-05,
      "loss": 0.2756,
      "step": 91
    },
    {
      "epoch": 0.07554916854855266,
      "grad_norm": 0.6601677536964417,
      "learning_rate": 8.948305185085225e-05,
      "loss": 0.2805,
      "step": 92
    },
    {
      "epoch": 0.07637035516321083,
      "grad_norm": 0.6162546873092651,
      "learning_rate": 8.92346577173636e-05,
      "loss": 0.2435,
      "step": 93
    },
    {
      "epoch": 0.07719154177786902,
      "grad_norm": 0.6918389797210693,
      "learning_rate": 8.898371770316111e-05,
      "loss": 0.3159,
      "step": 94
    },
    {
      "epoch": 0.07801272839252721,
      "grad_norm": 0.7181240916252136,
      "learning_rate": 8.873024809138272e-05,
      "loss": 0.2413,
      "step": 95
    },
    {
      "epoch": 0.07883391500718538,
      "grad_norm": 1.1472549438476562,
      "learning_rate": 8.847426532930831e-05,
      "loss": 0.2949,
      "step": 96
    },
    {
      "epoch": 0.07965510162184357,
      "grad_norm": 0.9359822273254395,
      "learning_rate": 8.821578602729242e-05,
      "loss": 0.316,
      "step": 97
    },
    {
      "epoch": 0.08047628823650174,
      "grad_norm": 0.7444966435432434,
      "learning_rate": 8.795482695768658e-05,
      "loss": 0.3119,
      "step": 98
    },
    {
      "epoch": 0.08129747485115993,
      "grad_norm": 1.2565944194793701,
      "learning_rate": 8.769140505375085e-05,
      "loss": 0.2877,
      "step": 99
    },
    {
      "epoch": 0.0821186614658181,
      "grad_norm": 0.9129545092582703,
      "learning_rate": 8.742553740855506e-05,
      "loss": 0.3209,
      "step": 100
    },
    {
      "epoch": 0.0821186614658181,
      "eval_loss": 0.4635893702507019,
      "eval_runtime": 290.5387,
      "eval_samples_per_second": 7.059,
      "eval_steps_per_second": 1.766,
      "step": 100
    }
  ],
  "logging_steps": 1,
  "max_steps": 400,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 50,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 5,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 1
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 1.7401760676682138e+17,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}