{
  "best_metric": 0.011105372570455074,
  "best_model_checkpoint": "miner_id_24/checkpoint-100",
  "epoch": 0.4069175991861648,
  "eval_steps": 25,
  "global_step": 100,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.004069175991861648,
      "grad_norm": 0.18430504202842712,
      "learning_rate": 2.9999999999999997e-05,
      "loss": 0.4546,
      "step": 1
    },
    {
      "epoch": 0.004069175991861648,
      "eval_loss": 0.2489006370306015,
      "eval_runtime": 12.8231,
      "eval_samples_per_second": 3.899,
      "eval_steps_per_second": 0.546,
      "step": 1
    },
    {
      "epoch": 0.008138351983723296,
      "grad_norm": 0.20032061636447906,
      "learning_rate": 5.9999999999999995e-05,
      "loss": 0.4626,
      "step": 2
    },
    {
      "epoch": 0.012207527975584944,
      "grad_norm": 0.1911066621541977,
      "learning_rate": 8.999999999999999e-05,
      "loss": 0.4799,
      "step": 3
    },
    {
      "epoch": 0.01627670396744659,
      "grad_norm": 0.20888791978359222,
      "learning_rate": 0.00011999999999999999,
      "loss": 0.4716,
      "step": 4
    },
    {
      "epoch": 0.02034587995930824,
      "grad_norm": 0.24220263957977295,
      "learning_rate": 0.00015,
      "loss": 0.4777,
      "step": 5
    },
    {
      "epoch": 0.024415055951169887,
      "grad_norm": 0.25128036737442017,
      "learning_rate": 0.00017999999999999998,
      "loss": 0.3979,
      "step": 6
    },
    {
      "epoch": 0.028484231943031537,
      "grad_norm": 0.1742318868637085,
      "learning_rate": 0.00020999999999999998,
      "loss": 0.3759,
      "step": 7
    },
    {
      "epoch": 0.03255340793489318,
      "grad_norm": 0.17110617458820343,
      "learning_rate": 0.00023999999999999998,
      "loss": 0.3271,
      "step": 8
    },
    {
      "epoch": 0.03662258392675483,
      "grad_norm": 0.19768884778022766,
      "learning_rate": 0.00027,
      "loss": 0.2259,
      "step": 9
    },
    {
      "epoch": 0.04069175991861648,
      "grad_norm": 0.2143348902463913,
      "learning_rate": 0.0003,
      "loss": 0.1569,
      "step": 10
    },
    {
      "epoch": 0.044760935910478125,
      "grad_norm": 0.2793988287448883,
      "learning_rate": 0.0002999794957488703,
      "loss": 0.1393,
      "step": 11
    },
    {
      "epoch": 0.048830111902339775,
      "grad_norm": 0.2352091372013092,
      "learning_rate": 0.0002999179886011389,
      "loss": 0.0939,
      "step": 12
    },
    {
      "epoch": 0.052899287894201424,
      "grad_norm": 0.17386719584465027,
      "learning_rate": 0.0002998154953722457,
      "loss": 0.0802,
      "step": 13
    },
    {
      "epoch": 0.056968463886063074,
      "grad_norm": 0.5942983627319336,
      "learning_rate": 0.00029967204408281613,
      "loss": 0.0948,
      "step": 14
    },
    {
      "epoch": 0.061037639877924724,
      "grad_norm": 0.21323800086975098,
      "learning_rate": 0.00029948767395100045,
      "loss": 0.0423,
      "step": 15
    },
    {
      "epoch": 0.06510681586978637,
      "grad_norm": 0.11683189123868942,
      "learning_rate": 0.0002992624353817517,
      "loss": 0.0327,
      "step": 16
    },
    {
      "epoch": 0.06917599186164802,
      "grad_norm": 0.09624740481376648,
      "learning_rate": 0.0002989963899530457,
      "loss": 0.0322,
      "step": 17
    },
    {
      "epoch": 0.07324516785350967,
      "grad_norm": 0.12023144215345383,
      "learning_rate": 0.00029868961039904624,
      "loss": 0.0437,
      "step": 18
    },
    {
      "epoch": 0.07731434384537131,
      "grad_norm": 0.4698963761329651,
      "learning_rate": 0.00029834218059022024,
      "loss": 0.1443,
      "step": 19
    },
    {
      "epoch": 0.08138351983723296,
      "grad_norm": 1.0405900478363037,
      "learning_rate": 0.00029795419551040833,
      "loss": 0.4887,
      "step": 20
    },
    {
      "epoch": 0.08545269582909461,
      "grad_norm": 0.5793169736862183,
      "learning_rate": 0.00029752576123085736,
      "loss": 0.4255,
      "step": 21
    },
    {
      "epoch": 0.08952187182095625,
      "grad_norm": 0.5396560430526733,
      "learning_rate": 0.0002970569948812214,
      "loss": 0.4089,
      "step": 22
    },
    {
      "epoch": 0.0935910478128179,
      "grad_norm": 0.43461382389068604,
      "learning_rate": 0.0002965480246175399,
      "loss": 0.5076,
      "step": 23
    },
    {
      "epoch": 0.09766022380467955,
      "grad_norm": 0.4178922474384308,
      "learning_rate": 0.0002959989895872009,
      "loss": 0.5139,
      "step": 24
    },
    {
      "epoch": 0.1017293997965412,
      "grad_norm": 0.47736668586730957,
      "learning_rate": 0.0002954100398908995,
      "loss": 0.6272,
      "step": 25
    },
    {
      "epoch": 0.1017293997965412,
      "eval_loss": 0.14711476862430573,
      "eval_runtime": 13.014,
      "eval_samples_per_second": 3.842,
      "eval_steps_per_second": 0.538,
      "step": 25
    },
    {
      "epoch": 0.10579857578840285,
      "grad_norm": 0.5472037196159363,
      "learning_rate": 0.0002947813365416023,
      "loss": 0.6612,
      "step": 26
    },
    {
      "epoch": 0.10986775178026449,
      "grad_norm": 0.4757766127586365,
      "learning_rate": 0.0002941130514205272,
      "loss": 0.5034,
      "step": 27
    },
    {
      "epoch": 0.11393692777212615,
      "grad_norm": 0.43715453147888184,
      "learning_rate": 0.0002934053672301536,
      "loss": 0.4988,
      "step": 28
    },
    {
      "epoch": 0.11800610376398779,
      "grad_norm": 0.4352036416530609,
      "learning_rate": 0.00029265847744427303,
      "loss": 0.54,
      "step": 29
    },
    {
      "epoch": 0.12207527975584945,
      "grad_norm": 0.5779057741165161,
      "learning_rate": 0.00029187258625509513,
      "loss": 0.4994,
      "step": 30
    },
    {
      "epoch": 0.12614445574771108,
      "grad_norm": 0.5189437866210938,
      "learning_rate": 0.00029104790851742417,
      "loss": 0.5054,
      "step": 31
    },
    {
      "epoch": 0.13021363173957273,
      "grad_norm": 0.5969810485839844,
      "learning_rate": 0.0002901846696899191,
      "loss": 0.6,
      "step": 32
    },
    {
      "epoch": 0.1342828077314344,
      "grad_norm": 0.6625540256500244,
      "learning_rate": 0.00028928310577345606,
      "loss": 0.4243,
      "step": 33
    },
    {
      "epoch": 0.13835198372329605,
      "grad_norm": 0.6620368361473083,
      "learning_rate": 0.0002883434632466077,
      "loss": 0.6029,
      "step": 34
    },
    {
      "epoch": 0.14242115971515767,
      "grad_norm": 0.6099522709846497,
      "learning_rate": 0.00028736599899825856,
      "loss": 0.4348,
      "step": 35
    },
    {
      "epoch": 0.14649033570701933,
      "grad_norm": 0.696644127368927,
      "learning_rate": 0.00028635098025737434,
      "loss": 0.5671,
      "step": 36
    },
    {
      "epoch": 0.150559511698881,
      "grad_norm": 0.6944995522499084,
      "learning_rate": 0.00028529868451994384,
      "loss": 0.5403,
      "step": 37
    },
    {
      "epoch": 0.15462868769074262,
      "grad_norm": 0.8135762810707092,
      "learning_rate": 0.0002842093994731145,
      "loss": 0.4017,
      "step": 38
    },
    {
      "epoch": 0.15869786368260427,
      "grad_norm": 0.6780802011489868,
      "learning_rate": 0.00028308342291654174,
      "loss": 0.4815,
      "step": 39
    },
    {
      "epoch": 0.16276703967446593,
      "grad_norm": 0.6904082298278809,
      "learning_rate": 0.00028192106268097334,
      "loss": 0.5513,
      "step": 40
    },
    {
      "epoch": 0.16683621566632756,
      "grad_norm": 0.7780231237411499,
      "learning_rate": 0.00028072263654409154,
      "loss": 0.4496,
      "step": 41
    },
    {
      "epoch": 0.17090539165818922,
      "grad_norm": 0.690417468547821,
      "learning_rate": 0.0002794884721436361,
      "loss": 0.2988,
      "step": 42
    },
    {
      "epoch": 0.17497456765005087,
      "grad_norm": 0.7350278496742249,
      "learning_rate": 0.00027821890688783083,
      "loss": 0.2754,
      "step": 43
    },
    {
      "epoch": 0.1790437436419125,
      "grad_norm": 0.8998621106147766,
      "learning_rate": 0.0002769142878631403,
      "loss": 0.535,
      "step": 44
    },
    {
      "epoch": 0.18311291963377416,
      "grad_norm": 0.5199292898178101,
      "learning_rate": 0.00027557497173937923,
      "loss": 0.2193,
      "step": 45
    },
    {
      "epoch": 0.1871820956256358,
      "grad_norm": 0.7286368012428284,
      "learning_rate": 0.000274201324672203,
      "loss": 0.3971,
      "step": 46
    },
    {
      "epoch": 0.19125127161749747,
      "grad_norm": 0.8066439628601074,
      "learning_rate": 0.00027279372220300385,
      "loss": 0.5201,
      "step": 47
    },
    {
      "epoch": 0.1953204476093591,
      "grad_norm": 0.8500910401344299,
      "learning_rate": 0.0002713525491562421,
      "loss": 0.4594,
      "step": 48
    },
    {
      "epoch": 0.19938962360122076,
      "grad_norm": 0.9414553642272949,
      "learning_rate": 0.00026987819953423867,
      "loss": 0.7813,
      "step": 49
    },
    {
      "epoch": 0.2034587995930824,
      "grad_norm": 2.258089065551758,
      "learning_rate": 0.00026837107640945905,
      "loss": 1.5064,
      "step": 50
    },
    {
      "epoch": 0.2034587995930824,
      "eval_loss": 0.03713131323456764,
      "eval_runtime": 13.0009,
      "eval_samples_per_second": 3.846,
      "eval_steps_per_second": 0.538,
      "step": 50
    },
    {
      "epoch": 0.20752797558494404,
      "grad_norm": 0.7120374441146851,
      "learning_rate": 0.0002668315918143169,
      "loss": 0.096,
      "step": 51
    },
    {
      "epoch": 0.2115971515768057,
      "grad_norm": 0.3248690366744995,
      "learning_rate": 0.00026526016662852886,
      "loss": 0.0389,
      "step": 52
    },
    {
      "epoch": 0.21566632756866735,
      "grad_norm": 0.3197277784347534,
      "learning_rate": 0.00026365723046405023,
      "loss": 0.0302,
      "step": 53
    },
    {
      "epoch": 0.21973550356052898,
      "grad_norm": 0.14361833035945892,
      "learning_rate": 0.0002620232215476231,
      "loss": 0.0137,
      "step": 54
    },
    {
      "epoch": 0.22380467955239064,
      "grad_norm": 0.17876560986042023,
      "learning_rate": 0.0002603585866009697,
      "loss": 0.036,
      "step": 55
    },
    {
      "epoch": 0.2278738555442523,
      "grad_norm": 0.07787288725376129,
      "learning_rate": 0.00025866378071866334,
      "loss": 0.0154,
      "step": 56
    },
    {
      "epoch": 0.23194303153611392,
      "grad_norm": 0.11396508663892746,
      "learning_rate": 0.00025693926724370956,
      "loss": 0.0188,
      "step": 57
    },
    {
      "epoch": 0.23601220752797558,
      "grad_norm": 0.10331092029809952,
      "learning_rate": 0.00025518551764087326,
      "loss": 0.0184,
      "step": 58
    },
    {
      "epoch": 0.24008138351983724,
      "grad_norm": 0.07832586020231247,
      "learning_rate": 0.00025340301136778483,
      "loss": 0.0126,
      "step": 59
    },
    {
      "epoch": 0.2441505595116989,
      "grad_norm": 0.038063760846853256,
      "learning_rate": 0.00025159223574386114,
      "loss": 0.0064,
      "step": 60
    },
    {
      "epoch": 0.24821973550356052,
      "grad_norm": 0.04389215260744095,
      "learning_rate": 0.0002497536858170772,
      "loss": 0.0035,
      "step": 61
    },
    {
      "epoch": 0.25228891149542215,
      "grad_norm": 0.033812519162893295,
      "learning_rate": 0.00024788786422862526,
      "loss": 0.0029,
      "step": 62
    },
    {
      "epoch": 0.25635808748728384,
      "grad_norm": 0.05039115995168686,
      "learning_rate": 0.00024599528107549745,
      "loss": 0.0049,
      "step": 63
    },
    {
      "epoch": 0.26042726347914547,
      "grad_norm": 0.04454028606414795,
      "learning_rate": 0.00024407645377103054,
      "loss": 0.003,
      "step": 64
    },
    {
      "epoch": 0.2644964394710071,
      "grad_norm": 0.03696225956082344,
      "learning_rate": 0.00024213190690345018,
      "loss": 0.0022,
      "step": 65
    },
    {
      "epoch": 0.2685656154628688,
      "grad_norm": 0.05542654171586037,
      "learning_rate": 0.00024016217209245374,
      "loss": 0.0021,
      "step": 66
    },
    {
      "epoch": 0.2726347914547304,
      "grad_norm": 0.013826167210936546,
      "learning_rate": 0.00023816778784387094,
      "loss": 0.0008,
      "step": 67
    },
    {
      "epoch": 0.2767039674465921,
      "grad_norm": 0.023703791201114655,
      "learning_rate": 0.0002361492994024415,
      "loss": 0.0022,
      "step": 68
    },
    {
      "epoch": 0.2807731434384537,
      "grad_norm": 0.08458836376667023,
      "learning_rate": 0.0002341072586027509,
      "loss": 0.0164,
      "step": 69
    },
    {
      "epoch": 0.28484231943031535,
      "grad_norm": 0.7300925850868225,
      "learning_rate": 0.00023204222371836405,
      "loss": 0.0927,
      "step": 70
    },
    {
      "epoch": 0.28891149542217703,
      "grad_norm": 0.5981572270393372,
      "learning_rate": 0.00022995475930919905,
      "loss": 0.1194,
      "step": 71
    },
    {
      "epoch": 0.29298067141403866,
      "grad_norm": 0.5619075298309326,
      "learning_rate": 0.00022784543606718227,
      "loss": 0.0957,
      "step": 72
    },
    {
      "epoch": 0.2970498474059003,
      "grad_norm": 0.33565792441368103,
      "learning_rate": 0.00022571483066022657,
      "loss": 0.0656,
      "step": 73
    },
    {
      "epoch": 0.301119023397762,
      "grad_norm": 0.46171268820762634,
      "learning_rate": 0.0002235635255745762,
      "loss": 0.0598,
      "step": 74
    },
    {
      "epoch": 0.3051881993896236,
      "grad_norm": 0.45766952633857727,
      "learning_rate": 0.00022139210895556104,
      "loss": 0.0685,
      "step": 75
    },
    {
      "epoch": 0.3051881993896236,
      "eval_loss": 0.025676894932985306,
      "eval_runtime": 13.0016,
      "eval_samples_per_second": 3.846,
      "eval_steps_per_second": 0.538,
      "step": 75
    },
    {
      "epoch": 0.30925737538148523,
      "grad_norm": 0.5149214863777161,
      "learning_rate": 0.00021920117444680317,
      "loss": 0.0755,
      "step": 76
    },
    {
      "epoch": 0.3133265513733469,
      "grad_norm": 0.3870311975479126,
      "learning_rate": 0.00021699132102792097,
      "loss": 0.0534,
      "step": 77
    },
    {
      "epoch": 0.31739572736520855,
      "grad_norm": 0.33499646186828613,
      "learning_rate": 0.0002147631528507739,
      "loss": 0.0599,
      "step": 78
    },
    {
      "epoch": 0.3214649033570702,
      "grad_norm": 0.29793283343315125,
      "learning_rate": 0.00021251727907429355,
      "loss": 0.0375,
      "step": 79
    },
    {
      "epoch": 0.32553407934893186,
      "grad_norm": 0.2961036264896393,
      "learning_rate": 0.0002102543136979454,
      "loss": 0.0398,
      "step": 80
    },
    {
      "epoch": 0.3296032553407935,
      "grad_norm": 0.3199717104434967,
      "learning_rate": 0.0002079748753938678,
      "loss": 0.0375,
      "step": 81
    },
    {
      "epoch": 0.3336724313326551,
      "grad_norm": 0.30673283338546753,
      "learning_rate": 0.0002056795873377331,
      "loss": 0.0474,
      "step": 82
    },
    {
      "epoch": 0.3377416073245168,
      "grad_norm": 0.35638052225112915,
      "learning_rate": 0.00020336907703837748,
      "loss": 0.0364,
      "step": 83
    },
    {
      "epoch": 0.34181078331637843,
      "grad_norm": 0.541354775428772,
      "learning_rate": 0.00020104397616624645,
      "loss": 0.0596,
      "step": 84
    },
    {
      "epoch": 0.34587995930824006,
      "grad_norm": 0.23636458814144135,
      "learning_rate": 0.00019870492038070252,
      "loss": 0.0285,
      "step": 85
    },
    {
      "epoch": 0.34994913530010174,
      "grad_norm": 0.25946715474128723,
      "learning_rate": 0.0001963525491562421,
      "loss": 0.0334,
      "step": 86
    },
    {
      "epoch": 0.3540183112919634,
      "grad_norm": 0.3717379868030548,
      "learning_rate": 0.0001939875056076697,
      "loss": 0.0823,
      "step": 87
    },
    {
      "epoch": 0.358087487283825,
      "grad_norm": 0.6748691201210022,
      "learning_rate": 0.00019161043631427666,
      "loss": 0.072,
      "step": 88
    },
    {
      "epoch": 0.3621566632756867,
      "grad_norm": 0.42658162117004395,
      "learning_rate": 0.00018922199114307294,
      "loss": 0.0462,
      "step": 89
    },
    {
      "epoch": 0.3662258392675483,
      "grad_norm": 0.42387211322784424,
      "learning_rate": 0.00018682282307111987,
      "loss": 0.1011,
      "step": 90
    },
    {
      "epoch": 0.37029501525940994,
      "grad_norm": 0.29736894369125366,
      "learning_rate": 0.00018441358800701273,
      "loss": 0.0411,
      "step": 91
    },
    {
      "epoch": 0.3743641912512716,
      "grad_norm": 0.2868631184101105,
      "learning_rate": 0.00018199494461156203,
      "loss": 0.0671,
      "step": 92
    },
    {
      "epoch": 0.37843336724313326,
      "grad_norm": 0.4251483082771301,
      "learning_rate": 0.000179567554117722,
      "loss": 0.1333,
      "step": 93
    },
    {
      "epoch": 0.38250254323499494,
      "grad_norm": 0.3550730049610138,
      "learning_rate": 0.00017713208014981648,
      "loss": 0.0863,
      "step": 94
    },
    {
      "epoch": 0.38657171922685657,
      "grad_norm": 0.28326889872550964,
      "learning_rate": 0.00017468918854211007,
      "loss": 0.0795,
      "step": 95
    },
    {
      "epoch": 0.3906408952187182,
      "grad_norm": 0.3059619963169098,
      "learning_rate": 0.00017223954715677627,
      "loss": 0.1373,
      "step": 96
    },
    {
      "epoch": 0.3947100712105799,
      "grad_norm": 0.3868827819824219,
      "learning_rate": 0.00016978382570131034,
      "loss": 0.1411,
      "step": 97
    },
    {
      "epoch": 0.3987792472024415,
      "grad_norm": 0.35499969124794006,
      "learning_rate": 0.00016732269554543794,
      "loss": 0.1811,
      "step": 98
    },
    {
      "epoch": 0.40284842319430314,
      "grad_norm": 0.793555736541748,
      "learning_rate": 0.00016485682953756942,
      "loss": 0.4984,
      "step": 99
    },
    {
      "epoch": 0.4069175991861648,
      "grad_norm": 1.1363798379898071,
      "learning_rate": 0.00016238690182084986,
      "loss": 0.742,
      "step": 100
    },
    {
      "epoch": 0.4069175991861648,
      "eval_loss": 0.011105372570455074,
      "eval_runtime": 12.9997,
      "eval_samples_per_second": 3.846,
      "eval_steps_per_second": 0.538,
      "step": 100
    }
  ],
  "logging_steps": 1,
  "max_steps": 200,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 50,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 1,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 2.7563955529659187e+17,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}