{
  "best_metric": 1.1896018981933594,
  "best_model_checkpoint": "miner_id_24/checkpoint-500",
  "epoch": 0.026203390718759007,
  "eval_steps": 50,
  "global_step": 500,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 5.240678143751801e-05,
      "eval_loss": 2.1741511821746826,
      "eval_runtime": 489.2013,
      "eval_samples_per_second": 16.425,
      "eval_steps_per_second": 4.107,
      "step": 1
    },
    {
      "epoch": 0.0005240678143751802,
      "grad_norm": 2.150914192199707,
      "learning_rate": 4.08e-05,
      "loss": 1.3698,
      "step": 10
    },
    {
      "epoch": 0.0010481356287503603,
      "grad_norm": 1.843773365020752,
      "learning_rate": 8.16e-05,
      "loss": 1.33,
      "step": 20
    },
    {
      "epoch": 0.0015722034431255405,
      "grad_norm": 2.1218819618225098,
      "learning_rate": 0.0001224,
      "loss": 1.2321,
      "step": 30
    },
    {
      "epoch": 0.0020962712575007206,
      "grad_norm": 2.44899320602417,
      "learning_rate": 0.0001632,
      "loss": 1.2333,
      "step": 40
    },
    {
      "epoch": 0.002620339071875901,
      "grad_norm": 13.852787017822266,
      "learning_rate": 0.000204,
      "loss": 2.0712,
      "step": 50
    },
    {
      "epoch": 0.002620339071875901,
      "eval_loss": 1.7819404602050781,
      "eval_runtime": 489.242,
      "eval_samples_per_second": 16.423,
      "eval_steps_per_second": 4.106,
      "step": 50
    },
    {
      "epoch": 0.003144406886251081,
      "grad_norm": 1.8030827045440674,
      "learning_rate": 0.00020375153312650207,
      "loss": 1.3647,
      "step": 60
    },
    {
      "epoch": 0.003668474700626261,
      "grad_norm": 1.8373987674713135,
      "learning_rate": 0.00020300734301164017,
      "loss": 1.1339,
      "step": 70
    },
    {
      "epoch": 0.004192542515001441,
      "grad_norm": 1.8776074647903442,
      "learning_rate": 0.00020177105527484818,
      "loss": 1.1834,
      "step": 80
    },
    {
      "epoch": 0.004716610329376621,
      "grad_norm": 2.3595855236053467,
      "learning_rate": 0.00020004869298570854,
      "loss": 1.2042,
      "step": 90
    },
    {
      "epoch": 0.005240678143751802,
      "grad_norm": 11.033510208129883,
      "learning_rate": 0.00019784864732016265,
      "loss": 1.8743,
      "step": 100
    },
    {
      "epoch": 0.005240678143751802,
      "eval_loss": 1.7514146566390991,
      "eval_runtime": 489.1456,
      "eval_samples_per_second": 16.427,
      "eval_steps_per_second": 4.107,
      "step": 100
    },
    {
      "epoch": 0.005764745958126981,
      "grad_norm": 1.6452189683914185,
      "learning_rate": 0.00019518163667954527,
      "loss": 1.2991,
      "step": 110
    },
    {
      "epoch": 0.006288813772502162,
      "grad_norm": 1.5686390399932861,
      "learning_rate": 0.00019206065447161056,
      "loss": 1.162,
      "step": 120
    },
    {
      "epoch": 0.006812881586877342,
      "grad_norm": 1.821743369102478,
      "learning_rate": 0.00018850090580795544,
      "loss": 1.1572,
      "step": 130
    },
    {
      "epoch": 0.007336949401252522,
      "grad_norm": 4.036351680755615,
      "learning_rate": 0.00018451973342624464,
      "loss": 1.2418,
      "step": 140
    },
    {
      "epoch": 0.007861017215627703,
      "grad_norm": 12.068865776062012,
      "learning_rate": 0.00018013653319813575,
      "loss": 1.8739,
      "step": 150
    },
    {
      "epoch": 0.007861017215627703,
      "eval_loss": 1.8119529485702515,
      "eval_runtime": 489.208,
      "eval_samples_per_second": 16.425,
      "eval_steps_per_second": 4.107,
      "step": 150
    },
    {
      "epoch": 0.008385085030002883,
      "grad_norm": 1.7102468013763428,
      "learning_rate": 0.0001753726596345424,
      "loss": 1.3199,
      "step": 160
    },
    {
      "epoch": 0.008909152844378062,
      "grad_norm": 1.9763778448104858,
      "learning_rate": 0.00017025132184860355,
      "loss": 1.2008,
      "step": 170
    },
    {
      "epoch": 0.009433220658753242,
      "grad_norm": 1.650948405265808,
      "learning_rate": 0.00016479747048321714,
      "loss": 1.1192,
      "step": 180
    },
    {
      "epoch": 0.009957288473128423,
      "grad_norm": 2.9453556537628174,
      "learning_rate": 0.00015903767615401616,
      "loss": 1.2746,
      "step": 190
    },
    {
      "epoch": 0.010481356287503603,
      "grad_norm": 14.243999481201172,
      "learning_rate": 0.000153,
      "loss": 2.1821,
      "step": 200
    },
    {
      "epoch": 0.010481356287503603,
      "eval_loss": 1.688521385192871,
      "eval_runtime": 488.6728,
      "eval_samples_per_second": 16.442,
      "eval_steps_per_second": 4.111,
      "step": 200
    },
    {
      "epoch": 0.011005424101878783,
      "grad_norm": 1.8800654411315918,
      "learning_rate": 0.0001467138569724859,
      "loss": 1.328,
      "step": 210
    },
    {
      "epoch": 0.011529491916253963,
      "grad_norm": 1.8253871202468872,
      "learning_rate": 0.00014020987252842305,
      "loss": 1.1803,
      "step": 220
    },
    {
      "epoch": 0.012053559730629144,
      "grad_norm": 2.225597858428955,
      "learning_rate": 0.00013351973342624464,
      "loss": 1.1601,
      "step": 230
    },
    {
      "epoch": 0.012577627545004324,
      "grad_norm": 2.5397775173187256,
      "learning_rate": 0.00012667603335116609,
      "loss": 1.227,
      "step": 240
    },
    {
      "epoch": 0.013101695359379504,
      "grad_norm": 11.309868812561035,
      "learning_rate": 0.00011971211412202691,
      "loss": 1.7126,
      "step": 250
    },
    {
      "epoch": 0.013101695359379504,
      "eval_loss": 1.499428391456604,
      "eval_runtime": 488.4857,
      "eval_samples_per_second": 16.449,
      "eval_steps_per_second": 4.113,
      "step": 250
    },
    {
      "epoch": 0.013625763173754683,
      "grad_norm": 1.6462516784667969,
      "learning_rate": 0.00011266190325330066,
      "loss": 1.1976,
      "step": 260
    },
    {
      "epoch": 0.014149830988129865,
      "grad_norm": 1.5284520387649536,
      "learning_rate": 0.00010555974866365511,
      "loss": 1.0957,
      "step": 270
    },
    {
      "epoch": 0.014673898802505045,
      "grad_norm": 1.8043922185897827,
      "learning_rate": 9.844025133634492e-05,
      "loss": 1.0894,
      "step": 280
    },
    {
      "epoch": 0.015197966616880224,
      "grad_norm": 2.4363131523132324,
      "learning_rate": 9.133809674669937e-05,
      "loss": 1.1335,
      "step": 290
    },
    {
      "epoch": 0.015722034431255406,
      "grad_norm": 8.980298042297363,
      "learning_rate": 8.428788587797311e-05,
      "loss": 1.8793,
      "step": 300
    },
    {
      "epoch": 0.015722034431255406,
      "eval_loss": 1.4283676147460938,
      "eval_runtime": 489.4252,
      "eval_samples_per_second": 16.417,
      "eval_steps_per_second": 4.105,
      "step": 300
    },
    {
      "epoch": 0.016246102245630584,
      "grad_norm": 1.416838526725769,
      "learning_rate": 7.73239666488339e-05,
      "loss": 1.1683,
      "step": 310
    },
    {
      "epoch": 0.016770170060005765,
      "grad_norm": 1.574950933456421,
      "learning_rate": 7.048026657375537e-05,
      "loss": 1.0502,
      "step": 320
    },
    {
      "epoch": 0.017294237874380947,
      "grad_norm": 1.8394525051116943,
      "learning_rate": 6.379012747157697e-05,
      "loss": 1.0767,
      "step": 330
    },
    {
      "epoch": 0.017818305688756125,
      "grad_norm": 2.86199688911438,
      "learning_rate": 5.7286143027514095e-05,
      "loss": 1.1481,
      "step": 340
    },
    {
      "epoch": 0.018342373503131306,
      "grad_norm": 12.534944534301758,
      "learning_rate": 5.100000000000002e-05,
      "loss": 1.8794,
      "step": 350
    },
    {
      "epoch": 0.018342373503131306,
      "eval_loss": 1.309641718864441,
      "eval_runtime": 490.402,
      "eval_samples_per_second": 16.385,
      "eval_steps_per_second": 4.097,
      "step": 350
    },
    {
      "epoch": 0.018866441317506484,
      "grad_norm": 1.5316487550735474,
      "learning_rate": 4.496232384598384e-05,
      "loss": 1.1357,
      "step": 360
    },
    {
      "epoch": 0.019390509131881666,
      "grad_norm": 1.5178347826004028,
      "learning_rate": 3.9202529516782854e-05,
      "loss": 1.0699,
      "step": 370
    },
    {
      "epoch": 0.019914576946256847,
      "grad_norm": 2.074788808822632,
      "learning_rate": 3.374867815139649e-05,
      "loss": 1.0479,
      "step": 380
    },
    {
      "epoch": 0.020438644760632025,
      "grad_norm": 2.566411256790161,
      "learning_rate": 2.8627340365457602e-05,
      "loss": 1.1419,
      "step": 390
    },
    {
      "epoch": 0.020962712575007206,
      "grad_norm": 7.236230850219727,
      "learning_rate": 2.3863466801864254e-05,
      "loss": 1.6114,
      "step": 400
    },
    {
      "epoch": 0.020962712575007206,
      "eval_loss": 1.215065360069275,
      "eval_runtime": 490.1796,
      "eval_samples_per_second": 16.392,
      "eval_steps_per_second": 4.098,
      "step": 400
    },
    {
      "epoch": 0.021486780389382384,
      "grad_norm": 1.1142139434814453,
      "learning_rate": 1.9480266573755372e-05,
      "loss": 1.0362,
      "step": 410
    },
    {
      "epoch": 0.022010848203757566,
      "grad_norm": 1.5296214818954468,
      "learning_rate": 1.5499094192044554e-05,
      "loss": 1.038,
      "step": 420
    },
    {
      "epoch": 0.022534916018132747,
      "grad_norm": 1.5614051818847656,
      "learning_rate": 1.1939345528389446e-05,
      "loss": 1.0573,
      "step": 430
    },
    {
      "epoch": 0.023058983832507925,
      "grad_norm": 2.5465171337127686,
      "learning_rate": 8.818363320454701e-06,
      "loss": 1.082,
      "step": 440
    },
    {
      "epoch": 0.023583051646883107,
      "grad_norm": 12.415629386901855,
      "learning_rate": 6.1513526798373514e-06,
      "loss": 1.8403,
      "step": 450
    },
    {
      "epoch": 0.023583051646883107,
      "eval_loss": 1.1930021047592163,
      "eval_runtime": 489.2822,
      "eval_samples_per_second": 16.422,
      "eval_steps_per_second": 4.106,
      "step": 450
    },
    {
      "epoch": 0.02410711946125829,
      "grad_norm": 1.3619661331176758,
      "learning_rate": 3.9513070142914725e-06,
      "loss": 1.077,
      "step": 460
    },
    {
      "epoch": 0.024631187275633466,
      "grad_norm": 1.5200282335281372,
      "learning_rate": 2.2289447251518195e-06,
      "loss": 1.0089,
      "step": 470
    },
    {
      "epoch": 0.025155255090008648,
      "grad_norm": 1.4571294784545898,
      "learning_rate": 9.92656988359823e-07,
      "loss": 1.0334,
      "step": 480
    },
    {
      "epoch": 0.025679322904383826,
      "grad_norm": 2.3840417861938477,
      "learning_rate": 2.4846687349793185e-07,
      "loss": 1.0615,
      "step": 490
    },
    {
      "epoch": 0.026203390718759007,
      "grad_norm": 9.744729995727539,
      "learning_rate": 0.0,
      "loss": 1.8475,
      "step": 500
    },
    {
      "epoch": 0.026203390718759007,
      "eval_loss": 1.1896018981933594,
      "eval_runtime": 488.1331,
      "eval_samples_per_second": 16.461,
      "eval_steps_per_second": 4.116,
      "step": 500
    }
  ],
  "logging_steps": 10,
  "max_steps": 500,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 50,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 3,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 9.4282098671616e+16,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}