{
  "best_metric": 0.5612907409667969,
  "best_model_checkpoint": "miner_id_24/checkpoint-50",
  "epoch": 0.0648508430609598,
  "eval_steps": 50,
  "global_step": 50,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0012970168612191958,
      "grad_norm": 43.15007019042969,
      "learning_rate": 1e-06,
      "loss": 24.5956,
      "step": 1
    },
    {
      "epoch": 0.0012970168612191958,
      "eval_loss": 11.099210739135742,
      "eval_runtime": 43.2298,
      "eval_samples_per_second": 7.518,
      "eval_steps_per_second": 1.897,
      "step": 1
    },
    {
      "epoch": 0.0025940337224383916,
      "grad_norm": 34.01232147216797,
      "learning_rate": 2e-06,
      "loss": 21.4527,
      "step": 2
    },
    {
      "epoch": 0.0038910505836575876,
      "grad_norm": 43.92300796508789,
      "learning_rate": 3e-06,
      "loss": 21.7764,
      "step": 3
    },
    {
      "epoch": 0.005188067444876783,
      "grad_norm": 36.848297119140625,
      "learning_rate": 4e-06,
      "loss": 22.2293,
      "step": 4
    },
    {
      "epoch": 0.00648508430609598,
      "grad_norm": 34.78293991088867,
      "learning_rate": 4.9999999999999996e-06,
      "loss": 21.3363,
      "step": 5
    },
    {
      "epoch": 0.007782101167315175,
      "grad_norm": 43.606285095214844,
      "learning_rate": 6e-06,
      "loss": 21.3269,
      "step": 6
    },
    {
      "epoch": 0.009079118028534372,
      "grad_norm": 35.7335319519043,
      "learning_rate": 7e-06,
      "loss": 18.8252,
      "step": 7
    },
    {
      "epoch": 0.010376134889753566,
      "grad_norm": 39.83949661254883,
      "learning_rate": 8e-06,
      "loss": 20.006,
      "step": 8
    },
    {
      "epoch": 0.011673151750972763,
      "grad_norm": 32.874061584472656,
      "learning_rate": 9e-06,
      "loss": 19.244,
      "step": 9
    },
    {
      "epoch": 0.01297016861219196,
      "grad_norm": 35.38383102416992,
      "learning_rate": 9.999999999999999e-06,
      "loss": 21.033,
      "step": 10
    },
    {
      "epoch": 0.014267185473411154,
      "grad_norm": 42.5994987487793,
      "learning_rate": 1.1e-05,
      "loss": 19.4729,
      "step": 11
    },
    {
      "epoch": 0.01556420233463035,
      "grad_norm": 32.3964958190918,
      "learning_rate": 1.2e-05,
      "loss": 19.9392,
      "step": 12
    },
    {
      "epoch": 0.016861219195849545,
      "grad_norm": 34.603797912597656,
      "learning_rate": 1.3000000000000001e-05,
      "loss": 18.9286,
      "step": 13
    },
    {
      "epoch": 0.018158236057068743,
      "grad_norm": 22.889278411865234,
      "learning_rate": 1.4e-05,
      "loss": 19.2667,
      "step": 14
    },
    {
      "epoch": 0.019455252918287938,
      "grad_norm": 20.571109771728516,
      "learning_rate": 1.5e-05,
      "loss": 17.9724,
      "step": 15
    },
    {
      "epoch": 0.020752269779507133,
      "grad_norm": 24.65373992919922,
      "learning_rate": 1.6e-05,
      "loss": 18.7244,
      "step": 16
    },
    {
      "epoch": 0.02204928664072633,
      "grad_norm": 20.301925659179688,
      "learning_rate": 1.7e-05,
      "loss": 15.1692,
      "step": 17
    },
    {
      "epoch": 0.023346303501945526,
      "grad_norm": 20.82584571838379,
      "learning_rate": 1.8e-05,
      "loss": 15.8121,
      "step": 18
    },
    {
      "epoch": 0.02464332036316472,
      "grad_norm": 20.620403289794922,
      "learning_rate": 1.9e-05,
      "loss": 14.7579,
      "step": 19
    },
    {
      "epoch": 0.02594033722438392,
      "grad_norm": 38.9058952331543,
      "learning_rate": 1.9999999999999998e-05,
      "loss": 13.4066,
      "step": 20
    },
    {
      "epoch": 0.027237354085603113,
      "grad_norm": 24.992218017578125,
      "learning_rate": 2.1e-05,
      "loss": 12.6445,
      "step": 21
    },
    {
      "epoch": 0.028534370946822308,
      "grad_norm": 29.613855361938477,
      "learning_rate": 2.2e-05,
      "loss": 11.2814,
      "step": 22
    },
    {
      "epoch": 0.029831387808041506,
      "grad_norm": 24.93452262878418,
      "learning_rate": 2.3000000000000003e-05,
      "loss": 9.3331,
      "step": 23
    },
    {
      "epoch": 0.0311284046692607,
      "grad_norm": 27.680971145629883,
      "learning_rate": 2.4e-05,
      "loss": 8.6512,
      "step": 24
    },
    {
      "epoch": 0.0324254215304799,
      "grad_norm": 29.876705169677734,
      "learning_rate": 2.5e-05,
      "loss": 7.8155,
      "step": 25
    },
    {
      "epoch": 0.03372243839169909,
      "grad_norm": 35.160499572753906,
      "learning_rate": 2.6000000000000002e-05,
      "loss": 5.7938,
      "step": 26
    },
    {
      "epoch": 0.03501945525291829,
      "grad_norm": 27.901893615722656,
      "learning_rate": 2.7000000000000002e-05,
      "loss": 4.1644,
      "step": 27
    },
    {
      "epoch": 0.03631647211413749,
      "grad_norm": 26.058135986328125,
      "learning_rate": 2.8e-05,
      "loss": 3.4339,
      "step": 28
    },
    {
      "epoch": 0.03761348897535668,
      "grad_norm": 21.99599838256836,
      "learning_rate": 2.9e-05,
      "loss": 2.2824,
      "step": 29
    },
    {
      "epoch": 0.038910505836575876,
      "grad_norm": 25.322467803955078,
      "learning_rate": 3e-05,
      "loss": 1.5445,
      "step": 30
    },
    {
      "epoch": 0.040207522697795074,
      "grad_norm": 16.150192260742188,
      "learning_rate": 2.9984895998119723e-05,
      "loss": 1.1335,
      "step": 31
    },
    {
      "epoch": 0.041504539559014265,
      "grad_norm": 23.29557228088379,
      "learning_rate": 2.993961440992859e-05,
      "loss": 2.223,
      "step": 32
    },
    {
      "epoch": 0.042801556420233464,
      "grad_norm": 11.53945541381836,
      "learning_rate": 2.9864246426519023e-05,
      "loss": 0.357,
      "step": 33
    },
    {
      "epoch": 0.04409857328145266,
      "grad_norm": 16.441926956176758,
      "learning_rate": 2.9758943828979444e-05,
      "loss": 0.4553,
      "step": 34
    },
    {
      "epoch": 0.04539559014267185,
      "grad_norm": 13.965824127197266,
      "learning_rate": 2.9623918682727355e-05,
      "loss": 0.8248,
      "step": 35
    },
    {
      "epoch": 0.04669260700389105,
      "grad_norm": 21.11853790283203,
      "learning_rate": 2.9459442910437798e-05,
      "loss": 1.1816,
      "step": 36
    },
    {
      "epoch": 0.04798962386511025,
      "grad_norm": 21.567947387695312,
      "learning_rate": 2.9265847744427305e-05,
      "loss": 2.031,
      "step": 37
    },
    {
      "epoch": 0.04928664072632944,
      "grad_norm": 18.425945281982422,
      "learning_rate": 2.904352305959606e-05,
      "loss": 0.9298,
      "step": 38
    },
    {
      "epoch": 0.05058365758754864,
      "grad_norm": 7.7157511711120605,
      "learning_rate": 2.8792916588271762e-05,
      "loss": 0.4712,
      "step": 39
    },
    {
      "epoch": 0.05188067444876784,
      "grad_norm": 40.98714065551758,
      "learning_rate": 2.8514533018536286e-05,
      "loss": 1.8018,
      "step": 40
    },
    {
      "epoch": 0.05317769130998703,
      "grad_norm": 36.11446762084961,
      "learning_rate": 2.820893297785107e-05,
      "loss": 1.6269,
      "step": 41
    },
    {
      "epoch": 0.054474708171206226,
      "grad_norm": 40.24372482299805,
      "learning_rate": 2.7876731904027994e-05,
      "loss": 1.417,
      "step": 42
    },
    {
      "epoch": 0.055771725032425425,
      "grad_norm": 37.61830520629883,
      "learning_rate": 2.7518598805819542e-05,
      "loss": 0.8011,
      "step": 43
    },
    {
      "epoch": 0.057068741893644616,
      "grad_norm": 7.277573108673096,
      "learning_rate": 2.7135254915624213e-05,
      "loss": 0.1595,
      "step": 44
    },
    {
      "epoch": 0.058365758754863814,
      "grad_norm": 7.73269510269165,
      "learning_rate": 2.672747223702045e-05,
      "loss": 0.2451,
      "step": 45
    },
    {
      "epoch": 0.05966277561608301,
      "grad_norm": 5.931721210479736,
      "learning_rate": 2.6296071990054167e-05,
      "loss": 0.086,
      "step": 46
    },
    {
      "epoch": 0.0609597924773022,
      "grad_norm": 18.46846580505371,
      "learning_rate": 2.5841922957410875e-05,
      "loss": 0.8838,
      "step": 47
    },
    {
      "epoch": 0.0622568093385214,
      "grad_norm": 20.60508155822754,
      "learning_rate": 2.5365939734802973e-05,
      "loss": 2.3646,
      "step": 48
    },
    {
      "epoch": 0.06355382619974059,
      "grad_norm": 12.769781112670898,
      "learning_rate": 2.4869080889095693e-05,
      "loss": 1.2122,
      "step": 49
    },
    {
      "epoch": 0.0648508430609598,
      "grad_norm": 19.520858764648438,
      "learning_rate": 2.4352347027881003e-05,
      "loss": 1.8933,
      "step": 50
    },
    {
      "epoch": 0.0648508430609598,
      "eval_loss": 0.5612907409667969,
      "eval_runtime": 44.2478,
      "eval_samples_per_second": 7.345,
      "eval_steps_per_second": 1.853,
      "step": 50
    }
  ],
  "logging_steps": 1,
  "max_steps": 100,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 50,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 5,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 1.66313582395392e+16,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}