{
  "best_metric": 0.8431416153907776,
  "best_model_checkpoint": "miner_id_24/checkpoint-50",
  "epoch": 1.3157894736842106,
  "eval_steps": 50,
  "global_step": 50,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.02631578947368421,
      "grad_norm": 35.830352783203125,
      "learning_rate": 1e-05,
      "loss": 5.5742,
      "step": 1
    },
    {
      "epoch": 0.02631578947368421,
      "eval_loss": 1.6754724979400635,
      "eval_runtime": 4.6369,
      "eval_samples_per_second": 13.802,
      "eval_steps_per_second": 3.451,
      "step": 1
    },
    {
      "epoch": 0.05263157894736842,
      "grad_norm": 45.00202560424805,
      "learning_rate": 2e-05,
      "loss": 6.0369,
      "step": 2
    },
    {
      "epoch": 0.07894736842105263,
      "grad_norm": 46.17910385131836,
      "learning_rate": 3e-05,
      "loss": 6.0642,
      "step": 3
    },
    {
      "epoch": 0.10526315789473684,
      "grad_norm": 49.760963439941406,
      "learning_rate": 4e-05,
      "loss": 5.5496,
      "step": 4
    },
    {
      "epoch": 0.13157894736842105,
      "grad_norm": 39.75947189331055,
      "learning_rate": 5e-05,
      "loss": 5.168,
      "step": 5
    },
    {
      "epoch": 0.15789473684210525,
      "grad_norm": 41.92656707763672,
      "learning_rate": 6e-05,
      "loss": 4.7943,
      "step": 6
    },
    {
      "epoch": 0.18421052631578946,
      "grad_norm": 27.3580322265625,
      "learning_rate": 7e-05,
      "loss": 3.9595,
      "step": 7
    },
    {
      "epoch": 0.21052631578947367,
      "grad_norm": 34.04036331176758,
      "learning_rate": 8e-05,
      "loss": 4.0193,
      "step": 8
    },
    {
      "epoch": 0.23684210526315788,
      "grad_norm": 39.38542556762695,
      "learning_rate": 9e-05,
      "loss": 4.7816,
      "step": 9
    },
    {
      "epoch": 0.2631578947368421,
      "grad_norm": 42.04862976074219,
      "learning_rate": 0.0001,
      "loss": 4.7192,
      "step": 10
    },
    {
      "epoch": 0.2894736842105263,
      "grad_norm": 16.514020919799805,
      "learning_rate": 9.997718922447667e-05,
      "loss": 3.6934,
      "step": 11
    },
    {
      "epoch": 0.3157894736842105,
      "grad_norm": 14.977937698364258,
      "learning_rate": 9.990877771116589e-05,
      "loss": 4.0051,
      "step": 12
    },
    {
      "epoch": 0.34210526315789475,
      "grad_norm": 16.822614669799805,
      "learning_rate": 9.979482788085454e-05,
      "loss": 3.3175,
      "step": 13
    },
    {
      "epoch": 0.3684210526315789,
      "grad_norm": 17.509784698486328,
      "learning_rate": 9.96354437049027e-05,
      "loss": 3.4816,
      "step": 14
    },
    {
      "epoch": 0.39473684210526316,
      "grad_norm": 15.930502891540527,
      "learning_rate": 9.943077061037671e-05,
      "loss": 3.2727,
      "step": 15
    },
    {
      "epoch": 0.42105263157894735,
      "grad_norm": 20.140819549560547,
      "learning_rate": 9.918099534735718e-05,
      "loss": 3.531,
      "step": 16
    },
    {
      "epoch": 0.4473684210526316,
      "grad_norm": 21.51377296447754,
      "learning_rate": 9.888634581854234e-05,
      "loss": 3.2109,
      "step": 17
    },
    {
      "epoch": 0.47368421052631576,
      "grad_norm": 21.92951202392578,
      "learning_rate": 9.85470908713026e-05,
      "loss": 3.6691,
      "step": 18
    },
    {
      "epoch": 0.5,
      "grad_norm": 10.309015274047852,
      "learning_rate": 9.816354005237583e-05,
      "loss": 3.3325,
      "step": 19
    },
    {
      "epoch": 0.5263157894736842,
      "grad_norm": 14.019783973693848,
      "learning_rate": 9.773604332542729e-05,
      "loss": 3.8225,
      "step": 20
    },
    {
      "epoch": 0.5526315789473685,
      "grad_norm": 12.386563301086426,
      "learning_rate": 9.726499075173201e-05,
      "loss": 3.4366,
      "step": 21
    },
    {
      "epoch": 0.5789473684210527,
      "grad_norm": 13.775650978088379,
      "learning_rate": 9.675081213427076e-05,
      "loss": 3.6102,
      "step": 22
    },
    {
      "epoch": 0.6052631578947368,
      "grad_norm": 13.588586807250977,
      "learning_rate": 9.619397662556435e-05,
      "loss": 3.7363,
      "step": 23
    },
    {
      "epoch": 0.631578947368421,
      "grad_norm": 13.898175239562988,
      "learning_rate": 9.559499229960451e-05,
      "loss": 3.2825,
      "step": 24
    },
    {
      "epoch": 0.6578947368421053,
      "grad_norm": 15.590497970581055,
      "learning_rate": 9.495440568827129e-05,
      "loss": 3.2594,
      "step": 25
    },
    {
      "epoch": 0.6842105263157895,
      "grad_norm": 18.052392959594727,
      "learning_rate": 9.42728012826605e-05,
      "loss": 3.4811,
      "step": 26
    },
    {
      "epoch": 0.7105263157894737,
      "grad_norm": 20.789844512939453,
      "learning_rate": 9.355080099977578e-05,
      "loss": 4.004,
      "step": 27
    },
    {
      "epoch": 0.7368421052631579,
      "grad_norm": 11.545269012451172,
      "learning_rate": 9.278906361507238e-05,
      "loss": 3.407,
      "step": 28
    },
    {
      "epoch": 0.7631578947368421,
      "grad_norm": 11.368681907653809,
      "learning_rate": 9.19882841613699e-05,
      "loss": 3.212,
      "step": 29
    },
    {
      "epoch": 0.7894736842105263,
      "grad_norm": 12.940420150756836,
      "learning_rate": 9.114919329468282e-05,
      "loss": 3.4642,
      "step": 30
    },
    {
      "epoch": 0.8157894736842105,
      "grad_norm": 11.59272575378418,
      "learning_rate": 9.02725566275473e-05,
      "loss": 3.3124,
      "step": 31
    },
    {
      "epoch": 0.8421052631578947,
      "grad_norm": 17.267396926879883,
      "learning_rate": 8.935917403045251e-05,
      "loss": 3.5721,
      "step": 32
    },
    {
      "epoch": 0.868421052631579,
      "grad_norm": 14.882741928100586,
      "learning_rate": 8.840987890201403e-05,
      "loss": 3.7928,
      "step": 33
    },
    {
      "epoch": 0.8947368421052632,
      "grad_norm": 14.032687187194824,
      "learning_rate": 8.742553740855506e-05,
      "loss": 3.2582,
      "step": 34
    },
    {
      "epoch": 0.9210526315789473,
      "grad_norm": 13.775826454162598,
      "learning_rate": 8.640704769378942e-05,
      "loss": 3.4732,
      "step": 35
    },
    {
      "epoch": 0.9473684210526315,
      "grad_norm": 17.22196388244629,
      "learning_rate": 8.535533905932738e-05,
      "loss": 3.6359,
      "step": 36
    },
    {
      "epoch": 0.9736842105263158,
      "grad_norm": 14.44973373413086,
      "learning_rate": 8.427137111675199e-05,
      "loss": 3.5035,
      "step": 37
    },
    {
      "epoch": 1.0,
      "grad_norm": 17.80911636352539,
      "learning_rate": 8.315613291203976e-05,
      "loss": 3.3937,
      "step": 38
    },
    {
      "epoch": 1.0263157894736843,
      "grad_norm": 8.416217803955078,
      "learning_rate": 8.201064202312441e-05,
      "loss": 2.8219,
      "step": 39
    },
    {
      "epoch": 1.0526315789473684,
      "grad_norm": 8.224820137023926,
      "learning_rate": 8.083594363142717e-05,
      "loss": 1.9419,
      "step": 40
    },
    {
      "epoch": 1.0789473684210527,
      "grad_norm": 8.183276176452637,
      "learning_rate": 7.963310956820085e-05,
      "loss": 1.795,
      "step": 41
    },
    {
      "epoch": 1.1052631578947367,
      "grad_norm": 8.23655891418457,
      "learning_rate": 7.840323733655778e-05,
      "loss": 1.6889,
      "step": 42
    },
    {
      "epoch": 1.131578947368421,
      "grad_norm": 11.236390113830566,
      "learning_rate": 7.714744911007394e-05,
      "loss": 1.6847,
      "step": 43
    },
    {
      "epoch": 1.1578947368421053,
      "grad_norm": 10.796011924743652,
      "learning_rate": 7.586689070888284e-05,
      "loss": 1.4455,
      "step": 44
    },
    {
      "epoch": 1.1842105263157894,
      "grad_norm": 10.806239128112793,
      "learning_rate": 7.456273055419388e-05,
      "loss": 1.1321,
      "step": 45
    },
    {
      "epoch": 1.2105263157894737,
      "grad_norm": 11.67683219909668,
      "learning_rate": 7.323615860218843e-05,
      "loss": 1.0502,
      "step": 46
    },
    {
      "epoch": 1.236842105263158,
      "grad_norm": 15.044631958007812,
      "learning_rate": 7.188838525826702e-05,
      "loss": 1.189,
      "step": 47
    },
    {
      "epoch": 1.263157894736842,
      "grad_norm": 13.156881332397461,
      "learning_rate": 7.052064027263786e-05,
      "loss": 2.1428,
      "step": 48
    },
    {
      "epoch": 1.2894736842105263,
      "grad_norm": 11.645526885986328,
      "learning_rate": 6.91341716182545e-05,
      "loss": 1.6295,
      "step": 49
    },
    {
      "epoch": 1.3157894736842106,
      "grad_norm": 10.376519203186035,
      "learning_rate": 6.773024435212678e-05,
      "loss": 1.609,
      "step": 50
    },
    {
      "epoch": 1.3157894736842106,
      "eval_loss": 0.8431416153907776,
      "eval_runtime": 4.7683,
      "eval_samples_per_second": 13.422,
      "eval_steps_per_second": 3.355,
      "step": 50
    }
  ],
  "logging_steps": 1,
  "max_steps": 114,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 50,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 5,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 7.123694981323162e+16,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}