{
  "best_metric": 0.4959838390350342,
  "best_model_checkpoint": "miner_id_24/checkpoint-20",
  "epoch": 0.008487163165711862,
  "eval_steps": 5,
  "global_step": 20,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.000424358158285593,
      "grad_norm": 0.7751368284225464,
      "learning_rate": 2e-05,
      "loss": 0.7689,
      "step": 1
    },
    {
      "epoch": 0.000424358158285593,
      "eval_loss": 0.8173423409461975,
      "eval_runtime": 185.8366,
      "eval_samples_per_second": 5.343,
      "eval_steps_per_second": 2.674,
      "step": 1
    },
    {
      "epoch": 0.000848716316571186,
      "grad_norm": 1.2337510585784912,
      "learning_rate": 4e-05,
      "loss": 0.849,
      "step": 2
    },
    {
      "epoch": 0.001273074474856779,
      "grad_norm": 0.8578928709030151,
      "learning_rate": 6e-05,
      "loss": 0.9114,
      "step": 3
    },
    {
      "epoch": 0.001697432633142372,
      "grad_norm": 0.6766877174377441,
      "learning_rate": 8e-05,
      "loss": 0.7335,
      "step": 4
    },
    {
      "epoch": 0.0021217907914279654,
      "grad_norm": 0.7290933728218079,
      "learning_rate": 0.0001,
      "loss": 0.6751,
      "step": 5
    },
    {
      "epoch": 0.0021217907914279654,
      "eval_loss": 0.7813173532485962,
      "eval_runtime": 183.9072,
      "eval_samples_per_second": 5.399,
      "eval_steps_per_second": 2.702,
      "step": 5
    },
    {
      "epoch": 0.002546148949713558,
      "grad_norm": 0.830825924873352,
      "learning_rate": 0.00012,
      "loss": 0.798,
      "step": 6
    },
    {
      "epoch": 0.002970507107999151,
      "grad_norm": 0.8948835134506226,
      "learning_rate": 0.00014,
      "loss": 0.7795,
      "step": 7
    },
    {
      "epoch": 0.003394865266284744,
      "grad_norm": 0.8340906500816345,
      "learning_rate": 0.00016,
      "loss": 0.7168,
      "step": 8
    },
    {
      "epoch": 0.0038192234245703373,
      "grad_norm": 0.9142249822616577,
      "learning_rate": 0.00018,
      "loss": 0.6675,
      "step": 9
    },
    {
      "epoch": 0.004243581582855931,
      "grad_norm": 1.0526764392852783,
      "learning_rate": 0.0002,
      "loss": 0.6064,
      "step": 10
    },
    {
      "epoch": 0.004243581582855931,
      "eval_loss": 0.6158913373947144,
      "eval_runtime": 183.7572,
      "eval_samples_per_second": 5.404,
      "eval_steps_per_second": 2.705,
      "step": 10
    },
    {
      "epoch": 0.0046679397411415234,
      "grad_norm": 0.9812945127487183,
      "learning_rate": 0.00019781476007338058,
      "loss": 0.5798,
      "step": 11
    },
    {
      "epoch": 0.005092297899427116,
      "grad_norm": 0.9407110810279846,
      "learning_rate": 0.0001913545457642601,
      "loss": 0.5111,
      "step": 12
    },
    {
      "epoch": 0.00551665605771271,
      "grad_norm": 0.9945716857910156,
      "learning_rate": 0.00018090169943749476,
      "loss": 0.4749,
      "step": 13
    },
    {
      "epoch": 0.005941014215998302,
      "grad_norm": 1.3098291158676147,
      "learning_rate": 0.00016691306063588583,
      "loss": 0.5797,
      "step": 14
    },
    {
      "epoch": 0.006365372374283896,
      "grad_norm": 0.945827841758728,
      "learning_rate": 0.00015000000000000001,
      "loss": 0.4763,
      "step": 15
    },
    {
      "epoch": 0.006365372374283896,
      "eval_loss": 0.5205870866775513,
      "eval_runtime": 183.9724,
      "eval_samples_per_second": 5.398,
      "eval_steps_per_second": 2.701,
      "step": 15
    },
    {
      "epoch": 0.006789730532569488,
      "grad_norm": 1.1491625308990479,
      "learning_rate": 0.00013090169943749476,
      "loss": 0.6383,
      "step": 16
    },
    {
      "epoch": 0.007214088690855082,
      "grad_norm": 0.8612709641456604,
      "learning_rate": 0.00011045284632676536,
      "loss": 0.4815,
      "step": 17
    },
    {
      "epoch": 0.0076384468491406746,
      "grad_norm": 0.8541699051856995,
      "learning_rate": 8.954715367323468e-05,
      "loss": 0.4945,
      "step": 18
    },
    {
      "epoch": 0.008062805007426267,
      "grad_norm": 1.076985239982605,
      "learning_rate": 6.909830056250527e-05,
      "loss": 0.5025,
      "step": 19
    },
    {
      "epoch": 0.008487163165711862,
      "grad_norm": 0.7121065855026245,
      "learning_rate": 5.000000000000002e-05,
      "loss": 0.4774,
      "step": 20
    },
    {
      "epoch": 0.008487163165711862,
      "eval_loss": 0.4959838390350342,
      "eval_runtime": 185.1092,
      "eval_samples_per_second": 5.364,
      "eval_steps_per_second": 2.685,
      "step": 20
    }
  ],
  "logging_steps": 1,
  "max_steps": 25,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 10,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 2,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 6704670399528960.0,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}