{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.01278118609406953,
  "eval_steps": 10,
  "global_step": 50,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0002556237218813906,
      "eval_loss": 20.340166091918945,
      "eval_runtime": 12.1761,
      "eval_samples_per_second": 135.348,
      "eval_steps_per_second": 67.674,
      "step": 1
    },
    {
      "epoch": 0.001278118609406953,
      "grad_norm": 806.2634887695312,
      "learning_rate": 5e-05,
      "loss": 85.3687,
      "step": 5
    },
    {
      "epoch": 0.002556237218813906,
      "grad_norm": 614.0523681640625,
      "learning_rate": 0.0001,
      "loss": 82.5967,
      "step": 10
    },
    {
      "epoch": 0.002556237218813906,
      "eval_loss": 20.30628776550293,
      "eval_runtime": 11.5713,
      "eval_samples_per_second": 142.422,
      "eval_steps_per_second": 71.211,
      "step": 10
    },
    {
      "epoch": 0.003834355828220859,
      "grad_norm": 721.6196899414062,
      "learning_rate": 9.619397662556435e-05,
      "loss": 74.5537,
      "step": 15
    },
    {
      "epoch": 0.005112474437627812,
      "grad_norm": 770.1486206054688,
      "learning_rate": 8.535533905932738e-05,
      "loss": 93.181,
      "step": 20
    },
    {
      "epoch": 0.005112474437627812,
      "eval_loss": 20.2139949798584,
      "eval_runtime": 11.7008,
      "eval_samples_per_second": 140.846,
      "eval_steps_per_second": 70.423,
      "step": 20
    },
    {
      "epoch": 0.006390593047034765,
      "grad_norm": 868.2890625,
      "learning_rate": 6.91341716182545e-05,
      "loss": 77.0789,
      "step": 25
    },
    {
      "epoch": 0.007668711656441718,
      "grad_norm": 654.0535888671875,
      "learning_rate": 5e-05,
      "loss": 79.6657,
      "step": 30
    },
    {
      "epoch": 0.007668711656441718,
      "eval_loss": 20.094011306762695,
      "eval_runtime": 11.6588,
      "eval_samples_per_second": 141.352,
      "eval_steps_per_second": 70.676,
      "step": 30
    },
    {
      "epoch": 0.00894683026584867,
      "grad_norm": 1161.23974609375,
      "learning_rate": 3.086582838174551e-05,
      "loss": 89.0292,
      "step": 35
    },
    {
      "epoch": 0.010224948875255624,
      "grad_norm": 1144.7630615234375,
      "learning_rate": 1.4644660940672627e-05,
      "loss": 86.0579,
      "step": 40
    },
    {
      "epoch": 0.010224948875255624,
      "eval_loss": 20.039480209350586,
      "eval_runtime": 11.7921,
      "eval_samples_per_second": 139.755,
      "eval_steps_per_second": 69.877,
      "step": 40
    },
    {
      "epoch": 0.011503067484662576,
      "grad_norm": 621.7279663085938,
      "learning_rate": 3.8060233744356633e-06,
      "loss": 73.1134,
      "step": 45
    },
    {
      "epoch": 0.01278118609406953,
      "grad_norm": 887.598388671875,
      "learning_rate": 0.0,
      "loss": 69.3403,
      "step": 50
    },
    {
      "epoch": 0.01278118609406953,
      "eval_loss": 20.029600143432617,
      "eval_runtime": 11.5521,
      "eval_samples_per_second": 142.658,
      "eval_steps_per_second": 71.329,
      "step": 50
    }
  ],
  "logging_steps": 5,
  "max_steps": 50,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 13,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 55374879129600.0,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}