|
{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.09250693802035152,
  "eval_steps": 10,
  "global_step": 50,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0018501387604070306,
      "eval_loss": 3.789647102355957,
      "eval_runtime": 14.6466,
      "eval_samples_per_second": 15.567,
      "eval_steps_per_second": 7.783,
      "step": 1
    },
    {
      "epoch": 0.009250693802035153,
      "grad_norm": 3.147674798965454,
      "learning_rate": 5e-05,
      "loss": 3.5633,
      "step": 5
    },
    {
      "epoch": 0.018501387604070305,
      "grad_norm": 4.135693550109863,
      "learning_rate": 0.0001,
      "loss": 3.0784,
      "step": 10
    },
    {
      "epoch": 0.018501387604070305,
      "eval_loss": 2.5756938457489014,
      "eval_runtime": 14.7893,
      "eval_samples_per_second": 15.417,
      "eval_steps_per_second": 7.708,
      "step": 10
    },
    {
      "epoch": 0.027752081406105456,
      "grad_norm": 2.7247848510742188,
      "learning_rate": 9.619397662556435e-05,
      "loss": 2.1231,
      "step": 15
    },
    {
      "epoch": 0.03700277520814061,
      "grad_norm": 2.6493232250213623,
      "learning_rate": 8.535533905932738e-05,
      "loss": 1.6803,
      "step": 20
    },
    {
      "epoch": 0.03700277520814061,
      "eval_loss": 1.6247913837432861,
      "eval_runtime": 14.9738,
      "eval_samples_per_second": 15.227,
      "eval_steps_per_second": 7.613,
      "step": 20
    },
    {
      "epoch": 0.04625346901017576,
      "grad_norm": 2.0250251293182373,
      "learning_rate": 6.91341716182545e-05,
      "loss": 1.6013,
      "step": 25
    },
    {
      "epoch": 0.05550416281221091,
      "grad_norm": 2.2003724575042725,
      "learning_rate": 5e-05,
      "loss": 1.415,
      "step": 30
    },
    {
      "epoch": 0.05550416281221091,
      "eval_loss": 1.2630630731582642,
      "eval_runtime": 15.013,
      "eval_samples_per_second": 15.187,
      "eval_steps_per_second": 7.593,
      "step": 30
    },
    {
      "epoch": 0.06475485661424607,
      "grad_norm": 2.0022053718566895,
      "learning_rate": 3.086582838174551e-05,
      "loss": 1.0455,
      "step": 35
    },
    {
      "epoch": 0.07400555041628122,
      "grad_norm": 2.3407399654388428,
      "learning_rate": 1.4644660940672627e-05,
      "loss": 1.3008,
      "step": 40
    },
    {
      "epoch": 0.07400555041628122,
      "eval_loss": 1.1422836780548096,
      "eval_runtime": 15.0272,
      "eval_samples_per_second": 15.172,
      "eval_steps_per_second": 7.586,
      "step": 40
    },
    {
      "epoch": 0.08325624421831637,
      "grad_norm": 1.9095128774642944,
      "learning_rate": 3.8060233744356633e-06,
      "loss": 1.0084,
      "step": 45
    },
    {
      "epoch": 0.09250693802035152,
      "grad_norm": 2.2135424613952637,
      "learning_rate": 0.0,
      "loss": 1.2982,
      "step": 50
    },
    {
      "epoch": 0.09250693802035152,
      "eval_loss": 1.123547077178955,
      "eval_runtime": 14.9935,
      "eval_samples_per_second": 15.207,
      "eval_steps_per_second": 7.603,
      "step": 50
    }
  ],
  "logging_steps": 5,
  "max_steps": 50,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 13,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 8200486649856000.0,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}