{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.010989010989011,
  "eval_steps": 18,
  "global_step": 69,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.014652014652014652,
      "eval_loss": 1.734892725944519,
      "eval_runtime": 1.6866,
      "eval_samples_per_second": 17.195,
      "eval_steps_per_second": 8.894,
      "step": 1
    },
    {
      "epoch": 0.07326007326007326,
      "grad_norm": 0.23976674675941467,
      "learning_rate": 5e-05,
      "loss": 1.7015,
      "step": 5
    },
    {
      "epoch": 0.14652014652014653,
      "grad_norm": 0.25410085916519165,
      "learning_rate": 0.0001,
      "loss": 1.6799,
      "step": 10
    },
    {
      "epoch": 0.21978021978021978,
      "grad_norm": 0.42443525791168213,
      "learning_rate": 9.82383934407258e-05,
      "loss": 1.6018,
      "step": 15
    },
    {
      "epoch": 0.26373626373626374,
      "eval_loss": 1.4797354936599731,
      "eval_runtime": 1.6858,
      "eval_samples_per_second": 17.202,
      "eval_steps_per_second": 8.898,
      "step": 18
    },
    {
      "epoch": 0.29304029304029305,
      "grad_norm": 0.3112971782684326,
      "learning_rate": 9.30777040696903e-05,
      "loss": 1.5036,
      "step": 20
    },
    {
      "epoch": 0.3663003663003663,
      "grad_norm": 0.26191750168800354,
      "learning_rate": 8.488157605674925e-05,
      "loss": 1.4147,
      "step": 25
    },
    {
      "epoch": 0.43956043956043955,
      "grad_norm": 0.2886798083782196,
      "learning_rate": 7.422754351663252e-05,
      "loss": 1.3807,
      "step": 30
    },
    {
      "epoch": 0.5128205128205128,
      "grad_norm": 0.24221237003803253,
      "learning_rate": 6.186633499355576e-05,
      "loss": 1.3328,
      "step": 35
    },
    {
      "epoch": 0.5274725274725275,
      "eval_loss": 1.3494279384613037,
      "eval_runtime": 1.6879,
      "eval_samples_per_second": 17.181,
      "eval_steps_per_second": 8.887,
      "step": 36
    },
    {
      "epoch": 0.5860805860805861,
      "grad_norm": 0.2196742296218872,
      "learning_rate": 4.866897392811126e-05,
      "loss": 1.399,
      "step": 40
    },
    {
      "epoch": 0.6593406593406593,
      "grad_norm": 0.23982864618301392,
      "learning_rate": 3.556540263301896e-05,
      "loss": 1.3184,
      "step": 45
    },
    {
      "epoch": 0.7326007326007326,
      "grad_norm": 0.22501102089881897,
      "learning_rate": 2.347895459401288e-05,
      "loss": 1.2391,
      "step": 50
    },
    {
      "epoch": 0.7912087912087912,
      "eval_loss": 1.2975658178329468,
      "eval_runtime": 1.6936,
      "eval_samples_per_second": 17.124,
      "eval_steps_per_second": 8.857,
      "step": 54
    },
    {
      "epoch": 0.8058608058608059,
      "grad_norm": 0.17278903722763062,
      "learning_rate": 1.3261292456846647e-05,
      "loss": 1.3108,
      "step": 55
    },
    {
      "epoch": 0.8791208791208791,
      "grad_norm": 0.17497693002223969,
      "learning_rate": 5.6323962471714286e-06,
      "loss": 1.3163,
      "step": 60
    },
    {
      "epoch": 0.9523809523809523,
      "grad_norm": 0.23885966837406158,
      "learning_rate": 1.1298305091066664e-06,
      "loss": 1.292,
      "step": 65
    }
  ],
  "logging_steps": 5,
  "max_steps": 69,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 2,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 1.0986773236678656e+16,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}