{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.3220035778175313,
  "eval_steps": 5,
  "global_step": 45,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.007155635062611807,
      "eval_loss": 8.003866195678711,
      "eval_runtime": 21.074,
      "eval_samples_per_second": 11.199,
      "eval_steps_per_second": 1.424,
      "step": 1
    },
    {
      "epoch": 0.02146690518783542,
      "grad_norm": 36.30503845214844,
      "learning_rate": 3e-05,
      "loss": 29.32,
      "step": 3
    },
    {
      "epoch": 0.03577817531305903,
      "eval_loss": 6.45150899887085,
      "eval_runtime": 21.281,
      "eval_samples_per_second": 11.09,
      "eval_steps_per_second": 1.41,
      "step": 5
    },
    {
      "epoch": 0.04293381037567084,
      "grad_norm": 59.01460647583008,
      "learning_rate": 6e-05,
      "loss": 29.5122,
      "step": 6
    },
    {
      "epoch": 0.06440071556350627,
      "grad_norm": 31.912750244140625,
      "learning_rate": 9e-05,
      "loss": 15.7048,
      "step": 9
    },
    {
      "epoch": 0.07155635062611806,
      "eval_loss": 1.4119746685028076,
      "eval_runtime": 21.3324,
      "eval_samples_per_second": 11.063,
      "eval_steps_per_second": 1.406,
      "step": 10
    },
    {
      "epoch": 0.08586762075134168,
      "grad_norm": 12.302176475524902,
      "learning_rate": 9.938441702975689e-05,
      "loss": 5.5122,
      "step": 12
    },
    {
      "epoch": 0.1073345259391771,
      "grad_norm": 7.404484272003174,
      "learning_rate": 9.619397662556435e-05,
      "loss": 1.4541,
      "step": 15
    },
    {
      "epoch": 0.1073345259391771,
      "eval_loss": 0.1575503647327423,
      "eval_runtime": 21.3407,
      "eval_samples_per_second": 11.059,
      "eval_steps_per_second": 1.406,
      "step": 15
    },
    {
      "epoch": 0.12880143112701253,
      "grad_norm": 6.458031177520752,
      "learning_rate": 9.045084971874738e-05,
      "loss": 0.5215,
      "step": 18
    },
    {
      "epoch": 0.14311270125223613,
      "eval_loss": 0.0960574522614479,
      "eval_runtime": 21.3479,
      "eval_samples_per_second": 11.055,
      "eval_steps_per_second": 1.405,
      "step": 20
    },
    {
      "epoch": 0.15026833631484796,
      "grad_norm": 21.97161293029785,
      "learning_rate": 8.247240241650918e-05,
      "loss": 0.3563,
      "step": 21
    },
    {
      "epoch": 0.17173524150268335,
      "grad_norm": 2.182478666305542,
      "learning_rate": 7.269952498697734e-05,
      "loss": 0.2626,
      "step": 24
    },
    {
      "epoch": 0.17889087656529518,
      "eval_loss": 0.041570138186216354,
      "eval_runtime": 21.352,
      "eval_samples_per_second": 11.053,
      "eval_steps_per_second": 1.405,
      "step": 25
    },
    {
      "epoch": 0.19320214669051877,
      "grad_norm": 1.8692655563354492,
      "learning_rate": 6.167226819279528e-05,
      "loss": 0.1014,
      "step": 27
    },
    {
      "epoch": 0.2146690518783542,
      "grad_norm": 10.44522762298584,
      "learning_rate": 5e-05,
      "loss": 0.4899,
      "step": 30
    },
    {
      "epoch": 0.2146690518783542,
      "eval_loss": 0.021523064002394676,
      "eval_runtime": 21.3288,
      "eval_samples_per_second": 11.065,
      "eval_steps_per_second": 1.407,
      "step": 30
    },
    {
      "epoch": 0.23613595706618962,
      "grad_norm": 1.440638780593872,
      "learning_rate": 3.832773180720475e-05,
      "loss": 0.0295,
      "step": 33
    },
    {
      "epoch": 0.2504472271914132,
      "eval_loss": 0.017327968031167984,
      "eval_runtime": 21.3349,
      "eval_samples_per_second": 11.062,
      "eval_steps_per_second": 1.406,
      "step": 35
    },
    {
      "epoch": 0.25760286225402507,
      "grad_norm": 0.0844140574336052,
      "learning_rate": 2.7300475013022663e-05,
      "loss": 0.0055,
      "step": 36
    },
    {
      "epoch": 0.27906976744186046,
      "grad_norm": 0.12384696304798126,
      "learning_rate": 1.7527597583490822e-05,
      "loss": 0.0257,
      "step": 39
    },
    {
      "epoch": 0.28622540250447226,
      "eval_loss": 0.014691304415464401,
      "eval_runtime": 21.3348,
      "eval_samples_per_second": 11.062,
      "eval_steps_per_second": 1.406,
      "step": 40
    },
    {
      "epoch": 0.3005366726296959,
      "grad_norm": 13.742753982543945,
      "learning_rate": 9.549150281252633e-06,
      "loss": 0.3929,
      "step": 42
    },
    {
      "epoch": 0.3220035778175313,
      "grad_norm": 3.5590667724609375,
      "learning_rate": 3.8060233744356633e-06,
      "loss": 0.0585,
      "step": 45
    },
    {
      "epoch": 0.3220035778175313,
      "eval_loss": 0.015118077397346497,
      "eval_runtime": 21.333,
      "eval_samples_per_second": 11.063,
      "eval_steps_per_second": 1.406,
      "step": 45
    }
  ],
  "logging_steps": 3,
  "max_steps": 50,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 5,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 5.987288966234112e+16,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}