|
{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.007435865658693766,
  "eval_steps": 5,
  "global_step": 30,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.00024786218862312553,
      "eval_loss": 2.09014892578125,
      "eval_runtime": 305.3412,
      "eval_samples_per_second": 5.564,
      "eval_steps_per_second": 2.784,
      "step": 1
    },
    {
      "epoch": 0.0007435865658693766,
      "grad_norm": 1.1143418550491333,
      "learning_rate": 4e-05,
      "loss": 1.8924,
      "step": 3
    },
    {
      "epoch": 0.0012393109431156277,
      "eval_loss": 1.9998418092727661,
      "eval_runtime": 304.6612,
      "eval_samples_per_second": 5.577,
      "eval_steps_per_second": 2.79,
      "step": 5
    },
    {
      "epoch": 0.0014871731317387532,
      "grad_norm": 1.0754435062408447,
      "learning_rate": 8e-05,
      "loss": 1.5918,
      "step": 6
    },
    {
      "epoch": 0.00223075969760813,
      "grad_norm": 1.3846229314804077,
      "learning_rate": 0.00012,
      "loss": 1.893,
      "step": 9
    },
    {
      "epoch": 0.0024786218862312553,
      "eval_loss": 1.2593779563903809,
      "eval_runtime": 306.6124,
      "eval_samples_per_second": 5.541,
      "eval_steps_per_second": 2.772,
      "step": 10
    },
    {
      "epoch": 0.0029743462634775064,
      "grad_norm": 1.2388172149658203,
      "learning_rate": 0.00016,
      "loss": 1.3159,
      "step": 12
    },
    {
      "epoch": 0.003717932829346883,
      "grad_norm": 0.8172288537025452,
      "learning_rate": 0.0002,
      "loss": 0.8568,
      "step": 15
    },
    {
      "epoch": 0.003717932829346883,
      "eval_loss": 0.7768763899803162,
      "eval_runtime": 304.5869,
      "eval_samples_per_second": 5.578,
      "eval_steps_per_second": 2.791,
      "step": 15
    },
    {
      "epoch": 0.00446151939521626,
      "grad_norm": 0.7465487122535706,
      "learning_rate": 0.00018090169943749476,
      "loss": 0.6662,
      "step": 18
    },
    {
      "epoch": 0.004957243772462511,
      "eval_loss": 0.6128036975860596,
      "eval_runtime": 297.1192,
      "eval_samples_per_second": 5.718,
      "eval_steps_per_second": 2.861,
      "step": 20
    },
    {
      "epoch": 0.005205105961085636,
      "grad_norm": 1.5202752351760864,
      "learning_rate": 0.00013090169943749476,
      "loss": 0.6669,
      "step": 21
    },
    {
      "epoch": 0.005948692526955013,
      "grad_norm": 0.6265591979026794,
      "learning_rate": 6.909830056250527e-05,
      "loss": 0.5783,
      "step": 24
    },
    {
      "epoch": 0.006196554715578138,
      "eval_loss": 0.5611212253570557,
      "eval_runtime": 146.0056,
      "eval_samples_per_second": 11.637,
      "eval_steps_per_second": 5.822,
      "step": 25
    },
    {
      "epoch": 0.006692279092824389,
      "grad_norm": 0.6711304783821106,
      "learning_rate": 1.9098300562505266e-05,
      "loss": 0.5688,
      "step": 27
    },
    {
      "epoch": 0.007435865658693766,
      "grad_norm": 0.5014675855636597,
      "learning_rate": 0.0,
      "loss": 0.5148,
      "step": 30
    },
    {
      "epoch": 0.007435865658693766,
      "eval_loss": 0.5482997298240662,
      "eval_runtime": 145.7456,
      "eval_samples_per_second": 11.657,
      "eval_steps_per_second": 5.832,
      "step": 30
    }
  ],
  "logging_steps": 3,
  "max_steps": 30,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 15,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 1.016432204709888e+16,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}
|
|