{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.004218697266284171,
  "eval_steps": 9,
  "global_step": 25,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.00016874789065136686,
      "grad_norm": 0.6673353910446167,
      "learning_rate": 1e-05,
      "loss": 1.1828,
      "step": 1
    },
    {
      "epoch": 0.00016874789065136686,
      "eval_loss": 1.133446216583252,
      "eval_runtime": 670.9071,
      "eval_samples_per_second": 7.438,
      "eval_steps_per_second": 0.93,
      "step": 1
    },
    {
      "epoch": 0.0003374957813027337,
      "grad_norm": 0.6421517133712769,
      "learning_rate": 2e-05,
      "loss": 1.1352,
      "step": 2
    },
    {
      "epoch": 0.0005062436719541006,
      "grad_norm": 0.8541758060455322,
      "learning_rate": 3e-05,
      "loss": 0.9903,
      "step": 3
    },
    {
      "epoch": 0.0006749915626054674,
      "grad_norm": 0.4865100383758545,
      "learning_rate": 4e-05,
      "loss": 1.1104,
      "step": 4
    },
    {
      "epoch": 0.0008437394532568343,
      "grad_norm": 0.5594805479049683,
      "learning_rate": 5e-05,
      "loss": 0.9953,
      "step": 5
    },
    {
      "epoch": 0.0010124873439082012,
      "grad_norm": 0.5936105251312256,
      "learning_rate": 6e-05,
      "loss": 1.019,
      "step": 6
    },
    {
      "epoch": 0.001181235234559568,
      "grad_norm": 0.7241169214248657,
      "learning_rate": 7e-05,
      "loss": 1.0544,
      "step": 7
    },
    {
      "epoch": 0.0013499831252109348,
      "grad_norm": 0.8012577295303345,
      "learning_rate": 8e-05,
      "loss": 1.0349,
      "step": 8
    },
    {
      "epoch": 0.0015187310158623017,
      "grad_norm": 0.6014957427978516,
      "learning_rate": 9e-05,
      "loss": 0.8921,
      "step": 9
    },
    {
      "epoch": 0.0015187310158623017,
      "eval_loss": 0.9576449990272522,
      "eval_runtime": 672.5267,
      "eval_samples_per_second": 7.42,
      "eval_steps_per_second": 0.928,
      "step": 9
    },
    {
      "epoch": 0.0016874789065136687,
      "grad_norm": 0.3161397874355316,
      "learning_rate": 0.0001,
      "loss": 1.0526,
      "step": 10
    },
    {
      "epoch": 0.0018562267971650355,
      "grad_norm": 0.3254038393497467,
      "learning_rate": 9.99695413509548e-05,
      "loss": 0.9626,
      "step": 11
    },
    {
      "epoch": 0.0020249746878164025,
      "grad_norm": 0.29402822256088257,
      "learning_rate": 9.987820251299122e-05,
      "loss": 0.8483,
      "step": 12
    },
    {
      "epoch": 0.0021937225784677693,
      "grad_norm": 0.3435806930065155,
      "learning_rate": 9.972609476841367e-05,
      "loss": 0.9679,
      "step": 13
    },
    {
      "epoch": 0.002362470469119136,
      "grad_norm": 0.35293349623680115,
      "learning_rate": 9.951340343707852e-05,
      "loss": 0.9465,
      "step": 14
    },
    {
      "epoch": 0.002531218359770503,
      "grad_norm": 0.3628636598587036,
      "learning_rate": 9.924038765061042e-05,
      "loss": 0.832,
      "step": 15
    },
    {
      "epoch": 0.0026999662504218697,
      "grad_norm": 0.354307621717453,
      "learning_rate": 9.890738003669029e-05,
      "loss": 0.7619,
      "step": 16
    },
    {
      "epoch": 0.0028687141410732365,
      "grad_norm": 0.40063461661338806,
      "learning_rate": 9.851478631379982e-05,
      "loss": 0.8526,
      "step": 17
    },
    {
      "epoch": 0.0030374620317246033,
      "grad_norm": 0.3604790270328522,
      "learning_rate": 9.806308479691595e-05,
      "loss": 0.8343,
      "step": 18
    },
    {
      "epoch": 0.0030374620317246033,
      "eval_loss": 0.8436176180839539,
      "eval_runtime": 672.8891,
      "eval_samples_per_second": 7.416,
      "eval_steps_per_second": 0.927,
      "step": 18
    },
    {
      "epoch": 0.00320620992237597,
      "grad_norm": 0.32435283064842224,
      "learning_rate": 9.755282581475769e-05,
      "loss": 0.8905,
      "step": 19
    },
    {
      "epoch": 0.0033749578130273373,
      "grad_norm": 0.29493603110313416,
      "learning_rate": 9.698463103929542e-05,
      "loss": 0.8299,
      "step": 20
    },
    {
      "epoch": 0.003543705703678704,
      "grad_norm": 0.28454816341400146,
      "learning_rate": 9.635919272833938e-05,
      "loss": 0.8245,
      "step": 21
    },
    {
      "epoch": 0.003712453594330071,
      "grad_norm": 0.30943214893341064,
      "learning_rate": 9.567727288213005e-05,
      "loss": 0.8342,
      "step": 22
    },
    {
      "epoch": 0.0038812014849814377,
      "grad_norm": 0.2970975637435913,
      "learning_rate": 9.493970231495835e-05,
      "loss": 0.854,
      "step": 23
    },
    {
      "epoch": 0.004049949375632805,
      "grad_norm": 0.32714584469795227,
      "learning_rate": 9.414737964294636e-05,
      "loss": 0.8385,
      "step": 24
    },
    {
      "epoch": 0.004218697266284171,
      "grad_norm": 0.30870357155799866,
      "learning_rate": 9.330127018922194e-05,
      "loss": 0.715,
      "step": 25
    }
  ],
  "logging_steps": 1,
  "max_steps": 100,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 25,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 2.049391762341888e+16,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}