{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.014712643678160919,
  "eval_steps": 50,
  "global_step": 200,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 7.35632183908046e-05,
      "eval_loss": 2.3851115703582764,
      "eval_runtime": 592.4759,
      "eval_samples_per_second": 9.661,
      "eval_steps_per_second": 4.831,
      "step": 1
    },
    {
      "epoch": 0.000367816091954023,
      "grad_norm": 3.2054150104522705,
      "learning_rate": 5e-05,
      "loss": 2.3289,
      "step": 5
    },
    {
      "epoch": 0.000735632183908046,
      "grad_norm": 2.5026473999023438,
      "learning_rate": 0.0001,
      "loss": 2.0213,
      "step": 10
    },
    {
      "epoch": 0.001103448275862069,
      "grad_norm": 1.4173187017440796,
      "learning_rate": 9.98292246503335e-05,
      "loss": 1.7658,
      "step": 15
    },
    {
      "epoch": 0.001471264367816092,
      "grad_norm": 1.2474634647369385,
      "learning_rate": 9.931806517013612e-05,
      "loss": 1.5871,
      "step": 20
    },
    {
      "epoch": 0.0018390804597701149,
      "grad_norm": 1.3965306282043457,
      "learning_rate": 9.847001329696653e-05,
      "loss": 1.5578,
      "step": 25
    },
    {
      "epoch": 0.002206896551724138,
      "grad_norm": 1.4394673109054565,
      "learning_rate": 9.729086208503174e-05,
      "loss": 1.5593,
      "step": 30
    },
    {
      "epoch": 0.002574712643678161,
      "grad_norm": 1.296766757965088,
      "learning_rate": 9.578866633275288e-05,
      "loss": 1.447,
      "step": 35
    },
    {
      "epoch": 0.002942528735632184,
      "grad_norm": 1.230973243713379,
      "learning_rate": 9.397368756032445e-05,
      "loss": 1.641,
      "step": 40
    },
    {
      "epoch": 0.003310344827586207,
      "grad_norm": 1.3208956718444824,
      "learning_rate": 9.185832391312644e-05,
      "loss": 1.6261,
      "step": 45
    },
    {
      "epoch": 0.0036781609195402297,
      "grad_norm": 1.1837828159332275,
      "learning_rate": 8.945702546981969e-05,
      "loss": 1.6179,
      "step": 50
    },
    {
      "epoch": 0.0036781609195402297,
      "eval_loss": 1.5870054960250854,
      "eval_runtime": 594.4969,
      "eval_samples_per_second": 9.628,
      "eval_steps_per_second": 4.814,
      "step": 50
    },
    {
      "epoch": 0.004045977011494253,
      "grad_norm": 1.1053797006607056,
      "learning_rate": 8.678619553365659e-05,
      "loss": 1.6441,
      "step": 55
    },
    {
      "epoch": 0.004413793103448276,
      "grad_norm": 1.1771963834762573,
      "learning_rate": 8.386407858128706e-05,
      "loss": 1.5023,
      "step": 60
    },
    {
      "epoch": 0.0047816091954022985,
      "grad_norm": 0.9694396257400513,
      "learning_rate": 8.07106356344834e-05,
      "loss": 1.684,
      "step": 65
    },
    {
      "epoch": 0.005149425287356322,
      "grad_norm": 1.202872395515442,
      "learning_rate": 7.734740790612136e-05,
      "loss": 1.6616,
      "step": 70
    },
    {
      "epoch": 0.005517241379310344,
      "grad_norm": 1.0650283098220825,
      "learning_rate": 7.379736965185368e-05,
      "loss": 1.5284,
      "step": 75
    },
    {
      "epoch": 0.005885057471264368,
      "grad_norm": 1.0592875480651855,
      "learning_rate": 7.008477123264848e-05,
      "loss": 1.6002,
      "step": 80
    },
    {
      "epoch": 0.006252873563218391,
      "grad_norm": 1.0368422269821167,
      "learning_rate": 6.623497346023418e-05,
      "loss": 1.5468,
      "step": 85
    },
    {
      "epoch": 0.006620689655172414,
      "grad_norm": 1.0760207176208496,
      "learning_rate": 6.227427435703997e-05,
      "loss": 1.5335,
      "step": 90
    },
    {
      "epoch": 0.006988505747126437,
      "grad_norm": 1.0518993139266968,
      "learning_rate": 5.8229729514036705e-05,
      "loss": 1.5375,
      "step": 95
    },
    {
      "epoch": 0.0073563218390804595,
      "grad_norm": 0.99644535779953,
      "learning_rate": 5.4128967273616625e-05,
      "loss": 1.57,
      "step": 100
    },
    {
      "epoch": 0.0073563218390804595,
      "eval_loss": 1.5718929767608643,
      "eval_runtime": 594.3131,
      "eval_samples_per_second": 9.631,
      "eval_steps_per_second": 4.816,
      "step": 100
    },
    {
      "epoch": 0.007724137931034483,
      "grad_norm": 0.9961000084877014,
      "learning_rate": 5e-05,
      "loss": 1.5176,
      "step": 105
    },
    {
      "epoch": 0.008091954022988505,
      "grad_norm": 0.9916229844093323,
      "learning_rate": 4.5871032726383386e-05,
      "loss": 1.5182,
      "step": 110
    },
    {
      "epoch": 0.00845977011494253,
      "grad_norm": 1.0973960161209106,
      "learning_rate": 4.17702704859633e-05,
      "loss": 1.7128,
      "step": 115
    },
    {
      "epoch": 0.008827586206896552,
      "grad_norm": 0.992687463760376,
      "learning_rate": 3.772572564296005e-05,
      "loss": 1.6312,
      "step": 120
    },
    {
      "epoch": 0.009195402298850575,
      "grad_norm": 1.0211665630340576,
      "learning_rate": 3.3765026539765834e-05,
      "loss": 1.6858,
      "step": 125
    },
    {
      "epoch": 0.009563218390804597,
      "grad_norm": 1.02646005153656,
      "learning_rate": 2.991522876735154e-05,
      "loss": 1.4403,
      "step": 130
    },
    {
      "epoch": 0.009931034482758621,
      "grad_norm": 1.0905909538269043,
      "learning_rate": 2.6202630348146324e-05,
      "loss": 1.4371,
      "step": 135
    },
    {
      "epoch": 0.010298850574712644,
      "grad_norm": 1.1285451650619507,
      "learning_rate": 2.2652592093878666e-05,
      "loss": 1.5096,
      "step": 140
    },
    {
      "epoch": 0.010666666666666666,
      "grad_norm": 1.1632394790649414,
      "learning_rate": 1.928936436551661e-05,
      "loss": 1.4889,
      "step": 145
    },
    {
      "epoch": 0.011034482758620689,
      "grad_norm": 1.2418015003204346,
      "learning_rate": 1.6135921418712956e-05,
      "loss": 1.7854,
      "step": 150
    },
    {
      "epoch": 0.011034482758620689,
      "eval_loss": 1.5659912824630737,
      "eval_runtime": 594.3479,
      "eval_samples_per_second": 9.631,
      "eval_steps_per_second": 4.815,
      "step": 150
    },
    {
      "epoch": 0.011402298850574713,
      "grad_norm": 1.043492317199707,
      "learning_rate": 1.3213804466343421e-05,
      "loss": 1.6572,
      "step": 155
    },
    {
      "epoch": 0.011770114942528736,
      "grad_norm": 0.958644688129425,
      "learning_rate": 1.0542974530180327e-05,
      "loss": 1.6572,
      "step": 160
    },
    {
      "epoch": 0.012137931034482758,
      "grad_norm": 0.9402018785476685,
      "learning_rate": 8.141676086873572e-06,
      "loss": 1.5295,
      "step": 165
    },
    {
      "epoch": 0.012505747126436782,
      "grad_norm": 1.0198190212249756,
      "learning_rate": 6.026312439675552e-06,
      "loss": 1.532,
      "step": 170
    },
    {
      "epoch": 0.012873563218390805,
      "grad_norm": 0.9930036067962646,
      "learning_rate": 4.2113336672471245e-06,
      "loss": 1.5778,
      "step": 175
    },
    {
      "epoch": 0.013241379310344827,
      "grad_norm": 0.9383140802383423,
      "learning_rate": 2.7091379149682685e-06,
      "loss": 1.4909,
      "step": 180
    },
    {
      "epoch": 0.01360919540229885,
      "grad_norm": 1.0298653841018677,
      "learning_rate": 1.5299867030334814e-06,
      "loss": 1.5713,
      "step": 185
    },
    {
      "epoch": 0.013977011494252874,
      "grad_norm": 0.9578173160552979,
      "learning_rate": 6.819348298638839e-07,
      "loss": 1.4889,
      "step": 190
    },
    {
      "epoch": 0.014344827586206896,
      "grad_norm": 1.009029507637024,
      "learning_rate": 1.7077534966650766e-07,
      "loss": 1.5112,
      "step": 195
    },
    {
      "epoch": 0.014712643678160919,
      "grad_norm": 1.2505810260772705,
      "learning_rate": 0.0,
      "loss": 1.7079,
      "step": 200
    },
    {
      "epoch": 0.014712643678160919,
      "eval_loss": 1.564515233039856,
      "eval_runtime": 594.4025,
      "eval_samples_per_second": 9.63,
      "eval_steps_per_second": 4.815,
      "step": 200
    }
  ],
  "logging_steps": 5,
  "max_steps": 200,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 8.4487308312576e+16,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}