{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.015281757402101241,
  "eval_steps": 9,
  "global_step": 100,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.00015281757402101241,
      "eval_loss": 1.1498295068740845,
      "eval_runtime": 296.5009,
      "eval_samples_per_second": 37.17,
      "eval_steps_per_second": 4.648,
      "step": 1
    },
    {
      "epoch": 0.00045845272206303727,
      "grad_norm": 0.5009817481040955,
      "learning_rate": 3e-05,
      "loss": 1.0517,
      "step": 3
    },
    {
      "epoch": 0.0009169054441260745,
      "grad_norm": 0.5765618681907654,
      "learning_rate": 6e-05,
      "loss": 1.2558,
      "step": 6
    },
    {
      "epoch": 0.0013753581661891117,
      "grad_norm": 0.680789589881897,
      "learning_rate": 9e-05,
      "loss": 1.1772,
      "step": 9
    },
    {
      "epoch": 0.0013753581661891117,
      "eval_loss": 1.0687979459762573,
      "eval_runtime": 296.5136,
      "eval_samples_per_second": 37.169,
      "eval_steps_per_second": 4.647,
      "step": 9
    },
    {
      "epoch": 0.001833810888252149,
      "grad_norm": 0.6807825565338135,
      "learning_rate": 9.987820251299122e-05,
      "loss": 0.8785,
      "step": 12
    },
    {
      "epoch": 0.002292263610315186,
      "grad_norm": 0.6680711507797241,
      "learning_rate": 9.924038765061042e-05,
      "loss": 0.6678,
      "step": 15
    },
    {
      "epoch": 0.0027507163323782233,
      "grad_norm": 0.5782425999641418,
      "learning_rate": 9.806308479691595e-05,
      "loss": 0.6998,
      "step": 18
    },
    {
      "epoch": 0.0027507163323782233,
      "eval_loss": 0.45378875732421875,
      "eval_runtime": 297.1012,
      "eval_samples_per_second": 37.095,
      "eval_steps_per_second": 4.638,
      "step": 18
    },
    {
      "epoch": 0.003209169054441261,
      "grad_norm": 0.2907300889492035,
      "learning_rate": 9.635919272833938e-05,
      "loss": 0.3854,
      "step": 21
    },
    {
      "epoch": 0.003667621776504298,
      "grad_norm": 0.39028528332710266,
      "learning_rate": 9.414737964294636e-05,
      "loss": 0.4963,
      "step": 24
    },
    {
      "epoch": 0.004126074498567335,
      "grad_norm": 0.2929573059082031,
      "learning_rate": 9.145187862775209e-05,
      "loss": 0.2358,
      "step": 27
    },
    {
      "epoch": 0.004126074498567335,
      "eval_loss": 0.32671964168548584,
      "eval_runtime": 297.1526,
      "eval_samples_per_second": 37.089,
      "eval_steps_per_second": 4.637,
      "step": 27
    },
    {
      "epoch": 0.004584527220630372,
      "grad_norm": 0.43915438652038574,
      "learning_rate": 8.83022221559489e-05,
      "loss": 0.4901,
      "step": 30
    },
    {
      "epoch": 0.00504297994269341,
      "grad_norm": 0.25669264793395996,
      "learning_rate": 8.473291852294987e-05,
      "loss": 0.3356,
      "step": 33
    },
    {
      "epoch": 0.005501432664756447,
      "grad_norm": 0.24750342965126038,
      "learning_rate": 8.07830737662829e-05,
      "loss": 0.2031,
      "step": 36
    },
    {
      "epoch": 0.005501432664756447,
      "eval_loss": 0.3014591932296753,
      "eval_runtime": 297.1231,
      "eval_samples_per_second": 37.092,
      "eval_steps_per_second": 4.638,
      "step": 36
    },
    {
      "epoch": 0.005959885386819484,
      "grad_norm": 0.33709627389907837,
      "learning_rate": 7.649596321166024e-05,
      "loss": 0.3068,
      "step": 39
    },
    {
      "epoch": 0.006418338108882522,
      "grad_norm": 0.18148460984230042,
      "learning_rate": 7.191855733945387e-05,
      "loss": 0.1992,
      "step": 42
    },
    {
      "epoch": 0.006876790830945559,
      "grad_norm": 0.38474711775779724,
      "learning_rate": 6.710100716628344e-05,
      "loss": 0.4824,
      "step": 45
    },
    {
      "epoch": 0.006876790830945559,
      "eval_loss": 0.2919241786003113,
      "eval_runtime": 297.3015,
      "eval_samples_per_second": 37.07,
      "eval_steps_per_second": 4.635,
      "step": 45
    },
    {
      "epoch": 0.007335243553008596,
      "grad_norm": 0.12017693370580673,
      "learning_rate": 6.209609477998338e-05,
      "loss": 0.1679,
      "step": 48
    },
    {
      "epoch": 0.007793696275071633,
      "grad_norm": 0.22833120822906494,
      "learning_rate": 5.695865504800327e-05,
      "loss": 0.2791,
      "step": 51
    },
    {
      "epoch": 0.00825214899713467,
      "grad_norm": 0.12489762902259827,
      "learning_rate": 5.174497483512506e-05,
      "loss": 0.1671,
      "step": 54
    },
    {
      "epoch": 0.00825214899713467,
      "eval_loss": 0.28554975986480713,
      "eval_runtime": 296.7155,
      "eval_samples_per_second": 37.143,
      "eval_steps_per_second": 4.644,
      "step": 54
    },
    {
      "epoch": 0.008710601719197708,
      "grad_norm": 0.19015756249427795,
      "learning_rate": 4.6512176312793736e-05,
      "loss": 0.3258,
      "step": 57
    },
    {
      "epoch": 0.009169054441260744,
      "grad_norm": 0.1945570856332779,
      "learning_rate": 4.131759111665349e-05,
      "loss": 0.3784,
      "step": 60
    },
    {
      "epoch": 0.009627507163323782,
      "grad_norm": 0.2393001914024353,
      "learning_rate": 3.6218132209150045e-05,
      "loss": 0.3064,
      "step": 63
    },
    {
      "epoch": 0.009627507163323782,
      "eval_loss": 0.2829715311527252,
      "eval_runtime": 296.4782,
      "eval_samples_per_second": 37.173,
      "eval_steps_per_second": 4.648,
      "step": 63
    },
    {
      "epoch": 0.01008595988538682,
      "grad_norm": 0.2453099638223648,
      "learning_rate": 3.12696703292044e-05,
      "loss": 0.4036,
      "step": 66
    },
    {
      "epoch": 0.010544412607449857,
      "grad_norm": 0.2992512881755829,
      "learning_rate": 2.6526421860705473e-05,
      "loss": 0.24,
      "step": 69
    },
    {
      "epoch": 0.011002865329512893,
      "grad_norm": 0.21949434280395508,
      "learning_rate": 2.2040354826462668e-05,
      "loss": 0.3146,
      "step": 72
    },
    {
      "epoch": 0.011002865329512893,
      "eval_loss": 0.2810784876346588,
      "eval_runtime": 296.6441,
      "eval_samples_per_second": 37.152,
      "eval_steps_per_second": 4.645,
      "step": 72
    },
    {
      "epoch": 0.011461318051575931,
      "grad_norm": 0.21194490790367126,
      "learning_rate": 1.7860619515673033e-05,
      "loss": 0.3362,
      "step": 75
    },
    {
      "epoch": 0.011919770773638969,
      "grad_norm": 0.25095176696777344,
      "learning_rate": 1.4033009983067452e-05,
      "loss": 0.262,
      "step": 78
    },
    {
      "epoch": 0.012378223495702006,
      "grad_norm": 0.11870462447404861,
      "learning_rate": 1.0599462319663905e-05,
      "loss": 0.1055,
      "step": 81
    },
    {
      "epoch": 0.012378223495702006,
      "eval_loss": 0.2801515460014343,
      "eval_runtime": 296.9407,
      "eval_samples_per_second": 37.115,
      "eval_steps_per_second": 4.641,
      "step": 81
    },
    {
      "epoch": 0.012836676217765044,
      "grad_norm": 0.16880249977111816,
      "learning_rate": 7.597595192178702e-06,
      "loss": 0.192,
      "step": 84
    },
    {
      "epoch": 0.01329512893982808,
      "grad_norm": 0.30556225776672363,
      "learning_rate": 5.060297685041659e-06,
      "loss": 0.2727,
      "step": 87
    },
    {
      "epoch": 0.013753581661891117,
      "grad_norm": 0.23559348285198212,
      "learning_rate": 3.0153689607045845e-06,
      "loss": 0.2933,
      "step": 90
    },
    {
      "epoch": 0.013753581661891117,
      "eval_loss": 0.2797732949256897,
      "eval_runtime": 296.9853,
      "eval_samples_per_second": 37.11,
      "eval_steps_per_second": 4.64,
      "step": 90
    },
    {
      "epoch": 0.014212034383954155,
      "grad_norm": 0.24415625631809235,
      "learning_rate": 1.4852136862001764e-06,
      "loss": 0.3288,
      "step": 93
    },
    {
      "epoch": 0.014670487106017193,
      "grad_norm": 0.09298162907361984,
      "learning_rate": 4.865965629214819e-07,
      "loss": 0.0904,
      "step": 96
    },
    {
      "epoch": 0.015128939828080229,
      "grad_norm": 0.007296608295291662,
      "learning_rate": 3.04586490452119e-08,
      "loss": 0.1468,
      "step": 99
    },
    {
      "epoch": 0.015128939828080229,
      "eval_loss": 0.2796213924884796,
      "eval_runtime": 296.5122,
      "eval_samples_per_second": 37.169,
      "eval_steps_per_second": 4.647,
      "step": 99
    }
  ],
  "logging_steps": 3,
  "max_steps": 100,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 9,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 3.20235311726592e+16,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}