{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.3436426116838488,
  "eval_steps": 9,
  "global_step": 100,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.003436426116838488,
      "eval_loss": 1.0753878355026245,
      "eval_runtime": 70.8712,
      "eval_samples_per_second": 6.914,
      "eval_steps_per_second": 0.875,
      "step": 1
    },
    {
      "epoch": 0.010309278350515464,
      "grad_norm": 9.508790016174316,
      "learning_rate": 3e-05,
      "loss": 4.2728,
      "step": 3
    },
    {
      "epoch": 0.020618556701030927,
      "grad_norm": 4.23081111907959,
      "learning_rate": 6e-05,
      "loss": 4.2308,
      "step": 6
    },
    {
      "epoch": 0.030927835051546393,
      "grad_norm": 2.994974136352539,
      "learning_rate": 9e-05,
      "loss": 4.0505,
      "step": 9
    },
    {
      "epoch": 0.030927835051546393,
      "eval_loss": 0.9270894527435303,
      "eval_runtime": 72.0129,
      "eval_samples_per_second": 6.804,
      "eval_steps_per_second": 0.861,
      "step": 9
    },
    {
      "epoch": 0.041237113402061855,
      "grad_norm": 3.4861974716186523,
      "learning_rate": 9.987820251299122e-05,
      "loss": 3.3622,
      "step": 12
    },
    {
      "epoch": 0.05154639175257732,
      "grad_norm": 2.4739320278167725,
      "learning_rate": 9.924038765061042e-05,
      "loss": 3.4222,
      "step": 15
    },
    {
      "epoch": 0.061855670103092786,
      "grad_norm": 2.656764507293701,
      "learning_rate": 9.806308479691595e-05,
      "loss": 3.3192,
      "step": 18
    },
    {
      "epoch": 0.061855670103092786,
      "eval_loss": 0.781949520111084,
      "eval_runtime": 72.0068,
      "eval_samples_per_second": 6.805,
      "eval_steps_per_second": 0.861,
      "step": 18
    },
    {
      "epoch": 0.07216494845360824,
      "grad_norm": 2.133312702178955,
      "learning_rate": 9.635919272833938e-05,
      "loss": 2.9592,
      "step": 21
    },
    {
      "epoch": 0.08247422680412371,
      "grad_norm": 2.0654103755950928,
      "learning_rate": 9.414737964294636e-05,
      "loss": 3.2154,
      "step": 24
    },
    {
      "epoch": 0.09278350515463918,
      "grad_norm": 1.8565651178359985,
      "learning_rate": 9.145187862775209e-05,
      "loss": 2.9275,
      "step": 27
    },
    {
      "epoch": 0.09278350515463918,
      "eval_loss": 0.7440634965896606,
      "eval_runtime": 72.0175,
      "eval_samples_per_second": 6.804,
      "eval_steps_per_second": 0.861,
      "step": 27
    },
    {
      "epoch": 0.10309278350515463,
      "grad_norm": 2.0615859031677246,
      "learning_rate": 8.83022221559489e-05,
      "loss": 2.9618,
      "step": 30
    },
    {
      "epoch": 0.1134020618556701,
      "grad_norm": 2.0000863075256348,
      "learning_rate": 8.473291852294987e-05,
      "loss": 3.0231,
      "step": 33
    },
    {
      "epoch": 0.12371134020618557,
      "grad_norm": 1.8480544090270996,
      "learning_rate": 8.07830737662829e-05,
      "loss": 2.8036,
      "step": 36
    },
    {
      "epoch": 0.12371134020618557,
      "eval_loss": 0.727074146270752,
      "eval_runtime": 72.0101,
      "eval_samples_per_second": 6.805,
      "eval_steps_per_second": 0.861,
      "step": 36
    },
    {
      "epoch": 0.13402061855670103,
      "grad_norm": 2.198150634765625,
      "learning_rate": 7.649596321166024e-05,
      "loss": 2.795,
      "step": 39
    },
    {
      "epoch": 0.14432989690721648,
      "grad_norm": 1.8142603635787964,
      "learning_rate": 7.191855733945387e-05,
      "loss": 2.7169,
      "step": 42
    },
    {
      "epoch": 0.15463917525773196,
      "grad_norm": 1.9104666709899902,
      "learning_rate": 6.710100716628344e-05,
      "loss": 2.8454,
      "step": 45
    },
    {
      "epoch": 0.15463917525773196,
      "eval_loss": 0.720156729221344,
      "eval_runtime": 72.0724,
      "eval_samples_per_second": 6.799,
      "eval_steps_per_second": 0.86,
      "step": 45
    },
    {
      "epoch": 0.16494845360824742,
      "grad_norm": 3.0694377422332764,
      "learning_rate": 6.209609477998338e-05,
      "loss": 3.0856,
      "step": 48
    },
    {
      "epoch": 0.17525773195876287,
      "grad_norm": 1.9787322282791138,
      "learning_rate": 5.695865504800327e-05,
      "loss": 2.83,
      "step": 51
    },
    {
      "epoch": 0.18556701030927836,
      "grad_norm": 1.9755208492279053,
      "learning_rate": 5.174497483512506e-05,
      "loss": 2.765,
      "step": 54
    },
    {
      "epoch": 0.18556701030927836,
      "eval_loss": 0.7139042615890503,
      "eval_runtime": 72.0348,
      "eval_samples_per_second": 6.802,
      "eval_steps_per_second": 0.861,
      "step": 54
    },
    {
      "epoch": 0.1958762886597938,
      "grad_norm": 1.9179588556289673,
      "learning_rate": 4.6512176312793736e-05,
      "loss": 2.9697,
      "step": 57
    },
    {
      "epoch": 0.20618556701030927,
      "grad_norm": 2.107278823852539,
      "learning_rate": 4.131759111665349e-05,
      "loss": 3.0271,
      "step": 60
    },
    {
      "epoch": 0.21649484536082475,
      "grad_norm": 1.985304832458496,
      "learning_rate": 3.6218132209150045e-05,
      "loss": 2.799,
      "step": 63
    },
    {
      "epoch": 0.21649484536082475,
      "eval_loss": 0.7117459177970886,
      "eval_runtime": 71.9674,
      "eval_samples_per_second": 6.809,
      "eval_steps_per_second": 0.862,
      "step": 63
    },
    {
      "epoch": 0.2268041237113402,
      "grad_norm": 2.0945565700531006,
      "learning_rate": 3.12696703292044e-05,
      "loss": 2.8955,
      "step": 66
    },
    {
      "epoch": 0.23711340206185566,
      "grad_norm": 2.2720227241516113,
      "learning_rate": 2.6526421860705473e-05,
      "loss": 2.9467,
      "step": 69
    },
    {
      "epoch": 0.24742268041237114,
      "grad_norm": 1.7193487882614136,
      "learning_rate": 2.2040354826462668e-05,
      "loss": 2.9671,
      "step": 72
    },
    {
      "epoch": 0.24742268041237114,
      "eval_loss": 0.7079923748970032,
      "eval_runtime": 72.1273,
      "eval_samples_per_second": 6.794,
      "eval_steps_per_second": 0.86,
      "step": 72
    },
    {
      "epoch": 0.25773195876288657,
      "grad_norm": 1.9435456991195679,
      "learning_rate": 1.7860619515673033e-05,
      "loss": 3.1961,
      "step": 75
    },
    {
      "epoch": 0.26804123711340205,
      "grad_norm": 3.1002867221832275,
      "learning_rate": 1.4033009983067452e-05,
      "loss": 3.0302,
      "step": 78
    },
    {
      "epoch": 0.27835051546391754,
      "grad_norm": 1.6273797750473022,
      "learning_rate": 1.0599462319663905e-05,
      "loss": 2.8564,
      "step": 81
    },
    {
      "epoch": 0.27835051546391754,
      "eval_loss": 0.7072886228561401,
      "eval_runtime": 72.0658,
      "eval_samples_per_second": 6.799,
      "eval_steps_per_second": 0.86,
      "step": 81
    },
    {
      "epoch": 0.28865979381443296,
      "grad_norm": 1.9313831329345703,
      "learning_rate": 7.597595192178702e-06,
      "loss": 2.6863,
      "step": 84
    },
    {
      "epoch": 0.29896907216494845,
      "grad_norm": 1.7214744091033936,
      "learning_rate": 5.060297685041659e-06,
      "loss": 2.9589,
      "step": 87
    },
    {
      "epoch": 0.30927835051546393,
      "grad_norm": 1.789853811264038,
      "learning_rate": 3.0153689607045845e-06,
      "loss": 3.0606,
      "step": 90
    },
    {
      "epoch": 0.30927835051546393,
      "eval_loss": 0.7060263752937317,
      "eval_runtime": 72.1222,
      "eval_samples_per_second": 6.794,
      "eval_steps_per_second": 0.86,
      "step": 90
    },
    {
      "epoch": 0.31958762886597936,
      "grad_norm": 1.5414116382598877,
      "learning_rate": 1.4852136862001764e-06,
      "loss": 2.7664,
      "step": 93
    },
    {
      "epoch": 0.32989690721649484,
      "grad_norm": 1.7637780904769897,
      "learning_rate": 4.865965629214819e-07,
      "loss": 2.8141,
      "step": 96
    },
    {
      "epoch": 0.3402061855670103,
      "grad_norm": 1.83016836643219,
      "learning_rate": 3.04586490452119e-08,
      "loss": 2.8253,
      "step": 99
    },
    {
      "epoch": 0.3402061855670103,
      "eval_loss": 0.7058959603309631,
      "eval_runtime": 71.8781,
      "eval_samples_per_second": 6.817,
      "eval_steps_per_second": 0.863,
      "step": 99
    }
  ],
  "logging_steps": 3,
  "max_steps": 100,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 9,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 3.5224119259693056e+17,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}
|
|