|
{ |
|
"best_metric": null, |
|
"best_model_checkpoint": null, |
|
"epoch": 1.0101010101010102, |
|
"eval_steps": 19, |
|
"global_step": 75, |
|
"is_hyper_param_search": false, |
|
"is_local_process_zero": true, |
|
"is_world_process_zero": true, |
|
"log_history": [ |
|
{ |
|
"epoch": 0.013468013468013467, |
|
      "grad_norm": null,
|
"learning_rate": 1e-05, |
|
"loss": 0.0, |
|
"step": 1 |
|
}, |
|
{ |
|
"epoch": 0.013468013468013467, |
|
      "eval_loss": null,
|
"eval_runtime": 150.507, |
|
"eval_samples_per_second": 0.831, |
|
"eval_steps_per_second": 0.419, |
|
"step": 1 |
|
}, |
|
{ |
|
"epoch": 0.026936026936026935, |
|
      "grad_norm": null,
|
"learning_rate": 2e-05, |
|
"loss": 0.0, |
|
"step": 2 |
|
}, |
|
{ |
|
"epoch": 0.04040404040404041, |
|
      "grad_norm": null,
|
"learning_rate": 3e-05, |
|
"loss": 0.0, |
|
"step": 3 |
|
}, |
|
{ |
|
"epoch": 0.05387205387205387, |
|
      "grad_norm": null,
|
"learning_rate": 4e-05, |
|
"loss": 0.0, |
|
"step": 4 |
|
}, |
|
{ |
|
"epoch": 0.06734006734006734, |
|
      "grad_norm": null,
|
"learning_rate": 5e-05, |
|
"loss": 0.0, |
|
"step": 5 |
|
}, |
|
{ |
|
"epoch": 0.08080808080808081, |
|
      "grad_norm": null,
|
"learning_rate": 6e-05, |
|
"loss": 0.0, |
|
"step": 6 |
|
}, |
|
{ |
|
"epoch": 0.09427609427609428, |
|
      "grad_norm": null,
|
"learning_rate": 7e-05, |
|
"loss": 0.0, |
|
"step": 7 |
|
}, |
|
{ |
|
"epoch": 0.10774410774410774, |
|
      "grad_norm": null,
|
"learning_rate": 8e-05, |
|
"loss": 0.0, |
|
"step": 8 |
|
}, |
|
{ |
|
"epoch": 0.12121212121212122, |
|
      "grad_norm": null,
|
"learning_rate": 9e-05, |
|
"loss": 0.0, |
|
"step": 9 |
|
}, |
|
{ |
|
"epoch": 0.13468013468013468, |
|
      "grad_norm": null,
|
"learning_rate": 0.0001, |
|
"loss": 0.0, |
|
"step": 10 |
|
}, |
|
{ |
|
"epoch": 0.14814814814814814, |
|
      "grad_norm": null,
|
"learning_rate": 9.994161134161634e-05, |
|
"loss": 0.0, |
|
"step": 11 |
|
}, |
|
{ |
|
"epoch": 0.16161616161616163, |
|
      "grad_norm": null,
|
"learning_rate": 9.976658173588244e-05, |
|
"loss": 0.0, |
|
"step": 12 |
|
}, |
|
{ |
|
"epoch": 0.1750841750841751, |
|
      "grad_norm": null,
|
"learning_rate": 9.947531997255256e-05, |
|
"loss": 0.0, |
|
"step": 13 |
|
}, |
|
{ |
|
"epoch": 0.18855218855218855, |
|
      "grad_norm": null,
|
"learning_rate": 9.906850630697068e-05, |
|
"loss": 0.0, |
|
"step": 14 |
|
}, |
|
{ |
|
"epoch": 0.20202020202020202, |
|
      "grad_norm": null,
|
"learning_rate": 9.85470908713026e-05, |
|
"loss": 0.0, |
|
"step": 15 |
|
}, |
|
{ |
|
"epoch": 0.21548821548821548, |
|
      "grad_norm": null,
|
"learning_rate": 9.791229145545831e-05, |
|
"loss": 0.0, |
|
"step": 16 |
|
}, |
|
{ |
|
"epoch": 0.22895622895622897, |
|
      "grad_norm": null,
|
"learning_rate": 9.716559066288715e-05, |
|
"loss": 0.0, |
|
"step": 17 |
|
}, |
|
{ |
|
"epoch": 0.24242424242424243, |
|
      "grad_norm": null,
|
"learning_rate": 9.630873244788883e-05, |
|
"loss": 0.0, |
|
"step": 18 |
|
}, |
|
{ |
|
"epoch": 0.2558922558922559, |
|
      "grad_norm": null,
|
"learning_rate": 9.534371804252728e-05, |
|
"loss": 0.0, |
|
"step": 19 |
|
}, |
|
{ |
|
"epoch": 0.2558922558922559, |
|
      "eval_loss": null,
|
"eval_runtime": 4.1562, |
|
"eval_samples_per_second": 30.076, |
|
"eval_steps_per_second": 15.158, |
|
"step": 19 |
|
}, |
|
{ |
|
"epoch": 0.26936026936026936, |
|
      "grad_norm": null,
|
"learning_rate": 9.42728012826605e-05, |
|
"loss": 0.0, |
|
"step": 20 |
|
}, |
|
{ |
|
"epoch": 0.2828282828282828, |
|
      "grad_norm": null,
|
"learning_rate": 9.309848334400246e-05, |
|
"loss": 0.0, |
|
"step": 21 |
|
}, |
|
{ |
|
"epoch": 0.2962962962962963, |
|
      "grad_norm": null,
|
"learning_rate": 9.182350690051133e-05, |
|
"loss": 0.0, |
|
"step": 22 |
|
}, |
|
{ |
|
"epoch": 0.30976430976430974, |
|
      "grad_norm": null,
|
"learning_rate": 9.045084971874738e-05, |
|
"loss": 0.0, |
|
"step": 23 |
|
}, |
|
{ |
|
"epoch": 0.32323232323232326, |
|
      "grad_norm": null,
|
"learning_rate": 8.898371770316111e-05, |
|
"loss": 0.0, |
|
"step": 24 |
|
}, |
|
{ |
|
"epoch": 0.3367003367003367, |
|
      "grad_norm": null,
|
"learning_rate": 8.742553740855506e-05, |
|
"loss": 0.0, |
|
"step": 25 |
|
}, |
|
{ |
|
"epoch": 0.3501683501683502, |
|
      "grad_norm": null,
|
"learning_rate": 8.577994803720606e-05, |
|
"loss": 0.0, |
|
"step": 26 |
|
}, |
|
{ |
|
"epoch": 0.36363636363636365, |
|
      "grad_norm": null,
|
"learning_rate": 8.405079293933986e-05, |
|
"loss": 0.0, |
|
"step": 27 |
|
}, |
|
{ |
|
"epoch": 0.3771043771043771, |
|
      "grad_norm": null,
|
"learning_rate": 8.224211063680853e-05, |
|
"loss": 0.0, |
|
"step": 28 |
|
}, |
|
{ |
|
"epoch": 0.39057239057239057, |
|
      "grad_norm": null,
|
"learning_rate": 8.035812539093557e-05, |
|
"loss": 0.0, |
|
"step": 29 |
|
}, |
|
{ |
|
"epoch": 0.40404040404040403, |
|
      "grad_norm": null,
|
"learning_rate": 7.840323733655778e-05, |
|
"loss": 0.0, |
|
"step": 30 |
|
}, |
|
{ |
|
"epoch": 0.4175084175084175, |
|
      "grad_norm": null,
|
"learning_rate": 7.638201220530665e-05, |
|
"loss": 0.0, |
|
"step": 31 |
|
}, |
|
{ |
|
"epoch": 0.43097643097643096, |
|
      "grad_norm": null,
|
"learning_rate": 7.42991706621303e-05, |
|
"loss": 0.0, |
|
"step": 32 |
|
}, |
|
{ |
|
"epoch": 0.4444444444444444, |
|
      "grad_norm": null,
|
"learning_rate": 7.215957727996207e-05, |
|
"loss": 0.0, |
|
"step": 33 |
|
}, |
|
{ |
|
"epoch": 0.45791245791245794, |
|
      "grad_norm": null,
|
"learning_rate": 6.996822917828477e-05, |
|
"loss": 0.0, |
|
"step": 34 |
|
}, |
|
{ |
|
"epoch": 0.4713804713804714, |
|
      "grad_norm": null,
|
"learning_rate": 6.773024435212678e-05, |
|
"loss": 0.0, |
|
"step": 35 |
|
}, |
|
{ |
|
"epoch": 0.48484848484848486, |
|
      "grad_norm": null,
|
"learning_rate": 6.545084971874738e-05, |
|
"loss": 0.0, |
|
"step": 36 |
|
}, |
|
{ |
|
"epoch": 0.4983164983164983, |
|
      "grad_norm": null,
|
"learning_rate": 6.313536890992935e-05, |
|
"loss": 0.0, |
|
"step": 37 |
|
}, |
|
{ |
|
"epoch": 0.5117845117845118, |
|
      "grad_norm": null,
|
"learning_rate": 6.078920983839031e-05, |
|
"loss": 0.0, |
|
"step": 38 |
|
}, |
|
{ |
|
"epoch": 0.5117845117845118, |
|
      "eval_loss": null,
|
"eval_runtime": 4.1693, |
|
"eval_samples_per_second": 29.981, |
|
"eval_steps_per_second": 15.11, |
|
"step": 38 |
|
}, |
|
{ |
|
"epoch": 0.5252525252525253, |
|
      "grad_norm": null,
|
"learning_rate": 5.841785206735192e-05, |
|
"loss": 0.0, |
|
"step": 39 |
|
}, |
|
{ |
|
"epoch": 0.5387205387205387, |
|
      "grad_norm": null,
|
"learning_rate": 5.602683401276615e-05, |
|
"loss": 0.0, |
|
"step": 40 |
|
}, |
|
{ |
|
"epoch": 0.5521885521885522, |
|
      "grad_norm": null,
|
"learning_rate": 5.3621740008088126e-05, |
|
"loss": 0.0, |
|
"step": 41 |
|
}, |
|
{ |
|
"epoch": 0.5656565656565656, |
|
      "grad_norm": null,
|
"learning_rate": 5.1208187261806615e-05, |
|
"loss": 0.0, |
|
"step": 42 |
|
}, |
|
{ |
|
"epoch": 0.5791245791245792, |
|
      "grad_norm": null,
|
"learning_rate": 4.87918127381934e-05, |
|
"loss": 0.0, |
|
"step": 43 |
|
}, |
|
{ |
|
"epoch": 0.5925925925925926, |
|
      "grad_norm": null,
|
"learning_rate": 4.6378259991911886e-05, |
|
"loss": 0.0, |
|
"step": 44 |
|
}, |
|
{ |
|
"epoch": 0.6060606060606061, |
|
      "grad_norm": null,
|
"learning_rate": 4.397316598723385e-05, |
|
"loss": 0.0, |
|
"step": 45 |
|
}, |
|
{ |
|
"epoch": 0.6195286195286195, |
|
      "grad_norm": null,
|
"learning_rate": 4.1582147932648074e-05, |
|
"loss": 0.0, |
|
"step": 46 |
|
}, |
|
{ |
|
"epoch": 0.632996632996633, |
|
      "grad_norm": null,
|
"learning_rate": 3.92107901616097e-05, |
|
"loss": 0.0, |
|
"step": 47 |
|
}, |
|
{ |
|
"epoch": 0.6464646464646465, |
|
      "grad_norm": null,
|
"learning_rate": 3.6864631090070655e-05, |
|
"loss": 0.0, |
|
"step": 48 |
|
}, |
|
{ |
|
"epoch": 0.6599326599326599, |
|
      "grad_norm": null,
|
"learning_rate": 3.4549150281252636e-05, |
|
"loss": 0.0, |
|
"step": 49 |
|
}, |
|
{ |
|
"epoch": 0.6734006734006734, |
|
      "grad_norm": null,
|
"learning_rate": 3.226975564787322e-05, |
|
"loss": 0.0, |
|
"step": 50 |
|
}, |
|
{ |
|
"epoch": 0.6868686868686869, |
|
      "grad_norm": null,
|
"learning_rate": 3.003177082171523e-05, |
|
"loss": 0.0, |
|
"step": 51 |
|
}, |
|
{ |
|
"epoch": 0.7003367003367004, |
|
      "grad_norm": null,
|
"learning_rate": 2.784042272003794e-05, |
|
"loss": 0.0, |
|
"step": 52 |
|
}, |
|
{ |
|
"epoch": 0.7138047138047138, |
|
      "grad_norm": null,
|
"learning_rate": 2.57008293378697e-05, |
|
"loss": 0.0, |
|
"step": 53 |
|
}, |
|
{ |
|
"epoch": 0.7272727272727273, |
|
      "grad_norm": null,
|
"learning_rate": 2.361798779469336e-05, |
|
"loss": 0.0, |
|
"step": 54 |
|
}, |
|
{ |
|
"epoch": 0.7407407407407407, |
|
      "grad_norm": null,
|
"learning_rate": 2.1596762663442218e-05, |
|
"loss": 0.0, |
|
"step": 55 |
|
}, |
|
{ |
|
"epoch": 0.7542087542087542, |
|
      "grad_norm": null,
|
"learning_rate": 1.9641874609064443e-05, |
|
"loss": 0.0, |
|
"step": 56 |
|
}, |
|
{ |
|
"epoch": 0.7676767676767676, |
|
      "grad_norm": null,
|
"learning_rate": 1.7757889363191483e-05, |
|
"loss": 0.0, |
|
"step": 57 |
|
}, |
|
{ |
|
"epoch": 0.7676767676767676, |
|
      "eval_loss": null,
|
"eval_runtime": 6.5724, |
|
"eval_samples_per_second": 19.019, |
|
"eval_steps_per_second": 9.586, |
|
"step": 57 |
|
}, |
|
{ |
|
"epoch": 0.7811447811447811, |
|
      "grad_norm": null,
|
"learning_rate": 1.5949207060660138e-05, |
|
"loss": 0.0, |
|
"step": 58 |
|
}, |
|
{ |
|
"epoch": 0.7946127946127947, |
|
      "grad_norm": null,
|
"learning_rate": 1.422005196279395e-05, |
|
"loss": 0.0, |
|
"step": 59 |
|
}, |
|
{ |
|
"epoch": 0.8080808080808081, |
|
      "grad_norm": null,
|
"learning_rate": 1.257446259144494e-05, |
|
"loss": 0.0, |
|
"step": 60 |
|
}, |
|
{ |
|
"epoch": 0.8215488215488216, |
|
      "grad_norm": null,
|
"learning_rate": 1.1016282296838887e-05, |
|
"loss": 0.0, |
|
"step": 61 |
|
}, |
|
{ |
|
"epoch": 0.835016835016835, |
|
      "grad_norm": null,
|
"learning_rate": 9.549150281252633e-06, |
|
"loss": 0.0, |
|
"step": 62 |
|
}, |
|
{ |
|
"epoch": 0.8484848484848485, |
|
      "grad_norm": null,
|
"learning_rate": 8.176493099488663e-06, |
|
"loss": 0.0, |
|
"step": 63 |
|
}, |
|
{ |
|
"epoch": 0.8619528619528619, |
|
      "grad_norm": null,
|
"learning_rate": 6.901516655997536e-06, |
|
"loss": 0.0, |
|
"step": 64 |
|
}, |
|
{ |
|
"epoch": 0.8754208754208754, |
|
      "grad_norm": null,
|
"learning_rate": 5.727198717339511e-06, |
|
"loss": 0.0, |
|
"step": 65 |
|
}, |
|
{ |
|
"epoch": 0.8888888888888888, |
|
      "grad_norm": null,
|
"learning_rate": 4.65628195747273e-06, |
|
"loss": 0.0, |
|
"step": 66 |
|
}, |
|
{ |
|
"epoch": 0.9023569023569024, |
|
      "grad_norm": null,
|
"learning_rate": 3.691267552111183e-06, |
|
"loss": 0.0, |
|
"step": 67 |
|
}, |
|
{ |
|
"epoch": 0.9158249158249159, |
|
      "grad_norm": null,
|
"learning_rate": 2.8344093371128424e-06, |
|
"loss": 0.0, |
|
"step": 68 |
|
}, |
|
{ |
|
"epoch": 0.9292929292929293, |
|
      "grad_norm": null,
|
"learning_rate": 2.087708544541689e-06, |
|
"loss": 0.0, |
|
"step": 69 |
|
}, |
|
{ |
|
"epoch": 0.9427609427609428, |
|
      "grad_norm": null,
|
"learning_rate": 1.4529091286973995e-06, |
|
"loss": 0.0, |
|
"step": 70 |
|
}, |
|
{ |
|
"epoch": 0.9562289562289562, |
|
      "grad_norm": null,
|
"learning_rate": 9.314936930293283e-07, |
|
"loss": 0.0, |
|
"step": 71 |
|
}, |
|
{ |
|
"epoch": 0.9696969696969697, |
|
      "grad_norm": null,
|
"learning_rate": 5.246800274474439e-07, |
|
"loss": 0.0, |
|
"step": 72 |
|
}, |
|
{ |
|
"epoch": 0.9831649831649831, |
|
      "grad_norm": null,
|
"learning_rate": 2.334182641175686e-07, |
|
"loss": 0.0, |
|
"step": 73 |
|
}, |
|
{ |
|
"epoch": 0.9966329966329966, |
|
      "grad_norm": null,
|
"learning_rate": 5.838865838366792e-08, |
|
"loss": 0.0, |
|
"step": 74 |
|
}, |
|
{ |
|
"epoch": 1.0101010101010102, |
|
      "grad_norm": null,
|
"learning_rate": 0.0, |
|
"loss": 0.0, |
|
"step": 75 |
|
} |
|
], |
|
"logging_steps": 1, |
|
"max_steps": 75, |
|
"num_input_tokens_seen": 0, |
|
"num_train_epochs": 2, |
|
"save_steps": 19, |
|
"stateful_callbacks": { |
|
"TrainerControl": { |
|
"args": { |
|
"should_epoch_stop": false, |
|
"should_evaluate": false, |
|
"should_log": false, |
|
"should_save": true, |
|
"should_training_stop": true |
|
}, |
|
"attributes": {} |
|
} |
|
}, |
|
"total_flos": 7.83543405945815e+16, |
|
"train_batch_size": 2, |
|
"trial_name": null, |
|
"trial_params": null |
|
} |
|
|