{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.19461563412260785,
  "eval_steps": 25,
  "global_step": 75,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0025948751216347712,
      "grad_norm": 114.6451644897461,
      "learning_rate": 3.3333333333333335e-05,
      "loss": 86.6227,
      "step": 1
    },
    {
      "epoch": 0.0025948751216347712,
      "eval_loss": 11.188401222229004,
      "eval_runtime": 240.2405,
      "eval_samples_per_second": 1.353,
      "eval_steps_per_second": 0.678,
      "step": 1
    },
    {
      "epoch": 0.0051897502432695424,
      "grad_norm": 110.34148406982422,
      "learning_rate": 6.666666666666667e-05,
      "loss": 86.6639,
      "step": 2
    },
    {
      "epoch": 0.007784625364904314,
      "grad_norm": 140.974365234375,
      "learning_rate": 0.0001,
      "loss": 86.2836,
      "step": 3
    },
    {
      "epoch": 0.010379500486539085,
      "grad_norm": 107.53729248046875,
      "learning_rate": 9.99524110790929e-05,
      "loss": 76.8209,
      "step": 4
    },
    {
      "epoch": 0.012974375608173857,
      "grad_norm": 58.06427001953125,
      "learning_rate": 9.980973490458728e-05,
      "loss": 70.0069,
      "step": 5
    },
    {
      "epoch": 0.015569250729808628,
      "grad_norm": 63.29356384277344,
      "learning_rate": 9.957224306869053e-05,
      "loss": 53.7268,
      "step": 6
    },
    {
      "epoch": 0.018164125851443398,
      "grad_norm": 64.70842742919922,
      "learning_rate": 9.924038765061042e-05,
      "loss": 43.4032,
      "step": 7
    },
    {
      "epoch": 0.02075900097307817,
      "grad_norm": 66.98622131347656,
      "learning_rate": 9.881480035599667e-05,
      "loss": 30.8297,
      "step": 8
    },
    {
      "epoch": 0.02335387609471294,
      "grad_norm": 75.47815704345703,
      "learning_rate": 9.829629131445342e-05,
      "loss": 19.1012,
      "step": 9
    },
    {
      "epoch": 0.025948751216347713,
      "grad_norm": 59.91986083984375,
      "learning_rate": 9.768584753741134e-05,
      "loss": 7.854,
      "step": 10
    },
    {
      "epoch": 0.028543626337982485,
      "grad_norm": 61.32648468017578,
      "learning_rate": 9.698463103929542e-05,
      "loss": 5.9719,
      "step": 11
    },
    {
      "epoch": 0.031138501459617256,
      "grad_norm": 50.40642547607422,
      "learning_rate": 9.619397662556435e-05,
      "loss": 3.8169,
      "step": 12
    },
    {
      "epoch": 0.03373337658125203,
      "grad_norm": 39.5262336730957,
      "learning_rate": 9.53153893518325e-05,
      "loss": 3.6317,
      "step": 13
    },
    {
      "epoch": 0.036328251702886796,
      "grad_norm": 34.03517150878906,
      "learning_rate": 9.435054165891109e-05,
      "loss": 2.619,
      "step": 14
    },
    {
      "epoch": 0.03892312682452157,
      "grad_norm": 40.00255584716797,
      "learning_rate": 9.330127018922194e-05,
      "loss": 1.9728,
      "step": 15
    },
    {
      "epoch": 0.04151800194615634,
      "grad_norm": 62.94084167480469,
      "learning_rate": 9.21695722906443e-05,
      "loss": 2.668,
      "step": 16
    },
    {
      "epoch": 0.044112877067791115,
      "grad_norm": 64.5201416015625,
      "learning_rate": 9.09576022144496e-05,
      "loss": 3.8366,
      "step": 17
    },
    {
      "epoch": 0.04670775218942588,
      "grad_norm": 39.453330993652344,
      "learning_rate": 8.966766701456177e-05,
      "loss": 1.834,
      "step": 18
    },
    {
      "epoch": 0.04930262731106066,
      "grad_norm": 74.45320892333984,
      "learning_rate": 8.83022221559489e-05,
      "loss": 3.1071,
      "step": 19
    },
    {
      "epoch": 0.051897502432695426,
      "grad_norm": 119.65555572509766,
      "learning_rate": 8.68638668405062e-05,
      "loss": 5.0249,
      "step": 20
    },
    {
      "epoch": 0.054492377554330194,
      "grad_norm": 57.47029113769531,
      "learning_rate": 8.535533905932738e-05,
      "loss": 2.8011,
      "step": 21
    },
    {
      "epoch": 0.05708725267596497,
      "grad_norm": 23.485986709594727,
      "learning_rate": 8.377951038078302e-05,
      "loss": 1.008,
      "step": 22
    },
    {
      "epoch": 0.05968212779759974,
      "grad_norm": 15.74601936340332,
      "learning_rate": 8.213938048432697e-05,
      "loss": 0.6645,
      "step": 23
    },
    {
      "epoch": 0.06227700291923451,
      "grad_norm": 35.179073333740234,
      "learning_rate": 8.043807145043604e-05,
      "loss": 3.0041,
      "step": 24
    },
    {
      "epoch": 0.06487187804086929,
      "grad_norm": 35.2408332824707,
      "learning_rate": 7.86788218175523e-05,
      "loss": 1.1321,
      "step": 25
    },
    {
      "epoch": 0.06487187804086929,
      "eval_loss": 0.19091396033763885,
      "eval_runtime": 240.1741,
      "eval_samples_per_second": 1.353,
      "eval_steps_per_second": 0.679,
      "step": 25
    },
    {
      "epoch": 0.06746675316250406,
      "grad_norm": 22.732812881469727,
      "learning_rate": 7.68649804173412e-05,
      "loss": 0.6565,
      "step": 26
    },
    {
      "epoch": 0.07006162828413882,
      "grad_norm": 9.80388355255127,
      "learning_rate": 7.500000000000001e-05,
      "loss": 0.2933,
      "step": 27
    },
    {
      "epoch": 0.07265650340577359,
      "grad_norm": 14.081550598144531,
      "learning_rate": 7.308743066175172e-05,
      "loss": 0.4886,
      "step": 28
    },
    {
      "epoch": 0.07525137852740837,
      "grad_norm": 16.663516998291016,
      "learning_rate": 7.113091308703498e-05,
      "loss": 0.7272,
      "step": 29
    },
    {
      "epoch": 0.07784625364904314,
      "grad_norm": 15.300065040588379,
      "learning_rate": 6.91341716182545e-05,
      "loss": 0.4666,
      "step": 30
    },
    {
      "epoch": 0.08044112877067791,
      "grad_norm": 40.297645568847656,
      "learning_rate": 6.710100716628344e-05,
      "loss": 1.5482,
      "step": 31
    },
    {
      "epoch": 0.08303600389231268,
      "grad_norm": 11.791970252990723,
      "learning_rate": 6.503528997521366e-05,
      "loss": 1.3021,
      "step": 32
    },
    {
      "epoch": 0.08563087901394745,
      "grad_norm": 34.81112289428711,
      "learning_rate": 6.294095225512603e-05,
      "loss": 0.5804,
      "step": 33
    },
    {
      "epoch": 0.08822575413558223,
      "grad_norm": 12.368243217468262,
      "learning_rate": 6.0821980696905146e-05,
      "loss": 1.5155,
      "step": 34
    },
    {
      "epoch": 0.090820629257217,
      "grad_norm": 19.11678123474121,
      "learning_rate": 5.868240888334653e-05,
      "loss": 0.9189,
      "step": 35
    },
    {
      "epoch": 0.09341550437885177,
      "grad_norm": 40.73845291137695,
      "learning_rate": 5.6526309611002594e-05,
      "loss": 4.2073,
      "step": 36
    },
    {
      "epoch": 0.09601037950048653,
      "grad_norm": 30.314462661743164,
      "learning_rate": 5.435778713738292e-05,
      "loss": 1.1171,
      "step": 37
    },
    {
      "epoch": 0.09860525462212132,
      "grad_norm": 13.374330520629883,
      "learning_rate": 5.218096936826681e-05,
      "loss": 0.3863,
      "step": 38
    },
    {
      "epoch": 0.10120012974375608,
      "grad_norm": 21.539278030395508,
      "learning_rate": 5e-05,
      "loss": 1.2575,
      "step": 39
    },
    {
      "epoch": 0.10379500486539085,
      "grad_norm": 8.351127624511719,
      "learning_rate": 4.781903063173321e-05,
      "loss": 0.2764,
      "step": 40
    },
    {
      "epoch": 0.10638987998702562,
      "grad_norm": 29.027982711791992,
      "learning_rate": 4.564221286261709e-05,
      "loss": 1.042,
      "step": 41
    },
    {
      "epoch": 0.10898475510866039,
      "grad_norm": 10.286849021911621,
      "learning_rate": 4.347369038899744e-05,
      "loss": 0.3774,
      "step": 42
    },
    {
      "epoch": 0.11157963023029517,
      "grad_norm": 2.650897264480591,
      "learning_rate": 4.131759111665349e-05,
      "loss": 0.0587,
      "step": 43
    },
    {
      "epoch": 0.11417450535192994,
      "grad_norm": 14.90556526184082,
      "learning_rate": 3.917801930309486e-05,
      "loss": 0.6228,
      "step": 44
    },
    {
      "epoch": 0.11676938047356471,
      "grad_norm": 3.8510780334472656,
      "learning_rate": 3.705904774487396e-05,
      "loss": 0.101,
      "step": 45
    },
    {
      "epoch": 0.11936425559519948,
      "grad_norm": 25.16055679321289,
      "learning_rate": 3.4964710024786354e-05,
      "loss": 0.8737,
      "step": 46
    },
    {
      "epoch": 0.12195913071683426,
      "grad_norm": 33.220306396484375,
      "learning_rate": 3.289899283371657e-05,
      "loss": 1.9121,
      "step": 47
    },
    {
      "epoch": 0.12455400583846903,
      "grad_norm": 30.670238494873047,
      "learning_rate": 3.086582838174551e-05,
      "loss": 1.5032,
      "step": 48
    },
    {
      "epoch": 0.1271488809601038,
      "grad_norm": 24.66783905029297,
      "learning_rate": 2.886908691296504e-05,
      "loss": 0.5967,
      "step": 49
    },
    {
      "epoch": 0.12974375608173858,
      "grad_norm": 48.0320930480957,
      "learning_rate": 2.6912569338248315e-05,
      "loss": 3.6341,
      "step": 50
    },
    {
      "epoch": 0.12974375608173858,
      "eval_loss": 0.18054018914699554,
      "eval_runtime": 239.9435,
      "eval_samples_per_second": 1.354,
      "eval_steps_per_second": 0.679,
      "step": 50
    },
    {
      "epoch": 0.13233863120337333,
      "grad_norm": 31.205846786499023,
      "learning_rate": 2.500000000000001e-05,
      "loss": 5.3168,
      "step": 51
    },
    {
      "epoch": 0.1349335063250081,
      "grad_norm": 14.114444732666016,
      "learning_rate": 2.3135019582658802e-05,
      "loss": 0.809,
      "step": 52
    },
    {
      "epoch": 0.13752838144664287,
      "grad_norm": 8.728169441223145,
      "learning_rate": 2.132117818244771e-05,
      "loss": 0.0702,
      "step": 53
    },
    {
      "epoch": 0.14012325656827765,
      "grad_norm": 33.120567321777344,
      "learning_rate": 1.9561928549563968e-05,
      "loss": 1.9649,
      "step": 54
    },
    {
      "epoch": 0.14271813168991243,
      "grad_norm": 16.788246154785156,
      "learning_rate": 1.7860619515673033e-05,
      "loss": 0.8312,
      "step": 55
    },
    {
      "epoch": 0.14531300681154719,
      "grad_norm": 14.110865592956543,
      "learning_rate": 1.622048961921699e-05,
      "loss": 0.5348,
      "step": 56
    },
    {
      "epoch": 0.14790788193318197,
      "grad_norm": 21.55290412902832,
      "learning_rate": 1.4644660940672627e-05,
      "loss": 0.8498,
      "step": 57
    },
    {
      "epoch": 0.15050275705481675,
      "grad_norm": 24.52911949157715,
      "learning_rate": 1.3136133159493802e-05,
      "loss": 0.9744,
      "step": 58
    },
    {
      "epoch": 0.1530976321764515,
      "grad_norm": 22.370573043823242,
      "learning_rate": 1.1697777844051105e-05,
      "loss": 0.7237,
      "step": 59
    },
    {
      "epoch": 0.15569250729808629,
      "grad_norm": 15.488335609436035,
      "learning_rate": 1.0332332985438248e-05,
      "loss": 0.4487,
      "step": 60
    },
    {
      "epoch": 0.15828738241972104,
      "grad_norm": 13.158798217773438,
      "learning_rate": 9.042397785550405e-06,
      "loss": 0.3464,
      "step": 61
    },
    {
      "epoch": 0.16088225754135582,
      "grad_norm": 1.3022491931915283,
      "learning_rate": 7.830427709355725e-06,
      "loss": 0.0314,
      "step": 62
    },
    {
      "epoch": 0.1634771326629906,
      "grad_norm": 5.222805023193359,
      "learning_rate": 6.698729810778065e-06,
      "loss": 0.119,
      "step": 63
    },
    {
      "epoch": 0.16607200778462536,
      "grad_norm": 6.309266090393066,
      "learning_rate": 5.649458341088915e-06,
      "loss": 0.1355,
      "step": 64
    },
    {
      "epoch": 0.16866688290626014,
      "grad_norm": 19.3297119140625,
      "learning_rate": 4.684610648167503e-06,
      "loss": 1.0106,
      "step": 65
    },
    {
      "epoch": 0.1712617580278949,
      "grad_norm": 58.562843322753906,
      "learning_rate": 3.8060233744356633e-06,
      "loss": 2.6547,
      "step": 66
    },
    {
      "epoch": 0.17385663314952968,
      "grad_norm": 88.08123016357422,
      "learning_rate": 3.0153689607045845e-06,
      "loss": 3.2678,
      "step": 67
    },
    {
      "epoch": 0.17645150827116446,
      "grad_norm": 8.794583320617676,
      "learning_rate": 2.314152462588659e-06,
      "loss": 0.2489,
      "step": 68
    },
    {
      "epoch": 0.1790463833927992,
      "grad_norm": 46.94881057739258,
      "learning_rate": 1.70370868554659e-06,
      "loss": 1.4205,
      "step": 69
    },
    {
      "epoch": 0.181641258514434,
      "grad_norm": 3.1083903312683105,
      "learning_rate": 1.1851996440033319e-06,
      "loss": 0.0701,
      "step": 70
    },
    {
      "epoch": 0.18423613363606878,
      "grad_norm": 12.472285270690918,
      "learning_rate": 7.596123493895991e-07,
      "loss": 0.2219,
      "step": 71
    },
    {
      "epoch": 0.18683100875770353,
      "grad_norm": 32.516963958740234,
      "learning_rate": 4.277569313094809e-07,
      "loss": 1.5728,
      "step": 72
    },
    {
      "epoch": 0.1894258838793383,
      "grad_norm": 50.7675666809082,
      "learning_rate": 1.9026509541272275e-07,
      "loss": 4.1427,
      "step": 73
    },
    {
      "epoch": 0.19202075900097307,
      "grad_norm": 43.09784698486328,
      "learning_rate": 4.7588920907110094e-08,
      "loss": 0.7928,
      "step": 74
    },
    {
      "epoch": 0.19461563412260785,
      "grad_norm": 45.91943359375,
      "learning_rate": 0.0,
      "loss": 2.4261,
      "step": 75
    },
    {
      "epoch": 0.19461563412260785,
      "eval_loss": 0.1303597092628479,
      "eval_runtime": 240.3643,
      "eval_samples_per_second": 1.352,
      "eval_steps_per_second": 0.678,
      "step": 75
    }
  ],
  "logging_steps": 1,
  "max_steps": 75,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 25,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 9.86091809144832e+16,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}