{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.4934210526315789,
  "eval_steps": 25,
  "global_step": 75,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.006578947368421052,
      "grad_norm": 4.683763027191162,
      "learning_rate": 3.3333333333333335e-05,
      "loss": 10.3874,
      "step": 1
    },
    {
      "epoch": 0.006578947368421052,
      "eval_loss": 1.5863150358200073,
      "eval_runtime": 12.1886,
      "eval_samples_per_second": 10.584,
      "eval_steps_per_second": 5.333,
      "step": 1
    },
    {
      "epoch": 0.013157894736842105,
      "grad_norm": 4.751871109008789,
      "learning_rate": 6.666666666666667e-05,
      "loss": 11.8587,
      "step": 2
    },
    {
      "epoch": 0.019736842105263157,
      "grad_norm": 5.826901435852051,
      "learning_rate": 0.0001,
      "loss": 12.0305,
      "step": 3
    },
    {
      "epoch": 0.02631578947368421,
      "grad_norm": 5.796934127807617,
      "learning_rate": 9.99524110790929e-05,
      "loss": 12.1736,
      "step": 4
    },
    {
      "epoch": 0.03289473684210526,
      "grad_norm": 5.535815715789795,
      "learning_rate": 9.980973490458728e-05,
      "loss": 10.7405,
      "step": 5
    },
    {
      "epoch": 0.039473684210526314,
      "grad_norm": 5.5817975997924805,
      "learning_rate": 9.957224306869053e-05,
      "loss": 11.2704,
      "step": 6
    },
    {
      "epoch": 0.046052631578947366,
      "grad_norm": 6.0977983474731445,
      "learning_rate": 9.924038765061042e-05,
      "loss": 10.8744,
      "step": 7
    },
    {
      "epoch": 0.05263157894736842,
      "grad_norm": 6.505789279937744,
      "learning_rate": 9.881480035599667e-05,
      "loss": 10.0401,
      "step": 8
    },
    {
      "epoch": 0.05921052631578947,
      "grad_norm": 5.587679386138916,
      "learning_rate": 9.829629131445342e-05,
      "loss": 11.4585,
      "step": 9
    },
    {
      "epoch": 0.06578947368421052,
      "grad_norm": 5.400643348693848,
      "learning_rate": 9.768584753741134e-05,
      "loss": 11.9279,
      "step": 10
    },
    {
      "epoch": 0.07236842105263158,
      "grad_norm": 5.925045013427734,
      "learning_rate": 9.698463103929542e-05,
      "loss": 8.0437,
      "step": 11
    },
    {
      "epoch": 0.07894736842105263,
      "grad_norm": 5.797214984893799,
      "learning_rate": 9.619397662556435e-05,
      "loss": 11.5078,
      "step": 12
    },
    {
      "epoch": 0.08552631578947369,
      "grad_norm": 5.878451347351074,
      "learning_rate": 9.53153893518325e-05,
      "loss": 11.8092,
      "step": 13
    },
    {
      "epoch": 0.09210526315789473,
      "grad_norm": 6.047119617462158,
      "learning_rate": 9.435054165891109e-05,
      "loss": 11.2271,
      "step": 14
    },
    {
      "epoch": 0.09868421052631579,
      "grad_norm": 6.350561618804932,
      "learning_rate": 9.330127018922194e-05,
      "loss": 11.6742,
      "step": 15
    },
    {
      "epoch": 0.10526315789473684,
      "grad_norm": 6.4842047691345215,
      "learning_rate": 9.21695722906443e-05,
      "loss": 9.9198,
      "step": 16
    },
    {
      "epoch": 0.1118421052631579,
      "grad_norm": 6.810247898101807,
      "learning_rate": 9.09576022144496e-05,
      "loss": 10.6038,
      "step": 17
    },
    {
      "epoch": 0.11842105263157894,
      "grad_norm": 5.906208515167236,
      "learning_rate": 8.966766701456177e-05,
      "loss": 11.2307,
      "step": 18
    },
    {
      "epoch": 0.125,
      "grad_norm": 6.2693772315979,
      "learning_rate": 8.83022221559489e-05,
      "loss": 13.0133,
      "step": 19
    },
    {
      "epoch": 0.13157894736842105,
      "grad_norm": 6.6559271812438965,
      "learning_rate": 8.68638668405062e-05,
      "loss": 10.8347,
      "step": 20
    },
    {
      "epoch": 0.13815789473684212,
      "grad_norm": 6.143450736999512,
      "learning_rate": 8.535533905932738e-05,
      "loss": 12.0125,
      "step": 21
    },
    {
      "epoch": 0.14473684210526316,
      "grad_norm": 6.3912224769592285,
      "learning_rate": 8.377951038078302e-05,
      "loss": 10.9285,
      "step": 22
    },
    {
      "epoch": 0.1513157894736842,
      "grad_norm": 7.889128684997559,
      "learning_rate": 8.213938048432697e-05,
      "loss": 11.2765,
      "step": 23
    },
    {
      "epoch": 0.15789473684210525,
      "grad_norm": 6.0514020919799805,
      "learning_rate": 8.043807145043604e-05,
      "loss": 11.144,
      "step": 24
    },
    {
      "epoch": 0.16447368421052633,
      "grad_norm": 7.878673553466797,
      "learning_rate": 7.86788218175523e-05,
      "loss": 11.0654,
      "step": 25
    },
    {
      "epoch": 0.16447368421052633,
      "eval_loss": 1.369149088859558,
      "eval_runtime": 12.2247,
      "eval_samples_per_second": 10.552,
      "eval_steps_per_second": 5.317,
      "step": 25
    },
    {
      "epoch": 0.17105263157894737,
      "grad_norm": 6.711698532104492,
      "learning_rate": 7.68649804173412e-05,
      "loss": 10.4598,
      "step": 26
    },
    {
      "epoch": 0.17763157894736842,
      "grad_norm": 5.988046646118164,
      "learning_rate": 7.500000000000001e-05,
      "loss": 12.4122,
      "step": 27
    },
    {
      "epoch": 0.18421052631578946,
      "grad_norm": 6.7396345138549805,
      "learning_rate": 7.308743066175172e-05,
      "loss": 10.6514,
      "step": 28
    },
    {
      "epoch": 0.19078947368421054,
      "grad_norm": 7.260731220245361,
      "learning_rate": 7.113091308703498e-05,
      "loss": 10.9607,
      "step": 29
    },
    {
      "epoch": 0.19736842105263158,
      "grad_norm": 6.341261863708496,
      "learning_rate": 6.91341716182545e-05,
      "loss": 12.1942,
      "step": 30
    },
    {
      "epoch": 0.20394736842105263,
      "grad_norm": 7.080831527709961,
      "learning_rate": 6.710100716628344e-05,
      "loss": 13.2871,
      "step": 31
    },
    {
      "epoch": 0.21052631578947367,
      "grad_norm": 9.200007438659668,
      "learning_rate": 6.503528997521366e-05,
      "loss": 11.3735,
      "step": 32
    },
    {
      "epoch": 0.21710526315789475,
      "grad_norm": 8.438835144042969,
      "learning_rate": 6.294095225512603e-05,
      "loss": 9.7034,
      "step": 33
    },
    {
      "epoch": 0.2236842105263158,
      "grad_norm": 8.435461044311523,
      "learning_rate": 6.0821980696905146e-05,
      "loss": 12.39,
      "step": 34
    },
    {
      "epoch": 0.23026315789473684,
      "grad_norm": 7.083141326904297,
      "learning_rate": 5.868240888334653e-05,
      "loss": 11.1461,
      "step": 35
    },
    {
      "epoch": 0.23684210526315788,
      "grad_norm": 6.454171657562256,
      "learning_rate": 5.6526309611002594e-05,
      "loss": 8.7345,
      "step": 36
    },
    {
      "epoch": 0.24342105263157895,
      "grad_norm": 8.99131965637207,
      "learning_rate": 5.435778713738292e-05,
      "loss": 10.1263,
      "step": 37
    },
    {
      "epoch": 0.25,
      "grad_norm": 9.920761108398438,
      "learning_rate": 5.218096936826681e-05,
      "loss": 9.2578,
      "step": 38
    },
    {
      "epoch": 0.2565789473684211,
      "grad_norm": 9.285501480102539,
      "learning_rate": 5e-05,
      "loss": 8.6679,
      "step": 39
    },
    {
      "epoch": 0.2631578947368421,
      "grad_norm": 9.787482261657715,
      "learning_rate": 4.781903063173321e-05,
      "loss": 8.7731,
      "step": 40
    },
    {
      "epoch": 0.26973684210526316,
      "grad_norm": 9.969244956970215,
      "learning_rate": 4.564221286261709e-05,
      "loss": 9.7417,
      "step": 41
    },
    {
      "epoch": 0.27631578947368424,
      "grad_norm": 11.214414596557617,
      "learning_rate": 4.347369038899744e-05,
      "loss": 10.0999,
      "step": 42
    },
    {
      "epoch": 0.28289473684210525,
      "grad_norm": 7.822074890136719,
      "learning_rate": 4.131759111665349e-05,
      "loss": 10.8718,
      "step": 43
    },
    {
      "epoch": 0.2894736842105263,
      "grad_norm": 10.223742485046387,
      "learning_rate": 3.917801930309486e-05,
      "loss": 10.8404,
      "step": 44
    },
    {
      "epoch": 0.29605263157894735,
      "grad_norm": 6.327630996704102,
      "learning_rate": 3.705904774487396e-05,
      "loss": 10.2936,
      "step": 45
    },
    {
      "epoch": 0.3026315789473684,
      "grad_norm": 5.0185651779174805,
      "learning_rate": 3.4964710024786354e-05,
      "loss": 8.2814,
      "step": 46
    },
    {
      "epoch": 0.3092105263157895,
      "grad_norm": 5.958381175994873,
      "learning_rate": 3.289899283371657e-05,
      "loss": 10.0383,
      "step": 47
    },
    {
      "epoch": 0.3157894736842105,
      "grad_norm": 5.123436450958252,
      "learning_rate": 3.086582838174551e-05,
      "loss": 12.5394,
      "step": 48
    },
    {
      "epoch": 0.3223684210526316,
      "grad_norm": 5.286769866943359,
      "learning_rate": 2.886908691296504e-05,
      "loss": 10.9169,
      "step": 49
    },
    {
      "epoch": 0.32894736842105265,
      "grad_norm": 5.482089042663574,
      "learning_rate": 2.6912569338248315e-05,
      "loss": 11.8786,
      "step": 50
    },
    {
      "epoch": 0.32894736842105265,
      "eval_loss": 1.3653817176818848,
      "eval_runtime": 12.2397,
      "eval_samples_per_second": 10.54,
      "eval_steps_per_second": 5.311,
      "step": 50
    },
    {
      "epoch": 0.3355263157894737,
      "grad_norm": 6.4667439460754395,
      "learning_rate": 2.500000000000001e-05,
      "loss": 11.2426,
      "step": 51
    },
    {
      "epoch": 0.34210526315789475,
      "grad_norm": 5.597299098968506,
      "learning_rate": 2.3135019582658802e-05,
      "loss": 9.1077,
      "step": 52
    },
    {
      "epoch": 0.34868421052631576,
      "grad_norm": 4.8260884284973145,
      "learning_rate": 2.132117818244771e-05,
      "loss": 11.6975,
      "step": 53
    },
    {
      "epoch": 0.35526315789473684,
      "grad_norm": 5.861974716186523,
      "learning_rate": 1.9561928549563968e-05,
      "loss": 11.6898,
      "step": 54
    },
    {
      "epoch": 0.3618421052631579,
      "grad_norm": 4.4668378829956055,
      "learning_rate": 1.7860619515673033e-05,
      "loss": 12.0545,
      "step": 55
    },
    {
      "epoch": 0.3684210526315789,
      "grad_norm": 5.416546821594238,
      "learning_rate": 1.622048961921699e-05,
      "loss": 12.0258,
      "step": 56
    },
    {
      "epoch": 0.375,
      "grad_norm": 6.295071125030518,
      "learning_rate": 1.4644660940672627e-05,
      "loss": 9.6541,
      "step": 57
    },
    {
      "epoch": 0.3815789473684211,
      "grad_norm": 5.567584037780762,
      "learning_rate": 1.3136133159493802e-05,
      "loss": 12.2458,
      "step": 58
    },
    {
      "epoch": 0.3881578947368421,
      "grad_norm": 6.616762638092041,
      "learning_rate": 1.1697777844051105e-05,
      "loss": 12.9287,
      "step": 59
    },
    {
      "epoch": 0.39473684210526316,
      "grad_norm": 5.237483501434326,
      "learning_rate": 1.0332332985438248e-05,
      "loss": 11.8718,
      "step": 60
    },
    {
      "epoch": 0.40131578947368424,
      "grad_norm": 5.236186981201172,
      "learning_rate": 9.042397785550405e-06,
      "loss": 12.2105,
      "step": 61
    },
    {
      "epoch": 0.40789473684210525,
      "grad_norm": 6.172914505004883,
      "learning_rate": 7.830427709355725e-06,
      "loss": 12.6731,
      "step": 62
    },
    {
      "epoch": 0.4144736842105263,
      "grad_norm": 5.345962047576904,
      "learning_rate": 6.698729810778065e-06,
      "loss": 12.0877,
      "step": 63
    },
    {
      "epoch": 0.42105263157894735,
      "grad_norm": 5.08330774307251,
      "learning_rate": 5.649458341088915e-06,
      "loss": 10.9855,
      "step": 64
    },
    {
      "epoch": 0.4276315789473684,
      "grad_norm": 5.678718566894531,
      "learning_rate": 4.684610648167503e-06,
      "loss": 10.4774,
      "step": 65
    },
    {
      "epoch": 0.4342105263157895,
      "grad_norm": 7.054388999938965,
      "learning_rate": 3.8060233744356633e-06,
      "loss": 11.9022,
      "step": 66
    },
    {
      "epoch": 0.4407894736842105,
      "grad_norm": 6.364291191101074,
      "learning_rate": 3.0153689607045845e-06,
      "loss": 8.7441,
      "step": 67
    },
    {
      "epoch": 0.4473684210526316,
      "grad_norm": 5.535186290740967,
      "learning_rate": 2.314152462588659e-06,
      "loss": 11.2657,
      "step": 68
    },
    {
      "epoch": 0.45394736842105265,
      "grad_norm": 5.470834255218506,
      "learning_rate": 1.70370868554659e-06,
      "loss": 10.0374,
      "step": 69
    },
    {
      "epoch": 0.4605263157894737,
      "grad_norm": 6.9800705909729,
      "learning_rate": 1.1851996440033319e-06,
      "loss": 12.873,
      "step": 70
    },
    {
      "epoch": 0.46710526315789475,
      "grad_norm": 6.102013111114502,
      "learning_rate": 7.596123493895991e-07,
      "loss": 11.3473,
      "step": 71
    },
    {
      "epoch": 0.47368421052631576,
      "grad_norm": 7.423451900482178,
      "learning_rate": 4.277569313094809e-07,
      "loss": 12.4332,
      "step": 72
    },
    {
      "epoch": 0.48026315789473684,
      "grad_norm": 6.381293296813965,
      "learning_rate": 1.9026509541272275e-07,
      "loss": 11.5429,
      "step": 73
    },
    {
      "epoch": 0.4868421052631579,
      "grad_norm": 7.644157886505127,
      "learning_rate": 4.7588920907110094e-08,
      "loss": 11.1958,
      "step": 74
    },
    {
      "epoch": 0.4934210526315789,
      "grad_norm": 7.911625862121582,
      "learning_rate": 0.0,
      "loss": 12.3073,
      "step": 75
    },
    {
      "epoch": 0.4934210526315789,
      "eval_loss": 1.3638100624084473,
      "eval_runtime": 12.2644,
      "eval_samples_per_second": 10.518,
      "eval_steps_per_second": 5.3,
      "step": 75
    }
  ],
  "logging_steps": 1,
  "max_steps": 75,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 25,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 9.86082145468416e+16,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}