{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 2.857142857142857,
  "eval_steps": 5,
  "global_step": 50,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.05714285714285714,
      "grad_norm": 12.726211547851562,
      "learning_rate": 1e-05,
      "loss": 16.7409,
      "step": 1
    },
    {
      "epoch": 0.05714285714285714,
      "eval_loss": 9.10863208770752,
      "eval_runtime": 1.8789,
      "eval_samples_per_second": 7.983,
      "eval_steps_per_second": 1.064,
      "step": 1
    },
    {
      "epoch": 0.11428571428571428,
      "grad_norm": 10.197029113769531,
      "learning_rate": 2e-05,
      "loss": 13.2649,
      "step": 2
    },
    {
      "epoch": 0.17142857142857143,
      "grad_norm": 12.611615180969238,
      "learning_rate": 3e-05,
      "loss": 16.1898,
      "step": 3
    },
    {
      "epoch": 0.22857142857142856,
      "grad_norm": 14.422361373901367,
      "learning_rate": 4e-05,
      "loss": 16.7298,
      "step": 4
    },
    {
      "epoch": 0.2857142857142857,
      "grad_norm": 13.265689849853516,
      "learning_rate": 5e-05,
      "loss": 16.0336,
      "step": 5
    },
    {
      "epoch": 0.2857142857142857,
      "eval_loss": 8.701345443725586,
      "eval_runtime": 1.8972,
      "eval_samples_per_second": 7.907,
      "eval_steps_per_second": 1.054,
      "step": 5
    },
    {
      "epoch": 0.34285714285714286,
      "grad_norm": 18.975881576538086,
      "learning_rate": 6e-05,
      "loss": 15.8037,
      "step": 6
    },
    {
      "epoch": 0.4,
      "grad_norm": 19.776723861694336,
      "learning_rate": 7e-05,
      "loss": 14.8698,
      "step": 7
    },
    {
      "epoch": 0.45714285714285713,
      "grad_norm": 13.150531768798828,
      "learning_rate": 8e-05,
      "loss": 12.3575,
      "step": 8
    },
    {
      "epoch": 0.5142857142857142,
      "grad_norm": 15.01668643951416,
      "learning_rate": 9e-05,
      "loss": 10.8048,
      "step": 9
    },
    {
      "epoch": 0.5714285714285714,
      "grad_norm": 14.243483543395996,
      "learning_rate": 0.0001,
      "loss": 9.9134,
      "step": 10
    },
    {
      "epoch": 0.5714285714285714,
      "eval_loss": 4.631707668304443,
      "eval_runtime": 1.8806,
      "eval_samples_per_second": 7.976,
      "eval_steps_per_second": 1.063,
      "step": 10
    },
    {
      "epoch": 0.6285714285714286,
      "grad_norm": 17.032089233398438,
      "learning_rate": 9.986661418317759e-05,
      "loss": 8.326,
      "step": 11
    },
    {
      "epoch": 0.6857142857142857,
      "grad_norm": 31.08342933654785,
      "learning_rate": 9.946716840375551e-05,
      "loss": 6.0208,
      "step": 12
    },
    {
      "epoch": 0.7428571428571429,
      "grad_norm": 19.09461212158203,
      "learning_rate": 9.880379387779637e-05,
      "loss": 5.0303,
      "step": 13
    },
    {
      "epoch": 0.8,
      "grad_norm": 20.249990463256836,
      "learning_rate": 9.78800299954203e-05,
      "loss": 3.0912,
      "step": 14
    },
    {
      "epoch": 0.8571428571428571,
      "grad_norm": 12.805388450622559,
      "learning_rate": 9.67008054366274e-05,
      "loss": 1.385,
      "step": 15
    },
    {
      "epoch": 0.8571428571428571,
      "eval_loss": 1.539831519126892,
      "eval_runtime": 1.8804,
      "eval_samples_per_second": 7.977,
      "eval_steps_per_second": 1.064,
      "step": 15
    },
    {
      "epoch": 0.9142857142857143,
      "grad_norm": 16.641372680664062,
      "learning_rate": 9.527241187465734e-05,
      "loss": 1.2639,
      "step": 16
    },
    {
      "epoch": 0.9714285714285714,
      "grad_norm": 16.987781524658203,
      "learning_rate": 9.360247040719039e-05,
      "loss": 1.4489,
      "step": 17
    },
    {
      "epoch": 1.0285714285714285,
      "grad_norm": 10.107866287231445,
      "learning_rate": 9.16998908944939e-05,
      "loss": 1.8918,
      "step": 18
    },
    {
      "epoch": 1.0857142857142856,
      "grad_norm": 14.143921852111816,
      "learning_rate": 8.957482442146272e-05,
      "loss": 0.9066,
      "step": 19
    },
    {
      "epoch": 1.1428571428571428,
      "grad_norm": 6.628956317901611,
      "learning_rate": 8.72386091371891e-05,
      "loss": 1.3674,
      "step": 20
    },
    {
      "epoch": 1.1428571428571428,
      "eval_loss": 1.024061918258667,
      "eval_runtime": 1.8791,
      "eval_samples_per_second": 7.983,
      "eval_steps_per_second": 1.064,
      "step": 20
    },
    {
      "epoch": 1.2,
      "grad_norm": 13.004719734191895,
      "learning_rate": 8.47037097610317e-05,
      "loss": 0.966,
      "step": 21
    },
    {
      "epoch": 1.2571428571428571,
      "grad_norm": 9.043590545654297,
      "learning_rate": 8.198365107794457e-05,
      "loss": 1.1478,
      "step": 22
    },
    {
      "epoch": 1.3142857142857143,
      "grad_norm": 8.619237899780273,
      "learning_rate": 7.909294577789766e-05,
      "loss": 0.3326,
      "step": 23
    },
    {
      "epoch": 1.3714285714285714,
      "grad_norm": 7.541545867919922,
      "learning_rate": 7.604701702439651e-05,
      "loss": 0.251,
      "step": 24
    },
    {
      "epoch": 1.4285714285714286,
      "grad_norm": 10.902060508728027,
      "learning_rate": 7.286211616523193e-05,
      "loss": 0.8831,
      "step": 25
    },
    {
      "epoch": 1.4285714285714286,
      "eval_loss": 0.7456358671188354,
      "eval_runtime": 1.8792,
      "eval_samples_per_second": 7.982,
      "eval_steps_per_second": 1.064,
      "step": 25
    },
    {
      "epoch": 1.4857142857142858,
      "grad_norm": 9.19676399230957,
      "learning_rate": 6.95552360245078e-05,
      "loss": 0.728,
      "step": 26
    },
    {
      "epoch": 1.5428571428571427,
      "grad_norm": 12.570167541503906,
      "learning_rate": 6.614402023857232e-05,
      "loss": 0.92,
      "step": 27
    },
    {
      "epoch": 1.6,
      "grad_norm": 8.061638832092285,
      "learning_rate": 6.264666911958404e-05,
      "loss": 0.3867,
      "step": 28
    },
    {
      "epoch": 1.657142857142857,
      "grad_norm": 5.505425453186035,
      "learning_rate": 5.908184254897182e-05,
      "loss": 0.4116,
      "step": 29
    },
    {
      "epoch": 1.7142857142857144,
      "grad_norm": 4.206918239593506,
      "learning_rate": 5.546856041889373e-05,
      "loss": 0.26,
      "step": 30
    },
    {
      "epoch": 1.7142857142857144,
      "eval_loss": 0.6001853942871094,
      "eval_runtime": 1.8809,
      "eval_samples_per_second": 7.975,
      "eval_steps_per_second": 1.063,
      "step": 30
    },
    {
      "epoch": 1.7714285714285714,
      "grad_norm": 7.199727535247803,
      "learning_rate": 5.182610115288295e-05,
      "loss": 0.6765,
      "step": 31
    },
    {
      "epoch": 1.8285714285714287,
      "grad_norm": 5.864309310913086,
      "learning_rate": 4.817389884711705e-05,
      "loss": 0.6613,
      "step": 32
    },
    {
      "epoch": 1.8857142857142857,
      "grad_norm": 2.527921676635742,
      "learning_rate": 4.4531439581106295e-05,
      "loss": 0.0827,
      "step": 33
    },
    {
      "epoch": 1.9428571428571428,
      "grad_norm": 5.816792011260986,
      "learning_rate": 4.0918157451028185e-05,
      "loss": 0.2566,
      "step": 34
    },
    {
      "epoch": 2.0,
      "grad_norm": 4.379115104675293,
      "learning_rate": 3.735333088041596e-05,
      "loss": 0.3601,
      "step": 35
    },
    {
      "epoch": 2.0,
      "eval_loss": 0.6280831694602966,
      "eval_runtime": 1.8813,
      "eval_samples_per_second": 7.973,
      "eval_steps_per_second": 1.063,
      "step": 35
    },
    {
      "epoch": 2.057142857142857,
      "grad_norm": 5.1028642654418945,
      "learning_rate": 3.38559797614277e-05,
      "loss": 0.1492,
      "step": 36
    },
    {
      "epoch": 2.1142857142857143,
      "grad_norm": 1.6019138097763062,
      "learning_rate": 3.0444763975492208e-05,
      "loss": 0.058,
      "step": 37
    },
    {
      "epoch": 2.1714285714285713,
      "grad_norm": 3.3319895267486572,
      "learning_rate": 2.7137883834768073e-05,
      "loss": 0.1912,
      "step": 38
    },
    {
      "epoch": 2.2285714285714286,
      "grad_norm": 3.3347697257995605,
      "learning_rate": 2.3952982975603496e-05,
      "loss": 0.1467,
      "step": 39
    },
    {
      "epoch": 2.2857142857142856,
      "grad_norm": 4.849230766296387,
      "learning_rate": 2.090705422210237e-05,
      "loss": 0.2343,
      "step": 40
    },
    {
      "epoch": 2.2857142857142856,
      "eval_loss": 0.6117228865623474,
      "eval_runtime": 1.8795,
      "eval_samples_per_second": 7.981,
      "eval_steps_per_second": 1.064,
      "step": 40
    },
    {
      "epoch": 2.342857142857143,
      "grad_norm": 0.7933540344238281,
      "learning_rate": 1.801634892205545e-05,
      "loss": 0.0241,
      "step": 41
    },
    {
      "epoch": 2.4,
      "grad_norm": 4.947017192840576,
      "learning_rate": 1.5296290238968303e-05,
      "loss": 0.1974,
      "step": 42
    },
    {
      "epoch": 2.4571428571428573,
      "grad_norm": 4.42537784576416,
      "learning_rate": 1.2761390862810907e-05,
      "loss": 0.2682,
      "step": 43
    },
    {
      "epoch": 2.5142857142857142,
      "grad_norm": 2.0344936847686768,
      "learning_rate": 1.0425175578537299e-05,
      "loss": 0.1215,
      "step": 44
    },
    {
      "epoch": 2.571428571428571,
      "grad_norm": 2.9451773166656494,
      "learning_rate": 8.30010910550611e-06,
      "loss": 0.1397,
      "step": 45
    },
    {
      "epoch": 2.571428571428571,
      "eval_loss": 0.567788302898407,
      "eval_runtime": 1.8814,
      "eval_samples_per_second": 7.973,
      "eval_steps_per_second": 1.063,
      "step": 45
    },
    {
      "epoch": 2.6285714285714286,
      "grad_norm": 4.362372398376465,
      "learning_rate": 6.397529592809614e-06,
      "loss": 0.1807,
      "step": 46
    },
    {
      "epoch": 2.685714285714286,
      "grad_norm": 1.7142335176467896,
      "learning_rate": 4.727588125342669e-06,
      "loss": 0.0381,
      "step": 47
    },
    {
      "epoch": 2.742857142857143,
      "grad_norm": 5.089944362640381,
      "learning_rate": 3.299194563372604e-06,
      "loss": 0.7555,
      "step": 48
    },
    {
      "epoch": 2.8,
      "grad_norm": 2.963768482208252,
      "learning_rate": 2.1199700045797077e-06,
      "loss": 0.0599,
      "step": 49
    },
    {
      "epoch": 2.857142857142857,
      "grad_norm": 3.1674277782440186,
      "learning_rate": 1.196206122203647e-06,
      "loss": 0.0929,
      "step": 50
    },
    {
      "epoch": 2.857142857142857,
      "eval_loss": 0.5395380854606628,
      "eval_runtime": 1.879,
      "eval_samples_per_second": 7.983,
      "eval_steps_per_second": 1.064,
      "step": 50
    }
  ],
  "logging_steps": 1,
  "max_steps": 53,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 4,
  "save_steps": 25,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 3.26732322177024e+16,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}