{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.0744047619047619,
  "eval_steps": 9,
  "global_step": 100,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.000744047619047619,
      "eval_loss": 8.732146263122559,
      "eval_runtime": 160.2118,
      "eval_samples_per_second": 14.131,
      "eval_steps_per_second": 1.766,
      "step": 1
    },
    {
      "epoch": 0.002232142857142857,
      "grad_norm": 3.8788089752197266,
      "learning_rate": 1.5e-05,
      "loss": 8.6329,
      "step": 3
    },
    {
      "epoch": 0.004464285714285714,
      "grad_norm": 3.3676693439483643,
      "learning_rate": 3e-05,
      "loss": 8.6812,
      "step": 6
    },
    {
      "epoch": 0.006696428571428571,
      "grad_norm": 4.344758033752441,
      "learning_rate": 4.5e-05,
      "loss": 8.5473,
      "step": 9
    },
    {
      "epoch": 0.006696428571428571,
      "eval_loss": 8.29433536529541,
      "eval_runtime": 161.9566,
      "eval_samples_per_second": 13.979,
      "eval_steps_per_second": 1.747,
      "step": 9
    },
    {
      "epoch": 0.008928571428571428,
      "grad_norm": 4.432471752166748,
      "learning_rate": 4.993910125649561e-05,
      "loss": 7.6704,
      "step": 12
    },
    {
      "epoch": 0.011160714285714286,
      "grad_norm": 5.1014227867126465,
      "learning_rate": 4.962019382530521e-05,
      "loss": 7.1247,
      "step": 15
    },
    {
      "epoch": 0.013392857142857142,
      "grad_norm": 6.452256202697754,
      "learning_rate": 4.9031542398457974e-05,
      "loss": 6.7864,
      "step": 18
    },
    {
      "epoch": 0.013392857142857142,
      "eval_loss": 6.469050884246826,
      "eval_runtime": 161.9588,
      "eval_samples_per_second": 13.979,
      "eval_steps_per_second": 1.747,
      "step": 18
    },
    {
      "epoch": 0.015625,
      "grad_norm": 6.13102912902832,
      "learning_rate": 4.817959636416969e-05,
      "loss": 6.0363,
      "step": 21
    },
    {
      "epoch": 0.017857142857142856,
      "grad_norm": 5.863094329833984,
      "learning_rate": 4.707368982147318e-05,
      "loss": 5.4919,
      "step": 24
    },
    {
      "epoch": 0.020089285714285716,
      "grad_norm": 7.331254959106445,
      "learning_rate": 4.572593931387604e-05,
      "loss": 5.5447,
      "step": 27
    },
    {
      "epoch": 0.020089285714285716,
      "eval_loss": 5.072122573852539,
      "eval_runtime": 162.0067,
      "eval_samples_per_second": 13.975,
      "eval_steps_per_second": 1.747,
      "step": 27
    },
    {
      "epoch": 0.022321428571428572,
      "grad_norm": 7.104370594024658,
      "learning_rate": 4.415111107797445e-05,
      "loss": 4.8921,
      "step": 30
    },
    {
      "epoch": 0.024553571428571428,
      "grad_norm": 5.608682632446289,
      "learning_rate": 4.2366459261474933e-05,
      "loss": 4.841,
      "step": 33
    },
    {
      "epoch": 0.026785714285714284,
      "grad_norm": 4.773255825042725,
      "learning_rate": 4.039153688314145e-05,
      "loss": 4.5234,
      "step": 36
    },
    {
      "epoch": 0.026785714285714284,
      "eval_loss": 4.577538967132568,
      "eval_runtime": 162.4812,
      "eval_samples_per_second": 13.934,
      "eval_steps_per_second": 1.742,
      "step": 36
    },
    {
      "epoch": 0.029017857142857144,
      "grad_norm": 5.47312068939209,
      "learning_rate": 3.824798160583012e-05,
      "loss": 4.5007,
      "step": 39
    },
    {
      "epoch": 0.03125,
      "grad_norm": 5.6877641677856445,
      "learning_rate": 3.5959278669726935e-05,
      "loss": 4.0774,
      "step": 42
    },
    {
      "epoch": 0.033482142857142856,
      "grad_norm": 4.455398082733154,
      "learning_rate": 3.355050358314172e-05,
      "loss": 4.2481,
      "step": 45
    },
    {
      "epoch": 0.033482142857142856,
      "eval_loss": 4.341364860534668,
      "eval_runtime": 161.8916,
      "eval_samples_per_second": 13.985,
      "eval_steps_per_second": 1.748,
      "step": 45
    },
    {
      "epoch": 0.03571428571428571,
      "grad_norm": 6.039599418640137,
      "learning_rate": 3.104804738999169e-05,
      "loss": 4.1585,
      "step": 48
    },
    {
      "epoch": 0.03794642857142857,
      "grad_norm": 5.91121244430542,
      "learning_rate": 2.8479327524001636e-05,
      "loss": 4.1427,
      "step": 51
    },
    {
      "epoch": 0.04017857142857143,
      "grad_norm": 5.7630181312561035,
      "learning_rate": 2.587248741756253e-05,
      "loss": 3.8608,
      "step": 54
    },
    {
      "epoch": 0.04017857142857143,
      "eval_loss": 4.20692253112793,
      "eval_runtime": 162.0191,
      "eval_samples_per_second": 13.974,
      "eval_steps_per_second": 1.747,
      "step": 54
    },
    {
      "epoch": 0.04241071428571429,
      "grad_norm": 6.365548610687256,
      "learning_rate": 2.3256088156396868e-05,
      "loss": 4.1786,
      "step": 57
    },
    {
      "epoch": 0.044642857142857144,
      "grad_norm": 8.317145347595215,
      "learning_rate": 2.0658795558326743e-05,
      "loss": 4.2896,
      "step": 60
    },
    {
      "epoch": 0.046875,
      "grad_norm": 6.300489902496338,
      "learning_rate": 1.8109066104575023e-05,
      "loss": 4.0623,
      "step": 63
    },
    {
      "epoch": 0.046875,
      "eval_loss": 4.134160995483398,
      "eval_runtime": 161.9503,
      "eval_samples_per_second": 13.98,
      "eval_steps_per_second": 1.747,
      "step": 63
    },
    {
      "epoch": 0.049107142857142856,
      "grad_norm": 6.4269185066223145,
      "learning_rate": 1.56348351646022e-05,
      "loss": 3.9744,
      "step": 66
    },
    {
      "epoch": 0.05133928571428571,
      "grad_norm": 5.94874382019043,
      "learning_rate": 1.3263210930352737e-05,
      "loss": 3.9959,
      "step": 69
    },
    {
      "epoch": 0.05357142857142857,
      "grad_norm": 5.269709587097168,
      "learning_rate": 1.1020177413231334e-05,
      "loss": 4.0065,
      "step": 72
    },
    {
      "epoch": 0.05357142857142857,
      "eval_loss": 4.0851731300354,
      "eval_runtime": 161.9017,
      "eval_samples_per_second": 13.984,
      "eval_steps_per_second": 1.748,
      "step": 72
    },
    {
      "epoch": 0.05580357142857143,
      "grad_norm": 6.060158729553223,
      "learning_rate": 8.930309757836517e-06,
      "loss": 3.9547,
      "step": 75
    },
    {
      "epoch": 0.05803571428571429,
      "grad_norm": 4.884081840515137,
      "learning_rate": 7.016504991533726e-06,
      "loss": 3.8446,
      "step": 78
    },
    {
      "epoch": 0.060267857142857144,
      "grad_norm": 5.20225191116333,
      "learning_rate": 5.299731159831953e-06,
      "loss": 3.8639,
      "step": 81
    },
    {
      "epoch": 0.060267857142857144,
      "eval_loss": 4.055640697479248,
      "eval_runtime": 161.812,
      "eval_samples_per_second": 13.992,
      "eval_steps_per_second": 1.749,
      "step": 81
    },
    {
      "epoch": 0.0625,
      "grad_norm": 6.285635948181152,
      "learning_rate": 3.798797596089351e-06,
      "loss": 4.2296,
      "step": 84
    },
    {
      "epoch": 0.06473214285714286,
      "grad_norm": 5.423341751098633,
      "learning_rate": 2.5301488425208296e-06,
      "loss": 4.1821,
      "step": 87
    },
    {
      "epoch": 0.06696428571428571,
      "grad_norm": 5.548636436462402,
      "learning_rate": 1.5076844803522922e-06,
      "loss": 3.9078,
      "step": 90
    },
    {
      "epoch": 0.06696428571428571,
      "eval_loss": 4.040578365325928,
      "eval_runtime": 161.4718,
      "eval_samples_per_second": 14.021,
      "eval_steps_per_second": 1.753,
      "step": 90
    },
    {
      "epoch": 0.06919642857142858,
      "grad_norm": 6.10594367980957,
      "learning_rate": 7.426068431000882e-07,
      "loss": 4.0287,
      "step": 93
    },
    {
      "epoch": 0.07142857142857142,
      "grad_norm": 6.208632946014404,
      "learning_rate": 2.4329828146074095e-07,
      "loss": 3.8063,
      "step": 96
    },
    {
      "epoch": 0.07366071428571429,
      "grad_norm": 5.615017414093018,
      "learning_rate": 1.522932452260595e-08,
      "loss": 3.8605,
      "step": 99
    },
    {
      "epoch": 0.07366071428571429,
      "eval_loss": 4.03863525390625,
      "eval_runtime": 161.5774,
      "eval_samples_per_second": 14.012,
      "eval_steps_per_second": 1.751,
      "step": 99
    }
  ],
  "logging_steps": 3,
  "max_steps": 100,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 9,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 1.483774567120896e+17,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}