{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.1804788213627992,
  "eval_steps": 9,
  "global_step": 100,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.011786372007366482,
      "eval_loss": 3.2576773166656494,
      "eval_runtime": 48.3038,
      "eval_samples_per_second": 5.921,
      "eval_steps_per_second": 2.96,
      "step": 1
    },
    {
      "epoch": 0.03535911602209945,
      "grad_norm": 37.830448150634766,
      "learning_rate": 2.2499999999999998e-05,
      "loss": 99.4918,
      "step": 3
    },
    {
      "epoch": 0.0707182320441989,
      "grad_norm": 30.299089431762695,
      "learning_rate": 4.4999999999999996e-05,
      "loss": 95.1039,
      "step": 6
    },
    {
      "epoch": 0.10607734806629834,
      "grad_norm": 31.264686584472656,
      "learning_rate": 6.75e-05,
      "loss": 99.6355,
      "step": 9
    },
    {
      "epoch": 0.10607734806629834,
      "eval_loss": 2.994905948638916,
      "eval_runtime": 48.3217,
      "eval_samples_per_second": 5.919,
      "eval_steps_per_second": 2.959,
      "step": 9
    },
    {
      "epoch": 0.1414364640883978,
      "grad_norm": 34.47152328491211,
      "learning_rate": 8.999999999999999e-05,
      "loss": 91.6026,
      "step": 12
    },
    {
      "epoch": 0.17679558011049723,
      "grad_norm": 26.11569595336914,
      "learning_rate": 0.0001125,
      "loss": 85.1959,
      "step": 15
    },
    {
      "epoch": 0.2121546961325967,
      "grad_norm": 38.721710205078125,
      "learning_rate": 0.000135,
      "loss": 78.4229,
      "step": 18
    },
    {
      "epoch": 0.2121546961325967,
      "eval_loss": 2.432400703430176,
      "eval_runtime": 48.3101,
      "eval_samples_per_second": 5.92,
      "eval_steps_per_second": 2.96,
      "step": 18
    },
    {
      "epoch": 0.24751381215469614,
      "grad_norm": 37.68834686279297,
      "learning_rate": 0.00014994217771805422,
      "loss": 78.5547,
      "step": 21
    },
    {
      "epoch": 0.2828729281767956,
      "grad_norm": 36.297088623046875,
      "learning_rate": 0.00014907662554463532,
      "loss": 71.8931,
      "step": 24
    },
    {
      "epoch": 0.318232044198895,
      "grad_norm": 40.15138244628906,
      "learning_rate": 0.0001471841427340235,
      "loss": 68.7287,
      "step": 27
    },
    {
      "epoch": 0.318232044198895,
      "eval_loss": 2.1229209899902344,
      "eval_runtime": 48.3187,
      "eval_samples_per_second": 5.919,
      "eval_steps_per_second": 2.96,
      "step": 27
    },
    {
      "epoch": 0.35359116022099446,
      "grad_norm": 22.00638198852539,
      "learning_rate": 0.0001442909649383465,
      "loss": 65.7473,
      "step": 30
    },
    {
      "epoch": 0.3889502762430939,
      "grad_norm": 26.196809768676758,
      "learning_rate": 0.0001404372005304598,
      "loss": 64.8032,
      "step": 33
    },
    {
      "epoch": 0.4243093922651934,
      "grad_norm": 30.012163162231445,
      "learning_rate": 0.00013567627457812106,
      "loss": 62.5845,
      "step": 36
    },
    {
      "epoch": 0.4243093922651934,
      "eval_loss": 1.9335837364196777,
      "eval_runtime": 48.3129,
      "eval_samples_per_second": 5.92,
      "eval_steps_per_second": 2.96,
      "step": 36
    },
    {
      "epoch": 0.45966850828729283,
      "grad_norm": 24.18491554260254,
      "learning_rate": 0.0001300741882076764,
      "loss": 56.7812,
      "step": 39
    },
    {
      "epoch": 0.4950276243093923,
      "grad_norm": 29.25063133239746,
      "learning_rate": 0.00012370860362476374,
      "loss": 59.2692,
      "step": 42
    },
    {
      "epoch": 0.5303867403314917,
      "grad_norm": 26.837539672851562,
      "learning_rate": 0.00011666776747647015,
      "loss": 57.7333,
      "step": 45
    },
    {
      "epoch": 0.5303867403314917,
      "eval_loss": 1.8067072629928589,
      "eval_runtime": 48.2991,
      "eval_samples_per_second": 5.921,
      "eval_steps_per_second": 2.961,
      "step": 45
    },
    {
      "epoch": 0.5657458563535912,
      "grad_norm": 27.746295928955078,
      "learning_rate": 0.00010904928748046599,
      "loss": 57.6997,
      "step": 48
    },
    {
      "epoch": 0.6011049723756906,
      "grad_norm": 26.590911865234375,
      "learning_rate": 0.00010095877928081196,
      "loss": 57.611,
      "step": 51
    },
    {
      "epoch": 0.63646408839779,
      "grad_norm": 24.721492767333984,
      "learning_rate": 9.25084022891929e-05,
      "loss": 54.3467,
      "step": 54
    },
    {
      "epoch": 0.63646408839779,
      "eval_loss": 1.7132052183151245,
      "eval_runtime": 48.2964,
      "eval_samples_per_second": 5.922,
      "eval_steps_per_second": 2.961,
      "step": 54
    },
    {
      "epoch": 0.6718232044198895,
      "grad_norm": 28.768779754638672,
      "learning_rate": 8.381530480933783e-05,
      "loss": 55.5514,
      "step": 57
    },
    {
      "epoch": 0.7071823204419889,
      "grad_norm": 32.7011833190918,
      "learning_rate": 7.5e-05,
      "loss": 54.9361,
      "step": 60
    },
    {
      "epoch": 0.7425414364640884,
      "grad_norm": 36.07527542114258,
      "learning_rate": 6.618469519066217e-05,
      "loss": 53.3471,
      "step": 63
    },
    {
      "epoch": 0.7425414364640884,
      "eval_loss": 1.646447777748108,
      "eval_runtime": 48.3148,
      "eval_samples_per_second": 5.92,
      "eval_steps_per_second": 2.96,
      "step": 63
    },
    {
      "epoch": 0.7779005524861878,
      "grad_norm": 31.98736000061035,
      "learning_rate": 5.7491597710807114e-05,
      "loss": 50.4475,
      "step": 66
    },
    {
      "epoch": 0.8132596685082873,
      "grad_norm": 32.868492126464844,
      "learning_rate": 4.904122071918801e-05,
      "loss": 55.192,
      "step": 69
    },
    {
      "epoch": 0.8486187845303867,
      "grad_norm": 24.729814529418945,
      "learning_rate": 4.095071251953399e-05,
      "loss": 54.725,
      "step": 72
    },
    {
      "epoch": 0.8486187845303867,
      "eval_loss": 1.5971978902816772,
      "eval_runtime": 48.3066,
      "eval_samples_per_second": 5.921,
      "eval_steps_per_second": 2.96,
      "step": 72
    },
    {
      "epoch": 0.8839779005524862,
      "grad_norm": 27.09636116027832,
      "learning_rate": 3.333223252352985e-05,
      "loss": 51.9685,
      "step": 75
    },
    {
      "epoch": 0.9193370165745857,
      "grad_norm": 34.2943000793457,
      "learning_rate": 2.6291396375236232e-05,
      "loss": 50.2621,
      "step": 78
    },
    {
      "epoch": 0.9546961325966851,
      "grad_norm": 29.85685157775879,
      "learning_rate": 1.99258117923236e-05,
      "loss": 49.1029,
      "step": 81
    },
    {
      "epoch": 0.9546961325966851,
      "eval_loss": 1.5639307498931885,
      "eval_runtime": 48.3157,
      "eval_samples_per_second": 5.919,
      "eval_steps_per_second": 2.96,
      "step": 81
    },
    {
      "epoch": 0.9900552486187846,
      "grad_norm": 28.34723663330078,
      "learning_rate": 1.4323725421878949e-05,
      "loss": 46.7876,
      "step": 84
    },
    {
      "epoch": 1.027255985267035,
      "grad_norm": 29.410560607910156,
      "learning_rate": 9.56279946954021e-06,
      "loss": 46.1371,
      "step": 87
    },
    {
      "epoch": 1.0626151012891345,
      "grad_norm": 32.85994338989258,
      "learning_rate": 5.709035061653494e-06,
      "loss": 43.1931,
      "step": 90
    },
    {
      "epoch": 1.0626151012891345,
      "eval_loss": 1.5428903102874756,
      "eval_runtime": 48.3008,
      "eval_samples_per_second": 5.921,
      "eval_steps_per_second": 2.961,
      "step": 90
    },
    {
      "epoch": 1.097974217311234,
      "grad_norm": 27.65435791015625,
      "learning_rate": 2.815857265976462e-06,
      "loss": 48.2304,
      "step": 93
    },
    {
      "epoch": 1.1333333333333333,
      "grad_norm": 22.98744773864746,
      "learning_rate": 9.233744553646754e-07,
      "loss": 42.7889,
      "step": 96
    },
    {
      "epoch": 1.1686924493554327,
      "grad_norm": 29.46680450439453,
      "learning_rate": 5.7822281945782424e-08,
      "loss": 47.1437,
      "step": 99
    },
    {
      "epoch": 1.1686924493554327,
      "eval_loss": 1.5387442111968994,
      "eval_runtime": 48.2916,
      "eval_samples_per_second": 5.922,
      "eval_steps_per_second": 2.961,
      "step": 99
    }
  ],
  "logging_steps": 3,
  "max_steps": 100,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 2,
  "save_steps": 9,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 5.259156315439104e+17,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}