{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.04106143817687215,
  "eval_steps": 50,
  "global_step": 200,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.00020530719088436073,
      "eval_loss": 1.3546793460845947,
      "eval_runtime": 87.9312,
      "eval_samples_per_second": 23.325,
      "eval_steps_per_second": 11.668,
      "step": 1
    },
    {
      "epoch": 0.002053071908843607,
      "grad_norm": 0.9955037236213684,
      "learning_rate": 0.00019967573081342103,
      "loss": 1.0432,
      "step": 10
    },
    {
      "epoch": 0.004106143817687214,
      "grad_norm": 1.3997418880462646,
      "learning_rate": 0.0001970941817426052,
      "loss": 0.4604,
      "step": 20
    },
    {
      "epoch": 0.006159215726530822,
      "grad_norm": 0.8764151334762573,
      "learning_rate": 0.00019199794436588243,
      "loss": 0.3427,
      "step": 30
    },
    {
      "epoch": 0.008212287635374429,
      "grad_norm": 0.648871660232544,
      "learning_rate": 0.0001845190085543795,
      "loss": 0.3203,
      "step": 40
    },
    {
      "epoch": 0.010265359544218037,
      "grad_norm": 0.4984304904937744,
      "learning_rate": 0.00017485107481711012,
      "loss": 0.3191,
      "step": 50
    },
    {
      "epoch": 0.010265359544218037,
      "eval_loss": 0.31821250915527344,
      "eval_runtime": 86.0569,
      "eval_samples_per_second": 23.833,
      "eval_steps_per_second": 11.922,
      "step": 50
    },
    {
      "epoch": 0.012318431453061643,
      "grad_norm": 0.47477608919143677,
      "learning_rate": 0.00016324453755953773,
      "loss": 0.315,
      "step": 60
    },
    {
      "epoch": 0.014371503361905251,
      "grad_norm": 0.526182234287262,
      "learning_rate": 0.00015000000000000001,
      "loss": 0.3055,
      "step": 70
    },
    {
      "epoch": 0.016424575270748858,
      "grad_norm": 0.6147613525390625,
      "learning_rate": 0.00013546048870425356,
      "loss": 0.3017,
      "step": 80
    },
    {
      "epoch": 0.018477647179592466,
      "grad_norm": 0.6105421781539917,
      "learning_rate": 0.00012000256937760445,
      "loss": 0.3058,
      "step": 90
    },
    {
      "epoch": 0.020530719088436074,
      "grad_norm": 0.5068532228469849,
      "learning_rate": 0.00010402659401094152,
      "loss": 0.2953,
      "step": 100
    },
    {
      "epoch": 0.020530719088436074,
      "eval_loss": 0.2939603924751282,
      "eval_runtime": 86.3176,
      "eval_samples_per_second": 23.761,
      "eval_steps_per_second": 11.886,
      "step": 100
    },
    {
      "epoch": 0.02258379099727968,
      "grad_norm": 0.619922399520874,
      "learning_rate": 8.79463319744677e-05,
      "loss": 0.308,
      "step": 110
    },
    {
      "epoch": 0.024636862906123286,
      "grad_norm": 0.47642096877098083,
      "learning_rate": 7.217825360835473e-05,
      "loss": 0.2825,
      "step": 120
    },
    {
      "epoch": 0.026689934814966895,
      "grad_norm": 0.38181358575820923,
      "learning_rate": 5.713074385969457e-05,
      "loss": 0.2863,
      "step": 130
    },
    {
      "epoch": 0.028743006723810503,
      "grad_norm": 0.5245420336723328,
      "learning_rate": 4.3193525326884435e-05,
      "loss": 0.2711,
      "step": 140
    },
    {
      "epoch": 0.030796078632654107,
      "grad_norm": 0.40633440017700195,
      "learning_rate": 3.072756464904006e-05,
      "loss": 0.2751,
      "step": 150
    },
    {
      "epoch": 0.030796078632654107,
      "eval_loss": 0.2869807481765747,
      "eval_runtime": 86.1617,
      "eval_samples_per_second": 23.804,
      "eval_steps_per_second": 11.908,
      "step": 150
    },
    {
      "epoch": 0.032849150541497715,
      "grad_norm": 0.4666227102279663,
      "learning_rate": 2.0055723659649904e-05,
      "loss": 0.2773,
      "step": 160
    },
    {
      "epoch": 0.03490222245034132,
      "grad_norm": 0.40914592146873474,
      "learning_rate": 1.1454397434679021e-05,
      "loss": 0.2505,
      "step": 170
    },
    {
      "epoch": 0.03695529435918493,
      "grad_norm": 0.4898912012577057,
      "learning_rate": 5.146355805285452e-06,
      "loss": 0.2766,
      "step": 180
    },
    {
      "epoch": 0.03900836626802854,
      "grad_norm": 0.444612592458725,
      "learning_rate": 1.2949737362087156e-06,
      "loss": 0.2722,
      "step": 190
    },
    {
      "epoch": 0.04106143817687215,
      "grad_norm": 0.48914796113967896,
      "learning_rate": 0.0,
      "loss": 0.2969,
      "step": 200
    },
    {
      "epoch": 0.04106143817687215,
      "eval_loss": 0.28439101576805115,
      "eval_runtime": 86.2057,
      "eval_samples_per_second": 23.792,
      "eval_steps_per_second": 11.902,
      "step": 200
    }
  ],
  "logging_steps": 10,
  "max_steps": 200,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 50,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 4.51479053795328e+16,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}