{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 2.943589743589744,
  "eval_steps": 500,
  "global_step": 144,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.10256410256410256,
      "grad_norm": 6.330384731292725,
      "learning_rate": 2e-05,
      "loss": 0.9917,
      "step": 5
    },
    {
      "epoch": 0.20512820512820512,
      "grad_norm": 2.223167896270752,
      "learning_rate": 1.9936215093023884e-05,
      "loss": 0.7004,
      "step": 10
    },
    {
      "epoch": 0.3076923076923077,
      "grad_norm": 1.8093996047973633,
      "learning_rate": 1.974567407496712e-05,
      "loss": 0.5531,
      "step": 15
    },
    {
      "epoch": 0.41025641025641024,
      "grad_norm": 1.6542998552322388,
      "learning_rate": 1.9430807674052092e-05,
      "loss": 0.4873,
      "step": 20
    },
    {
      "epoch": 0.5128205128205128,
      "grad_norm": 1.5152506828308105,
      "learning_rate": 1.899563263509725e-05,
      "loss": 0.4658,
      "step": 25
    },
    {
      "epoch": 0.6153846153846154,
      "grad_norm": 1.466179370880127,
      "learning_rate": 1.8445700477978207e-05,
      "loss": 0.4239,
      "step": 30
    },
    {
      "epoch": 0.717948717948718,
      "grad_norm": 1.3292107582092285,
      "learning_rate": 1.778802667699196e-05,
      "loss": 0.4138,
      "step": 35
    },
    {
      "epoch": 0.8205128205128205,
      "grad_norm": 1.4440351724624634,
      "learning_rate": 1.7031001164581828e-05,
      "loss": 0.3948,
      "step": 40
    },
    {
      "epoch": 0.9230769230769231,
      "grad_norm": 1.3163414001464844,
      "learning_rate": 1.618428130112533e-05,
      "loss": 0.3881,
      "step": 45
    },
    {
      "epoch": 1.0205128205128204,
      "grad_norm": 1.3107187747955322,
      "learning_rate": 1.5258668676167548e-05,
      "loss": 0.3565,
      "step": 50
    },
    {
      "epoch": 1.123076923076923,
      "grad_norm": 1.3364475965499878,
      "learning_rate": 1.4265971312744252e-05,
      "loss": 0.324,
      "step": 55
    },
    {
      "epoch": 1.2256410256410257,
      "grad_norm": 1.1268199682235718,
      "learning_rate": 1.3218853032651719e-05,
      "loss": 0.3167,
      "step": 60
    },
    {
      "epoch": 1.3282051282051281,
      "grad_norm": 1.2984614372253418,
      "learning_rate": 1.2130671904307692e-05,
      "loss": 0.3045,
      "step": 65
    },
    {
      "epoch": 1.4307692307692308,
      "grad_norm": 1.2189068794250488,
      "learning_rate": 1.1015309834121083e-05,
      "loss": 0.2888,
      "step": 70
    },
    {
      "epoch": 1.5333333333333332,
      "grad_norm": 1.208184003829956,
      "learning_rate": 9.886995475270205e-06,
      "loss": 0.2973,
      "step": 75
    },
    {
      "epoch": 1.6358974358974359,
      "grad_norm": 1.1965588331222534,
      "learning_rate": 8.76012271303888e-06,
      "loss": 0.2931,
      "step": 80
    },
    {
      "epoch": 1.7384615384615385,
      "grad_norm": 1.1966915130615234,
      "learning_rate": 7.649067042289681e-06,
      "loss": 0.2915,
      "step": 85
    },
    {
      "epoch": 1.8410256410256411,
      "grad_norm": 1.2417312860488892,
      "learning_rate": 6.568002179543409e-06,
      "loss": 0.2858,
      "step": 90
    },
    {
      "epoch": 1.9435897435897436,
      "grad_norm": 1.2801405191421509,
      "learning_rate": 5.530719249141148e-06,
      "loss": 0.287,
      "step": 95
    },
    {
      "epoch": 2.041025641025641,
      "grad_norm": 1.1753469705581665,
      "learning_rate": 4.550450850127626e-06,
      "loss": 0.2573,
      "step": 100
    },
    {
      "epoch": 2.1435897435897435,
      "grad_norm": 1.3422598838806152,
      "learning_rate": 3.6397022482313804e-06,
      "loss": 0.2207,
      "step": 105
    },
    {
      "epoch": 2.246153846153846,
      "grad_norm": 1.1634305715560913,
      "learning_rate": 2.8100918464225304e-06,
      "loss": 0.2188,
      "step": 110
    },
    {
      "epoch": 2.348717948717949,
      "grad_norm": 1.2047914266586304,
      "learning_rate": 2.072202969162234e-06,
      "loss": 0.2144,
      "step": 115
    },
    {
      "epoch": 2.4512820512820515,
      "grad_norm": 1.1728473901748657,
      "learning_rate": 1.4354488511294418e-06,
      "loss": 0.2065,
      "step": 120
    },
    {
      "epoch": 2.5538461538461537,
      "grad_norm": 1.2496421337127686,
      "learning_rate": 9.079525527612321e-07,
      "loss": 0.2107,
      "step": 125
    },
    {
      "epoch": 2.6564102564102563,
      "grad_norm": 1.200203537940979,
      "learning_rate": 4.964433345219354e-07,
      "loss": 0.2083,
      "step": 130
    },
    {
      "epoch": 2.758974358974359,
      "grad_norm": 1.2024612426757812,
      "learning_rate": 2.0617081185259512e-07,
      "loss": 0.2111,
      "step": 135
    },
    {
      "epoch": 2.8615384615384616,
      "grad_norm": 1.1126112937927246,
      "learning_rate": 4.083798592444899e-08,
      "loss": 0.2157,
      "step": 140
    },
    {
      "epoch": 2.943589743589744,
      "step": 144,
      "total_flos": 5.491122506196582e+16,
      "train_loss": 0.34712929568356937,
      "train_runtime": 1463.7078,
      "train_samples_per_second": 6.393,
      "train_steps_per_second": 0.098
    }
  ],
  "logging_steps": 5,
  "max_steps": 144,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": false,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 5.491122506196582e+16,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}