{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.8947368421052633,
"eval_steps": 9,
"global_step": 72,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.02631578947368421,
"eval_loss": 1.6184024810791016,
"eval_runtime": 4.386,
"eval_samples_per_second": 14.592,
"eval_steps_per_second": 1.824,
"step": 1
},
{
"epoch": 0.07894736842105263,
"grad_norm": 25.33377456665039,
"learning_rate": 3e-05,
"loss": 6.2534,
"step": 3
},
{
"epoch": 0.15789473684210525,
"grad_norm": 19.7912540435791,
"learning_rate": 6e-05,
"loss": 6.0633,
"step": 6
},
{
"epoch": 0.23684210526315788,
"grad_norm": 17.20012855529785,
"learning_rate": 9e-05,
"loss": 5.0168,
"step": 9
},
{
"epoch": 0.23684210526315788,
"eval_loss": 1.0376337766647339,
"eval_runtime": 4.4723,
"eval_samples_per_second": 14.31,
"eval_steps_per_second": 1.789,
"step": 9
},
{
"epoch": 0.3157894736842105,
"grad_norm": 11.844976425170898,
"learning_rate": 9.987820251299122e-05,
"loss": 4.0152,
"step": 12
},
{
"epoch": 0.39473684210526316,
"grad_norm": 10.39311695098877,
"learning_rate": 9.924038765061042e-05,
"loss": 3.859,
"step": 15
},
{
"epoch": 0.47368421052631576,
"grad_norm": 9.025694847106934,
"learning_rate": 9.806308479691595e-05,
"loss": 3.9375,
"step": 18
},
{
"epoch": 0.47368421052631576,
"eval_loss": 0.8340747356414795,
"eval_runtime": 4.4933,
"eval_samples_per_second": 14.244,
"eval_steps_per_second": 1.78,
"step": 18
},
{
"epoch": 0.5526315789473685,
"grad_norm": 8.11834716796875,
"learning_rate": 9.635919272833938e-05,
"loss": 3.2265,
"step": 21
},
{
"epoch": 0.631578947368421,
"grad_norm": 10.354762077331543,
"learning_rate": 9.414737964294636e-05,
"loss": 3.3718,
"step": 24
},
{
"epoch": 0.7105263157894737,
"grad_norm": 8.096390724182129,
"learning_rate": 9.145187862775209e-05,
"loss": 3.5513,
"step": 27
},
{
"epoch": 0.7105263157894737,
"eval_loss": 0.8040823340415955,
"eval_runtime": 4.4907,
"eval_samples_per_second": 14.252,
"eval_steps_per_second": 1.781,
"step": 27
},
{
"epoch": 0.7894736842105263,
"grad_norm": 8.748348236083984,
"learning_rate": 8.83022221559489e-05,
"loss": 3.7277,
"step": 30
},
{
"epoch": 0.868421052631579,
"grad_norm": 9.880663871765137,
"learning_rate": 8.473291852294987e-05,
"loss": 3.2257,
"step": 33
},
{
"epoch": 0.9473684210526315,
"grad_norm": 8.828300476074219,
"learning_rate": 8.07830737662829e-05,
"loss": 3.4785,
"step": 36
},
{
"epoch": 0.9473684210526315,
"eval_loss": 0.7874240279197693,
"eval_runtime": 4.495,
"eval_samples_per_second": 14.238,
"eval_steps_per_second": 1.78,
"step": 36
},
{
"epoch": 1.0263157894736843,
"grad_norm": 6.3108296394348145,
"learning_rate": 7.649596321166024e-05,
"loss": 3.1481,
"step": 39
},
{
"epoch": 1.1052631578947367,
"grad_norm": 8.543023109436035,
"learning_rate": 7.191855733945387e-05,
"loss": 2.5156,
"step": 42
},
{
"epoch": 1.1842105263157894,
"grad_norm": 6.542039394378662,
"learning_rate": 6.710100716628344e-05,
"loss": 2.4297,
"step": 45
},
{
"epoch": 1.1842105263157894,
"eval_loss": 0.7846142649650574,
"eval_runtime": 4.4938,
"eval_samples_per_second": 14.242,
"eval_steps_per_second": 1.78,
"step": 45
},
{
"epoch": 1.263157894736842,
"grad_norm": 7.43921422958374,
"learning_rate": 6.209609477998338e-05,
"loss": 2.4722,
"step": 48
},
{
"epoch": 1.3421052631578947,
"grad_norm": 8.37086296081543,
"learning_rate": 5.695865504800327e-05,
"loss": 2.6133,
"step": 51
},
{
"epoch": 1.4210526315789473,
"grad_norm": 8.553829193115234,
"learning_rate": 5.174497483512506e-05,
"loss": 2.6193,
"step": 54
},
{
"epoch": 1.4210526315789473,
"eval_loss": 0.7718786597251892,
"eval_runtime": 4.4967,
"eval_samples_per_second": 14.233,
"eval_steps_per_second": 1.779,
"step": 54
},
{
"epoch": 1.5,
"grad_norm": 6.831599712371826,
"learning_rate": 4.6512176312793736e-05,
"loss": 2.3717,
"step": 57
},
{
"epoch": 1.5789473684210527,
"grad_norm": 8.077478408813477,
"learning_rate": 4.131759111665349e-05,
"loss": 2.124,
"step": 60
},
{
"epoch": 1.6578947368421053,
"grad_norm": 7.879061222076416,
"learning_rate": 3.6218132209150045e-05,
"loss": 2.1771,
"step": 63
},
{
"epoch": 1.6578947368421053,
"eval_loss": 0.7753761410713196,
"eval_runtime": 4.4951,
"eval_samples_per_second": 14.238,
"eval_steps_per_second": 1.78,
"step": 63
},
{
"epoch": 1.736842105263158,
"grad_norm": 9.225750923156738,
"learning_rate": 3.12696703292044e-05,
"loss": 2.2392,
"step": 66
},
{
"epoch": 1.8157894736842106,
"grad_norm": 8.263725280761719,
"learning_rate": 2.6526421860705473e-05,
"loss": 2.1842,
"step": 69
},
{
"epoch": 1.8947368421052633,
"grad_norm": 8.750575065612793,
"learning_rate": 2.2040354826462668e-05,
"loss": 2.3606,
"step": 72
},
{
"epoch": 1.8947368421052633,
"eval_loss": 0.7717390060424805,
"eval_runtime": 4.4993,
"eval_samples_per_second": 14.225,
"eval_steps_per_second": 1.778,
"step": 72
}
],
"logging_steps": 3,
"max_steps": 100,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 9,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 1.009431442662359e+17,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}