{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.0316288877174486,
"eval_steps": 9,
"global_step": 45,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0007028641714988579,
"eval_loss": 1.1621719598770142,
"eval_runtime": 333.2956,
"eval_samples_per_second": 7.192,
"eval_steps_per_second": 0.9,
"step": 1
},
{
"epoch": 0.0021085925144965737,
"grad_norm": 0.7200496196746826,
"learning_rate": 1.5e-05,
"loss": 1.1481,
"step": 3
},
{
"epoch": 0.004217185028993147,
"grad_norm": 0.4291810393333435,
"learning_rate": 3e-05,
"loss": 1.1496,
"step": 6
},
{
"epoch": 0.0063257775434897206,
"grad_norm": 0.6174260377883911,
"learning_rate": 4.5e-05,
"loss": 1.1159,
"step": 9
},
{
"epoch": 0.0063257775434897206,
"eval_loss": 1.077151894569397,
"eval_runtime": 336.606,
"eval_samples_per_second": 7.121,
"eval_steps_per_second": 0.891,
"step": 9
},
{
"epoch": 0.008434370057986295,
"grad_norm": 0.5994438529014587,
"learning_rate": 4.993910125649561e-05,
"loss": 1.0264,
"step": 12
},
{
"epoch": 0.010542962572482868,
"grad_norm": 0.25993818044662476,
"learning_rate": 4.962019382530521e-05,
"loss": 0.8913,
"step": 15
},
{
"epoch": 0.012651555086979441,
"grad_norm": 0.2148323357105255,
"learning_rate": 4.9031542398457974e-05,
"loss": 0.9068,
"step": 18
},
{
"epoch": 0.012651555086979441,
"eval_loss": 0.8854690194129944,
"eval_runtime": 336.5338,
"eval_samples_per_second": 7.123,
"eval_steps_per_second": 0.891,
"step": 18
},
{
"epoch": 0.014760147601476014,
"grad_norm": 0.2650648057460785,
"learning_rate": 4.817959636416969e-05,
"loss": 0.8897,
"step": 21
},
{
"epoch": 0.01686874011597259,
"grad_norm": 0.2767353355884552,
"learning_rate": 4.707368982147318e-05,
"loss": 0.805,
"step": 24
},
{
"epoch": 0.018977332630469163,
"grad_norm": 0.21788977086544037,
"learning_rate": 4.572593931387604e-05,
"loss": 0.8363,
"step": 27
},
{
"epoch": 0.018977332630469163,
"eval_loss": 0.8280394673347473,
"eval_runtime": 336.3295,
"eval_samples_per_second": 7.127,
"eval_steps_per_second": 0.892,
"step": 27
},
{
"epoch": 0.021085925144965736,
"grad_norm": 0.19669905304908752,
"learning_rate": 4.415111107797445e-05,
"loss": 0.8562,
"step": 30
},
{
"epoch": 0.02319451765946231,
"grad_norm": 0.16489790380001068,
"learning_rate": 4.2366459261474933e-05,
"loss": 0.7713,
"step": 33
},
{
"epoch": 0.025303110173958882,
"grad_norm": 0.18417863547801971,
"learning_rate": 4.039153688314145e-05,
"loss": 0.7589,
"step": 36
},
{
"epoch": 0.025303110173958882,
"eval_loss": 0.7888206243515015,
"eval_runtime": 336.3758,
"eval_samples_per_second": 7.126,
"eval_steps_per_second": 0.892,
"step": 36
},
{
"epoch": 0.027411702688455455,
"grad_norm": 0.1926659792661667,
"learning_rate": 3.824798160583012e-05,
"loss": 0.7353,
"step": 39
},
{
"epoch": 0.02952029520295203,
"grad_norm": 0.20542505383491516,
"learning_rate": 3.5959278669726935e-05,
"loss": 0.8628,
"step": 42
},
{
"epoch": 0.0316288877174486,
"grad_norm": 0.1803169548511505,
"learning_rate": 3.355050358314172e-05,
"loss": 0.8016,
"step": 45
},
{
"epoch": 0.0316288877174486,
"eval_loss": 0.7604277729988098,
"eval_runtime": 336.9862,
"eval_samples_per_second": 7.113,
"eval_steps_per_second": 0.89,
"step": 45
}
],
"logging_steps": 3,
"max_steps": 100,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 9,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 1.189969410392064e+17,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}