Training in progress, step 40, checkpoint (commit 55e868a, verified)
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.05884516366311144,
"eval_steps": 5,
"global_step": 40,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.001471129091577786,
"eval_loss": 2.445326089859009,
"eval_runtime": 103.0161,
"eval_samples_per_second": 11.115,
"eval_steps_per_second": 1.398,
"step": 1
},
{
"epoch": 0.004413387274733358,
"grad_norm": 1.040910005569458,
"learning_rate": 1.5e-05,
"loss": 9.5736,
"step": 3
},
{
"epoch": 0.00735564545788893,
"eval_loss": 2.441359758377075,
"eval_runtime": 103.7991,
"eval_samples_per_second": 11.031,
"eval_steps_per_second": 1.387,
"step": 5
},
{
"epoch": 0.008826774549466716,
"grad_norm": 1.0511504411697388,
"learning_rate": 3e-05,
"loss": 9.7365,
"step": 6
},
{
"epoch": 0.013240161824200073,
"grad_norm": 1.3530985116958618,
"learning_rate": 4.5e-05,
"loss": 9.7514,
"step": 9
},
{
"epoch": 0.01471129091577786,
"eval_loss": 2.3896217346191406,
"eval_runtime": 103.8136,
"eval_samples_per_second": 11.029,
"eval_steps_per_second": 1.387,
"step": 10
},
{
"epoch": 0.017653549098933432,
"grad_norm": 1.658168077468872,
"learning_rate": 4.9692208514878444e-05,
"loss": 9.3972,
"step": 12
},
{
"epoch": 0.02206693637366679,
"grad_norm": 1.7478517293930054,
"learning_rate": 4.8096988312782174e-05,
"loss": 9.1795,
"step": 15
},
{
"epoch": 0.02206693637366679,
"eval_loss": 2.2163238525390625,
"eval_runtime": 103.8055,
"eval_samples_per_second": 11.03,
"eval_steps_per_second": 1.387,
"step": 15
},
{
"epoch": 0.026480323648400146,
"grad_norm": 1.6007380485534668,
"learning_rate": 4.522542485937369e-05,
"loss": 8.6175,
"step": 18
},
{
"epoch": 0.02942258183155572,
"eval_loss": 2.0439629554748535,
"eval_runtime": 103.8112,
"eval_samples_per_second": 11.03,
"eval_steps_per_second": 1.387,
"step": 20
},
{
"epoch": 0.030893710923133505,
"grad_norm": 1.5845534801483154,
"learning_rate": 4.123620120825459e-05,
"loss": 8.3277,
"step": 21
},
{
"epoch": 0.035307098197866864,
"grad_norm": 1.4348173141479492,
"learning_rate": 3.634976249348867e-05,
"loss": 7.9879,
"step": 24
},
{
"epoch": 0.036778227289444645,
"eval_loss": 1.9180830717086792,
"eval_runtime": 103.7801,
"eval_samples_per_second": 11.033,
"eval_steps_per_second": 1.388,
"step": 25
},
{
"epoch": 0.03972048547260022,
"grad_norm": 1.5030665397644043,
"learning_rate": 3.083613409639764e-05,
"loss": 7.6862,
"step": 27
},
{
"epoch": 0.04413387274733358,
"grad_norm": 1.541954517364502,
"learning_rate": 2.5e-05,
"loss": 7.2887,
"step": 30
},
{
"epoch": 0.04413387274733358,
"eval_loss": 1.839240550994873,
"eval_runtime": 103.7652,
"eval_samples_per_second": 11.035,
"eval_steps_per_second": 1.388,
"step": 30
},
{
"epoch": 0.04854726002206693,
"grad_norm": 1.4720203876495361,
"learning_rate": 1.9163865903602374e-05,
"loss": 7.1466,
"step": 33
},
{
"epoch": 0.05148951820522251,
"eval_loss": 1.7863552570343018,
"eval_runtime": 103.7831,
"eval_samples_per_second": 11.033,
"eval_steps_per_second": 1.388,
"step": 35
},
{
"epoch": 0.05296064729680029,
"grad_norm": 1.4572272300720215,
"learning_rate": 1.3650237506511331e-05,
"loss": 7.2036,
"step": 36
},
{
"epoch": 0.05737403457153365,
"grad_norm": 1.4571818113327026,
"learning_rate": 8.763798791745411e-06,
"loss": 6.8959,
"step": 39
},
{
"epoch": 0.05884516366311144,
"eval_loss": 1.7566184997558594,
"eval_runtime": 103.7914,
"eval_samples_per_second": 11.032,
"eval_steps_per_second": 1.387,
"step": 40
}
],
"logging_steps": 3,
"max_steps": 50,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 5,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 5.227717154832384e+16,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}
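
For context, the JSON above is a Transformers Trainer state snapshot: "log_history" interleaves training records (carrying "loss", "grad_norm", "learning_rate") with evaluation records (carrying "eval_loss" and runtime stats), while the top-level keys record the run schedule (logging_steps, eval_steps, save_steps, max_steps). A minimal Python sketch for inspecting such a file offline, assuming it is saved under the usual name trainer_state.json inside a checkpoint directory (the path below is an assumed example, not taken from this page):

import json

# Load the trainer state saved alongside a checkpoint
# (the path "checkpoint-40/trainer_state.json" is an assumed example).
with open("checkpoint-40/trainer_state.json") as f:
    state = json.load(f)

# Split log_history: training entries carry "loss", evaluation entries carry "eval_loss".
train_logs = [e for e in state["log_history"] if "loss" in e]
eval_logs = [e for e in state["log_history"] if "eval_loss" in e]

print(f"progress: step {state['global_step']} of {state['max_steps']}")
for e in eval_logs:
    print(f"step {e['step']:>3}  eval_loss {e['eval_loss']:.4f}")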
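
The logged learning_rate values are consistent with linear warmup followed by cosine decay. A short sketch that reproduces them, with the peak LR (5e-5), warmup length (10 steps), and total length (50 steps, matching max_steps) inferred from the logged values rather than read from any stored configuration:

import math

# Assumed schedule parameters, inferred from the log_history above.
PEAK_LR, WARMUP_STEPS, TOTAL_STEPS = 5e-5, 10, 50

def lr_at(step: int) -> float:
    # Linear warmup, then cosine decay to zero (the same shape as the
    # standard warmup-plus-cosine schedule in Transformers).
    if step < WARMUP_STEPS:
        return PEAK_LR * step / WARMUP_STEPS
    progress = (step - WARMUP_STEPS) / (TOTAL_STEPS - WARMUP_STEPS)
    return PEAK_LR * 0.5 * (1.0 + math.cos(math.pi * progress))

# Compare against the logged values, e.g. step 12 -> ~4.9692e-05.
for step in (3, 6, 9, 12, 15, 30):
    print(step, lr_at(step))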