{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.01576320168140818,
"eval_steps": 9,
"global_step": 45,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0003502933706979595,
"eval_loss": 1.2642643451690674,
"eval_runtime": 170.7468,
"eval_samples_per_second": 28.159,
"eval_steps_per_second": 3.52,
"step": 1
},
{
"epoch": 0.0010508801120938786,
"grad_norm": 0.26421603560447693,
"learning_rate": 3e-05,
"loss": 1.2586,
"step": 3
},
{
"epoch": 0.002101760224187757,
"grad_norm": 0.2573237419128418,
"learning_rate": 6e-05,
"loss": 1.2507,
"step": 6
},
{
"epoch": 0.0031526403362816357,
"grad_norm": 0.23207610845565796,
"learning_rate": 9e-05,
"loss": 1.2601,
"step": 9
},
{
"epoch": 0.0031526403362816357,
"eval_loss": 1.229485034942627,
"eval_runtime": 172.5126,
"eval_samples_per_second": 27.87,
"eval_steps_per_second": 3.484,
"step": 9
},
{
"epoch": 0.004203520448375514,
"grad_norm": 0.2796209156513214,
"learning_rate": 9.987820251299122e-05,
"loss": 1.2384,
"step": 12
},
{
"epoch": 0.005254400560469393,
"grad_norm": 0.28329601883888245,
"learning_rate": 9.924038765061042e-05,
"loss": 1.2393,
"step": 15
},
{
"epoch": 0.0063052806725632715,
"grad_norm": 0.22264641523361206,
"learning_rate": 9.806308479691595e-05,
"loss": 1.1474,
"step": 18
},
{
"epoch": 0.0063052806725632715,
"eval_loss": 1.1840261220932007,
"eval_runtime": 172.502,
"eval_samples_per_second": 27.872,
"eval_steps_per_second": 3.484,
"step": 18
},
{
"epoch": 0.0073561607846571505,
"grad_norm": 0.23280549049377441,
"learning_rate": 9.635919272833938e-05,
"loss": 1.1913,
"step": 21
},
{
"epoch": 0.008407040896751029,
"grad_norm": 0.2081017643213272,
"learning_rate": 9.414737964294636e-05,
"loss": 1.1646,
"step": 24
},
{
"epoch": 0.009457921008844907,
"grad_norm": 0.22857177257537842,
"learning_rate": 9.145187862775209e-05,
"loss": 1.1644,
"step": 27
},
{
"epoch": 0.009457921008844907,
"eval_loss": 1.1528327465057373,
"eval_runtime": 172.5822,
"eval_samples_per_second": 27.859,
"eval_steps_per_second": 3.482,
"step": 27
},
{
"epoch": 0.010508801120938787,
"grad_norm": 0.21918490529060364,
"learning_rate": 8.83022221559489e-05,
"loss": 1.1566,
"step": 30
},
{
"epoch": 0.011559681233032665,
"grad_norm": 0.21285656094551086,
"learning_rate": 8.473291852294987e-05,
"loss": 1.1424,
"step": 33
},
{
"epoch": 0.012610561345126543,
"grad_norm": 0.2332816869020462,
"learning_rate": 8.07830737662829e-05,
"loss": 1.1502,
"step": 36
},
{
"epoch": 0.012610561345126543,
"eval_loss": 1.129488468170166,
"eval_runtime": 172.61,
"eval_samples_per_second": 27.855,
"eval_steps_per_second": 3.482,
"step": 36
},
{
"epoch": 0.013661441457220423,
"grad_norm": 0.23356926441192627,
"learning_rate": 7.649596321166024e-05,
"loss": 1.1295,
"step": 39
},
{
"epoch": 0.014712321569314301,
"grad_norm": 0.21442051231861115,
"learning_rate": 7.191855733945387e-05,
"loss": 1.1046,
"step": 42
},
{
"epoch": 0.01576320168140818,
"grad_norm": 0.23844681680202484,
"learning_rate": 6.710100716628344e-05,
"loss": 1.0832,
"step": 45
},
{
"epoch": 0.01576320168140818,
"eval_loss": 1.1117569208145142,
"eval_runtime": 172.5645,
"eval_samples_per_second": 27.862,
"eval_steps_per_second": 3.483,
"step": 45
}
],
"logging_steps": 3,
"max_steps": 100,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 9,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 1.852863789662208e+16,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}