{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.05877366487708392,
"eval_steps": 13,
"global_step": 52,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0011302627860977678,
"eval_loss": 0.7253943681716919,
"eval_runtime": 222.5674,
"eval_samples_per_second": 6.695,
"eval_steps_per_second": 0.84,
"step": 1
},
{
"epoch": 0.003390788358293303,
"grad_norm": 6.4600725173950195,
"learning_rate": 1.5e-05,
"loss": 2.9057,
"step": 3
},
{
"epoch": 0.006781576716586606,
"grad_norm": 4.287564277648926,
"learning_rate": 3e-05,
"loss": 2.7228,
"step": 6
},
{
"epoch": 0.010172365074879909,
"grad_norm": 4.434852600097656,
"learning_rate": 4.5e-05,
"loss": 2.2412,
"step": 9
},
{
"epoch": 0.013563153433173212,
"grad_norm": 4.096804618835449,
"learning_rate": 4.997482666353287e-05,
"loss": 1.6976,
"step": 12
},
{
"epoch": 0.01469341621927098,
"eval_loss": 0.281572163105011,
"eval_runtime": 224.6256,
"eval_samples_per_second": 6.633,
"eval_steps_per_second": 0.832,
"step": 13
},
{
"epoch": 0.016953941791466517,
"grad_norm": 3.934027910232544,
"learning_rate": 4.984280524733107e-05,
"loss": 1.2107,
"step": 15
},
{
"epoch": 0.020344730149759818,
"grad_norm": 2.3636763095855713,
"learning_rate": 4.959823971496574e-05,
"loss": 0.8397,
"step": 18
},
{
"epoch": 0.02373551850805312,
"grad_norm": 1.3671884536743164,
"learning_rate": 4.9242238009417175e-05,
"loss": 0.7578,
"step": 21
},
{
"epoch": 0.027126306866346424,
"grad_norm": 1.3218061923980713,
"learning_rate": 4.877641290737884e-05,
"loss": 0.6645,
"step": 24
},
{
"epoch": 0.02938683243854196,
"eval_loss": 0.16004851460456848,
"eval_runtime": 224.8305,
"eval_samples_per_second": 6.627,
"eval_steps_per_second": 0.832,
"step": 26
},
{
"epoch": 0.030517095224639728,
"grad_norm": 1.757283091545105,
"learning_rate": 4.820287471297598e-05,
"loss": 0.6531,
"step": 27
},
{
"epoch": 0.033907883582933035,
"grad_norm": 1.2630192041397095,
"learning_rate": 4.752422169756048e-05,
"loss": 0.616,
"step": 30
},
{
"epoch": 0.03729867194122634,
"grad_norm": 1.2325754165649414,
"learning_rate": 4.674352832889239e-05,
"loss": 0.6141,
"step": 33
},
{
"epoch": 0.040689460299519635,
"grad_norm": 1.5199618339538574,
"learning_rate": 4.586433134303257e-05,
"loss": 0.6026,
"step": 36
},
{
"epoch": 0.04408024865781294,
"grad_norm": 1.2060375213623047,
"learning_rate": 4.489061372204453e-05,
"loss": 0.6114,
"step": 39
},
{
"epoch": 0.04408024865781294,
"eval_loss": 0.14285726845264435,
"eval_runtime": 224.8051,
"eval_samples_per_second": 6.628,
"eval_steps_per_second": 0.832,
"step": 39
},
{
"epoch": 0.04747103701610624,
"grad_norm": 1.2959660291671753,
"learning_rate": 4.382678665009028e-05,
"loss": 0.576,
"step": 42
},
{
"epoch": 0.050861825374399545,
"grad_norm": 1.1517972946166992,
"learning_rate": 4.267766952966369e-05,
"loss": 0.5597,
"step": 45
},
{
"epoch": 0.05425261373269285,
"grad_norm": 1.2231593132019043,
"learning_rate": 4.144846814849282e-05,
"loss": 0.5803,
"step": 48
},
{
"epoch": 0.05764340209098615,
"grad_norm": 1.1372201442718506,
"learning_rate": 4.01447510960205e-05,
"loss": 0.5299,
"step": 51
},
{
"epoch": 0.05877366487708392,
"eval_loss": 0.13713335990905762,
"eval_runtime": 224.8683,
"eval_samples_per_second": 6.626,
"eval_steps_per_second": 0.832,
"step": 52
}
],
"logging_steps": 3,
"max_steps": 150,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 13,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 1.462511032294441e+17,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}
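
The JSON above is a Hugging Face Trainer state snapshot, so it can be inspected directly to follow the training run, for example to list the evaluation-loss curve recorded in log_history. Below is a minimal sketch of how one might do that; the file path "checkpoint-52/trainer_state.json" is an assumption about where this checkpoint's state file lives, while the field names (log_history, loss, eval_loss, step, global_step, max_steps) are taken from the file itself.

import json

# Assumed path; adjust to wherever this checkpoint's trainer_state.json is stored.
with open("checkpoint-52/trainer_state.json") as f:
    state = json.load(f)

# Split log_history into training-loss entries and evaluation entries.
train_log = [e for e in state["log_history"] if "loss" in e]
eval_log = [e for e in state["log_history"] if "eval_loss" in e]

print(f"progress: step {state['global_step']} of {state['max_steps']}")
for entry in eval_log:
    print(f"step {entry['step']:>3}: eval_loss = {entry['eval_loss']:.4f}")

Run against this file, the loop would print the four evaluation points logged every 13 steps (eval_loss falling from roughly 0.73 at step 1 to about 0.137 at step 52), which is a quick way to confirm the run is still improving before deciding whether to keep training toward max_steps.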