{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.001886080724254998,
  "eval_steps": 3,
  "global_step": 10,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0001886080724254998,
      "grad_norm": 11.620072364807129,
      "learning_rate": 2e-05,
      "loss": 3.832,
      "step": 1
    },
    {
      "epoch": 0.0001886080724254998,
      "eval_loss": 1.0258384943008423,
      "eval_runtime": 72.4949,
      "eval_samples_per_second": 30.802,
      "eval_steps_per_second": 15.408,
      "step": 1
    },
    {
      "epoch": 0.0003772161448509996,
      "grad_norm": 9.528853416442871,
      "learning_rate": 4e-05,
      "loss": 3.3093,
      "step": 2
    },
    {
      "epoch": 0.0005658242172764995,
      "grad_norm": 9.460070610046387,
      "learning_rate": 6e-05,
      "loss": 3.2833,
      "step": 3
    },
    {
      "epoch": 0.0005658242172764995,
      "eval_loss": 1.0034009218215942,
      "eval_runtime": 70.92,
      "eval_samples_per_second": 31.486,
      "eval_steps_per_second": 15.75,
      "step": 3
    },
    {
      "epoch": 0.0007544322897019992,
      "grad_norm": 9.758016586303711,
      "learning_rate": 8e-05,
      "loss": 3.4399,
      "step": 4
    },
    {
      "epoch": 0.000943040362127499,
      "grad_norm": 19.836227416992188,
      "learning_rate": 0.0001,
      "loss": 4.7794,
      "step": 5
    },
    {
      "epoch": 0.001131648434552999,
      "grad_norm": 12.299468040466309,
      "learning_rate": 0.00012,
      "loss": 3.0613,
      "step": 6
    },
    {
      "epoch": 0.001131648434552999,
      "eval_loss": 0.9074329733848572,
      "eval_runtime": 68.176,
      "eval_samples_per_second": 32.753,
      "eval_steps_per_second": 16.384,
      "step": 6
    },
    {
      "epoch": 0.0013202565069784986,
      "grad_norm": 18.84109115600586,
      "learning_rate": 0.00014,
      "loss": 4.0897,
      "step": 7
    },
    {
      "epoch": 0.0015088645794039985,
      "grad_norm": 12.41354751586914,
      "learning_rate": 0.00016,
      "loss": 2.6245,
      "step": 8
    },
    {
      "epoch": 0.0016974726518294984,
      "grad_norm": 9.773690223693848,
      "learning_rate": 0.00018,
      "loss": 2.8458,
      "step": 9
    },
    {
      "epoch": 0.0016974726518294984,
      "eval_loss": 0.7293293476104736,
      "eval_runtime": 70.9246,
      "eval_samples_per_second": 31.484,
      "eval_steps_per_second": 15.749,
      "step": 9
    },
    {
      "epoch": 0.001886080724254998,
      "grad_norm": 13.274161338806152,
      "learning_rate": 0.0002,
      "loss": 2.9545,
      "step": 10
    }
  ],
  "logging_steps": 1,
  "max_steps": 10,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 3,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 1752671799214080.0,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}