{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.01905366783105748,
  "eval_steps": 8,
  "global_step": 30,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0006351222610352493,
      "eval_loss": 11.5,
      "eval_runtime": 6.9997,
      "eval_samples_per_second": 94.719,
      "eval_steps_per_second": 47.431,
      "step": 1
    },
    {
      "epoch": 0.001905366783105748,
      "grad_norm": 2.719556141528301e-05,
      "learning_rate": 0.00012,
      "loss": 46.0,
      "step": 3
    },
    {
      "epoch": 0.003810733566211496,
      "grad_norm": 2.3012857127469033e-05,
      "learning_rate": 0.0001992114701314478,
      "loss": 46.0,
      "step": 6
    },
    {
      "epoch": 0.0050809780882819944,
      "eval_loss": 11.5,
      "eval_runtime": 7.2166,
      "eval_samples_per_second": 91.872,
      "eval_steps_per_second": 46.005,
      "step": 8
    },
    {
      "epoch": 0.0057161003493172435,
      "grad_norm": 3.317999289720319e-05,
      "learning_rate": 0.00018763066800438636,
      "loss": 46.0,
      "step": 9
    },
    {
      "epoch": 0.007621467132422992,
      "grad_norm": 2.1859506887267344e-05,
      "learning_rate": 0.000163742398974869,
      "loss": 46.0,
      "step": 12
    },
    {
      "epoch": 0.00952683391552874,
      "grad_norm": 3.495635974104516e-05,
      "learning_rate": 0.00013090169943749476,
      "loss": 46.0,
      "step": 15
    },
    {
      "epoch": 0.010161956176563989,
      "eval_loss": 11.5,
      "eval_runtime": 7.2877,
      "eval_samples_per_second": 90.975,
      "eval_steps_per_second": 45.556,
      "step": 16
    },
    {
      "epoch": 0.011432200698634487,
      "grad_norm": 5.0297352572670206e-05,
      "learning_rate": 9.372094804706867e-05,
      "loss": 46.0,
      "step": 18
    },
    {
      "epoch": 0.013337567481740234,
      "grad_norm": 4.19372008764185e-05,
      "learning_rate": 5.7422070843492734e-05,
      "loss": 46.0,
      "step": 21
    },
    {
      "epoch": 0.015242934264845983,
      "grad_norm": 3.232848030165769e-05,
      "learning_rate": 2.7103137257858868e-05,
      "loss": 46.0,
      "step": 24
    },
    {
      "epoch": 0.015242934264845983,
      "eval_loss": 11.5,
      "eval_runtime": 7.0877,
      "eval_samples_per_second": 93.542,
      "eval_steps_per_second": 46.842,
      "step": 24
    },
    {
      "epoch": 0.01714830104795173,
      "grad_norm": 4.935485776513815e-05,
      "learning_rate": 7.022351411174866e-06,
      "loss": 46.0,
      "step": 27
    },
    {
      "epoch": 0.01905366783105748,
      "grad_norm": 2.9111788535374217e-05,
      "learning_rate": 0.0,
      "loss": 46.0,
      "step": 30
    }
  ],
  "logging_steps": 3,
  "max_steps": 30,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 5,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 1196882657280.0,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}
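
Below is a minimal Python sketch for inspecting the state file above; it is not part of the checkpoint itself. It assumes the JSON has been saved locally as checkpoint-30/trainer_state.json (a hypothetical path) and uses only the standard library to separate the training-loss records from the evaluation records in log_history.

import json

# Hypothetical path to the trainer state shown above; adjust to your local copy.
STATE_PATH = "checkpoint-30/trainer_state.json"

with open(STATE_PATH) as f:
    state = json.load(f)

print(f"global_step={state['global_step']}  epoch={state['epoch']:.5f}  "
      f"max_steps={state['max_steps']}")

# Training log entries carry "loss"; evaluation entries carry "eval_loss".
train_logs = [e for e in state["log_history"] if "loss" in e]
eval_logs = [e for e in state["log_history"] if "eval_loss" in e]

for e in train_logs:
    print(f"step {e['step']:>3}: loss={e['loss']}  lr={e['learning_rate']:.2e}  "
          f"grad_norm={e['grad_norm']:.2e}")

for e in eval_logs:
    print(f"step {e['step']:>3}: eval_loss={e['eval_loss']}  "
          f"({e['eval_samples_per_second']:.1f} eval samples/s)")

If the surrounding checkpoint-30 directory also holds the usual Trainer artifacts (weights, optimizer and scheduler state), the run can presumably be resumed with trainer.train(resume_from_checkpoint="checkpoint-30"), although should_training_stop is already true here because max_steps (30) has been reached.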