{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.098015192354815,
  "eval_steps": 50,
  "global_step": 200,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.000490075961774075,
      "eval_loss": NaN,
      "eval_runtime": 20.257,
      "eval_samples_per_second": 21.227,
      "eval_steps_per_second": 10.614,
      "step": 1
    },
    {
      "epoch": 0.00490075961774075,
      "grad_norm": NaN,
      "learning_rate": 0.0002,
      "loss": 0.0,
      "step": 10
    },
    {
      "epoch": 0.0098015192354815,
      "grad_norm": NaN,
      "learning_rate": 0.0002,
      "loss": 0.0,
      "step": 20
    },
    {
      "epoch": 0.014702278853222249,
      "grad_norm": NaN,
      "learning_rate": 0.0002,
      "loss": 0.0,
      "step": 30
    },
    {
      "epoch": 0.019603038470963,
      "grad_norm": NaN,
      "learning_rate": 0.0002,
      "loss": 0.0,
      "step": 40
    },
    {
      "epoch": 0.02450379808870375,
      "grad_norm": NaN,
      "learning_rate": 0.0002,
      "loss": 0.0,
      "step": 50
    },
    {
      "epoch": 0.02450379808870375,
      "eval_loss": NaN,
      "eval_runtime": 19.2564,
      "eval_samples_per_second": 22.33,
      "eval_steps_per_second": 11.165,
      "step": 50
    },
    {
      "epoch": 0.029404557706444498,
      "grad_norm": NaN,
      "learning_rate": 0.0002,
      "loss": 0.0,
      "step": 60
    },
    {
      "epoch": 0.03430531732418525,
      "grad_norm": NaN,
      "learning_rate": 0.0002,
      "loss": 0.1023,
      "step": 70
    },
    {
      "epoch": 0.039206076941926,
      "grad_norm": NaN,
      "learning_rate": 0.0002,
      "loss": 2.9289,
      "step": 80
    },
    {
      "epoch": 0.04410683655966675,
      "grad_norm": NaN,
      "learning_rate": 0.0002,
      "loss": 0.0,
      "step": 90
    },
    {
      "epoch": 0.0490075961774075,
      "grad_norm": NaN,
      "learning_rate": 0.0002,
      "loss": 0.0,
      "step": 100
    },
    {
      "epoch": 0.0490075961774075,
      "eval_loss": NaN,
      "eval_runtime": 19.2916,
      "eval_samples_per_second": 22.29,
      "eval_steps_per_second": 11.145,
      "step": 100
    },
    {
      "epoch": 0.05390835579514825,
      "grad_norm": NaN,
      "learning_rate": 0.0002,
      "loss": 0.0,
      "step": 110
    },
    {
      "epoch": 0.058809115412888996,
      "grad_norm": NaN,
      "learning_rate": 0.0002,
      "loss": 0.0,
      "step": 120
    },
    {
      "epoch": 0.06370987503062975,
      "grad_norm": NaN,
      "learning_rate": 0.0002,
      "loss": 0.0,
      "step": 130
    },
    {
      "epoch": 0.0686106346483705,
      "grad_norm": NaN,
      "learning_rate": 0.0002,
      "loss": 0.0,
      "step": 140
    },
    {
      "epoch": 0.07351139426611125,
      "grad_norm": NaN,
      "learning_rate": 0.0002,
      "loss": 0.0,
      "step": 150
    },
    {
      "epoch": 0.07351139426611125,
      "eval_loss": NaN,
      "eval_runtime": 19.2392,
      "eval_samples_per_second": 22.35,
      "eval_steps_per_second": 11.175,
      "step": 150
    },
    {
      "epoch": 0.078412153883852,
      "grad_norm": NaN,
      "learning_rate": 0.0002,
      "loss": 0.0,
      "step": 160
    },
    {
      "epoch": 0.08331291350159274,
      "grad_norm": NaN,
      "learning_rate": 0.0002,
      "loss": 0.0,
      "step": 170
    },
    {
      "epoch": 0.0882136731193335,
      "grad_norm": NaN,
      "learning_rate": 0.0002,
      "loss": 0.0,
      "step": 180
    },
    {
      "epoch": 0.09311443273707425,
      "grad_norm": NaN,
      "learning_rate": 0.0002,
      "loss": 0.0,
      "step": 190
    },
    {
      "epoch": 0.098015192354815,
      "grad_norm": NaN,
      "learning_rate": 0.0002,
      "loss": 0.0,
      "step": 200
    },
    {
      "epoch": 0.098015192354815,
      "eval_loss": NaN,
      "eval_runtime": 19.2339,
      "eval_samples_per_second": 22.356,
      "eval_steps_per_second": 11.178,
      "step": 200
    }
  ],
  "logging_steps": 10,
  "max_steps": 200,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 50,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 2.659940467684147e+16,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}