Training in progress, step 41, checkpoint (commit 97c846b, verified)
{
"best_metric": 0.7650073170661926,
"best_model_checkpoint": "miner_id_24/checkpoint-25",
"epoch": 3.1069767441860465,
"eval_steps": 25,
"global_step": 41,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.07441860465116279,
"grad_norm": 26.82486915588379,
"learning_rate": 5e-05,
"loss": 15.5404,
"step": 1
},
{
"epoch": 0.07441860465116279,
"eval_loss": 1.1030958890914917,
"eval_runtime": 4.0595,
"eval_samples_per_second": 12.317,
"eval_steps_per_second": 3.202,
"step": 1
},
{
"epoch": 0.14883720930232558,
"grad_norm": 32.55805969238281,
"learning_rate": 0.0001,
"loss": 17.9188,
"step": 2
},
{
"epoch": 0.22325581395348837,
"grad_norm": 23.376680374145508,
"learning_rate": 9.985407886603945e-05,
"loss": 17.5531,
"step": 3
},
{
"epoch": 0.29767441860465116,
"grad_norm": 15.263604164123535,
"learning_rate": 9.941726181870608e-05,
"loss": 14.8715,
"step": 4
},
{
"epoch": 0.37209302325581395,
"grad_norm": 13.026528358459473,
"learning_rate": 9.869238178417235e-05,
"loss": 14.1927,
"step": 5
},
{
"epoch": 0.44651162790697674,
"grad_norm": 11.496667861938477,
"learning_rate": 9.768413988762156e-05,
"loss": 14.7444,
"step": 6
},
{
"epoch": 0.5209302325581395,
"grad_norm": 8.687058448791504,
"learning_rate": 9.639907496464709e-05,
"loss": 13.8784,
"step": 7
},
{
"epoch": 0.5953488372093023,
"grad_norm": 7.8401970863342285,
"learning_rate": 9.484552115439445e-05,
"loss": 13.6697,
"step": 8
},
{
"epoch": 0.6697674418604651,
"grad_norm": 7.2838263511657715,
"learning_rate": 9.303355384947076e-05,
"loss": 13.5577,
"step": 9
},
{
"epoch": 0.7441860465116279,
"grad_norm": 7.735969066619873,
"learning_rate": 9.097492435315756e-05,
"loss": 12.9802,
"step": 10
},
{
"epoch": 0.8186046511627907,
"grad_norm": 6.482202053070068,
"learning_rate": 8.868298366769954e-05,
"loss": 13.1186,
"step": 11
},
{
"epoch": 0.8930232558139535,
"grad_norm": 6.737044811248779,
"learning_rate": 8.617259590793198e-05,
"loss": 13.2019,
"step": 12
},
{
"epoch": 0.9674418604651163,
"grad_norm": 8.719466209411621,
"learning_rate": 8.346004190179198e-05,
"loss": 13.6937,
"step": 13
},
{
"epoch": 1.0604651162790697,
"grad_norm": 5.885293483734131,
"learning_rate": 8.056291360290201e-05,
"loss": 11.8573,
"step": 14
},
{
"epoch": 1.1348837209302325,
"grad_norm": 7.148478031158447,
"learning_rate": 7.75e-05,
"loss": 12.4117,
"step": 15
},
{
"epoch": 1.2093023255813953,
"grad_norm": 7.730895519256592,
"learning_rate": 7.429116526313744e-05,
"loss": 12.0587,
"step": 16
},
{
"epoch": 1.283720930232558,
"grad_norm": 7.085984230041504,
"learning_rate": 7.095721991691411e-05,
"loss": 11.7461,
"step": 17
},
{
"epoch": 1.3581395348837209,
"grad_norm": 6.469642639160156,
"learning_rate": 6.751978587624037e-05,
"loss": 11.8886,
"step": 18
},
{
"epoch": 1.4325581395348836,
"grad_norm": 6.971822738647461,
"learning_rate": 6.400115621992201e-05,
"loss": 11.9513,
"step": 19
},
{
"epoch": 1.5069767441860464,
"grad_norm": 6.892801284790039,
"learning_rate": 6.042415061148954e-05,
"loss": 11.5344,
"step": 20
},
{
"epoch": 1.5813953488372094,
"grad_norm": 5.958037853240967,
"learning_rate": 5.681196730492368e-05,
"loss": 11.6245,
"step": 21
},
{
"epoch": 1.655813953488372,
"grad_norm": 6.668700218200684,
"learning_rate": 5.318803269507634e-05,
"loss": 11.5543,
"step": 22
},
{
"epoch": 1.730232558139535,
"grad_norm": 7.723108291625977,
"learning_rate": 4.9575849388510473e-05,
"loss": 11.489,
"step": 23
},
{
"epoch": 1.8046511627906976,
"grad_norm": 5.082211494445801,
"learning_rate": 4.599884378007802e-05,
"loss": 11.2579,
"step": 24
},
{
"epoch": 1.8790697674418606,
"grad_norm": 6.550709247589111,
"learning_rate": 4.248021412375963e-05,
"loss": 11.8115,
"step": 25
},
{
"epoch": 1.8790697674418606,
"eval_loss": 0.7650073170661926,
"eval_runtime": 3.3463,
"eval_samples_per_second": 14.942,
"eval_steps_per_second": 3.885,
"step": 25
},
{
"epoch": 1.9534883720930232,
"grad_norm": 7.476520538330078,
"learning_rate": 3.904278008308589e-05,
"loss": 11.5912,
"step": 26
},
{
"epoch": 2.046511627906977,
"grad_norm": 5.527069091796875,
"learning_rate": 3.570883473686256e-05,
"loss": 11.2094,
"step": 27
},
{
"epoch": 2.1209302325581394,
"grad_norm": 5.50228214263916,
"learning_rate": 3.250000000000001e-05,
"loss": 11.0862,
"step": 28
},
{
"epoch": 2.1953488372093024,
"grad_norm": 6.5125651359558105,
"learning_rate": 2.9437086397097995e-05,
"loss": 10.4032,
"step": 29
},
{
"epoch": 2.269767441860465,
"grad_norm": 6.218883514404297,
"learning_rate": 2.6539958098208027e-05,
"loss": 10.4676,
"step": 30
},
{
"epoch": 2.344186046511628,
"grad_norm": 5.223501205444336,
"learning_rate": 2.3827404092068032e-05,
"loss": 10.3137,
"step": 31
},
{
"epoch": 2.4186046511627906,
"grad_norm": 6.159942150115967,
"learning_rate": 2.1317016332300447e-05,
"loss": 10.5853,
"step": 32
},
{
"epoch": 2.4930232558139536,
"grad_norm": 6.657078266143799,
"learning_rate": 1.902507564684246e-05,
"loss": 10.3025,
"step": 33
},
{
"epoch": 2.567441860465116,
"grad_norm": 5.459758758544922,
"learning_rate": 1.6966446150529244e-05,
"loss": 10.4436,
"step": 34
},
{
"epoch": 2.641860465116279,
"grad_norm": 6.186044692993164,
"learning_rate": 1.515447884560556e-05,
"loss": 10.5271,
"step": 35
},
{
"epoch": 2.7162790697674417,
"grad_norm": 7.22088098526001,
"learning_rate": 1.3600925035352913e-05,
"loss": 10.1892,
"step": 36
},
{
"epoch": 2.7906976744186047,
"grad_norm": 5.603003025054932,
"learning_rate": 1.2315860112378455e-05,
"loss": 10.7062,
"step": 37
},
{
"epoch": 2.8651162790697673,
"grad_norm": 5.911949157714844,
"learning_rate": 1.130761821582766e-05,
"loss": 10.5918,
"step": 38
},
{
"epoch": 2.9395348837209303,
"grad_norm": 6.643774032592773,
"learning_rate": 1.0582738181293923e-05,
"loss": 10.518,
"step": 39
},
{
"epoch": 3.0325581395348835,
"grad_norm": 5.792553424835205,
"learning_rate": 1.0145921133960554e-05,
"loss": 10.4178,
"step": 40
},
{
"epoch": 3.1069767441860465,
"grad_norm": 5.410275936126709,
"learning_rate": 1e-05,
"loss": 10.3721,
"step": 41
}
],
"logging_steps": 1,
"max_steps": 41,
"num_input_tokens_seen": 0,
"num_train_epochs": 4,
"save_steps": 25,
"stateful_callbacks": {
"EarlyStoppingCallback": {
"args": {
"early_stopping_patience": 1,
"early_stopping_threshold": 0.0
},
"attributes": {
"early_stopping_patience_counter": 0
}
},
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 4.63958278020268e+17,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}
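
A minimal sketch of how one might inspect this state file with plain Python, assuming it is saved as trainer_state.json inside the checkpoint directory (e.g. miner_id_24/checkpoint-41/; that path is illustrative, not taken from this file):

import json

# Load the Trainer state written alongside the checkpoint (path is an assumption).
with open("trainer_state.json") as f:
    state = json.load(f)

# Best evaluation loss and the checkpoint that produced it, as recorded above.
print(state["best_metric"])            # 0.7650073170661926
print(state["best_model_checkpoint"])  # miner_id_24/checkpoint-25

# Separate the evaluation entries from the per-step training entries.
evals = [e for e in state["log_history"] if "eval_loss" in e]
for e in evals:
    print(e["step"], e["eval_loss"])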