Training in progress, step 50, checkpoint
{
"best_metric": 1.156433343887329,
"best_model_checkpoint": "miner_id_24/checkpoint-50",
"epoch": 0.078125,
"eval_steps": 25,
"global_step": 50,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0015625,
"grad_norm": 1.002217173576355,
"learning_rate": 0.0001,
"loss": 6.6339,
"step": 1
},
{
"epoch": 0.0015625,
"eval_loss": 1.3499351739883423,
"eval_runtime": 68.1466,
"eval_samples_per_second": 3.962,
"eval_steps_per_second": 1.981,
"step": 1
},
{
"epoch": 0.003125,
"grad_norm": 1.0105056762695312,
"learning_rate": 0.0002,
"loss": 3.3938,
"step": 2
},
{
"epoch": 0.0046875,
"grad_norm": 1.1939878463745117,
"learning_rate": 0.00019978589232386035,
"loss": 5.1615,
"step": 3
},
{
"epoch": 0.00625,
"grad_norm": 1.1158249378204346,
"learning_rate": 0.00019914448613738106,
"loss": 3.7772,
"step": 4
},
{
"epoch": 0.0078125,
"grad_norm": 1.0754586458206177,
"learning_rate": 0.00019807852804032305,
"loss": 3.8606,
"step": 5
},
{
"epoch": 0.009375,
"grad_norm": 1.1657280921936035,
"learning_rate": 0.00019659258262890683,
"loss": 3.4493,
"step": 6
},
{
"epoch": 0.0109375,
"grad_norm": 1.3628090620040894,
"learning_rate": 0.0001946930129495106,
"loss": 4.7123,
"step": 7
},
{
"epoch": 0.0125,
"grad_norm": 1.3325765132904053,
"learning_rate": 0.0001923879532511287,
"loss": 4.0295,
"step": 8
},
{
"epoch": 0.0140625,
"grad_norm": 1.3623121976852417,
"learning_rate": 0.00018968727415326884,
"loss": 4.2144,
"step": 9
},
{
"epoch": 0.015625,
"grad_norm": 1.3872919082641602,
"learning_rate": 0.00018660254037844388,
"loss": 4.359,
"step": 10
},
{
"epoch": 0.0171875,
"grad_norm": 1.6150164604187012,
"learning_rate": 0.00018314696123025454,
"loss": 4.2599,
"step": 11
},
{
"epoch": 0.01875,
"grad_norm": 1.5497463941574097,
"learning_rate": 0.00017933533402912354,
"loss": 4.1864,
"step": 12
},
{
"epoch": 0.0203125,
"grad_norm": 1.6500900983810425,
"learning_rate": 0.00017518398074789775,
"loss": 4.2355,
"step": 13
},
{
"epoch": 0.021875,
"grad_norm": 1.6816496849060059,
"learning_rate": 0.00017071067811865476,
"loss": 4.3307,
"step": 14
},
{
"epoch": 0.0234375,
"grad_norm": 1.6419248580932617,
"learning_rate": 0.00016593458151000688,
"loss": 3.7119,
"step": 15
},
{
"epoch": 0.025,
"grad_norm": 1.6070975065231323,
"learning_rate": 0.00016087614290087208,
"loss": 4.3371,
"step": 16
},
{
"epoch": 0.0265625,
"grad_norm": 1.6957802772521973,
"learning_rate": 0.00015555702330196023,
"loss": 4.449,
"step": 17
},
{
"epoch": 0.028125,
"grad_norm": 1.9621312618255615,
"learning_rate": 0.00015000000000000001,
"loss": 4.1207,
"step": 18
},
{
"epoch": 0.0296875,
"grad_norm": 2.0953187942504883,
"learning_rate": 0.00014422886902190014,
"loss": 5.4249,
"step": 19
},
{
"epoch": 0.03125,
"grad_norm": 1.8811029195785522,
"learning_rate": 0.000138268343236509,
"loss": 4.2354,
"step": 20
},
{
"epoch": 0.0328125,
"grad_norm": 1.602460503578186,
"learning_rate": 0.00013214394653031616,
"loss": 3.5258,
"step": 21
},
{
"epoch": 0.034375,
"grad_norm": 1.9910820722579956,
"learning_rate": 0.00012588190451025207,
"loss": 4.9889,
"step": 22
},
{
"epoch": 0.0359375,
"grad_norm": 1.5707694292068481,
"learning_rate": 0.00011950903220161285,
"loss": 3.8021,
"step": 23
},
{
"epoch": 0.0375,
"grad_norm": 2.191178560256958,
"learning_rate": 0.00011305261922200519,
"loss": 5.185,
"step": 24
},
{
"epoch": 0.0390625,
"grad_norm": 1.939286470413208,
"learning_rate": 0.00010654031292301432,
"loss": 3.4836,
"step": 25
},
{
"epoch": 0.0390625,
"eval_loss": 1.196044921875,
"eval_runtime": 68.9157,
"eval_samples_per_second": 3.918,
"eval_steps_per_second": 1.959,
"step": 25
},
{
"epoch": 0.040625,
"grad_norm": 2.3794100284576416,
"learning_rate": 0.0001,
"loss": 5.3229,
"step": 26
},
{
"epoch": 0.0421875,
"grad_norm": 2.2176098823547363,
"learning_rate": 9.345968707698569e-05,
"loss": 5.1244,
"step": 27
},
{
"epoch": 0.04375,
"grad_norm": 2.5599007606506348,
"learning_rate": 8.694738077799488e-05,
"loss": 4.9215,
"step": 28
},
{
"epoch": 0.0453125,
"grad_norm": 2.7694389820098877,
"learning_rate": 8.049096779838719e-05,
"loss": 5.3148,
"step": 29
},
{
"epoch": 0.046875,
"grad_norm": 2.875401258468628,
"learning_rate": 7.411809548974792e-05,
"loss": 5.9734,
"step": 30
},
{
"epoch": 0.0484375,
"grad_norm": 2.8313703536987305,
"learning_rate": 6.785605346968386e-05,
"loss": 5.1775,
"step": 31
},
{
"epoch": 0.05,
"grad_norm": 2.8203907012939453,
"learning_rate": 6.173165676349103e-05,
"loss": 5.5451,
"step": 32
},
{
"epoch": 0.0515625,
"grad_norm": 2.829041004180908,
"learning_rate": 5.577113097809989e-05,
"loss": 5.8053,
"step": 33
},
{
"epoch": 0.053125,
"grad_norm": 2.9791057109832764,
"learning_rate": 5.000000000000002e-05,
"loss": 4.9274,
"step": 34
},
{
"epoch": 0.0546875,
"grad_norm": 3.606865406036377,
"learning_rate": 4.444297669803981e-05,
"loss": 5.321,
"step": 35
},
{
"epoch": 0.05625,
"grad_norm": 3.3906495571136475,
"learning_rate": 3.9123857099127936e-05,
"loss": 4.4049,
"step": 36
},
{
"epoch": 0.0578125,
"grad_norm": 4.231806755065918,
"learning_rate": 3.406541848999312e-05,
"loss": 5.6204,
"step": 37
},
{
"epoch": 0.059375,
"grad_norm": 5.441033363342285,
"learning_rate": 2.9289321881345254e-05,
"loss": 4.8918,
"step": 38
},
{
"epoch": 0.0609375,
"grad_norm": 3.8367645740509033,
"learning_rate": 2.4816019252102273e-05,
"loss": 5.9264,
"step": 39
},
{
"epoch": 0.0625,
"grad_norm": 3.5314066410064697,
"learning_rate": 2.0664665970876496e-05,
"loss": 4.2635,
"step": 40
},
{
"epoch": 0.0640625,
"grad_norm": 4.272062301635742,
"learning_rate": 1.6853038769745467e-05,
"loss": 4.6596,
"step": 41
},
{
"epoch": 0.065625,
"grad_norm": 3.7861499786376953,
"learning_rate": 1.339745962155613e-05,
"loss": 4.0068,
"step": 42
},
{
"epoch": 0.0671875,
"grad_norm": 4.380683422088623,
"learning_rate": 1.0312725846731175e-05,
"loss": 5.556,
"step": 43
},
{
"epoch": 0.06875,
"grad_norm": 5.555474281311035,
"learning_rate": 7.612046748871327e-06,
"loss": 5.1522,
"step": 44
},
{
"epoch": 0.0703125,
"grad_norm": 7.23276424407959,
"learning_rate": 5.306987050489442e-06,
"loss": 5.7147,
"step": 45
},
{
"epoch": 0.071875,
"grad_norm": 5.7143120765686035,
"learning_rate": 3.40741737109318e-06,
"loss": 6.1734,
"step": 46
},
{
"epoch": 0.0734375,
"grad_norm": 8.563957214355469,
"learning_rate": 1.921471959676957e-06,
"loss": 4.2151,
"step": 47
},
{
"epoch": 0.075,
"grad_norm": 6.835947036743164,
"learning_rate": 8.555138626189618e-07,
"loss": 6.057,
"step": 48
},
{
"epoch": 0.0765625,
"grad_norm": 8.4927978515625,
"learning_rate": 2.141076761396521e-07,
"loss": 6.0049,
"step": 49
},
{
"epoch": 0.078125,
"grad_norm": 13.51700210571289,
"learning_rate": 0.0,
"loss": 8.1939,
"step": 50
},
{
"epoch": 0.078125,
"eval_loss": 1.156433343887329,
"eval_runtime": 68.938,
"eval_samples_per_second": 3.917,
"eval_steps_per_second": 1.958,
"step": 50
}
],
"logging_steps": 1,
"max_steps": 50,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 25,
"stateful_callbacks": {
"EarlyStoppingCallback": {
"args": {
"early_stopping_patience": 1,
"early_stopping_threshold": 0.0
},
"attributes": {
"early_stopping_patience_counter": 0
}
},
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 6.53458201903104e+16,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}
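
Below is a minimal Python sketch of how a file like this could be inspected, assuming it is saved under the standard name trainer_state.json inside the checkpoint directory named in best_model_checkpoint (miner_id_24/checkpoint-50); the path and printing choices here are illustrative, not part of the checkpoint itself.

import json

# Assumption: the JSON above is the trainer_state.json that
# transformers.Trainer writes alongside the checkpoint weights.
with open("miner_id_24/checkpoint-50/trainer_state.json") as f:
    state = json.load(f)

# log_history mixes per-step training records (with "loss") and
# evaluation records (with "eval_loss"); split them apart.
train_log = [e for e in state["log_history"] if "loss" in e]
eval_log = [e for e in state["log_history"] if "eval_loss" in e]

print(f"best eval_loss: {state['best_metric']:.6f} "
      f"at {state['best_model_checkpoint']}")
for e in eval_log:
    print(f"step {e['step']:>3}: eval_loss={e['eval_loss']:.4f}")

Run against this checkpoint, the loop would report the two evaluation points logged every eval_steps=25 (1.196 at step 25 and 1.156 at step 50), matching best_metric.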