Training in progress, step 50, checkpoint (commit 7252e9f, verified)
{
"best_metric": 1.7351869344711304,
"best_model_checkpoint": "miner_id_24/checkpoint-50",
"epoch": 0.4784688995215311,
"eval_steps": 50,
"global_step": 50,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.009569377990430622,
"grad_norm": 0.38082700967788696,
"learning_rate": 8.000000000000001e-06,
"loss": 1.6292,
"step": 1
},
{
"epoch": 0.009569377990430622,
"eval_loss": 1.831426739692688,
"eval_runtime": 5.8102,
"eval_samples_per_second": 30.464,
"eval_steps_per_second": 7.745,
"step": 1
},
{
"epoch": 0.019138755980861243,
"grad_norm": 0.3949052691459656,
"learning_rate": 1.6000000000000003e-05,
"loss": 1.5218,
"step": 2
},
{
"epoch": 0.028708133971291867,
"grad_norm": 0.4035767614841461,
"learning_rate": 2.4e-05,
"loss": 1.5025,
"step": 3
},
{
"epoch": 0.03827751196172249,
"grad_norm": 0.41593310236930847,
"learning_rate": 3.2000000000000005e-05,
"loss": 1.5595,
"step": 4
},
{
"epoch": 0.04784688995215311,
"grad_norm": 0.40817272663116455,
"learning_rate": 4e-05,
"loss": 1.4547,
"step": 5
},
{
"epoch": 0.05741626794258373,
"grad_norm": 0.42838597297668457,
"learning_rate": 4.8e-05,
"loss": 1.6265,
"step": 6
},
{
"epoch": 0.06698564593301436,
"grad_norm": 0.45393508672714233,
"learning_rate": 5.6e-05,
"loss": 1.6049,
"step": 7
},
{
"epoch": 0.07655502392344497,
"grad_norm": 0.43397629261016846,
"learning_rate": 6.400000000000001e-05,
"loss": 1.5147,
"step": 8
},
{
"epoch": 0.0861244019138756,
"grad_norm": 0.45276832580566406,
"learning_rate": 7.2e-05,
"loss": 1.5187,
"step": 9
},
{
"epoch": 0.09569377990430622,
"grad_norm": 0.4829266369342804,
"learning_rate": 8e-05,
"loss": 1.5023,
"step": 10
},
{
"epoch": 0.10526315789473684,
"grad_norm": 0.5055172443389893,
"learning_rate": 7.999453219969877e-05,
"loss": 1.5217,
"step": 11
},
{
"epoch": 0.11483253588516747,
"grad_norm": 0.5195921659469604,
"learning_rate": 7.997813029363704e-05,
"loss": 1.4627,
"step": 12
},
{
"epoch": 0.12440191387559808,
"grad_norm": 0.5627723932266235,
"learning_rate": 7.99507987659322e-05,
"loss": 1.5943,
"step": 13
},
{
"epoch": 0.1339712918660287,
"grad_norm": 0.561619758605957,
"learning_rate": 7.991254508875098e-05,
"loss": 1.633,
"step": 14
},
{
"epoch": 0.14354066985645933,
"grad_norm": 0.5991867780685425,
"learning_rate": 7.98633797202668e-05,
"loss": 1.7317,
"step": 15
},
{
"epoch": 0.15311004784688995,
"grad_norm": 0.6589527130126953,
"learning_rate": 7.980331610180046e-05,
"loss": 1.7673,
"step": 16
},
{
"epoch": 0.16267942583732056,
"grad_norm": 0.6777735948562622,
"learning_rate": 7.973237065414553e-05,
"loss": 1.7703,
"step": 17
},
{
"epoch": 0.1722488038277512,
"grad_norm": 0.7253565192222595,
"learning_rate": 7.965056277307902e-05,
"loss": 1.887,
"step": 18
},
{
"epoch": 0.18181818181818182,
"grad_norm": 0.7007018327713013,
"learning_rate": 7.955791482405875e-05,
"loss": 1.8466,
"step": 19
},
{
"epoch": 0.19138755980861244,
"grad_norm": 0.7932828068733215,
"learning_rate": 7.94544521361089e-05,
"loss": 1.9501,
"step": 20
},
{
"epoch": 0.20095693779904306,
"grad_norm": 0.8723483085632324,
"learning_rate": 7.93402029948953e-05,
"loss": 1.8275,
"step": 21
},
{
"epoch": 0.21052631578947367,
"grad_norm": 0.9875766038894653,
"learning_rate": 7.921519863499239e-05,
"loss": 1.9756,
"step": 22
},
{
"epoch": 0.22009569377990432,
"grad_norm": 1.1991780996322632,
"learning_rate": 7.907947323134398e-05,
"loss": 2.0454,
"step": 23
},
{
"epoch": 0.22966507177033493,
"grad_norm": 1.3916829824447632,
"learning_rate": 7.893306388992023e-05,
"loss": 2.1839,
"step": 24
},
{
"epoch": 0.23923444976076555,
"grad_norm": 2.0087339878082275,
"learning_rate": 7.877601063757323e-05,
"loss": 1.8768,
"step": 25
},
{
"epoch": 0.24880382775119617,
"grad_norm": 3.617227554321289,
"learning_rate": 7.860835641109395e-05,
"loss": 2.2618,
"step": 26
},
{
"epoch": 0.2583732057416268,
"grad_norm": 0.3412221074104309,
"learning_rate": 7.843014704547393e-05,
"loss": 1.3234,
"step": 27
},
{
"epoch": 0.2679425837320574,
"grad_norm": 0.42941951751708984,
"learning_rate": 7.824143126137431e-05,
"loss": 1.4562,
"step": 28
},
{
"epoch": 0.27751196172248804,
"grad_norm": 0.44896551966667175,
"learning_rate": 7.804226065180615e-05,
"loss": 1.6191,
"step": 29
},
{
"epoch": 0.28708133971291866,
"grad_norm": 0.4686514437198639,
"learning_rate": 7.783268966802539e-05,
"loss": 1.5335,
"step": 30
},
{
"epoch": 0.2966507177033493,
"grad_norm": 0.41287559270858765,
"learning_rate": 7.761277560464645e-05,
"loss": 1.5145,
"step": 31
},
{
"epoch": 0.3062200956937799,
"grad_norm": 0.3899138271808624,
"learning_rate": 7.738257858397844e-05,
"loss": 1.5435,
"step": 32
},
{
"epoch": 0.3157894736842105,
"grad_norm": 0.38428395986557007,
"learning_rate": 7.71421615395883e-05,
"loss": 1.4655,
"step": 33
},
{
"epoch": 0.3253588516746411,
"grad_norm": 0.4115948975086212,
"learning_rate": 7.68915901990954e-05,
"loss": 1.4021,
"step": 34
},
{
"epoch": 0.3349282296650718,
"grad_norm": 0.428444504737854,
"learning_rate": 7.663093306620231e-05,
"loss": 1.5792,
"step": 35
},
{
"epoch": 0.3444976076555024,
"grad_norm": 0.4278987646102905,
"learning_rate": 7.636026140196651e-05,
"loss": 1.4603,
"step": 36
},
{
"epoch": 0.35406698564593303,
"grad_norm": 0.45422014594078064,
"learning_rate": 7.607964920531837e-05,
"loss": 1.5074,
"step": 37
},
{
"epoch": 0.36363636363636365,
"grad_norm": 0.4825778007507324,
"learning_rate": 7.578917319283055e-05,
"loss": 1.6137,
"step": 38
},
{
"epoch": 0.37320574162679426,
"grad_norm": 0.49149173498153687,
"learning_rate": 7.548891277774448e-05,
"loss": 1.5871,
"step": 39
},
{
"epoch": 0.3827751196172249,
"grad_norm": 0.5421543121337891,
"learning_rate": 7.517895004825956e-05,
"loss": 1.6206,
"step": 40
},
{
"epoch": 0.3923444976076555,
"grad_norm": 0.5325545072555542,
"learning_rate": 7.48593697450911e-05,
"loss": 1.5887,
"step": 41
},
{
"epoch": 0.4019138755980861,
"grad_norm": 0.583401620388031,
"learning_rate": 7.453025923830296e-05,
"loss": 1.8583,
"step": 42
},
{
"epoch": 0.41148325358851673,
"grad_norm": 0.618493378162384,
"learning_rate": 7.419170850342156e-05,
"loss": 1.7723,
"step": 43
},
{
"epoch": 0.42105263157894735,
"grad_norm": 0.6902540922164917,
"learning_rate": 7.384381009683742e-05,
"loss": 1.8642,
"step": 44
},
{
"epoch": 0.430622009569378,
"grad_norm": 0.6965833902359009,
"learning_rate": 7.348665913050115e-05,
"loss": 1.8881,
"step": 45
},
{
"epoch": 0.44019138755980863,
"grad_norm": 0.7248061299324036,
"learning_rate": 7.312035324592081e-05,
"loss": 1.8599,
"step": 46
},
{
"epoch": 0.44976076555023925,
"grad_norm": 0.7599021196365356,
"learning_rate": 7.274499258746771e-05,
"loss": 1.919,
"step": 47
},
{
"epoch": 0.45933014354066987,
"grad_norm": 0.8597122430801392,
"learning_rate": 7.236067977499791e-05,
"loss": 1.8464,
"step": 48
},
{
"epoch": 0.4688995215311005,
"grad_norm": 0.9372475147247314,
"learning_rate": 7.196751987579699e-05,
"loss": 1.8919,
"step": 49
},
{
"epoch": 0.4784688995215311,
"grad_norm": 1.1424167156219482,
"learning_rate": 7.156562037585576e-05,
"loss": 1.875,
"step": 50
},
{
"epoch": 0.4784688995215311,
"eval_loss": 1.7351869344711304,
"eval_runtime": 5.7379,
"eval_samples_per_second": 30.848,
"eval_steps_per_second": 7.843,
"step": 50
}
],
"logging_steps": 1,
"max_steps": 200,
"num_input_tokens_seen": 0,
"num_train_epochs": 2,
"save_steps": 50,
"stateful_callbacks": {
"EarlyStoppingCallback": {
"args": {
"early_stopping_patience": 4,
"early_stopping_threshold": 0.0
},
"attributes": {
"early_stopping_patience_counter": 0
}
},
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 1.39474174476288e+16,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}