{
  "best_metric": 1.8364510536193848,
  "best_model_checkpoint": "miner_id_24/checkpoint-50",
  "epoch": 0.33783783783783783,
  "eval_steps": 50,
  "global_step": 50,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.006756756756756757,
      "grad_norm": 1.4424006938934326,
      "learning_rate": 1e-05,
      "loss": 2.2896,
      "step": 1
    },
    {
      "epoch": 0.006756756756756757,
      "eval_loss": 2.4700465202331543,
      "eval_runtime": 18.613,
      "eval_samples_per_second": 13.431,
      "eval_steps_per_second": 3.385,
      "step": 1
    },
    {
      "epoch": 0.013513513513513514,
      "grad_norm": 1.7567722797393799,
      "learning_rate": 2e-05,
      "loss": 2.3382,
      "step": 2
    },
    {
      "epoch": 0.02027027027027027,
      "grad_norm": 1.7165125608444214,
      "learning_rate": 3e-05,
      "loss": 2.3747,
      "step": 3
    },
    {
      "epoch": 0.02702702702702703,
      "grad_norm": 1.7339659929275513,
      "learning_rate": 4e-05,
      "loss": 2.3317,
      "step": 4
    },
    {
      "epoch": 0.033783783783783786,
      "grad_norm": 1.658001184463501,
      "learning_rate": 5e-05,
      "loss": 2.2753,
      "step": 5
    },
    {
      "epoch": 0.04054054054054054,
      "grad_norm": 1.4608352184295654,
      "learning_rate": 6e-05,
      "loss": 2.1977,
      "step": 6
    },
    {
      "epoch": 0.0472972972972973,
      "grad_norm": 1.592839002609253,
      "learning_rate": 7e-05,
      "loss": 2.0545,
      "step": 7
    },
    {
      "epoch": 0.05405405405405406,
      "grad_norm": 1.6094963550567627,
      "learning_rate": 8e-05,
      "loss": 1.9669,
      "step": 8
    },
    {
      "epoch": 0.060810810810810814,
      "grad_norm": 2.4839794635772705,
      "learning_rate": 9e-05,
      "loss": 2.1095,
      "step": 9
    },
    {
      "epoch": 0.06756756756756757,
      "grad_norm": 2.2013440132141113,
      "learning_rate": 0.0001,
      "loss": 2.0207,
      "step": 10
    },
    {
      "epoch": 0.07432432432432433,
      "grad_norm": 1.868704915046692,
      "learning_rate": 9.999316524962345e-05,
      "loss": 2.219,
      "step": 11
    },
    {
      "epoch": 0.08108108108108109,
      "grad_norm": 1.5163902044296265,
      "learning_rate": 9.997266286704631e-05,
      "loss": 1.7047,
      "step": 12
    },
    {
      "epoch": 0.08783783783783784,
      "grad_norm": 1.818183183670044,
      "learning_rate": 9.993849845741524e-05,
      "loss": 1.7389,
      "step": 13
    },
    {
      "epoch": 0.0945945945945946,
      "grad_norm": 1.8183751106262207,
      "learning_rate": 9.989068136093873e-05,
      "loss": 1.8483,
      "step": 14
    },
    {
      "epoch": 0.10135135135135136,
      "grad_norm": 1.6848615407943726,
      "learning_rate": 9.98292246503335e-05,
      "loss": 1.9458,
      "step": 15
    },
    {
      "epoch": 0.10810810810810811,
      "grad_norm": 1.4533350467681885,
      "learning_rate": 9.975414512725057e-05,
      "loss": 1.8722,
      "step": 16
    },
    {
      "epoch": 0.11486486486486487,
      "grad_norm": 1.4766217470169067,
      "learning_rate": 9.966546331768191e-05,
      "loss": 1.6756,
      "step": 17
    },
    {
      "epoch": 0.12162162162162163,
      "grad_norm": 1.6617404222488403,
      "learning_rate": 9.956320346634876e-05,
      "loss": 1.8676,
      "step": 18
    },
    {
      "epoch": 0.12837837837837837,
      "grad_norm": 1.667425274848938,
      "learning_rate": 9.944739353007344e-05,
      "loss": 1.8379,
      "step": 19
    },
    {
      "epoch": 0.13513513513513514,
      "grad_norm": 1.5992474555969238,
      "learning_rate": 9.931806517013612e-05,
      "loss": 1.7354,
      "step": 20
    },
    {
      "epoch": 0.14189189189189189,
      "grad_norm": 1.6577551364898682,
      "learning_rate": 9.917525374361912e-05,
      "loss": 1.8265,
      "step": 21
    },
    {
      "epoch": 0.14864864864864866,
      "grad_norm": 1.6034307479858398,
      "learning_rate": 9.901899829374047e-05,
      "loss": 1.783,
      "step": 22
    },
    {
      "epoch": 0.1554054054054054,
      "grad_norm": 2.068225145339966,
      "learning_rate": 9.884934153917997e-05,
      "loss": 1.9163,
      "step": 23
    },
    {
      "epoch": 0.16216216216216217,
      "grad_norm": 1.7155823707580566,
      "learning_rate": 9.86663298624003e-05,
      "loss": 1.8537,
      "step": 24
    },
    {
      "epoch": 0.16891891891891891,
      "grad_norm": 1.703191876411438,
      "learning_rate": 9.847001329696653e-05,
      "loss": 1.839,
      "step": 25
    },
    {
      "epoch": 0.17567567567567569,
      "grad_norm": 2.158104658126831,
      "learning_rate": 9.826044551386744e-05,
      "loss": 1.9672,
      "step": 26
    },
    {
      "epoch": 0.18243243243243243,
      "grad_norm": 1.834190011024475,
      "learning_rate": 9.803768380684242e-05,
      "loss": 1.8346,
      "step": 27
    },
    {
      "epoch": 0.1891891891891892,
      "grad_norm": 1.9021737575531006,
      "learning_rate": 9.780178907671789e-05,
      "loss": 1.7981,
      "step": 28
    },
    {
      "epoch": 0.19594594594594594,
      "grad_norm": 1.9429136514663696,
      "learning_rate": 9.755282581475769e-05,
      "loss": 1.8159,
      "step": 29
    },
    {
      "epoch": 0.20270270270270271,
      "grad_norm": 2.137256383895874,
      "learning_rate": 9.729086208503174e-05,
      "loss": 1.7183,
      "step": 30
    },
    {
      "epoch": 0.20945945945945946,
      "grad_norm": 2.3155884742736816,
      "learning_rate": 9.701596950580806e-05,
      "loss": 1.8515,
      "step": 31
    },
    {
      "epoch": 0.21621621621621623,
      "grad_norm": 2.3982532024383545,
      "learning_rate": 9.672822322997305e-05,
      "loss": 1.7351,
      "step": 32
    },
    {
      "epoch": 0.22297297297297297,
      "grad_norm": 2.658118724822998,
      "learning_rate": 9.642770192448536e-05,
      "loss": 1.8166,
      "step": 33
    },
    {
      "epoch": 0.22972972972972974,
      "grad_norm": 2.4856467247009277,
      "learning_rate": 9.611448774886924e-05,
      "loss": 1.9582,
      "step": 34
    },
    {
      "epoch": 0.23648648648648649,
      "grad_norm": 3.3881924152374268,
      "learning_rate": 9.578866633275288e-05,
      "loss": 1.8473,
      "step": 35
    },
    {
      "epoch": 0.24324324324324326,
      "grad_norm": 2.7870328426361084,
      "learning_rate": 9.545032675245813e-05,
      "loss": 1.7502,
      "step": 36
    },
    {
      "epoch": 0.25,
      "grad_norm": 4.050192356109619,
      "learning_rate": 9.509956150664796e-05,
      "loss": 2.1093,
      "step": 37
    },
    {
      "epoch": 0.25675675675675674,
      "grad_norm": 2.349740982055664,
      "learning_rate": 9.473646649103818e-05,
      "loss": 2.2211,
      "step": 38
    },
    {
      "epoch": 0.2635135135135135,
      "grad_norm": 1.6817964315414429,
      "learning_rate": 9.43611409721806e-05,
      "loss": 2.1682,
      "step": 39
    },
    {
      "epoch": 0.2702702702702703,
      "grad_norm": 1.4059157371520996,
      "learning_rate": 9.397368756032445e-05,
      "loss": 1.9945,
      "step": 40
    },
    {
      "epoch": 0.27702702702702703,
      "grad_norm": 1.2649842500686646,
      "learning_rate": 9.357421218136386e-05,
      "loss": 1.927,
      "step": 41
    },
    {
      "epoch": 0.28378378378378377,
      "grad_norm": 1.327721357345581,
      "learning_rate": 9.316282404787871e-05,
      "loss": 1.9329,
      "step": 42
    },
    {
      "epoch": 0.2905405405405405,
      "grad_norm": 1.3768377304077148,
      "learning_rate": 9.273963562927695e-05,
      "loss": 2.0718,
      "step": 43
    },
    {
      "epoch": 0.2972972972972973,
      "grad_norm": 1.3469047546386719,
      "learning_rate": 9.230476262104677e-05,
      "loss": 1.8375,
      "step": 44
    },
    {
      "epoch": 0.30405405405405406,
      "grad_norm": 1.2437846660614014,
      "learning_rate": 9.185832391312644e-05,
      "loss": 1.6175,
      "step": 45
    },
    {
      "epoch": 0.3108108108108108,
      "grad_norm": 1.410740613937378,
      "learning_rate": 9.140044155740101e-05,
      "loss": 1.909,
      "step": 46
    },
    {
      "epoch": 0.31756756756756754,
      "grad_norm": 1.4747393131256104,
      "learning_rate": 9.093124073433463e-05,
      "loss": 1.7801,
      "step": 47
    },
    {
      "epoch": 0.32432432432432434,
      "grad_norm": 1.3618700504302979,
      "learning_rate": 9.045084971874738e-05,
      "loss": 1.8941,
      "step": 48
    },
    {
      "epoch": 0.3310810810810811,
      "grad_norm": 1.3720588684082031,
      "learning_rate": 8.995939984474624e-05,
      "loss": 1.7513,
      "step": 49
    },
    {
      "epoch": 0.33783783783783783,
      "grad_norm": 1.4571261405944824,
      "learning_rate": 8.945702546981969e-05,
      "loss": 1.77,
      "step": 50
    },
    {
      "epoch": 0.33783783783783783,
      "eval_loss": 1.8364510536193848,
      "eval_runtime": 19.046,
      "eval_samples_per_second": 13.126,
      "eval_steps_per_second": 3.308,
      "step": 50
    }
  ],
  "logging_steps": 1,
  "max_steps": 200,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 2,
  "save_steps": 50,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 5,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 7.54256789372928e+16,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}