{
  "best_metric": 2.271299362182617,
  "best_model_checkpoint": "miner_id_24/checkpoint-50",
  "epoch": 0.01365280906546522,
  "eval_steps": 50,
  "global_step": 50,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0002730561813093044,
      "grad_norm": 43.446266174316406,
      "learning_rate": 1e-05,
      "loss": 10.0397,
      "step": 1
    },
    {
      "epoch": 0.0002730561813093044,
      "eval_loss": 4.238278865814209,
      "eval_runtime": 693.1619,
      "eval_samples_per_second": 8.898,
      "eval_steps_per_second": 2.225,
      "step": 1
    },
    {
      "epoch": 0.0005461123626186088,
      "grad_norm": 44.35715866088867,
      "learning_rate": 2e-05,
      "loss": 11.7295,
      "step": 2
    },
    {
      "epoch": 0.0008191685439279131,
      "grad_norm": 49.75259017944336,
      "learning_rate": 3e-05,
      "loss": 11.851,
      "step": 3
    },
    {
      "epoch": 0.0010922247252372176,
      "grad_norm": 46.56555938720703,
      "learning_rate": 4e-05,
      "loss": 11.3896,
      "step": 4
    },
    {
      "epoch": 0.0013652809065465218,
      "grad_norm": 40.56822967529297,
      "learning_rate": 5e-05,
      "loss": 10.777,
      "step": 5
    },
    {
      "epoch": 0.0016383370878558263,
      "grad_norm": 29.243772506713867,
      "learning_rate": 6e-05,
      "loss": 8.8778,
      "step": 6
    },
    {
      "epoch": 0.0019113932691651307,
      "grad_norm": 27.35118293762207,
      "learning_rate": 7e-05,
      "loss": 8.5317,
      "step": 7
    },
    {
      "epoch": 0.0021844494504744353,
      "grad_norm": 28.01230239868164,
      "learning_rate": 8e-05,
      "loss": 8.6976,
      "step": 8
    },
    {
      "epoch": 0.0024575056317837395,
      "grad_norm": 24.44605255126953,
      "learning_rate": 9e-05,
      "loss": 9.1925,
      "step": 9
    },
    {
      "epoch": 0.0027305618130930437,
      "grad_norm": 25.969579696655273,
      "learning_rate": 0.0001,
      "loss": 8.022,
      "step": 10
    },
    {
      "epoch": 0.0030036179944023483,
      "grad_norm": 27.08025550842285,
      "learning_rate": 9.999316524962345e-05,
      "loss": 9.1533,
      "step": 11
    },
    {
      "epoch": 0.0032766741757116525,
      "grad_norm": 18.790332794189453,
      "learning_rate": 9.997266286704631e-05,
      "loss": 7.5891,
      "step": 12
    },
    {
      "epoch": 0.003549730357020957,
      "grad_norm": 23.858049392700195,
      "learning_rate": 9.993849845741524e-05,
      "loss": 9.0651,
      "step": 13
    },
    {
      "epoch": 0.0038227865383302613,
      "grad_norm": 20.80962562561035,
      "learning_rate": 9.989068136093873e-05,
      "loss": 7.7469,
      "step": 14
    },
    {
      "epoch": 0.0040958427196395655,
      "grad_norm": 21.938926696777344,
      "learning_rate": 9.98292246503335e-05,
      "loss": 8.5726,
      "step": 15
    },
    {
      "epoch": 0.004368898900948871,
      "grad_norm": 19.670955657958984,
      "learning_rate": 9.975414512725057e-05,
      "loss": 7.9194,
      "step": 16
    },
    {
      "epoch": 0.004641955082258175,
      "grad_norm": 21.373003005981445,
      "learning_rate": 9.966546331768191e-05,
      "loss": 8.5548,
      "step": 17
    },
    {
      "epoch": 0.004915011263567479,
      "grad_norm": 21.677001953125,
      "learning_rate": 9.956320346634876e-05,
      "loss": 7.7842,
      "step": 18
    },
    {
      "epoch": 0.005188067444876783,
      "grad_norm": 21.132835388183594,
      "learning_rate": 9.944739353007344e-05,
      "loss": 9.1783,
      "step": 19
    },
    {
      "epoch": 0.005461123626186087,
      "grad_norm": 19.698659896850586,
      "learning_rate": 9.931806517013612e-05,
      "loss": 7.6789,
      "step": 20
    },
    {
      "epoch": 0.0057341798074953924,
      "grad_norm": 26.263071060180664,
      "learning_rate": 9.917525374361912e-05,
      "loss": 8.8006,
      "step": 21
    },
    {
      "epoch": 0.006007235988804697,
      "grad_norm": 19.799365997314453,
      "learning_rate": 9.901899829374047e-05,
      "loss": 8.5378,
      "step": 22
    },
    {
      "epoch": 0.006280292170114001,
      "grad_norm": 25.806068420410156,
      "learning_rate": 9.884934153917997e-05,
      "loss": 8.5017,
      "step": 23
    },
    {
      "epoch": 0.006553348351423305,
      "grad_norm": 24.34381866455078,
      "learning_rate": 9.86663298624003e-05,
      "loss": 8.9881,
      "step": 24
    },
    {
      "epoch": 0.00682640453273261,
      "grad_norm": 18.999231338500977,
      "learning_rate": 9.847001329696653e-05,
      "loss": 7.9945,
      "step": 25
    },
    {
      "epoch": 0.007099460714041914,
      "grad_norm": 20.11553382873535,
      "learning_rate": 9.826044551386744e-05,
      "loss": 8.4672,
      "step": 26
    },
    {
      "epoch": 0.0073725168953512185,
      "grad_norm": 21.312238693237305,
      "learning_rate": 9.803768380684242e-05,
      "loss": 8.6953,
      "step": 27
    },
    {
      "epoch": 0.007645573076660523,
      "grad_norm": 31.490800857543945,
      "learning_rate": 9.780178907671789e-05,
      "loss": 9.7065,
      "step": 28
    },
    {
      "epoch": 0.007918629257969828,
      "grad_norm": 20.707792282104492,
      "learning_rate": 9.755282581475769e-05,
      "loss": 8.5876,
      "step": 29
    },
    {
      "epoch": 0.008191685439279131,
      "grad_norm": 19.695018768310547,
      "learning_rate": 9.729086208503174e-05,
      "loss": 8.0395,
      "step": 30
    },
    {
      "epoch": 0.008464741620588436,
      "grad_norm": 21.563302993774414,
      "learning_rate": 9.701596950580806e-05,
      "loss": 7.869,
      "step": 31
    },
    {
      "epoch": 0.008737797801897741,
      "grad_norm": 25.480226516723633,
      "learning_rate": 9.672822322997305e-05,
      "loss": 8.0559,
      "step": 32
    },
    {
      "epoch": 0.009010853983207045,
      "grad_norm": 23.27593231201172,
      "learning_rate": 9.642770192448536e-05,
      "loss": 8.9705,
      "step": 33
    },
    {
      "epoch": 0.00928391016451635,
      "grad_norm": 22.600749969482422,
      "learning_rate": 9.611448774886924e-05,
      "loss": 8.3754,
      "step": 34
    },
    {
      "epoch": 0.009556966345825653,
      "grad_norm": 24.033321380615234,
      "learning_rate": 9.578866633275288e-05,
      "loss": 9.4395,
      "step": 35
    },
    {
      "epoch": 0.009830022527134958,
      "grad_norm": 25.064741134643555,
      "learning_rate": 9.545032675245813e-05,
      "loss": 8.8034,
      "step": 36
    },
    {
      "epoch": 0.010103078708444263,
      "grad_norm": 28.80820083618164,
      "learning_rate": 9.509956150664796e-05,
      "loss": 8.2912,
      "step": 37
    },
    {
      "epoch": 0.010376134889753566,
      "grad_norm": 29.365997314453125,
      "learning_rate": 9.473646649103818e-05,
      "loss": 7.6412,
      "step": 38
    },
    {
      "epoch": 0.010649191071062871,
      "grad_norm": 22.005088806152344,
      "learning_rate": 9.43611409721806e-05,
      "loss": 8.3727,
      "step": 39
    },
    {
      "epoch": 0.010922247252372175,
      "grad_norm": 22.131328582763672,
      "learning_rate": 9.397368756032445e-05,
      "loss": 7.4845,
      "step": 40
    },
    {
      "epoch": 0.01119530343368148,
      "grad_norm": 26.777101516723633,
      "learning_rate": 9.357421218136386e-05,
      "loss": 8.6799,
      "step": 41
    },
    {
      "epoch": 0.011468359614990785,
      "grad_norm": 27.420764923095703,
      "learning_rate": 9.316282404787871e-05,
      "loss": 8.6992,
      "step": 42
    },
    {
      "epoch": 0.011741415796300088,
      "grad_norm": 27.803956985473633,
      "learning_rate": 9.273963562927695e-05,
      "loss": 9.0848,
      "step": 43
    },
    {
      "epoch": 0.012014471977609393,
      "grad_norm": 32.42679214477539,
      "learning_rate": 9.230476262104677e-05,
      "loss": 10.5448,
      "step": 44
    },
    {
      "epoch": 0.012287528158918698,
      "grad_norm": 28.605180740356445,
      "learning_rate": 9.185832391312644e-05,
      "loss": 8.5635,
      "step": 45
    },
    {
      "epoch": 0.012560584340228002,
      "grad_norm": 28.457530975341797,
      "learning_rate": 9.140044155740101e-05,
      "loss": 9.0221,
      "step": 46
    },
    {
      "epoch": 0.012833640521537307,
      "grad_norm": 24.8094425201416,
      "learning_rate": 9.093124073433463e-05,
      "loss": 7.0862,
      "step": 47
    },
    {
      "epoch": 0.01310669670284661,
      "grad_norm": 30.082542419433594,
      "learning_rate": 9.045084971874738e-05,
      "loss": 8.431,
      "step": 48
    },
    {
      "epoch": 0.013379752884155915,
      "grad_norm": 26.035165786743164,
      "learning_rate": 8.995939984474624e-05,
      "loss": 8.7725,
      "step": 49
    },
    {
      "epoch": 0.01365280906546522,
      "grad_norm": 48.76018142700195,
      "learning_rate": 8.945702546981969e-05,
      "loss": 10.7992,
      "step": 50
    },
    {
      "epoch": 0.01365280906546522,
      "eval_loss": 2.271299362182617,
      "eval_runtime": 698.4085,
      "eval_samples_per_second": 8.832,
      "eval_steps_per_second": 2.208,
      "step": 50
    }
  ],
  "logging_steps": 1,
  "max_steps": 200,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 50,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 5,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 7.15499004493824e+16,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}