{
  "best_metric": 0.6329964995384216,
  "best_model_checkpoint": "miner_id_24/checkpoint-50",
  "epoch": 0.35650623885918004,
  "eval_steps": 50,
  "global_step": 50,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0071301247771836,
      "grad_norm": 4.860265731811523,
      "learning_rate": 1e-05,
      "loss": 2.2642,
      "step": 1
    },
    {
      "epoch": 0.0071301247771836,
      "eval_loss": 3.368955373764038,
      "eval_runtime": 17.5564,
      "eval_samples_per_second": 13.442,
      "eval_steps_per_second": 3.361,
      "step": 1
    },
    {
      "epoch": 0.0142602495543672,
      "grad_norm": 4.241779327392578,
      "learning_rate": 2e-05,
      "loss": 2.4353,
      "step": 2
    },
    {
      "epoch": 0.0213903743315508,
      "grad_norm": 5.996744632720947,
      "learning_rate": 3e-05,
      "loss": 2.4873,
      "step": 3
    },
    {
      "epoch": 0.0285204991087344,
      "grad_norm": 4.913257122039795,
      "learning_rate": 4e-05,
      "loss": 2.3505,
      "step": 4
    },
    {
      "epoch": 0.035650623885918005,
      "grad_norm": 3.3362646102905273,
      "learning_rate": 5e-05,
      "loss": 2.137,
      "step": 5
    },
    {
      "epoch": 0.0427807486631016,
      "grad_norm": 3.666640281677246,
      "learning_rate": 6e-05,
      "loss": 1.7876,
      "step": 6
    },
    {
      "epoch": 0.049910873440285206,
      "grad_norm": 6.828310012817383,
      "learning_rate": 7e-05,
      "loss": 1.755,
      "step": 7
    },
    {
      "epoch": 0.0570409982174688,
      "grad_norm": 7.097423553466797,
      "learning_rate": 8e-05,
      "loss": 1.853,
      "step": 8
    },
    {
      "epoch": 0.06417112299465241,
      "grad_norm": 5.807572364807129,
      "learning_rate": 9e-05,
      "loss": 1.7299,
      "step": 9
    },
    {
      "epoch": 0.07130124777183601,
      "grad_norm": 4.308953285217285,
      "learning_rate": 0.0001,
      "loss": 1.1758,
      "step": 10
    },
    {
      "epoch": 0.0784313725490196,
      "grad_norm": 8.310755729675293,
      "learning_rate": 9.999316524962345e-05,
      "loss": 1.5666,
      "step": 11
    },
    {
      "epoch": 0.0855614973262032,
      "grad_norm": 4.563036918640137,
      "learning_rate": 9.997266286704631e-05,
      "loss": 1.3147,
      "step": 12
    },
    {
      "epoch": 0.09269162210338681,
      "grad_norm": 5.149623394012451,
      "learning_rate": 9.993849845741524e-05,
      "loss": 1.0886,
      "step": 13
    },
    {
      "epoch": 0.09982174688057041,
      "grad_norm": 5.882086753845215,
      "learning_rate": 9.989068136093873e-05,
      "loss": 1.386,
      "step": 14
    },
    {
      "epoch": 0.10695187165775401,
      "grad_norm": 12.239624977111816,
      "learning_rate": 9.98292246503335e-05,
      "loss": 1.2012,
      "step": 15
    },
    {
      "epoch": 0.1140819964349376,
      "grad_norm": 7.540602207183838,
      "learning_rate": 9.975414512725057e-05,
      "loss": 1.2995,
      "step": 16
    },
    {
      "epoch": 0.12121212121212122,
      "grad_norm": 5.4989423751831055,
      "learning_rate": 9.966546331768191e-05,
      "loss": 1.1511,
      "step": 17
    },
    {
      "epoch": 0.12834224598930483,
      "grad_norm": 6.216000556945801,
      "learning_rate": 9.956320346634876e-05,
      "loss": 0.8327,
      "step": 18
    },
    {
      "epoch": 0.1354723707664884,
      "grad_norm": 4.877588748931885,
      "learning_rate": 9.944739353007344e-05,
      "loss": 0.6784,
      "step": 19
    },
    {
      "epoch": 0.14260249554367202,
      "grad_norm": 10.093413352966309,
      "learning_rate": 9.931806517013612e-05,
      "loss": 2.0459,
      "step": 20
    },
    {
      "epoch": 0.1497326203208556,
      "grad_norm": 7.529651165008545,
      "learning_rate": 9.917525374361912e-05,
      "loss": 1.4024,
      "step": 21
    },
    {
      "epoch": 0.1568627450980392,
      "grad_norm": 15.220786094665527,
      "learning_rate": 9.901899829374047e-05,
      "loss": 1.3263,
      "step": 22
    },
    {
      "epoch": 0.16399286987522282,
      "grad_norm": 9.120606422424316,
      "learning_rate": 9.884934153917997e-05,
      "loss": 1.2663,
      "step": 23
    },
    {
      "epoch": 0.1711229946524064,
      "grad_norm": 7.058254718780518,
      "learning_rate": 9.86663298624003e-05,
      "loss": 1.3212,
      "step": 24
    },
    {
      "epoch": 0.17825311942959002,
      "grad_norm": 9.098106384277344,
      "learning_rate": 9.847001329696653e-05,
      "loss": 1.6655,
      "step": 25
    },
    {
      "epoch": 0.18538324420677363,
      "grad_norm": 6.505765914916992,
      "learning_rate": 9.826044551386744e-05,
      "loss": 1.1041,
      "step": 26
    },
    {
      "epoch": 0.1925133689839572,
      "grad_norm": 3.545391321182251,
      "learning_rate": 9.803768380684242e-05,
      "loss": 0.7916,
      "step": 27
    },
    {
      "epoch": 0.19964349376114082,
      "grad_norm": 4.667784214019775,
      "learning_rate": 9.780178907671789e-05,
      "loss": 0.8798,
      "step": 28
    },
    {
      "epoch": 0.20677361853832443,
      "grad_norm": 15.075148582458496,
      "learning_rate": 9.755282581475769e-05,
      "loss": 1.6309,
      "step": 29
    },
    {
      "epoch": 0.21390374331550802,
      "grad_norm": 16.655683517456055,
      "learning_rate": 9.729086208503174e-05,
      "loss": 1.8559,
      "step": 30
    },
    {
      "epoch": 0.22103386809269163,
      "grad_norm": 7.557066440582275,
      "learning_rate": 9.701596950580806e-05,
      "loss": 1.3669,
      "step": 31
    },
    {
      "epoch": 0.2281639928698752,
      "grad_norm": 16.793487548828125,
      "learning_rate": 9.672822322997305e-05,
      "loss": 2.2009,
      "step": 32
    },
    {
      "epoch": 0.23529411764705882,
      "grad_norm": 7.860363483428955,
      "learning_rate": 9.642770192448536e-05,
      "loss": 1.2864,
      "step": 33
    },
    {
      "epoch": 0.24242424242424243,
      "grad_norm": 6.455472946166992,
      "learning_rate": 9.611448774886924e-05,
      "loss": 1.0194,
      "step": 34
    },
    {
      "epoch": 0.24955436720142601,
      "grad_norm": 5.507026672363281,
      "learning_rate": 9.578866633275288e-05,
      "loss": 0.7551,
      "step": 35
    },
    {
      "epoch": 0.25668449197860965,
      "grad_norm": 17.20283317565918,
      "learning_rate": 9.545032675245813e-05,
      "loss": 2.9312,
      "step": 36
    },
    {
      "epoch": 0.2638146167557932,
      "grad_norm": 14.8767728805542,
      "learning_rate": 9.509956150664796e-05,
      "loss": 2.2403,
      "step": 37
    },
    {
      "epoch": 0.2709447415329768,
      "grad_norm": 9.64128303527832,
      "learning_rate": 9.473646649103818e-05,
      "loss": 1.6086,
      "step": 38
    },
    {
      "epoch": 0.27807486631016043,
      "grad_norm": 4.818795680999756,
      "learning_rate": 9.43611409721806e-05,
      "loss": 0.8585,
      "step": 39
    },
    {
      "epoch": 0.28520499108734404,
      "grad_norm": 3.329906940460205,
      "learning_rate": 9.397368756032445e-05,
      "loss": 0.6471,
      "step": 40
    },
    {
      "epoch": 0.29233511586452765,
      "grad_norm": 3.4146223068237305,
      "learning_rate": 9.357421218136386e-05,
      "loss": 0.5584,
      "step": 41
    },
    {
      "epoch": 0.2994652406417112,
      "grad_norm": 2.892646551132202,
      "learning_rate": 9.316282404787871e-05,
      "loss": 0.541,
      "step": 42
    },
    {
      "epoch": 0.3065953654188948,
      "grad_norm": 6.643548488616943,
      "learning_rate": 9.273963562927695e-05,
      "loss": 0.7923,
      "step": 43
    },
    {
      "epoch": 0.3137254901960784,
      "grad_norm": 3.204444646835327,
      "learning_rate": 9.230476262104677e-05,
      "loss": 0.5552,
      "step": 44
    },
    {
      "epoch": 0.32085561497326204,
      "grad_norm": 2.1902480125427246,
      "learning_rate": 9.185832391312644e-05,
      "loss": 0.373,
      "step": 45
    },
    {
      "epoch": 0.32798573975044565,
      "grad_norm": 3.2742395401000977,
      "learning_rate": 9.140044155740101e-05,
      "loss": 0.5669,
      "step": 46
    },
    {
      "epoch": 0.33511586452762926,
      "grad_norm": 2.8352434635162354,
      "learning_rate": 9.093124073433463e-05,
      "loss": 0.5118,
      "step": 47
    },
    {
      "epoch": 0.3422459893048128,
      "grad_norm": 3.1886863708496094,
      "learning_rate": 9.045084971874738e-05,
      "loss": 0.4386,
      "step": 48
    },
    {
      "epoch": 0.3493761140819964,
      "grad_norm": 2.8913755416870117,
      "learning_rate": 8.995939984474624e-05,
      "loss": 0.2077,
      "step": 49
    },
    {
      "epoch": 0.35650623885918004,
      "grad_norm": 4.413907527923584,
      "learning_rate": 8.945702546981969e-05,
      "loss": 0.2928,
      "step": 50
    },
    {
      "epoch": 0.35650623885918004,
      "eval_loss": 0.6329964995384216,
      "eval_runtime": 17.9761,
      "eval_samples_per_second": 13.129,
      "eval_steps_per_second": 3.282,
      "step": 50
    }
  ],
  "logging_steps": 1,
  "max_steps": 200,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 2,
  "save_steps": 50,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 5,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 7.54256789372928e+16,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}