{
  "best_metric": 1.83683443069458,
  "best_model_checkpoint": "miner_id_24/checkpoint-50",
  "epoch": 0.33783783783783783,
  "eval_steps": 50,
  "global_step": 50,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.006756756756756757,
      "grad_norm": 1.4881850481033325,
      "learning_rate": 1e-05,
      "loss": 2.2896,
      "step": 1
    },
    {
      "epoch": 0.006756756756756757,
      "eval_loss": 2.4700465202331543,
      "eval_runtime": 18.7535,
      "eval_samples_per_second": 13.331,
      "eval_steps_per_second": 3.359,
      "step": 1
    },
    {
      "epoch": 0.013513513513513514,
      "grad_norm": 1.8108983039855957,
      "learning_rate": 2e-05,
      "loss": 2.3382,
      "step": 2
    },
    {
      "epoch": 0.02027027027027027,
      "grad_norm": 1.7592962980270386,
      "learning_rate": 3e-05,
      "loss": 2.3745,
      "step": 3
    },
    {
      "epoch": 0.02702702702702703,
      "grad_norm": 1.7991939783096313,
      "learning_rate": 4e-05,
      "loss": 2.3313,
      "step": 4
    },
    {
      "epoch": 0.033783783783783786,
      "grad_norm": 1.6926255226135254,
      "learning_rate": 5e-05,
      "loss": 2.2734,
      "step": 5
    },
    {
      "epoch": 0.04054054054054054,
      "grad_norm": 1.478215217590332,
      "learning_rate": 6e-05,
      "loss": 2.1964,
      "step": 6
    },
    {
      "epoch": 0.0472972972972973,
      "grad_norm": 1.6067980527877808,
      "learning_rate": 7e-05,
      "loss": 2.055,
      "step": 7
    },
    {
      "epoch": 0.05405405405405406,
      "grad_norm": 1.6643190383911133,
      "learning_rate": 8e-05,
      "loss": 1.9669,
      "step": 8
    },
    {
      "epoch": 0.060810810810810814,
      "grad_norm": 2.5624747276306152,
      "learning_rate": 9e-05,
      "loss": 2.108,
      "step": 9
    },
    {
      "epoch": 0.06756756756756757,
      "grad_norm": 2.2052178382873535,
      "learning_rate": 0.0001,
      "loss": 2.0193,
      "step": 10
    },
    {
      "epoch": 0.07432432432432433,
      "grad_norm": 1.8947843313217163,
      "learning_rate": 9.999316524962345e-05,
      "loss": 2.2201,
      "step": 11
    },
    {
      "epoch": 0.08108108108108109,
      "grad_norm": 1.5539779663085938,
      "learning_rate": 9.997266286704631e-05,
      "loss": 1.7039,
      "step": 12
    },
    {
      "epoch": 0.08783783783783784,
      "grad_norm": 1.84231698513031,
      "learning_rate": 9.993849845741524e-05,
      "loss": 1.7398,
      "step": 13
    },
    {
      "epoch": 0.0945945945945946,
      "grad_norm": 1.828019380569458,
      "learning_rate": 9.989068136093873e-05,
      "loss": 1.8482,
      "step": 14
    },
    {
      "epoch": 0.10135135135135136,
      "grad_norm": 1.671506643295288,
      "learning_rate": 9.98292246503335e-05,
      "loss": 1.9443,
      "step": 15
    },
    {
      "epoch": 0.10810810810810811,
      "grad_norm": 1.468369722366333,
      "learning_rate": 9.975414512725057e-05,
      "loss": 1.8729,
      "step": 16
    },
    {
      "epoch": 0.11486486486486487,
      "grad_norm": 1.5108083486557007,
      "learning_rate": 9.966546331768191e-05,
      "loss": 1.6741,
      "step": 17
    },
    {
      "epoch": 0.12162162162162163,
      "grad_norm": 1.6598519086837769,
      "learning_rate": 9.956320346634876e-05,
      "loss": 1.8642,
      "step": 18
    },
    {
      "epoch": 0.12837837837837837,
      "grad_norm": 1.6918882131576538,
      "learning_rate": 9.944739353007344e-05,
      "loss": 1.8382,
      "step": 19
    },
    {
      "epoch": 0.13513513513513514,
      "grad_norm": 1.6050759553909302,
      "learning_rate": 9.931806517013612e-05,
      "loss": 1.7366,
      "step": 20
    },
    {
      "epoch": 0.14189189189189189,
      "grad_norm": 1.6648799180984497,
      "learning_rate": 9.917525374361912e-05,
      "loss": 1.8261,
      "step": 21
    },
    {
      "epoch": 0.14864864864864866,
      "grad_norm": 1.6134958267211914,
      "learning_rate": 9.901899829374047e-05,
      "loss": 1.7829,
      "step": 22
    },
    {
      "epoch": 0.1554054054054054,
      "grad_norm": 2.065173625946045,
      "learning_rate": 9.884934153917997e-05,
      "loss": 1.9168,
      "step": 23
    },
    {
      "epoch": 0.16216216216216217,
      "grad_norm": 1.8863788843154907,
      "learning_rate": 9.86663298624003e-05,
      "loss": 1.853,
      "step": 24
    },
    {
      "epoch": 0.16891891891891891,
      "grad_norm": 1.7413650751113892,
      "learning_rate": 9.847001329696653e-05,
      "loss": 1.8379,
      "step": 25
    },
    {
      "epoch": 0.17567567567567569,
      "grad_norm": 2.1949267387390137,
      "learning_rate": 9.826044551386744e-05,
      "loss": 1.9647,
      "step": 26
    },
    {
      "epoch": 0.18243243243243243,
      "grad_norm": 1.8597412109375,
      "learning_rate": 9.803768380684242e-05,
      "loss": 1.8375,
      "step": 27
    },
    {
      "epoch": 0.1891891891891892,
      "grad_norm": 1.9108561277389526,
      "learning_rate": 9.780178907671789e-05,
      "loss": 1.7963,
      "step": 28
    },
    {
      "epoch": 0.19594594594594594,
      "grad_norm": 1.9742248058319092,
      "learning_rate": 9.755282581475769e-05,
      "loss": 1.8186,
      "step": 29
    },
    {
      "epoch": 0.20270270270270271,
      "grad_norm": 2.1315524578094482,
      "learning_rate": 9.729086208503174e-05,
      "loss": 1.7129,
      "step": 30
    },
    {
      "epoch": 0.20945945945945946,
      "grad_norm": 2.3003602027893066,
      "learning_rate": 9.701596950580806e-05,
      "loss": 1.8527,
      "step": 31
    },
    {
      "epoch": 0.21621621621621623,
      "grad_norm": 2.4031295776367188,
      "learning_rate": 9.672822322997305e-05,
      "loss": 1.739,
      "step": 32
    },
    {
      "epoch": 0.22297297297297297,
      "grad_norm": 2.5433454513549805,
      "learning_rate": 9.642770192448536e-05,
      "loss": 1.8151,
      "step": 33
    },
    {
      "epoch": 0.22972972972972974,
      "grad_norm": 2.519287347793579,
      "learning_rate": 9.611448774886924e-05,
      "loss": 1.962,
      "step": 34
    },
    {
      "epoch": 0.23648648648648649,
      "grad_norm": 3.300830125808716,
      "learning_rate": 9.578866633275288e-05,
      "loss": 1.8442,
      "step": 35
    },
    {
      "epoch": 0.24324324324324326,
      "grad_norm": 2.7878737449645996,
      "learning_rate": 9.545032675245813e-05,
      "loss": 1.7497,
      "step": 36
    },
    {
      "epoch": 0.25,
      "grad_norm": 3.9603471755981445,
      "learning_rate": 9.509956150664796e-05,
      "loss": 2.1078,
      "step": 37
    },
    {
      "epoch": 0.25675675675675674,
      "grad_norm": 2.3182380199432373,
      "learning_rate": 9.473646649103818e-05,
      "loss": 2.2206,
      "step": 38
    },
    {
      "epoch": 0.2635135135135135,
      "grad_norm": 1.6530444622039795,
      "learning_rate": 9.43611409721806e-05,
      "loss": 2.1639,
      "step": 39
    },
    {
      "epoch": 0.2702702702702703,
      "grad_norm": 1.4002788066864014,
      "learning_rate": 9.397368756032445e-05,
      "loss": 1.9908,
      "step": 40
    },
    {
      "epoch": 0.27702702702702703,
      "grad_norm": 1.275628685951233,
      "learning_rate": 9.357421218136386e-05,
      "loss": 1.9277,
      "step": 41
    },
    {
      "epoch": 0.28378378378378377,
      "grad_norm": 1.3413888216018677,
      "learning_rate": 9.316282404787871e-05,
      "loss": 1.938,
      "step": 42
    },
    {
      "epoch": 0.2905405405405405,
      "grad_norm": 1.3855804204940796,
      "learning_rate": 9.273963562927695e-05,
      "loss": 2.0718,
      "step": 43
    },
    {
      "epoch": 0.2972972972972973,
      "grad_norm": 1.3543145656585693,
      "learning_rate": 9.230476262104677e-05,
      "loss": 1.8385,
      "step": 44
    },
    {
      "epoch": 0.30405405405405406,
      "grad_norm": 1.2580598592758179,
      "learning_rate": 9.185832391312644e-05,
      "loss": 1.6167,
      "step": 45
    },
    {
      "epoch": 0.3108108108108108,
      "grad_norm": 1.4251524209976196,
      "learning_rate": 9.140044155740101e-05,
      "loss": 1.9111,
      "step": 46
    },
    {
      "epoch": 0.31756756756756754,
      "grad_norm": 1.4891302585601807,
      "learning_rate": 9.093124073433463e-05,
      "loss": 1.7776,
      "step": 47
    },
    {
      "epoch": 0.32432432432432434,
      "grad_norm": 1.366258978843689,
      "learning_rate": 9.045084971874738e-05,
      "loss": 1.8972,
      "step": 48
    },
    {
      "epoch": 0.3310810810810811,
      "grad_norm": 1.3790669441223145,
      "learning_rate": 8.995939984474624e-05,
      "loss": 1.752,
      "step": 49
    },
    {
      "epoch": 0.33783783783783783,
      "grad_norm": 1.4619343280792236,
      "learning_rate": 8.945702546981969e-05,
      "loss": 1.7721,
      "step": 50
    },
    {
      "epoch": 0.33783783783783783,
      "eval_loss": 1.83683443069458,
      "eval_runtime": 19.0928,
      "eval_samples_per_second": 13.094,
      "eval_steps_per_second": 3.3,
      "step": 50
    }
  ],
  "logging_steps": 1,
  "max_steps": 200,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 2,
  "save_steps": 50,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 5,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 7.54256789372928e+16,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}