{
  "best_metric": 0.6686553955078125,
  "best_model_checkpoint": "miner_id_24/checkpoint-50",
  "epoch": 0.05651313930488839,
  "eval_steps": 50,
  "global_step": 50,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0011302627860977678,
      "grad_norm": 793.308837890625,
      "learning_rate": 1e-05,
      "loss": 11.6478,
      "step": 1
    },
    {
      "epoch": 0.0011302627860977678,
      "eval_loss": 3.2303900718688965,
      "eval_runtime": 105.0885,
      "eval_samples_per_second": 14.179,
      "eval_steps_per_second": 3.549,
      "step": 1
    },
    {
      "epoch": 0.0022605255721955355,
      "grad_norm": 1157.48828125,
      "learning_rate": 2e-05,
      "loss": 12.4896,
      "step": 2
    },
    {
      "epoch": 0.003390788358293303,
      "grad_norm": 131.5410919189453,
      "learning_rate": 3e-05,
      "loss": 9.1943,
      "step": 3
    },
    {
      "epoch": 0.004521051144391071,
      "grad_norm": 68.70294952392578,
      "learning_rate": 4e-05,
      "loss": 5.6464,
      "step": 4
    },
    {
      "epoch": 0.005651313930488839,
      "grad_norm": 148.5250244140625,
      "learning_rate": 5e-05,
      "loss": 4.9647,
      "step": 5
    },
    {
      "epoch": 0.006781576716586606,
      "grad_norm": 39.88493728637695,
      "learning_rate": 6e-05,
      "loss": 3.6084,
      "step": 6
    },
    {
      "epoch": 0.007911839502684374,
      "grad_norm": 13.25629711151123,
      "learning_rate": 7e-05,
      "loss": 3.0777,
      "step": 7
    },
    {
      "epoch": 0.009042102288782142,
      "grad_norm": 15.91224193572998,
      "learning_rate": 8e-05,
      "loss": 2.9451,
      "step": 8
    },
    {
      "epoch": 0.010172365074879909,
      "grad_norm": 17.490968704223633,
      "learning_rate": 9e-05,
      "loss": 2.8105,
      "step": 9
    },
    {
      "epoch": 0.011302627860977677,
      "grad_norm": 9.884227752685547,
      "learning_rate": 0.0001,
      "loss": 2.7296,
      "step": 10
    },
    {
      "epoch": 0.012432890647075446,
      "grad_norm": 10.834426879882812,
      "learning_rate": 9.999316524962345e-05,
      "loss": 2.697,
      "step": 11
    },
    {
      "epoch": 0.013563153433173212,
      "grad_norm": 40.39820861816406,
      "learning_rate": 9.997266286704631e-05,
      "loss": 3.3782,
      "step": 12
    },
    {
      "epoch": 0.01469341621927098,
      "grad_norm": 10.876916885375977,
      "learning_rate": 9.993849845741524e-05,
      "loss": 2.8799,
      "step": 13
    },
    {
      "epoch": 0.015823679005368747,
      "grad_norm": 6.255303382873535,
      "learning_rate": 9.989068136093873e-05,
      "loss": 2.7307,
      "step": 14
    },
    {
      "epoch": 0.016953941791466517,
      "grad_norm": 5.4854416847229,
      "learning_rate": 9.98292246503335e-05,
      "loss": 2.6597,
      "step": 15
    },
    {
      "epoch": 0.018084204577564284,
      "grad_norm": 8.040909767150879,
      "learning_rate": 9.975414512725057e-05,
      "loss": 2.669,
      "step": 16
    },
    {
      "epoch": 0.01921446736366205,
      "grad_norm": 5.405959606170654,
      "learning_rate": 9.966546331768191e-05,
      "loss": 2.6124,
      "step": 17
    },
    {
      "epoch": 0.020344730149759818,
      "grad_norm": 6.204880237579346,
      "learning_rate": 9.956320346634876e-05,
      "loss": 2.6543,
      "step": 18
    },
    {
      "epoch": 0.021474992935857588,
      "grad_norm": 5.449324607849121,
      "learning_rate": 9.944739353007344e-05,
      "loss": 2.7599,
      "step": 19
    },
    {
      "epoch": 0.022605255721955354,
      "grad_norm": 5.886956691741943,
      "learning_rate": 9.931806517013612e-05,
      "loss": 2.7869,
      "step": 20
    },
    {
      "epoch": 0.02373551850805312,
      "grad_norm": 4.629397392272949,
      "learning_rate": 9.917525374361912e-05,
      "loss": 2.757,
      "step": 21
    },
    {
      "epoch": 0.02486578129415089,
      "grad_norm": 4.251619815826416,
      "learning_rate": 9.901899829374047e-05,
      "loss": 2.5278,
      "step": 22
    },
    {
      "epoch": 0.025996044080248658,
      "grad_norm": 5.332093715667725,
      "learning_rate": 9.884934153917997e-05,
      "loss": 2.7128,
      "step": 23
    },
    {
      "epoch": 0.027126306866346424,
      "grad_norm": 4.628209590911865,
      "learning_rate": 9.86663298624003e-05,
      "loss": 2.6893,
      "step": 24
    },
    {
      "epoch": 0.028256569652444195,
      "grad_norm": 5.050840377807617,
      "learning_rate": 9.847001329696653e-05,
      "loss": 2.873,
      "step": 25
    },
    {
      "epoch": 0.02938683243854196,
      "grad_norm": 15.831549644470215,
      "learning_rate": 9.826044551386744e-05,
      "loss": 2.764,
      "step": 26
    },
    {
      "epoch": 0.030517095224639728,
      "grad_norm": 5.640730857849121,
      "learning_rate": 9.803768380684242e-05,
      "loss": 2.8243,
      "step": 27
    },
    {
      "epoch": 0.031647358010737495,
      "grad_norm": 4.622921943664551,
      "learning_rate": 9.780178907671789e-05,
      "loss": 2.8895,
      "step": 28
    },
    {
      "epoch": 0.032777620796835265,
      "grad_norm": 3.4673752784729004,
      "learning_rate": 9.755282581475769e-05,
      "loss": 2.6753,
      "step": 29
    },
    {
      "epoch": 0.033907883582933035,
      "grad_norm": 4.400412559509277,
      "learning_rate": 9.729086208503174e-05,
      "loss": 2.918,
      "step": 30
    },
    {
      "epoch": 0.0350381463690308,
      "grad_norm": 30.6713809967041,
      "learning_rate": 9.701596950580806e-05,
      "loss": 2.8105,
      "step": 31
    },
    {
      "epoch": 0.03616840915512857,
      "grad_norm": 4.252901077270508,
      "learning_rate": 9.672822322997305e-05,
      "loss": 2.8125,
      "step": 32
    },
    {
      "epoch": 0.03729867194122634,
      "grad_norm": 4.446712970733643,
      "learning_rate": 9.642770192448536e-05,
      "loss": 2.7987,
      "step": 33
    },
    {
      "epoch": 0.0384289347273241,
      "grad_norm": 4.5988688468933105,
      "learning_rate": 9.611448774886924e-05,
      "loss": 2.8463,
      "step": 34
    },
    {
      "epoch": 0.03955919751342187,
      "grad_norm": 3.57090163230896,
      "learning_rate": 9.578866633275288e-05,
      "loss": 2.8395,
      "step": 35
    },
    {
      "epoch": 0.040689460299519635,
      "grad_norm": 5.003127574920654,
      "learning_rate": 9.545032675245813e-05,
      "loss": 2.8054,
      "step": 36
    },
    {
      "epoch": 0.041819723085617405,
      "grad_norm": 4.430306911468506,
      "learning_rate": 9.509956150664796e-05,
      "loss": 2.9095,
      "step": 37
    },
    {
      "epoch": 0.042949985871715175,
      "grad_norm": 3.776151180267334,
      "learning_rate": 9.473646649103818e-05,
      "loss": 2.7794,
      "step": 38
    },
    {
      "epoch": 0.04408024865781294,
      "grad_norm": 4.0163373947143555,
      "learning_rate": 9.43611409721806e-05,
      "loss": 2.8656,
      "step": 39
    },
    {
      "epoch": 0.04521051144391071,
      "grad_norm": 3.8399384021759033,
      "learning_rate": 9.397368756032445e-05,
      "loss": 2.8012,
      "step": 40
    },
    {
      "epoch": 0.04634077423000848,
      "grad_norm": 3.859337568283081,
      "learning_rate": 9.357421218136386e-05,
      "loss": 2.9344,
      "step": 41
    },
    {
      "epoch": 0.04747103701610624,
      "grad_norm": 3.8300116062164307,
      "learning_rate": 9.316282404787871e-05,
      "loss": 2.8931,
      "step": 42
    },
    {
      "epoch": 0.04860129980220401,
      "grad_norm": 3.786048412322998,
      "learning_rate": 9.273963562927695e-05,
      "loss": 2.8088,
      "step": 43
    },
    {
      "epoch": 0.04973156258830178,
      "grad_norm": 3.4179418087005615,
      "learning_rate": 9.230476262104677e-05,
      "loss": 2.8516,
      "step": 44
    },
    {
      "epoch": 0.050861825374399545,
      "grad_norm": 3.5727052688598633,
      "learning_rate": 9.185832391312644e-05,
      "loss": 2.7993,
      "step": 45
    },
    {
      "epoch": 0.051992088160497316,
      "grad_norm": 3.7449238300323486,
      "learning_rate": 9.140044155740101e-05,
      "loss": 2.1631,
      "step": 46
    },
    {
      "epoch": 0.053122350946595086,
      "grad_norm": 3.2603912353515625,
      "learning_rate": 9.093124073433463e-05,
      "loss": 1.8704,
      "step": 47
    },
    {
      "epoch": 0.05425261373269285,
      "grad_norm": 2.6630172729492188,
      "learning_rate": 9.045084971874738e-05,
      "loss": 1.6844,
      "step": 48
    },
    {
      "epoch": 0.05538287651879062,
      "grad_norm": 3.6026859283447266,
      "learning_rate": 8.995939984474624e-05,
      "loss": 2.0858,
      "step": 49
    },
    {
      "epoch": 0.05651313930488839,
      "grad_norm": 3.2763783931732178,
      "learning_rate": 8.945702546981969e-05,
      "loss": 1.8216,
      "step": 50
    },
    {
      "epoch": 0.05651313930488839,
      "eval_loss": 0.6686553955078125,
      "eval_runtime": 106.943,
      "eval_samples_per_second": 13.933,
      "eval_steps_per_second": 3.488,
      "step": 50
    }
  ],
  "logging_steps": 1,
  "max_steps": 200,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 50,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 5,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 7.15808242139136e+16,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}