{
  "best_metric": 0.5707394480705261,
  "best_model_checkpoint": "miner_id_24/checkpoint-50",
  "epoch": 0.05370569280343716,
  "eval_steps": 25,
  "global_step": 50,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0010741138560687433,
      "grad_norm": 1.300419807434082,
      "learning_rate": 0.0001,
      "loss": 2.6542,
      "step": 1
    },
    {
      "epoch": 0.0010741138560687433,
      "eval_loss": 2.067019462585449,
      "eval_runtime": 253.6367,
      "eval_samples_per_second": 1.546,
      "eval_steps_per_second": 0.773,
      "step": 1
    },
    {
      "epoch": 0.0021482277121374865,
      "grad_norm": 1.5085821151733398,
      "learning_rate": 0.0002,
      "loss": 3.5856,
      "step": 2
    },
    {
      "epoch": 0.00322234156820623,
      "grad_norm": 4.469718933105469,
      "learning_rate": 0.00019978589232386035,
      "loss": 4.6187,
      "step": 3
    },
    {
      "epoch": 0.004296455424274973,
      "grad_norm": 3.348555088043213,
      "learning_rate": 0.00019914448613738106,
      "loss": 4.2747,
      "step": 4
    },
    {
      "epoch": 0.0053705692803437165,
      "grad_norm": 5.730258941650391,
      "learning_rate": 0.00019807852804032305,
      "loss": 3.7536,
      "step": 5
    },
    {
      "epoch": 0.00644468313641246,
      "grad_norm": 5.00005578994751,
      "learning_rate": 0.00019659258262890683,
      "loss": 3.574,
      "step": 6
    },
    {
      "epoch": 0.007518796992481203,
      "grad_norm": 5.454052925109863,
      "learning_rate": 0.0001946930129495106,
      "loss": 3.3832,
      "step": 7
    },
    {
      "epoch": 0.008592910848549946,
      "grad_norm": 3.948171854019165,
      "learning_rate": 0.0001923879532511287,
      "loss": 2.9389,
      "step": 8
    },
    {
      "epoch": 0.00966702470461869,
      "grad_norm": 3.8745367527008057,
      "learning_rate": 0.00018968727415326884,
      "loss": 2.6021,
      "step": 9
    },
    {
      "epoch": 0.010741138560687433,
      "grad_norm": 6.631568908691406,
      "learning_rate": 0.00018660254037844388,
      "loss": 2.42,
      "step": 10
    },
    {
      "epoch": 0.011815252416756176,
      "grad_norm": 3.7132058143615723,
      "learning_rate": 0.00018314696123025454,
      "loss": 2.8465,
      "step": 11
    },
    {
      "epoch": 0.01288936627282492,
      "grad_norm": 3.5346767902374268,
      "learning_rate": 0.00017933533402912354,
      "loss": 2.2022,
      "step": 12
    },
    {
      "epoch": 0.013963480128893663,
      "grad_norm": 3.1227457523345947,
      "learning_rate": 0.00017518398074789775,
      "loss": 2.2492,
      "step": 13
    },
    {
      "epoch": 0.015037593984962405,
      "grad_norm": 2.1377246379852295,
      "learning_rate": 0.00017071067811865476,
      "loss": 2.1818,
      "step": 14
    },
    {
      "epoch": 0.01611170784103115,
      "grad_norm": 1.9351719617843628,
      "learning_rate": 0.00016593458151000688,
      "loss": 1.5732,
      "step": 15
    },
    {
      "epoch": 0.017185821697099892,
      "grad_norm": 2.7266533374786377,
      "learning_rate": 0.00016087614290087208,
      "loss": 1.8584,
      "step": 16
    },
    {
      "epoch": 0.018259935553168637,
      "grad_norm": 2.3338723182678223,
      "learning_rate": 0.00015555702330196023,
      "loss": 1.66,
      "step": 17
    },
    {
      "epoch": 0.01933404940923738,
      "grad_norm": 3.5971460342407227,
      "learning_rate": 0.00015000000000000001,
      "loss": 2.2369,
      "step": 18
    },
    {
      "epoch": 0.02040816326530612,
      "grad_norm": 1.9934290647506714,
      "learning_rate": 0.00014422886902190014,
      "loss": 1.9199,
      "step": 19
    },
    {
      "epoch": 0.021482277121374866,
      "grad_norm": 2.5816171169281006,
      "learning_rate": 0.000138268343236509,
      "loss": 1.8563,
      "step": 20
    },
    {
      "epoch": 0.022556390977443608,
      "grad_norm": 2.795732259750366,
      "learning_rate": 0.00013214394653031616,
      "loss": 2.0038,
      "step": 21
    },
    {
      "epoch": 0.023630504833512353,
      "grad_norm": 2.574127674102783,
      "learning_rate": 0.00012588190451025207,
      "loss": 2.1637,
      "step": 22
    },
    {
      "epoch": 0.024704618689581095,
      "grad_norm": 3.4169461727142334,
      "learning_rate": 0.00011950903220161285,
      "loss": 2.9757,
      "step": 23
    },
    {
      "epoch": 0.02577873254564984,
      "grad_norm": 2.764451742172241,
      "learning_rate": 0.00011305261922200519,
      "loss": 1.8735,
      "step": 24
    },
    {
      "epoch": 0.02685284640171858,
      "grad_norm": 3.691221237182617,
      "learning_rate": 0.00010654031292301432,
      "loss": 2.1683,
      "step": 25
    },
    {
      "epoch": 0.02685284640171858,
      "eval_loss": 0.6165056824684143,
      "eval_runtime": 254.8895,
      "eval_samples_per_second": 1.538,
      "eval_steps_per_second": 0.769,
      "step": 25
    },
    {
      "epoch": 0.027926960257787327,
      "grad_norm": 3.5959391593933105,
      "learning_rate": 0.0001,
      "loss": 2.7773,
      "step": 26
    },
    {
      "epoch": 0.02900107411385607,
      "grad_norm": 3.213015079498291,
      "learning_rate": 9.345968707698569e-05,
      "loss": 2.4237,
      "step": 27
    },
    {
      "epoch": 0.03007518796992481,
      "grad_norm": 3.6218926906585693,
      "learning_rate": 8.694738077799488e-05,
      "loss": 2.8496,
      "step": 28
    },
    {
      "epoch": 0.031149301825993556,
      "grad_norm": 3.0756144523620605,
      "learning_rate": 8.049096779838719e-05,
      "loss": 1.9848,
      "step": 29
    },
    {
      "epoch": 0.0322234156820623,
      "grad_norm": 3.864434003829956,
      "learning_rate": 7.411809548974792e-05,
      "loss": 3.1022,
      "step": 30
    },
    {
      "epoch": 0.03329752953813104,
      "grad_norm": 3.296337842941284,
      "learning_rate": 6.785605346968386e-05,
      "loss": 2.2655,
      "step": 31
    },
    {
      "epoch": 0.034371643394199784,
      "grad_norm": 3.269047498703003,
      "learning_rate": 6.173165676349103e-05,
      "loss": 2.284,
      "step": 32
    },
    {
      "epoch": 0.03544575725026853,
      "grad_norm": 3.5344269275665283,
      "learning_rate": 5.577113097809989e-05,
      "loss": 1.8864,
      "step": 33
    },
    {
      "epoch": 0.036519871106337275,
      "grad_norm": 4.668367385864258,
      "learning_rate": 5.000000000000002e-05,
      "loss": 2.3255,
      "step": 34
    },
    {
      "epoch": 0.03759398496240601,
      "grad_norm": 4.970749855041504,
      "learning_rate": 4.444297669803981e-05,
      "loss": 2.8352,
      "step": 35
    },
    {
      "epoch": 0.03866809881847476,
      "grad_norm": 3.3336353302001953,
      "learning_rate": 3.9123857099127936e-05,
      "loss": 2.0678,
      "step": 36
    },
    {
      "epoch": 0.0397422126745435,
      "grad_norm": 3.146103620529175,
      "learning_rate": 3.406541848999312e-05,
      "loss": 1.6564,
      "step": 37
    },
    {
      "epoch": 0.04081632653061224,
      "grad_norm": 4.855753421783447,
      "learning_rate": 2.9289321881345254e-05,
      "loss": 2.3859,
      "step": 38
    },
    {
      "epoch": 0.04189044038668099,
      "grad_norm": 4.550353050231934,
      "learning_rate": 2.4816019252102273e-05,
      "loss": 2.1639,
      "step": 39
    },
    {
      "epoch": 0.04296455424274973,
      "grad_norm": 7.876417636871338,
      "learning_rate": 2.0664665970876496e-05,
      "loss": 2.2466,
      "step": 40
    },
    {
      "epoch": 0.04403866809881848,
      "grad_norm": 4.67519474029541,
      "learning_rate": 1.6853038769745467e-05,
      "loss": 1.9059,
      "step": 41
    },
    {
      "epoch": 0.045112781954887216,
      "grad_norm": 5.316860675811768,
      "learning_rate": 1.339745962155613e-05,
      "loss": 2.3931,
      "step": 42
    },
    {
      "epoch": 0.04618689581095596,
      "grad_norm": 4.196220874786377,
      "learning_rate": 1.0312725846731175e-05,
      "loss": 2.2159,
      "step": 43
    },
    {
      "epoch": 0.047261009667024706,
      "grad_norm": 4.706377029418945,
      "learning_rate": 7.612046748871327e-06,
      "loss": 2.0003,
      "step": 44
    },
    {
      "epoch": 0.04833512352309345,
      "grad_norm": 5.4141154289245605,
      "learning_rate": 5.306987050489442e-06,
      "loss": 2.9123,
      "step": 45
    },
    {
      "epoch": 0.04940923737916219,
      "grad_norm": 6.08075475692749,
      "learning_rate": 3.40741737109318e-06,
      "loss": 2.6994,
      "step": 46
    },
    {
      "epoch": 0.050483351235230935,
      "grad_norm": 4.99581241607666,
      "learning_rate": 1.921471959676957e-06,
      "loss": 3.621,
      "step": 47
    },
    {
      "epoch": 0.05155746509129968,
      "grad_norm": 7.598954677581787,
      "learning_rate": 8.555138626189618e-07,
      "loss": 2.4808,
      "step": 48
    },
    {
      "epoch": 0.05263157894736842,
      "grad_norm": 6.14135217666626,
      "learning_rate": 2.141076761396521e-07,
      "loss": 2.6089,
      "step": 49
    },
    {
      "epoch": 0.05370569280343716,
      "grad_norm": 9.300232887268066,
      "learning_rate": 0.0,
      "loss": 1.8088,
      "step": 50
    },
    {
      "epoch": 0.05370569280343716,
      "eval_loss": 0.5707394480705261,
      "eval_runtime": 255.0357,
      "eval_samples_per_second": 1.537,
      "eval_steps_per_second": 0.769,
      "step": 50
    }
  ],
  "logging_steps": 1,
  "max_steps": 50,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 25,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 1,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 6.53464644354048e+16,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}