{
  "best_metric": 0.2368658483028412,
  "best_model_checkpoint": "miner_id_24/checkpoint-50",
  "epoch": 0.11056595950521733,
  "eval_steps": 25,
  "global_step": 50,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0022113191901043465,
      "grad_norm": 1266.8173828125,
      "learning_rate": 5e-05,
      "loss": 130.6273,
      "step": 1
    },
    {
      "epoch": 0.0022113191901043465,
      "eval_loss": 3.3367362022399902,
      "eval_runtime": 3.0918,
      "eval_samples_per_second": 16.172,
      "eval_steps_per_second": 4.205,
      "step": 1
    },
    {
      "epoch": 0.004422638380208693,
      "grad_norm": 1228.9942626953125,
      "learning_rate": 0.0001,
      "loss": 135.8005,
      "step": 2
    },
    {
      "epoch": 0.00663395757031304,
      "grad_norm": 893.6980590820312,
      "learning_rate": 9.990365154573717e-05,
      "loss": 81.7229,
      "step": 3
    },
    {
      "epoch": 0.008845276760417386,
      "grad_norm": 616.2275390625,
      "learning_rate": 9.961501876182148e-05,
      "loss": 50.632,
      "step": 4
    },
    {
      "epoch": 0.011056595950521733,
      "grad_norm": 1459.5819091796875,
      "learning_rate": 9.913533761814537e-05,
      "loss": 75.3263,
      "step": 5
    },
    {
      "epoch": 0.01326791514062608,
      "grad_norm": 315.1625671386719,
      "learning_rate": 9.846666218300807e-05,
      "loss": 21.8838,
      "step": 6
    },
    {
      "epoch": 0.015479234330730427,
      "grad_norm": 1401.111083984375,
      "learning_rate": 9.761185582727977e-05,
      "loss": 39.3447,
      "step": 7
    },
    {
      "epoch": 0.017690553520834772,
      "grad_norm": 298.81280517578125,
      "learning_rate": 9.657457896300791e-05,
      "loss": 19.4309,
      "step": 8
    },
    {
      "epoch": 0.01990187271093912,
      "grad_norm": 422.7381591796875,
      "learning_rate": 9.535927336897098e-05,
      "loss": 22.1325,
      "step": 9
    },
    {
      "epoch": 0.022113191901043466,
      "grad_norm": 443.7731628417969,
      "learning_rate": 9.397114317029975e-05,
      "loss": 19.4608,
      "step": 10
    },
    {
      "epoch": 0.02432451109114781,
      "grad_norm": 230.34886169433594,
      "learning_rate": 9.241613255361455e-05,
      "loss": 14.7356,
      "step": 11
    },
    {
      "epoch": 0.02653583028125216,
      "grad_norm": 370.49786376953125,
      "learning_rate": 9.070090031310558e-05,
      "loss": 11.2922,
      "step": 12
    },
    {
      "epoch": 0.028747149471356506,
      "grad_norm": 249.0677947998047,
      "learning_rate": 8.883279133655399e-05,
      "loss": 13.2127,
      "step": 13
    },
    {
      "epoch": 0.030958468661460854,
      "grad_norm": 180.40122985839844,
      "learning_rate": 8.681980515339464e-05,
      "loss": 11.3355,
      "step": 14
    },
    {
      "epoch": 0.0331697878515652,
      "grad_norm": 233.73875427246094,
      "learning_rate": 8.467056167950311e-05,
      "loss": 12.585,
      "step": 15
    },
    {
      "epoch": 0.035381107041669545,
      "grad_norm": 191.15699768066406,
      "learning_rate": 8.239426430539243e-05,
      "loss": 13.1807,
      "step": 16
    },
    {
      "epoch": 0.03759242623177389,
      "grad_norm": 191.31100463867188,
      "learning_rate": 8.000066048588211e-05,
      "loss": 14.3639,
      "step": 17
    },
    {
      "epoch": 0.03980374542187824,
      "grad_norm": 175.0061798095703,
      "learning_rate": 7.75e-05,
      "loss": 11.2044,
      "step": 18
    },
    {
      "epoch": 0.04201506461198259,
      "grad_norm": 147.7163848876953,
      "learning_rate": 7.490299105985507e-05,
      "loss": 12.4057,
      "step": 19
    },
    {
      "epoch": 0.04422638380208693,
      "grad_norm": 163.385009765625,
      "learning_rate": 7.222075445642904e-05,
      "loss": 9.6362,
      "step": 20
    },
    {
      "epoch": 0.04643770299219128,
      "grad_norm": 140.2444610595703,
      "learning_rate": 6.946477593864228e-05,
      "loss": 7.4195,
      "step": 21
    },
    {
      "epoch": 0.04864902218229562,
      "grad_norm": 136.61859130859375,
      "learning_rate": 6.664685702961344e-05,
      "loss": 10.4054,
      "step": 22
    },
    {
      "epoch": 0.050860341372399975,
      "grad_norm": 157.8791961669922,
      "learning_rate": 6.377906449072578e-05,
      "loss": 10.6133,
      "step": 23
    },
    {
      "epoch": 0.05307166056250432,
      "grad_norm": 169.0653839111328,
      "learning_rate": 6.087367864990233e-05,
      "loss": 9.754,
      "step": 24
    },
    {
      "epoch": 0.055282979752608666,
      "grad_norm": 191.57464599609375,
      "learning_rate": 5.794314081535644e-05,
      "loss": 11.1938,
      "step": 25
    },
    {
      "epoch": 0.055282979752608666,
      "eval_loss": 0.30715107917785645,
      "eval_runtime": 3.0653,
      "eval_samples_per_second": 16.312,
      "eval_steps_per_second": 4.241,
      "step": 25
    },
    {
      "epoch": 0.05749429894271301,
      "grad_norm": 112.55488586425781,
      "learning_rate": 5.500000000000001e-05,
      "loss": 9.1382,
      "step": 26
    },
    {
      "epoch": 0.059705618132817356,
      "grad_norm": 166.33534240722656,
      "learning_rate": 5.205685918464356e-05,
      "loss": 9.9689,
      "step": 27
    },
    {
      "epoch": 0.06191693732292171,
      "grad_norm": 146.36526489257812,
      "learning_rate": 4.912632135009769e-05,
      "loss": 10.5303,
      "step": 28
    },
    {
      "epoch": 0.06412825651302605,
      "grad_norm": 121.34736633300781,
      "learning_rate": 4.6220935509274235e-05,
      "loss": 9.7238,
      "step": 29
    },
    {
      "epoch": 0.0663395757031304,
      "grad_norm": 87.24940490722656,
      "learning_rate": 4.3353142970386564e-05,
      "loss": 6.6125,
      "step": 30
    },
    {
      "epoch": 0.06855089489323475,
      "grad_norm": 96.96258544921875,
      "learning_rate": 4.053522406135775e-05,
      "loss": 8.0596,
      "step": 31
    },
    {
      "epoch": 0.07076221408333909,
      "grad_norm": 122.00680541992188,
      "learning_rate": 3.777924554357096e-05,
      "loss": 8.7681,
      "step": 32
    },
    {
      "epoch": 0.07297353327344344,
      "grad_norm": 114.9303207397461,
      "learning_rate": 3.509700894014496e-05,
      "loss": 10.3328,
      "step": 33
    },
    {
      "epoch": 0.07518485246354778,
      "grad_norm": 112.72858428955078,
      "learning_rate": 3.250000000000001e-05,
      "loss": 8.9349,
      "step": 34
    },
    {
      "epoch": 0.07739617165365213,
      "grad_norm": 100.95106506347656,
      "learning_rate": 2.9999339514117912e-05,
      "loss": 8.6053,
      "step": 35
    },
    {
      "epoch": 0.07960749084375648,
      "grad_norm": 95.06898498535156,
      "learning_rate": 2.760573569460757e-05,
      "loss": 7.9513,
      "step": 36
    },
    {
      "epoch": 0.08181881003386082,
      "grad_norm": 101.6524658203125,
      "learning_rate": 2.53294383204969e-05,
      "loss": 8.3089,
      "step": 37
    },
    {
      "epoch": 0.08403012922396517,
      "grad_norm": 89.02762603759766,
      "learning_rate": 2.3180194846605367e-05,
      "loss": 6.7646,
      "step": 38
    },
    {
      "epoch": 0.08624144841406951,
      "grad_norm": 184.59141540527344,
      "learning_rate": 2.1167208663446025e-05,
      "loss": 12.0435,
      "step": 39
    },
    {
      "epoch": 0.08845276760417387,
      "grad_norm": 123.04720306396484,
      "learning_rate": 1.9299099686894423e-05,
      "loss": 8.9847,
      "step": 40
    },
    {
      "epoch": 0.09066408679427822,
      "grad_norm": 127.1465072631836,
      "learning_rate": 1.758386744638546e-05,
      "loss": 9.742,
      "step": 41
    },
    {
      "epoch": 0.09287540598438256,
      "grad_norm": 91.50184631347656,
      "learning_rate": 1.602885682970026e-05,
      "loss": 8.8613,
      "step": 42
    },
    {
      "epoch": 0.09508672517448691,
      "grad_norm": 121.56517791748047,
      "learning_rate": 1.464072663102903e-05,
      "loss": 11.1114,
      "step": 43
    },
    {
      "epoch": 0.09729804436459125,
      "grad_norm": 167.5218048095703,
      "learning_rate": 1.3425421036992098e-05,
      "loss": 8.5462,
      "step": 44
    },
    {
      "epoch": 0.0995093635546956,
      "grad_norm": 109.18016815185547,
      "learning_rate": 1.2388144172720251e-05,
      "loss": 6.4312,
      "step": 45
    },
    {
      "epoch": 0.10172068274479995,
      "grad_norm": 119.26715087890625,
      "learning_rate": 1.1533337816991932e-05,
      "loss": 6.6959,
      "step": 46
    },
    {
      "epoch": 0.10393200193490429,
      "grad_norm": 88.47723388671875,
      "learning_rate": 1.0864662381854632e-05,
      "loss": 7.4441,
      "step": 47
    },
    {
      "epoch": 0.10614332112500864,
      "grad_norm": 90.03621673583984,
      "learning_rate": 1.0384981238178534e-05,
      "loss": 6.4245,
      "step": 48
    },
    {
      "epoch": 0.10835464031511298,
      "grad_norm": 180.63720703125,
      "learning_rate": 1.0096348454262845e-05,
      "loss": 9.3648,
      "step": 49
    },
    {
      "epoch": 0.11056595950521733,
      "grad_norm": 119.29557800292969,
      "learning_rate": 1e-05,
      "loss": 9.1602,
      "step": 50
    },
    {
      "epoch": 0.11056595950521733,
      "eval_loss": 0.2368658483028412,
      "eval_runtime": 3.0471,
      "eval_samples_per_second": 16.409,
      "eval_steps_per_second": 4.266,
      "step": 50
    }
  ],
  "logging_steps": 1,
  "max_steps": 50,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 25,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 1,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 5.658027780734976e+17,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}