{
  "best_metric": 1.356024980545044,
  "best_model_checkpoint": "miner_id_24/checkpoint-500",
  "epoch": 0.18392495861688432,
  "eval_steps": 50,
  "global_step": 500,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0003678499172337686,
      "eval_loss": 4.303531169891357,
      "eval_runtime": 107.5379,
      "eval_samples_per_second": 10.647,
      "eval_steps_per_second": 2.669,
      "step": 1
    },
    {
      "epoch": 0.0036784991723376862,
      "grad_norm": 1.8898500204086304,
      "learning_rate": 4.36e-05,
      "loss": 3.7144,
      "step": 10
    },
    {
      "epoch": 0.0073569983446753725,
      "grad_norm": 1.033040165901184,
      "learning_rate": 8.72e-05,
      "loss": 2.381,
      "step": 20
    },
    {
      "epoch": 0.011035497517013059,
      "grad_norm": 0.8872012495994568,
      "learning_rate": 0.0001308,
      "loss": 1.7757,
      "step": 30
    },
    {
      "epoch": 0.014713996689350745,
      "grad_norm": 0.9310850501060486,
      "learning_rate": 0.0001744,
      "loss": 1.6203,
      "step": 40
    },
    {
      "epoch": 0.01839249586168843,
      "grad_norm": 0.9658544063568115,
      "learning_rate": 0.000218,
      "loss": 1.4421,
      "step": 50
    },
    {
      "epoch": 0.01839249586168843,
      "eval_loss": 1.661220669746399,
      "eval_runtime": 107.1147,
      "eval_samples_per_second": 10.689,
      "eval_steps_per_second": 2.679,
      "step": 50
    },
    {
      "epoch": 0.022070995034026118,
      "grad_norm": 0.6637513637542725,
      "learning_rate": 0.00021773448147832086,
      "loss": 1.695,
      "step": 60
    },
    {
      "epoch": 0.025749494206363802,
      "grad_norm": 0.7009419202804565,
      "learning_rate": 0.0002169392194928312,
      "loss": 1.5961,
      "step": 70
    },
    {
      "epoch": 0.02942799337870149,
      "grad_norm": 0.7631779313087463,
      "learning_rate": 0.00021561808847998484,
      "loss": 1.5457,
      "step": 80
    },
    {
      "epoch": 0.033106492551039174,
      "grad_norm": 0.7046871185302734,
      "learning_rate": 0.00021377752485727676,
      "loss": 1.4687,
      "step": 90
    },
    {
      "epoch": 0.03678499172337686,
      "grad_norm": 0.961662769317627,
      "learning_rate": 0.00021142649566566402,
      "loss": 1.4402,
      "step": 100
    },
    {
      "epoch": 0.03678499172337686,
      "eval_loss": 1.5570803880691528,
      "eval_runtime": 107.2267,
      "eval_samples_per_second": 10.678,
      "eval_steps_per_second": 2.677,
      "step": 100
    },
    {
      "epoch": 0.04046349089571455,
      "grad_norm": 0.6058881878852844,
      "learning_rate": 0.0002085764548830435,
      "loss": 1.6218,
      "step": 110
    },
    {
      "epoch": 0.044141990068052236,
      "grad_norm": 0.73408043384552,
      "learning_rate": 0.00020524128762162305,
      "loss": 1.4987,
      "step": 120
    },
    {
      "epoch": 0.047820489240389924,
      "grad_norm": 0.6772788763046265,
      "learning_rate": 0.00020143724248105043,
      "loss": 1.5004,
      "step": 130
    },
    {
      "epoch": 0.051498988412727605,
      "grad_norm": 0.6476417183876038,
      "learning_rate": 0.0001971828523868693,
      "loss": 1.4431,
      "step": 140
    },
    {
      "epoch": 0.05517748758506529,
      "grad_norm": 0.8651573657989502,
      "learning_rate": 0.0001924988442999686,
      "loss": 1.3421,
      "step": 150
    },
    {
      "epoch": 0.05517748758506529,
      "eval_loss": 1.5231226682662964,
      "eval_runtime": 107.1783,
      "eval_samples_per_second": 10.683,
      "eval_steps_per_second": 2.678,
      "step": 150
    },
    {
      "epoch": 0.05885598675740298,
      "grad_norm": 0.6997436285018921,
      "learning_rate": 0.00018740803823691298,
      "loss": 1.5923,
      "step": 160
    },
    {
      "epoch": 0.06253448592974066,
      "grad_norm": 0.657393217086792,
      "learning_rate": 0.00018193523609311556,
      "loss": 1.5695,
      "step": 170
    },
    {
      "epoch": 0.06621298510207835,
      "grad_norm": 0.6620543003082275,
      "learning_rate": 0.00017610710081049675,
      "loss": 1.4328,
      "step": 180
    },
    {
      "epoch": 0.06989148427441604,
      "grad_norm": 0.6951943635940552,
      "learning_rate": 0.00016995202647831142,
      "loss": 1.3771,
      "step": 190
    },
    {
      "epoch": 0.07356998344675372,
      "grad_norm": 0.8763971924781799,
      "learning_rate": 0.00016350000000000002,
      "loss": 1.3408,
      "step": 200
    },
    {
      "epoch": 0.07356998344675372,
      "eval_loss": 1.497921347618103,
      "eval_runtime": 107.1984,
      "eval_samples_per_second": 10.681,
      "eval_steps_per_second": 2.677,
      "step": 200
    },
    {
      "epoch": 0.07724848261909141,
      "grad_norm": 0.6477477550506592,
      "learning_rate": 0.00015678245500000943,
      "loss": 1.533,
      "step": 210
    },
    {
      "epoch": 0.0809269817914291,
      "grad_norm": 0.6087270975112915,
      "learning_rate": 0.00014983211868233444,
      "loss": 1.4701,
      "step": 220
    },
    {
      "epoch": 0.08460548096376679,
      "grad_norm": 0.6840710043907166,
      "learning_rate": 0.00014268285238686927,
      "loss": 1.4357,
      "step": 230
    },
    {
      "epoch": 0.08828398013610447,
      "grad_norm": 0.6622298359870911,
      "learning_rate": 0.00013536948662036378,
      "loss": 1.3774,
      "step": 240
    },
    {
      "epoch": 0.09196247930844216,
      "grad_norm": 0.7629197835922241,
      "learning_rate": 0.00012792765136569544,
      "loss": 1.2905,
      "step": 250
    },
    {
      "epoch": 0.09196247930844216,
      "eval_loss": 1.455014944076538,
      "eval_runtime": 107.2008,
      "eval_samples_per_second": 10.681,
      "eval_steps_per_second": 2.677,
      "step": 250
    },
    {
      "epoch": 0.09564097848077985,
      "grad_norm": 0.595523476600647,
      "learning_rate": 0.00012039360249617425,
      "loss": 1.582,
      "step": 260
    },
    {
      "epoch": 0.09931947765311752,
      "grad_norm": 0.5969820618629456,
      "learning_rate": 0.00011280404514057264,
      "loss": 1.4637,
      "step": 270
    },
    {
      "epoch": 0.10299797682545521,
      "grad_norm": 0.6512609124183655,
      "learning_rate": 0.00010519595485942743,
      "loss": 1.4196,
      "step": 280
    },
    {
      "epoch": 0.1066764759977929,
      "grad_norm": 0.6744258403778076,
      "learning_rate": 9.76063975038258e-05,
      "loss": 1.3897,
      "step": 290
    },
    {
      "epoch": 0.11035497517013058,
      "grad_norm": 0.8375758528709412,
      "learning_rate": 9.00723486343046e-05,
      "loss": 1.2334,
      "step": 300
    },
    {
      "epoch": 0.11035497517013058,
      "eval_loss": 1.4276777505874634,
      "eval_runtime": 107.3683,
      "eval_samples_per_second": 10.664,
      "eval_steps_per_second": 2.673,
      "step": 300
    },
    {
      "epoch": 0.11403347434246827,
      "grad_norm": 0.576554000377655,
      "learning_rate": 8.263051337963623e-05,
      "loss": 1.5197,
      "step": 310
    },
    {
      "epoch": 0.11771197351480596,
      "grad_norm": 0.5713962912559509,
      "learning_rate": 7.531714761313074e-05,
      "loss": 1.5168,
      "step": 320
    },
    {
      "epoch": 0.12139047268714365,
      "grad_norm": 0.5897490382194519,
      "learning_rate": 6.816788131766559e-05,
      "loss": 1.4228,
      "step": 330
    },
    {
      "epoch": 0.12506897185948132,
      "grad_norm": 0.5949689745903015,
      "learning_rate": 6.121754499999055e-05,
      "loss": 1.2882,
      "step": 340
    },
    {
      "epoch": 0.12874747103181902,
      "grad_norm": 0.8545028567314148,
      "learning_rate": 5.450000000000003e-05,
      "loss": 1.2774,
      "step": 350
    },
    {
      "epoch": 0.12874747103181902,
      "eval_loss": 1.39708411693573,
      "eval_runtime": 107.4334,
      "eval_samples_per_second": 10.658,
      "eval_steps_per_second": 2.671,
      "step": 350
    },
    {
      "epoch": 0.1324259702041567,
      "grad_norm": 0.5843435525894165,
      "learning_rate": 4.804797352168861e-05,
      "loss": 1.4973,
      "step": 360
    },
    {
      "epoch": 0.1361044693764944,
      "grad_norm": 0.6044478416442871,
      "learning_rate": 4.189289918950325e-05,
      "loss": 1.4441,
      "step": 370
    },
    {
      "epoch": 0.13978296854883207,
      "grad_norm": 0.597599446773529,
      "learning_rate": 3.606476390688449e-05,
      "loss": 1.3198,
      "step": 380
    },
    {
      "epoch": 0.14346146772116977,
      "grad_norm": 0.6726983785629272,
      "learning_rate": 3.0591961763087043e-05,
      "loss": 1.3688,
      "step": 390
    },
    {
      "epoch": 0.14713996689350745,
      "grad_norm": 0.7687901258468628,
      "learning_rate": 2.550115570003141e-05,
      "loss": 1.2919,
      "step": 400
    },
    {
      "epoch": 0.14713996689350745,
      "eval_loss": 1.367588758468628,
      "eval_runtime": 107.2738,
      "eval_samples_per_second": 10.674,
      "eval_steps_per_second": 2.675,
      "step": 400
    },
    {
      "epoch": 0.15081846606584515,
      "grad_norm": 0.5578694343566895,
      "learning_rate": 2.081714761313074e-05,
      "loss": 1.458,
      "step": 410
    },
    {
      "epoch": 0.15449696523818282,
      "grad_norm": 0.5921757817268372,
      "learning_rate": 1.656275751894957e-05,
      "loss": 1.409,
      "step": 420
    },
    {
      "epoch": 0.1581754644105205,
      "grad_norm": 0.6388791799545288,
      "learning_rate": 1.275871237837696e-05,
      "loss": 1.3534,
      "step": 430
    },
    {
      "epoch": 0.1618539635828582,
      "grad_norm": 0.6216919422149658,
      "learning_rate": 9.423545116956494e-06,
      "loss": 1.3128,
      "step": 440
    },
    {
      "epoch": 0.16553246275519587,
      "grad_norm": 0.7831223607063293,
      "learning_rate": 6.573504334335994e-06,
      "loss": 1.2688,
      "step": 450
    },
    {
      "epoch": 0.16553246275519587,
      "eval_loss": 1.3581713438034058,
      "eval_runtime": 107.1785,
      "eval_samples_per_second": 10.683,
      "eval_steps_per_second": 2.678,
      "step": 450
    },
    {
      "epoch": 0.16921096192753357,
      "grad_norm": 0.6016820073127747,
      "learning_rate": 4.22247514272324e-06,
      "loss": 1.4669,
      "step": 460
    },
    {
      "epoch": 0.17288946109987124,
      "grad_norm": 0.5830768942832947,
      "learning_rate": 2.38191152001518e-06,
      "loss": 1.3645,
      "step": 470
    },
    {
      "epoch": 0.17656796027220895,
      "grad_norm": 0.591723620891571,
      "learning_rate": 1.0607805071688306e-06,
      "loss": 1.344,
      "step": 480
    },
    {
      "epoch": 0.18024645944454662,
      "grad_norm": 0.6654506325721741,
      "learning_rate": 2.655185216791625e-07,
      "loss": 1.2938,
      "step": 490
    },
    {
      "epoch": 0.18392495861688432,
      "grad_norm": 0.8405947685241699,
      "learning_rate": 0.0,
      "loss": 1.2712,
      "step": 500
    },
    {
      "epoch": 0.18392495861688432,
      "eval_loss": 1.356024980545044,
      "eval_runtime": 107.3265,
      "eval_samples_per_second": 10.668,
      "eval_steps_per_second": 2.674,
      "step": 500
    }
  ],
  "logging_steps": 10,
  "max_steps": 500,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 50,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 3,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 1.6758482141184e+17,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}