{ "best_metric": 1.3565335273742676, "best_model_checkpoint": "miner_id_24/checkpoint-500", "epoch": 0.18392495861688432, "eval_steps": 50, "global_step": 500, "is_hyper_param_search": false, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 0.0003678499172337686, "eval_loss": 4.303531169891357, "eval_runtime": 107.0593, "eval_samples_per_second": 10.695, "eval_steps_per_second": 2.681, "step": 1 }, { "epoch": 0.0036784991723376862, "grad_norm": 1.8631224632263184, "learning_rate": 4.36e-05, "loss": 3.7124, "step": 10 }, { "epoch": 0.0073569983446753725, "grad_norm": 1.0357885360717773, "learning_rate": 8.72e-05, "loss": 2.3794, "step": 20 }, { "epoch": 0.011035497517013059, "grad_norm": 0.8365756273269653, "learning_rate": 0.0001308, "loss": 1.7752, "step": 30 }, { "epoch": 0.014713996689350745, "grad_norm": 0.9363265037536621, "learning_rate": 0.0001744, "loss": 1.62, "step": 40 }, { "epoch": 0.01839249586168843, "grad_norm": 0.9486646056175232, "learning_rate": 0.000218, "loss": 1.4426, "step": 50 }, { "epoch": 0.01839249586168843, "eval_loss": 1.658852458000183, "eval_runtime": 107.1756, "eval_samples_per_second": 10.683, "eval_steps_per_second": 2.678, "step": 50 }, { "epoch": 0.022070995034026118, "grad_norm": 0.6641014814376831, "learning_rate": 0.00021773448147832086, "loss": 1.6935, "step": 60 }, { "epoch": 0.025749494206363802, "grad_norm": 0.6979497671127319, "learning_rate": 0.0002169392194928312, "loss": 1.5954, "step": 70 }, { "epoch": 0.02942799337870149, "grad_norm": 0.8027070760726929, "learning_rate": 0.00021561808847998484, "loss": 1.5455, "step": 80 }, { "epoch": 0.033106492551039174, "grad_norm": 0.6991447806358337, "learning_rate": 0.00021377752485727676, "loss": 1.4689, "step": 90 }, { "epoch": 0.03678499172337686, "grad_norm": 0.9628053903579712, "learning_rate": 0.00021142649566566402, "loss": 1.4403, "step": 100 }, { "epoch": 0.03678499172337686, "eval_loss": 1.5630460977554321, "eval_runtime": 107.2017, "eval_samples_per_second": 10.681, "eval_steps_per_second": 2.677, "step": 100 }, { "epoch": 0.04046349089571455, "grad_norm": 0.6026347875595093, "learning_rate": 0.0002085764548830435, "loss": 1.6232, "step": 110 }, { "epoch": 0.044141990068052236, "grad_norm": 0.7379695773124695, "learning_rate": 0.00020524128762162305, "loss": 1.4991, "step": 120 }, { "epoch": 0.047820489240389924, "grad_norm": 0.6722536683082581, "learning_rate": 0.00020143724248105043, "loss": 1.5017, "step": 130 }, { "epoch": 0.051498988412727605, "grad_norm": 0.6395397186279297, "learning_rate": 0.0001971828523868693, "loss": 1.4441, "step": 140 }, { "epoch": 0.05517748758506529, "grad_norm": 0.8695403933525085, "learning_rate": 0.0001924988442999686, "loss": 1.3411, "step": 150 }, { "epoch": 0.05517748758506529, "eval_loss": 1.5262999534606934, "eval_runtime": 107.2361, "eval_samples_per_second": 10.677, "eval_steps_per_second": 2.676, "step": 150 }, { "epoch": 0.05885598675740298, "grad_norm": 0.6810901165008545, "learning_rate": 0.00018740803823691298, "loss": 1.5936, "step": 160 }, { "epoch": 0.06253448592974066, "grad_norm": 0.657184362411499, "learning_rate": 0.00018193523609311556, "loss": 1.5681, "step": 170 }, { "epoch": 0.06621298510207835, "grad_norm": 0.6586466431617737, "learning_rate": 0.00017610710081049675, "loss": 1.4321, "step": 180 }, { "epoch": 0.06989148427441604, "grad_norm": 0.7010196447372437, "learning_rate": 0.00016995202647831142, "loss": 1.3784, "step": 190 }, { "epoch": 0.07356998344675372, "grad_norm": 
0.8979406356811523, "learning_rate": 0.00016350000000000002, "loss": 1.341, "step": 200 }, { "epoch": 0.07356998344675372, "eval_loss": 1.4965966939926147, "eval_runtime": 107.3593, "eval_samples_per_second": 10.665, "eval_steps_per_second": 2.673, "step": 200 }, { "epoch": 0.07724848261909141, "grad_norm": 0.6285237073898315, "learning_rate": 0.00015678245500000943, "loss": 1.5321, "step": 210 }, { "epoch": 0.0809269817914291, "grad_norm": 0.6141915321350098, "learning_rate": 0.00014983211868233444, "loss": 1.4692, "step": 220 }, { "epoch": 0.08460548096376679, "grad_norm": 0.6756523251533508, "learning_rate": 0.00014268285238686927, "loss": 1.436, "step": 230 }, { "epoch": 0.08828398013610447, "grad_norm": 0.6676588654518127, "learning_rate": 0.00013536948662036378, "loss": 1.3784, "step": 240 }, { "epoch": 0.09196247930844216, "grad_norm": 0.7657578587532043, "learning_rate": 0.00012792765136569544, "loss": 1.2896, "step": 250 }, { "epoch": 0.09196247930844216, "eval_loss": 1.4555139541625977, "eval_runtime": 107.2583, "eval_samples_per_second": 10.675, "eval_steps_per_second": 2.676, "step": 250 }, { "epoch": 0.09564097848077985, "grad_norm": 0.596019446849823, "learning_rate": 0.00012039360249617425, "loss": 1.5842, "step": 260 }, { "epoch": 0.09931947765311752, "grad_norm": 0.6011001467704773, "learning_rate": 0.00011280404514057264, "loss": 1.4633, "step": 270 }, { "epoch": 0.10299797682545521, "grad_norm": 0.6597139835357666, "learning_rate": 0.00010519595485942743, "loss": 1.4199, "step": 280 }, { "epoch": 0.1066764759977929, "grad_norm": 0.6663433313369751, "learning_rate": 9.76063975038258e-05, "loss": 1.3892, "step": 290 }, { "epoch": 0.11035497517013058, "grad_norm": 0.845330536365509, "learning_rate": 9.00723486343046e-05, "loss": 1.2336, "step": 300 }, { "epoch": 0.11035497517013058, "eval_loss": 1.4274946451187134, "eval_runtime": 107.2541, "eval_samples_per_second": 10.676, "eval_steps_per_second": 2.676, "step": 300 }, { "epoch": 0.11403347434246827, "grad_norm": 0.5652741193771362, "learning_rate": 8.263051337963623e-05, "loss": 1.5192, "step": 310 }, { "epoch": 0.11771197351480596, "grad_norm": 0.5754914879798889, "learning_rate": 7.531714761313074e-05, "loss": 1.5169, "step": 320 }, { "epoch": 0.12139047268714365, "grad_norm": 0.5873494744300842, "learning_rate": 6.816788131766559e-05, "loss": 1.4227, "step": 330 }, { "epoch": 0.12506897185948132, "grad_norm": 0.5985251069068909, "learning_rate": 6.121754499999055e-05, "loss": 1.2899, "step": 340 }, { "epoch": 0.12874747103181902, "grad_norm": 0.8635291457176208, "learning_rate": 5.450000000000003e-05, "loss": 1.2777, "step": 350 }, { "epoch": 0.12874747103181902, "eval_loss": 1.4000242948532104, "eval_runtime": 107.1832, "eval_samples_per_second": 10.683, "eval_steps_per_second": 2.678, "step": 350 }, { "epoch": 0.1324259702041567, "grad_norm": 0.5911391973495483, "learning_rate": 4.804797352168861e-05, "loss": 1.4978, "step": 360 }, { "epoch": 0.1361044693764944, "grad_norm": 0.6037421226501465, "learning_rate": 4.189289918950325e-05, "loss": 1.4423, "step": 370 }, { "epoch": 0.13978296854883207, "grad_norm": 0.5900255441665649, "learning_rate": 3.606476390688449e-05, "loss": 1.3197, "step": 380 }, { "epoch": 0.14346146772116977, "grad_norm": 0.6815835237503052, "learning_rate": 3.0591961763087043e-05, "loss": 1.3703, "step": 390 }, { "epoch": 0.14713996689350745, "grad_norm": 0.7701212167739868, "learning_rate": 2.550115570003141e-05, "loss": 1.2902, "step": 400 }, { "epoch": 0.14713996689350745, "eval_loss": 
1.3680524826049805, "eval_runtime": 107.1594, "eval_samples_per_second": 10.685, "eval_steps_per_second": 2.678, "step": 400 }, { "epoch": 0.15081846606584515, "grad_norm": 0.5527030229568481, "learning_rate": 2.081714761313074e-05, "loss": 1.458, "step": 410 }, { "epoch": 0.15449696523818282, "grad_norm": 0.5901045203208923, "learning_rate": 1.656275751894957e-05, "loss": 1.4088, "step": 420 }, { "epoch": 0.1581754644105205, "grad_norm": 0.6328750848770142, "learning_rate": 1.275871237837696e-05, "loss": 1.3526, "step": 430 }, { "epoch": 0.1618539635828582, "grad_norm": 0.619103193283081, "learning_rate": 9.423545116956494e-06, "loss": 1.3128, "step": 440 }, { "epoch": 0.16553246275519587, "grad_norm": 0.7719127535820007, "learning_rate": 6.573504334335994e-06, "loss": 1.2669, "step": 450 }, { "epoch": 0.16553246275519587, "eval_loss": 1.3588438034057617, "eval_runtime": 107.2217, "eval_samples_per_second": 10.679, "eval_steps_per_second": 2.677, "step": 450 }, { "epoch": 0.16921096192753357, "grad_norm": 0.6076942682266235, "learning_rate": 4.22247514272324e-06, "loss": 1.4677, "step": 460 }, { "epoch": 0.17288946109987124, "grad_norm": 0.5937750935554504, "learning_rate": 2.38191152001518e-06, "loss": 1.3644, "step": 470 }, { "epoch": 0.17656796027220895, "grad_norm": 0.5931702852249146, "learning_rate": 1.0607805071688306e-06, "loss": 1.3443, "step": 480 }, { "epoch": 0.18024645944454662, "grad_norm": 0.6748533844947815, "learning_rate": 2.655185216791625e-07, "loss": 1.2941, "step": 490 }, { "epoch": 0.18392495861688432, "grad_norm": 0.8280065059661865, "learning_rate": 0.0, "loss": 1.2717, "step": 500 }, { "epoch": 0.18392495861688432, "eval_loss": 1.3565335273742676, "eval_runtime": 107.2901, "eval_samples_per_second": 10.672, "eval_steps_per_second": 2.675, "step": 500 } ], "logging_steps": 10, "max_steps": 500, "num_input_tokens_seen": 0, "num_train_epochs": 1, "save_steps": 50, "stateful_callbacks": { "EarlyStoppingCallback": { "args": { "early_stopping_patience": 3, "early_stopping_threshold": 0.0 }, "attributes": { "early_stopping_patience_counter": 0 } }, "TrainerControl": { "args": { "should_epoch_stop": false, "should_evaluate": false, "should_log": false, "should_save": true, "should_training_stop": true }, "attributes": {} } }, "total_flos": 1.6758482141184e+17, "train_batch_size": 4, "trial_name": null, "trial_params": null }