{ "best_metric": 0.20289373397827148, "best_model_checkpoint": "miner_id_24/checkpoint-50", "epoch": 0.1272264631043257, "eval_steps": 50, "global_step": 50, "is_hyper_param_search": false, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 0.002544529262086514, "grad_norm": 2.0976202487945557, "learning_rate": 1.0017e-05, "loss": 0.4494, "step": 1 }, { "epoch": 0.002544529262086514, "eval_loss": 0.6885223388671875, "eval_runtime": 10.1676, "eval_samples_per_second": 16.326, "eval_steps_per_second": 4.131, "step": 1 }, { "epoch": 0.005089058524173028, "grad_norm": 2.4291188716888428, "learning_rate": 2.0034e-05, "loss": 0.4714, "step": 2 }, { "epoch": 0.007633587786259542, "grad_norm": 2.6177380084991455, "learning_rate": 3.0050999999999997e-05, "loss": 0.4572, "step": 3 }, { "epoch": 0.010178117048346057, "grad_norm": 2.227078676223755, "learning_rate": 4.0068e-05, "loss": 0.3751, "step": 4 }, { "epoch": 0.01272264631043257, "grad_norm": 1.521238923072815, "learning_rate": 5.0085e-05, "loss": 0.3044, "step": 5 }, { "epoch": 0.015267175572519083, "grad_norm": 1.3237571716308594, "learning_rate": 6.0101999999999995e-05, "loss": 0.2047, "step": 6 }, { "epoch": 0.017811704834605598, "grad_norm": 1.6091803312301636, "learning_rate": 7.0119e-05, "loss": 0.1256, "step": 7 }, { "epoch": 0.020356234096692113, "grad_norm": 1.4948714971542358, "learning_rate": 8.0136e-05, "loss": 0.1606, "step": 8 }, { "epoch": 0.022900763358778626, "grad_norm": 1.0482193231582642, "learning_rate": 9.0153e-05, "loss": 0.1225, "step": 9 }, { "epoch": 0.02544529262086514, "grad_norm": 1.3892583847045898, "learning_rate": 0.00010017, "loss": 0.1892, "step": 10 }, { "epoch": 0.027989821882951654, "grad_norm": 1.202789306640625, "learning_rate": 9.964278947368421e-05, "loss": 0.1744, "step": 11 }, { "epoch": 0.030534351145038167, "grad_norm": 1.1245602369308472, "learning_rate": 9.911557894736841e-05, "loss": 0.0761, "step": 12 }, { "epoch": 0.03307888040712468, "grad_norm": 1.1420965194702148, "learning_rate": 9.858836842105263e-05, "loss": 0.0954, "step": 13 }, { "epoch": 0.035623409669211195, "grad_norm": 0.8061597943305969, "learning_rate": 9.806115789473684e-05, "loss": 0.1012, "step": 14 }, { "epoch": 0.03816793893129771, "grad_norm": 1.2784687280654907, "learning_rate": 9.753394736842106e-05, "loss": 0.1408, "step": 15 }, { "epoch": 0.04071246819338423, "grad_norm": 1.0279990434646606, "learning_rate": 9.700673684210526e-05, "loss": 0.1174, "step": 16 }, { "epoch": 0.043256997455470736, "grad_norm": 1.0930790901184082, "learning_rate": 9.647952631578948e-05, "loss": 0.0984, "step": 17 }, { "epoch": 0.04580152671755725, "grad_norm": 0.4375361502170563, "learning_rate": 9.595231578947368e-05, "loss": 0.0476, "step": 18 }, { "epoch": 0.04834605597964377, "grad_norm": 0.5908359289169312, "learning_rate": 9.542510526315789e-05, "loss": 0.0612, "step": 19 }, { "epoch": 0.05089058524173028, "grad_norm": 0.4914228916168213, "learning_rate": 9.48978947368421e-05, "loss": 0.0473, "step": 20 }, { "epoch": 0.05343511450381679, "grad_norm": 1.2252209186553955, "learning_rate": 9.437068421052632e-05, "loss": 0.1821, "step": 21 }, { "epoch": 0.05597964376590331, "grad_norm": 0.7145554423332214, "learning_rate": 9.384347368421052e-05, "loss": 0.0709, "step": 22 }, { "epoch": 0.058524173027989825, "grad_norm": 0.2932605445384979, "learning_rate": 9.331626315789474e-05, "loss": 0.021, "step": 23 }, { "epoch": 0.061068702290076333, "grad_norm": 0.628063440322876, "learning_rate": 
9.278905263157894e-05, "loss": 0.0734, "step": 24 }, { "epoch": 0.06361323155216285, "grad_norm": 0.48500677943229675, "learning_rate": 9.226184210526316e-05, "loss": 0.0503, "step": 25 }, { "epoch": 0.06615776081424936, "grad_norm": 0.5622182488441467, "learning_rate": 9.173463157894736e-05, "loss": 0.0558, "step": 26 }, { "epoch": 0.06870229007633588, "grad_norm": 0.5120857357978821, "learning_rate": 9.120742105263159e-05, "loss": 0.045, "step": 27 }, { "epoch": 0.07124681933842239, "grad_norm": 0.12333207577466965, "learning_rate": 9.068021052631579e-05, "loss": 0.0056, "step": 28 }, { "epoch": 0.0737913486005089, "grad_norm": 0.8002417087554932, "learning_rate": 9.0153e-05, "loss": 0.1115, "step": 29 }, { "epoch": 0.07633587786259542, "grad_norm": 3.1628360748291016, "learning_rate": 8.96257894736842e-05, "loss": 0.661, "step": 30 }, { "epoch": 0.07888040712468193, "grad_norm": 1.9972189664840698, "learning_rate": 8.909857894736842e-05, "loss": 0.3344, "step": 31 }, { "epoch": 0.08142493638676845, "grad_norm": 1.6704767942428589, "learning_rate": 8.857136842105263e-05, "loss": 0.3315, "step": 32 }, { "epoch": 0.08396946564885496, "grad_norm": 1.8845316171646118, "learning_rate": 8.804415789473684e-05, "loss": 0.3487, "step": 33 }, { "epoch": 0.08651399491094147, "grad_norm": 2.4319205284118652, "learning_rate": 8.751694736842105e-05, "loss": 0.3297, "step": 34 }, { "epoch": 0.089058524173028, "grad_norm": 2.7092981338500977, "learning_rate": 8.698973684210527e-05, "loss": 0.5175, "step": 35 }, { "epoch": 0.0916030534351145, "grad_norm": 1.65862238407135, "learning_rate": 8.646252631578948e-05, "loss": 0.3395, "step": 36 }, { "epoch": 0.09414758269720101, "grad_norm": 1.9453610181808472, "learning_rate": 8.593531578947368e-05, "loss": 0.3279, "step": 37 }, { "epoch": 0.09669211195928754, "grad_norm": 1.8823449611663818, "learning_rate": 8.54081052631579e-05, "loss": 0.3059, "step": 38 }, { "epoch": 0.09923664122137404, "grad_norm": 1.427621603012085, "learning_rate": 8.48808947368421e-05, "loss": 0.2756, "step": 39 }, { "epoch": 0.10178117048346055, "grad_norm": 1.9383624792099, "learning_rate": 8.435368421052631e-05, "loss": 0.3652, "step": 40 }, { "epoch": 0.10432569974554708, "grad_norm": 0.5723309516906738, "learning_rate": 8.382647368421053e-05, "loss": 0.0681, "step": 41 }, { "epoch": 0.10687022900763359, "grad_norm": 1.692962884902954, "learning_rate": 8.329926315789474e-05, "loss": 0.2737, "step": 42 }, { "epoch": 0.10941475826972011, "grad_norm": 5.488102436065674, "learning_rate": 8.277205263157894e-05, "loss": 0.3806, "step": 43 }, { "epoch": 0.11195928753180662, "grad_norm": 2.793001890182495, "learning_rate": 8.224484210526316e-05, "loss": 0.3236, "step": 44 }, { "epoch": 0.11450381679389313, "grad_norm": 0.8564298152923584, "learning_rate": 8.171763157894736e-05, "loss": 0.0697, "step": 45 }, { "epoch": 0.11704834605597965, "grad_norm": 1.465958595275879, "learning_rate": 8.119042105263158e-05, "loss": 0.1509, "step": 46 }, { "epoch": 0.11959287531806616, "grad_norm": 1.0620710849761963, "learning_rate": 8.066321052631578e-05, "loss": 0.0998, "step": 47 }, { "epoch": 0.12213740458015267, "grad_norm": 0.551638126373291, "learning_rate": 8.0136e-05, "loss": 0.0336, "step": 48 }, { "epoch": 0.12468193384223919, "grad_norm": 1.185890555381775, "learning_rate": 7.960878947368421e-05, "loss": 0.0431, "step": 49 }, { "epoch": 0.1272264631043257, "grad_norm": 0.4987131953239441, "learning_rate": 7.908157894736842e-05, "loss": 0.017, "step": 50 }, { "epoch": 0.1272264631043257, 
"eval_loss": 0.20289373397827148, "eval_runtime": 10.1481, "eval_samples_per_second": 16.358, "eval_steps_per_second": 4.139, "step": 50 } ], "logging_steps": 1, "max_steps": 200, "num_input_tokens_seen": 0, "num_train_epochs": 1, "save_steps": 50, "stateful_callbacks": { "EarlyStoppingCallback": { "args": { "early_stopping_patience": 5, "early_stopping_threshold": 0.0 }, "attributes": { "early_stopping_patience_counter": 0 } }, "TrainerControl": { "args": { "should_epoch_stop": false, "should_evaluate": false, "should_log": false, "should_save": true, "should_training_stop": false }, "attributes": {} } }, "total_flos": 8419190243328000.0, "train_batch_size": 4, "trial_name": null, "trial_params": null }