{ "best_metric": null, "best_model_checkpoint": null, "epoch": 2.980891719745223, "eval_steps": 500, "global_step": 78, "is_hyper_param_search": false, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 0.03821656050955414, "grad_norm": 6.475443363189697, "learning_rate": 1.25e-06, "loss": 0.9435, "step": 1 }, { "epoch": 0.07643312101910828, "grad_norm": 6.0149922370910645, "learning_rate": 2.5e-06, "loss": 0.9088, "step": 2 }, { "epoch": 0.11464968152866242, "grad_norm": 6.234716892242432, "learning_rate": 3.7500000000000005e-06, "loss": 0.946, "step": 3 }, { "epoch": 0.15286624203821655, "grad_norm": 5.534856796264648, "learning_rate": 5e-06, "loss": 0.9178, "step": 4 }, { "epoch": 0.1910828025477707, "grad_norm": 4.338842868804932, "learning_rate": 6.25e-06, "loss": 0.8717, "step": 5 }, { "epoch": 0.22929936305732485, "grad_norm": 2.4903976917266846, "learning_rate": 7.500000000000001e-06, "loss": 0.7575, "step": 6 }, { "epoch": 0.267515923566879, "grad_norm": 3.8526647090911865, "learning_rate": 8.750000000000001e-06, "loss": 0.7954, "step": 7 }, { "epoch": 0.3057324840764331, "grad_norm": 4.179830551147461, "learning_rate": 1e-05, "loss": 0.8056, "step": 8 }, { "epoch": 0.34394904458598724, "grad_norm": 3.9865660667419434, "learning_rate": 9.994965332706574e-06, "loss": 0.7168, "step": 9 }, { "epoch": 0.3821656050955414, "grad_norm": 4.04654598236084, "learning_rate": 9.979871469976197e-06, "loss": 0.7573, "step": 10 }, { "epoch": 0.42038216560509556, "grad_norm": 2.9917404651641846, "learning_rate": 9.954748808839675e-06, "loss": 0.7598, "step": 11 }, { "epoch": 0.4585987261146497, "grad_norm": 2.017793655395508, "learning_rate": 9.91964794299315e-06, "loss": 0.7146, "step": 12 }, { "epoch": 0.4968152866242038, "grad_norm": 2.0348641872406006, "learning_rate": 9.874639560909118e-06, "loss": 0.7697, "step": 13 }, { "epoch": 0.535031847133758, "grad_norm": 1.8617016077041626, "learning_rate": 9.819814303479268e-06, "loss": 0.6807, "step": 14 }, { "epoch": 0.5732484076433121, "grad_norm": 1.5839647054672241, "learning_rate": 9.755282581475769e-06, "loss": 0.6682, "step": 15 }, { "epoch": 0.6114649681528662, "grad_norm": 1.2628346681594849, "learning_rate": 9.681174353198687e-06, "loss": 0.6205, "step": 16 }, { "epoch": 0.6496815286624203, "grad_norm": 1.4339826107025146, "learning_rate": 9.597638862757255e-06, "loss": 0.6606, "step": 17 }, { "epoch": 0.6878980891719745, "grad_norm": 1.0776035785675049, "learning_rate": 9.504844339512096e-06, "loss": 0.6367, "step": 18 }, { "epoch": 0.7261146496815286, "grad_norm": 1.2484275102615356, "learning_rate": 9.40297765928369e-06, "loss": 0.7081, "step": 19 }, { "epoch": 0.7643312101910829, "grad_norm": 1.166043996810913, "learning_rate": 9.292243968009332e-06, "loss": 0.6139, "step": 20 }, { "epoch": 0.802547770700637, "grad_norm": 1.0878933668136597, "learning_rate": 9.172866268606514e-06, "loss": 0.6364, "step": 21 }, { "epoch": 0.8407643312101911, "grad_norm": 0.8449444770812988, "learning_rate": 9.045084971874738e-06, "loss": 0.6327, "step": 22 }, { "epoch": 0.8789808917197452, "grad_norm": 0.9466868042945862, "learning_rate": 8.90915741234015e-06, "loss": 0.7027, "step": 23 }, { "epoch": 0.9171974522292994, "grad_norm": 0.9680841565132141, "learning_rate": 8.765357330018056e-06, "loss": 0.6549, "step": 24 }, { "epoch": 0.9554140127388535, "grad_norm": 0.7421926259994507, "learning_rate": 8.613974319136959e-06, "loss": 0.6129, "step": 25 }, { "epoch": 0.9936305732484076, "grad_norm": 
0.8124146461486816, "learning_rate": 8.455313244934324e-06, "loss": 0.609, "step": 26 }, { "epoch": 1.0318471337579618, "grad_norm": 1.6263659000396729, "learning_rate": 8.289693629698564e-06, "loss": 0.9629, "step": 27 }, { "epoch": 1.070063694267516, "grad_norm": 0.7524719834327698, "learning_rate": 8.117449009293668e-06, "loss": 0.6027, "step": 28 }, { "epoch": 1.10828025477707, "grad_norm": 0.592254102230072, "learning_rate": 7.938926261462366e-06, "loss": 0.5297, "step": 29 }, { "epoch": 1.1464968152866242, "grad_norm": 0.7852796912193298, "learning_rate": 7.754484907260513e-06, "loss": 0.6182, "step": 30 }, { "epoch": 1.1847133757961783, "grad_norm": 0.830742359161377, "learning_rate": 7.564496387029532e-06, "loss": 0.6581, "step": 31 }, { "epoch": 1.2229299363057324, "grad_norm": 0.6159372925758362, "learning_rate": 7.369343312364994e-06, "loss": 0.6236, "step": 32 }, { "epoch": 1.2611464968152866, "grad_norm": 0.7226252555847168, "learning_rate": 7.169418695587791e-06, "loss": 0.618, "step": 33 }, { "epoch": 1.2993630573248407, "grad_norm": 0.6193709373474121, "learning_rate": 6.965125158269619e-06, "loss": 0.542, "step": 34 }, { "epoch": 1.3375796178343948, "grad_norm": 0.7392263412475586, "learning_rate": 6.7568741204067145e-06, "loss": 0.53, "step": 35 }, { "epoch": 1.3757961783439492, "grad_norm": 0.6274488568305969, "learning_rate": 6.545084971874738e-06, "loss": 0.6762, "step": 36 }, { "epoch": 1.4140127388535033, "grad_norm": 0.5716177821159363, "learning_rate": 6.330184227833376e-06, "loss": 0.5749, "step": 37 }, { "epoch": 1.4522292993630574, "grad_norm": 0.7565421462059021, "learning_rate": 6.112604669781572e-06, "loss": 0.5814, "step": 38 }, { "epoch": 1.4904458598726116, "grad_norm": 0.7007409334182739, "learning_rate": 5.892784473993184e-06, "loss": 0.6141, "step": 39 }, { "epoch": 1.5286624203821657, "grad_norm": 0.5515427589416504, "learning_rate": 5.671166329088278e-06, "loss": 0.4922, "step": 40 }, { "epoch": 1.5668789808917198, "grad_norm": 0.701950192451477, "learning_rate": 5.448196544517168e-06, "loss": 0.5835, "step": 41 }, { "epoch": 1.605095541401274, "grad_norm": 0.6372572779655457, "learning_rate": 5.224324151752575e-06, "loss": 0.5014, "step": 42 }, { "epoch": 1.643312101910828, "grad_norm": 0.6354508996009827, "learning_rate": 5e-06, "loss": 0.6538, "step": 43 }, { "epoch": 1.6815286624203822, "grad_norm": 0.5548213720321655, "learning_rate": 4.775675848247427e-06, "loss": 0.5819, "step": 44 }, { "epoch": 1.7197452229299364, "grad_norm": 0.4872737228870392, "learning_rate": 4.551803455482833e-06, "loss": 0.4779, "step": 45 }, { "epoch": 1.7579617834394905, "grad_norm": 0.5852910876274109, "learning_rate": 4.3288336709117246e-06, "loss": 0.6624, "step": 46 }, { "epoch": 1.7961783439490446, "grad_norm": 0.49762701988220215, "learning_rate": 4.107215526006818e-06, "loss": 0.5426, "step": 47 }, { "epoch": 1.8343949044585988, "grad_norm": 0.5693338513374329, "learning_rate": 3.887395330218429e-06, "loss": 0.5471, "step": 48 }, { "epoch": 1.872611464968153, "grad_norm": 0.6038720607757568, "learning_rate": 3.669815772166625e-06, "loss": 0.6022, "step": 49 }, { "epoch": 1.910828025477707, "grad_norm": 0.49600663781166077, "learning_rate": 3.4549150281252635e-06, "loss": 0.507, "step": 50 }, { "epoch": 1.9490445859872612, "grad_norm": 0.5460913181304932, "learning_rate": 3.2431258795932863e-06, "loss": 0.5469, "step": 51 }, { "epoch": 1.9872611464968153, "grad_norm": 0.530115008354187, "learning_rate": 3.0348748417303826e-06, "loss": 0.5327, "step": 52 }, { 
"epoch": 2.0254777070063694, "grad_norm": 1.14858877658844, "learning_rate": 2.83058130441221e-06, "loss": 0.837, "step": 53 }, { "epoch": 2.0636942675159236, "grad_norm": 0.5498645901679993, "learning_rate": 2.6306566876350072e-06, "loss": 0.5906, "step": 54 }, { "epoch": 2.1019108280254777, "grad_norm": 0.5188542604446411, "learning_rate": 2.43550361297047e-06, "loss": 0.5788, "step": 55 }, { "epoch": 2.140127388535032, "grad_norm": 0.4956539273262024, "learning_rate": 2.245515092739488e-06, "loss": 0.5361, "step": 56 }, { "epoch": 2.178343949044586, "grad_norm": 0.6037235260009766, "learning_rate": 2.061073738537635e-06, "loss": 0.5584, "step": 57 }, { "epoch": 2.21656050955414, "grad_norm": 0.4257795810699463, "learning_rate": 1.8825509907063328e-06, "loss": 0.4353, "step": 58 }, { "epoch": 2.254777070063694, "grad_norm": 0.5109358429908752, "learning_rate": 1.7103063703014372e-06, "loss": 0.5412, "step": 59 }, { "epoch": 2.2929936305732483, "grad_norm": 0.4674205780029297, "learning_rate": 1.544686755065677e-06, "loss": 0.5459, "step": 60 }, { "epoch": 2.3312101910828025, "grad_norm": 0.47286278009414673, "learning_rate": 1.3860256808630429e-06, "loss": 0.4947, "step": 61 }, { "epoch": 2.3694267515923566, "grad_norm": 0.49508610367774963, "learning_rate": 1.234642669981946e-06, "loss": 0.4856, "step": 62 }, { "epoch": 2.4076433121019107, "grad_norm": 0.42003560066223145, "learning_rate": 1.0908425876598512e-06, "loss": 0.4792, "step": 63 }, { "epoch": 2.445859872611465, "grad_norm": 0.48869258165359497, "learning_rate": 9.549150281252633e-07, "loss": 0.5505, "step": 64 }, { "epoch": 2.484076433121019, "grad_norm": 0.5102881789207458, "learning_rate": 8.271337313934869e-07, "loss": 0.4618, "step": 65 }, { "epoch": 2.522292993630573, "grad_norm": 0.5060379505157471, "learning_rate": 7.077560319906696e-07, "loss": 0.5975, "step": 66 }, { "epoch": 2.5605095541401273, "grad_norm": 0.4596272110939026, "learning_rate": 5.9702234071631e-07, "loss": 0.5184, "step": 67 }, { "epoch": 2.5987261146496814, "grad_norm": 0.4477904736995697, "learning_rate": 4.951556604879049e-07, "loss": 0.4971, "step": 68 }, { "epoch": 2.6369426751592355, "grad_norm": 0.4290829002857208, "learning_rate": 4.0236113724274716e-07, "loss": 0.4872, "step": 69 }, { "epoch": 2.6751592356687897, "grad_norm": 0.4998835325241089, "learning_rate": 3.18825646801314e-07, "loss": 0.4768, "step": 70 }, { "epoch": 2.713375796178344, "grad_norm": 0.5172039270401001, "learning_rate": 2.447174185242324e-07, "loss": 0.5693, "step": 71 }, { "epoch": 2.7515923566878984, "grad_norm": 0.4305104613304138, "learning_rate": 1.801856965207338e-07, "loss": 0.5935, "step": 72 }, { "epoch": 2.789808917197452, "grad_norm": 0.4256463348865509, "learning_rate": 1.253604390908819e-07, "loss": 0.511, "step": 73 }, { "epoch": 2.8280254777070066, "grad_norm": 0.5269811153411865, "learning_rate": 8.035205700685167e-08, "loss": 0.577, "step": 74 }, { "epoch": 2.8662420382165603, "grad_norm": 0.4299260377883911, "learning_rate": 4.52511911603265e-08, "loss": 0.5992, "step": 75 }, { "epoch": 2.904458598726115, "grad_norm": 0.45451387763023376, "learning_rate": 2.012853002380466e-08, "loss": 0.5475, "step": 76 }, { "epoch": 2.9426751592356686, "grad_norm": 0.4655923843383789, "learning_rate": 5.034667293427053e-09, "loss": 0.5231, "step": 77 }, { "epoch": 2.980891719745223, "grad_norm": 0.4951322376728058, "learning_rate": 0.0, "loss": 0.5876, "step": 78 }, { "epoch": 2.980891719745223, "step": 78, "total_flos": 47460738555904.0, "train_loss": 
0.6236666376009966, "train_runtime": 2174.9739, "train_samples_per_second": 3.448, "train_steps_per_second": 0.036 } ], "logging_steps": 1.0, "max_steps": 78, "num_input_tokens_seen": 0, "num_train_epochs": 3, "save_steps": 500, "stateful_callbacks": { "TrainerControl": { "args": { "should_epoch_stop": false, "should_evaluate": false, "should_log": false, "should_save": true, "should_training_stop": true }, "attributes": {} } }, "total_flos": 47460738555904.0, "train_batch_size": 1, "trial_name": null, "trial_params": null }