{ "best_metric": null, "best_model_checkpoint": null, "epoch": 2.0, "eval_steps": 500, "global_step": 184, "is_hyper_param_search": false, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 0.010869565217391304, "grad_norm": 0.03240465296922696, "learning_rate": 1.0526315789473685e-06, "loss": 0.5318, "step": 1 }, { "epoch": 0.021739130434782608, "grad_norm": 0.031148191782486748, "learning_rate": 2.105263157894737e-06, "loss": 0.505, "step": 2 }, { "epoch": 0.03260869565217391, "grad_norm": 0.03287048185671828, "learning_rate": 3.157894736842105e-06, "loss": 0.517, "step": 3 }, { "epoch": 0.043478260869565216, "grad_norm": 0.03645975365762924, "learning_rate": 4.210526315789474e-06, "loss": 0.5547, "step": 4 }, { "epoch": 0.05434782608695652, "grad_norm": 0.03208238827298806, "learning_rate": 5.263157894736842e-06, "loss": 0.5284, "step": 5 }, { "epoch": 0.06521739130434782, "grad_norm": 0.038367637681218175, "learning_rate": 6.31578947368421e-06, "loss": 0.5838, "step": 6 }, { "epoch": 0.07608695652173914, "grad_norm": 0.03450300334168957, "learning_rate": 7.368421052631579e-06, "loss": 0.5382, "step": 7 }, { "epoch": 0.08695652173913043, "grad_norm": 0.03388554313817737, "learning_rate": 8.421052631578948e-06, "loss": 0.524, "step": 8 }, { "epoch": 0.09782608695652174, "grad_norm": 0.03544242363659419, "learning_rate": 9.473684210526315e-06, "loss": 0.5501, "step": 9 }, { "epoch": 0.10869565217391304, "grad_norm": 0.03352231239492054, "learning_rate": 1.0526315789473684e-05, "loss": 0.5069, "step": 10 }, { "epoch": 0.11956521739130435, "grad_norm": 0.03572670440459065, "learning_rate": 1.1578947368421053e-05, "loss": 0.5223, "step": 11 }, { "epoch": 0.13043478260869565, "grad_norm": 0.03663832921257728, "learning_rate": 1.263157894736842e-05, "loss": 0.5509, "step": 12 }, { "epoch": 0.14130434782608695, "grad_norm": 0.03809520900571492, "learning_rate": 1.3684210526315791e-05, "loss": 0.5401, "step": 13 }, { "epoch": 0.15217391304347827, "grad_norm": 0.03789719542276196, "learning_rate": 1.4736842105263159e-05, "loss": 0.5408, "step": 14 }, { "epoch": 0.16304347826086957, "grad_norm": 0.03420580590941963, "learning_rate": 1.578947368421053e-05, "loss": 0.4729, "step": 15 }, { "epoch": 0.17391304347826086, "grad_norm": 0.03602266807911341, "learning_rate": 1.6842105263157896e-05, "loss": 0.4934, "step": 16 }, { "epoch": 0.18478260869565216, "grad_norm": 0.04138637589841341, "learning_rate": 1.7894736842105264e-05, "loss": 0.5376, "step": 17 }, { "epoch": 0.1956521739130435, "grad_norm": 0.04435583894932485, "learning_rate": 1.894736842105263e-05, "loss": 0.5642, "step": 18 }, { "epoch": 0.20652173913043478, "grad_norm": 0.04141900808302216, "learning_rate": 2e-05, "loss": 0.5591, "step": 19 }, { "epoch": 0.21739130434782608, "grad_norm": 0.03675591300747317, "learning_rate": 1.999818745523526e-05, "loss": 0.5022, "step": 20 }, { "epoch": 0.22826086956521738, "grad_norm": 0.035936674391353805, "learning_rate": 1.999275047800474e-05, "loss": 0.5342, "step": 21 }, { "epoch": 0.2391304347826087, "grad_norm": 0.03766128882621122, "learning_rate": 1.9983691039261358e-05, "loss": 0.5437, "step": 22 }, { "epoch": 0.25, "grad_norm": 0.03747365385813838, "learning_rate": 1.9971012423132776e-05, "loss": 0.5113, "step": 23 }, { "epoch": 0.2608695652173913, "grad_norm": 0.037562789979165546, "learning_rate": 1.9954719225730847e-05, "loss": 0.5219, "step": 24 }, { "epoch": 0.2717391304347826, "grad_norm": 0.033732964863589725, "learning_rate": 
1.99348173534855e-05, "loss": 0.4899, "step": 25 }, { "epoch": 0.2826086956521739, "grad_norm": 0.032706454390573285, "learning_rate": 1.9911314021003614e-05, "loss": 0.46, "step": 26 }, { "epoch": 0.29347826086956524, "grad_norm": 0.03299013215961956, "learning_rate": 1.9884217748453625e-05, "loss": 0.4464, "step": 27 }, { "epoch": 0.30434782608695654, "grad_norm": 0.036322686035091326, "learning_rate": 1.9853538358476933e-05, "loss": 0.525, "step": 28 }, { "epoch": 0.31521739130434784, "grad_norm": 0.035349035280986665, "learning_rate": 1.9819286972627066e-05, "loss": 0.481, "step": 29 }, { "epoch": 0.32608695652173914, "grad_norm": 0.03304667685973592, "learning_rate": 1.9781476007338058e-05, "loss": 0.486, "step": 30 }, { "epoch": 0.33695652173913043, "grad_norm": 0.03594690118986878, "learning_rate": 1.9740119169423337e-05, "loss": 0.4459, "step": 31 }, { "epoch": 0.34782608695652173, "grad_norm": 0.03722435746668587, "learning_rate": 1.9695231451106914e-05, "loss": 0.4884, "step": 32 }, { "epoch": 0.358695652173913, "grad_norm": 0.03234546772265382, "learning_rate": 1.964682912458856e-05, "loss": 0.4538, "step": 33 }, { "epoch": 0.3695652173913043, "grad_norm": 0.03332715856138033, "learning_rate": 1.9594929736144978e-05, "loss": 0.4684, "step": 34 }, { "epoch": 0.3804347826086957, "grad_norm": 0.03192258021162577, "learning_rate": 1.9539552099769128e-05, "loss": 0.5074, "step": 35 }, { "epoch": 0.391304347826087, "grad_norm": 0.028905879450475694, "learning_rate": 1.9480716290349998e-05, "loss": 0.4329, "step": 36 }, { "epoch": 0.40217391304347827, "grad_norm": 0.026272771750420486, "learning_rate": 1.941844363639525e-05, "loss": 0.4104, "step": 37 }, { "epoch": 0.41304347826086957, "grad_norm": 0.029512600659781462, "learning_rate": 1.9352756712299467e-05, "loss": 0.4352, "step": 38 }, { "epoch": 0.42391304347826086, "grad_norm": 0.03106074409081994, "learning_rate": 1.9283679330160726e-05, "loss": 0.4617, "step": 39 }, { "epoch": 0.43478260869565216, "grad_norm": 0.02877012176170005, "learning_rate": 1.92112365311485e-05, "loss": 0.4492, "step": 40 }, { "epoch": 0.44565217391304346, "grad_norm": 0.028504695790476927, "learning_rate": 1.913545457642601e-05, "loss": 0.4388, "step": 41 }, { "epoch": 0.45652173913043476, "grad_norm": 0.02776562257630969, "learning_rate": 1.905636093763031e-05, "loss": 0.457, "step": 42 }, { "epoch": 0.4673913043478261, "grad_norm": 0.02279898042963453, "learning_rate": 1.8973984286913584e-05, "loss": 0.4228, "step": 43 }, { "epoch": 0.4782608695652174, "grad_norm": 0.022273693107528456, "learning_rate": 1.8888354486549238e-05, "loss": 0.4141, "step": 44 }, { "epoch": 0.4891304347826087, "grad_norm": 0.022833837988057187, "learning_rate": 1.8799502578106533e-05, "loss": 0.4252, "step": 45 }, { "epoch": 0.5, "grad_norm": 0.021546758234096523, "learning_rate": 1.8707460771197773e-05, "loss": 0.4417, "step": 46 }, { "epoch": 0.5108695652173914, "grad_norm": 0.019884209926995258, "learning_rate": 1.861226243180201e-05, "loss": 0.4249, "step": 47 }, { "epoch": 0.5217391304347826, "grad_norm": 0.02067326340118169, "learning_rate": 1.8513942070169572e-05, "loss": 0.4498, "step": 48 }, { "epoch": 0.532608695652174, "grad_norm": 0.018558394593515687, "learning_rate": 1.8412535328311813e-05, "loss": 0.4188, "step": 49 }, { "epoch": 0.5434782608695652, "grad_norm": 0.019420981988919603, "learning_rate": 1.8308078967080547e-05, "loss": 0.425, "step": 50 }, { "epoch": 0.5543478260869565, "grad_norm": 0.01678791068237689, "learning_rate": 1.8200610852841913e-05, 
"loss": 0.3955, "step": 51 }, { "epoch": 0.5652173913043478, "grad_norm": 0.018788790486373518, "learning_rate": 1.8090169943749477e-05, "loss": 0.4273, "step": 52 }, { "epoch": 0.5760869565217391, "grad_norm": 0.01812533816051695, "learning_rate": 1.7976796275621556e-05, "loss": 0.3664, "step": 53 }, { "epoch": 0.5869565217391305, "grad_norm": 0.017190627828475347, "learning_rate": 1.7860530947427878e-05, "loss": 0.4481, "step": 54 }, { "epoch": 0.5978260869565217, "grad_norm": 0.019689749698733695, "learning_rate": 1.7741416106390828e-05, "loss": 0.4529, "step": 55 }, { "epoch": 0.6086956521739131, "grad_norm": 0.017252168109986435, "learning_rate": 1.761949493270671e-05, "loss": 0.4251, "step": 56 }, { "epoch": 0.6195652173913043, "grad_norm": 0.017554978035306083, "learning_rate": 1.7494811623892543e-05, "loss": 0.4348, "step": 57 }, { "epoch": 0.6304347826086957, "grad_norm": 0.01728596934943526, "learning_rate": 1.736741137876405e-05, "loss": 0.4298, "step": 58 }, { "epoch": 0.6413043478260869, "grad_norm": 0.016841718915483313, "learning_rate": 1.72373403810507e-05, "loss": 0.424, "step": 59 }, { "epoch": 0.6521739130434783, "grad_norm": 0.017988846143621267, "learning_rate": 1.710464578265369e-05, "loss": 0.4192, "step": 60 }, { "epoch": 0.6630434782608695, "grad_norm": 0.016118964656136582, "learning_rate": 1.696937568655294e-05, "loss": 0.4388, "step": 61 }, { "epoch": 0.6739130434782609, "grad_norm": 0.016980548999802704, "learning_rate": 1.6831579129369347e-05, "loss": 0.4337, "step": 62 }, { "epoch": 0.6847826086956522, "grad_norm": 0.016896592627422335, "learning_rate": 1.6691306063588583e-05, "loss": 0.4004, "step": 63 }, { "epoch": 0.6956521739130435, "grad_norm": 0.015889660322036148, "learning_rate": 1.6548607339452853e-05, "loss": 0.4393, "step": 64 }, { "epoch": 0.7065217391304348, "grad_norm": 0.016428852385924283, "learning_rate": 1.6403534686527223e-05, "loss": 0.4239, "step": 65 }, { "epoch": 0.717391304347826, "grad_norm": 0.01539489673351542, "learning_rate": 1.6256140694947217e-05, "loss": 0.3979, "step": 66 }, { "epoch": 0.7282608695652174, "grad_norm": 0.017016571801118784, "learning_rate": 1.6106478796354382e-05, "loss": 0.4531, "step": 67 }, { "epoch": 0.7391304347826086, "grad_norm": 0.018614609000821066, "learning_rate": 1.595460324452688e-05, "loss": 0.3969, "step": 68 }, { "epoch": 0.75, "grad_norm": 0.01603990010550668, "learning_rate": 1.5800569095711983e-05, "loss": 0.4444, "step": 69 }, { "epoch": 0.7608695652173914, "grad_norm": 0.016345231925260058, "learning_rate": 1.5644432188667695e-05, "loss": 0.43, "step": 70 }, { "epoch": 0.7717391304347826, "grad_norm": 0.014545507199229903, "learning_rate": 1.5486249124420702e-05, "loss": 0.4395, "step": 71 }, { "epoch": 0.782608695652174, "grad_norm": 0.016159427204217675, "learning_rate": 1.5326077245747998e-05, "loss": 0.4324, "step": 72 }, { "epoch": 0.7934782608695652, "grad_norm": 0.01577781927643107, "learning_rate": 1.5163974616389621e-05, "loss": 0.3783, "step": 73 }, { "epoch": 0.8043478260869565, "grad_norm": 0.020543349930487027, "learning_rate": 1.5000000000000002e-05, "loss": 0.4517, "step": 74 }, { "epoch": 0.8152173913043478, "grad_norm": 0.0162387560400049, "learning_rate": 1.4834212838845639e-05, "loss": 0.4323, "step": 75 }, { "epoch": 0.8260869565217391, "grad_norm": 0.017194073308575693, "learning_rate": 1.4666673232256738e-05, "loss": 0.4531, "step": 76 }, { "epoch": 0.8369565217391305, "grad_norm": 0.014830688698626977, "learning_rate": 1.449744191484066e-05, "loss": 0.4044, "step": 77 
}, { "epoch": 0.8478260869565217, "grad_norm": 0.017667656527989815, "learning_rate": 1.4326580234465084e-05, "loss": 0.4084, "step": 78 }, { "epoch": 0.8586956521739131, "grad_norm": 0.01665626106397431, "learning_rate": 1.4154150130018867e-05, "loss": 0.3811, "step": 79 }, { "epoch": 0.8695652173913043, "grad_norm": 0.015487133239067268, "learning_rate": 1.3980214108958626e-05, "loss": 0.3792, "step": 80 }, { "epoch": 0.8804347826086957, "grad_norm": 0.015980794746876442, "learning_rate": 1.380483522464923e-05, "loss": 0.3567, "step": 81 }, { "epoch": 0.8913043478260869, "grad_norm": 0.01742924875832292, "learning_rate": 1.362807705350641e-05, "loss": 0.4108, "step": 82 }, { "epoch": 0.9021739130434783, "grad_norm": 0.01621883533665314, "learning_rate": 1.3450003671949707e-05, "loss": 0.3885, "step": 83 }, { "epoch": 0.9130434782608695, "grad_norm": 0.01816609517986378, "learning_rate": 1.3270679633174219e-05, "loss": 0.4195, "step": 84 }, { "epoch": 0.9239130434782609, "grad_norm": 0.01700022402834444, "learning_rate": 1.3090169943749475e-05, "loss": 0.4008, "step": 85 }, { "epoch": 0.9347826086956522, "grad_norm": 0.015423948003415178, "learning_rate": 1.2908540040053992e-05, "loss": 0.4284, "step": 86 }, { "epoch": 0.9456521739130435, "grad_norm": 0.016648244343970338, "learning_rate": 1.2725855764553981e-05, "loss": 0.4353, "step": 87 }, { "epoch": 0.9565217391304348, "grad_norm": 0.018695896408094728, "learning_rate": 1.2542183341934873e-05, "loss": 0.4454, "step": 88 }, { "epoch": 0.967391304347826, "grad_norm": 0.014985563910038587, "learning_rate": 1.2357589355094275e-05, "loss": 0.4254, "step": 89 }, { "epoch": 0.9782608695652174, "grad_norm": 0.0158615730686512, "learning_rate": 1.217214072100508e-05, "loss": 0.4141, "step": 90 }, { "epoch": 0.9891304347826086, "grad_norm": 0.01615877678103288, "learning_rate": 1.1985904666457455e-05, "loss": 0.3743, "step": 91 }, { "epoch": 1.0, "grad_norm": 0.017892852375543514, "learning_rate": 1.179894870368854e-05, "loss": 0.4298, "step": 92 }, { "epoch": 1.0, "eval_loss": 0.48603561520576477, "eval_runtime": 5.8299, "eval_samples_per_second": 0.172, "eval_steps_per_second": 0.172, "step": 92 }, { "epoch": 1.0108695652173914, "grad_norm": 0.017298462574072342, "learning_rate": 1.1611340605908643e-05, "loss": 0.4148, "step": 93 }, { "epoch": 1.0217391304347827, "grad_norm": 0.017838476342072244, "learning_rate": 1.1423148382732854e-05, "loss": 0.4034, "step": 94 }, { "epoch": 1.0326086956521738, "grad_norm": 0.01861199740817311, "learning_rate": 1.1234440255526948e-05, "loss": 0.4219, "step": 95 }, { "epoch": 1.0434782608695652, "grad_norm": 0.01671425099245661, "learning_rate": 1.1045284632676535e-05, "loss": 0.4046, "step": 96 }, { "epoch": 1.0543478260869565, "grad_norm": 0.01861001152790246, "learning_rate": 1.08557500847884e-05, "loss": 0.4009, "step": 97 }, { "epoch": 1.065217391304348, "grad_norm": 0.018107901828337766, "learning_rate": 1.066590531983304e-05, "loss": 0.4089, "step": 98 }, { "epoch": 1.0760869565217392, "grad_norm": 0.0174920546188433, "learning_rate": 1.0475819158237426e-05, "loss": 0.3954, "step": 99 }, { "epoch": 1.0869565217391304, "grad_norm": 0.016682152706871897, "learning_rate": 1.0285560507936962e-05, "loss": 0.4289, "step": 100 }, { "epoch": 1.0978260869565217, "grad_norm": 0.018098011701254673, "learning_rate": 1.0095198339395769e-05, "loss": 0.4302, "step": 101 }, { "epoch": 1.108695652173913, "grad_norm": 0.015568335864025949, "learning_rate": 9.904801660604234e-06, "loss": 0.409, "step": 102 }, { 
"epoch": 1.1195652173913044, "grad_norm": 0.016958799953214254, "learning_rate": 9.71443949206304e-06, "loss": 0.4039, "step": 103 }, { "epoch": 1.1304347826086956, "grad_norm": 0.017521827484537653, "learning_rate": 9.524180841762577e-06, "loss": 0.4142, "step": 104 }, { "epoch": 1.141304347826087, "grad_norm": 0.018438050806712614, "learning_rate": 9.334094680166962e-06, "loss": 0.4085, "step": 105 }, { "epoch": 1.1521739130434783, "grad_norm": 0.01941444882552903, "learning_rate": 9.144249915211605e-06, "loss": 0.4298, "step": 106 }, { "epoch": 1.1630434782608696, "grad_norm": 0.017649511484447825, "learning_rate": 8.954715367323468e-06, "loss": 0.4161, "step": 107 }, { "epoch": 1.1739130434782608, "grad_norm": 0.01677483752186088, "learning_rate": 8.765559744473054e-06, "loss": 0.4042, "step": 108 }, { "epoch": 1.184782608695652, "grad_norm": 0.017213373571849412, "learning_rate": 8.576851617267151e-06, "loss": 0.409, "step": 109 }, { "epoch": 1.1956521739130435, "grad_norm": 0.01804604085921314, "learning_rate": 8.388659394091362e-06, "loss": 0.4195, "step": 110 }, { "epoch": 1.2065217391304348, "grad_norm": 0.019524779611806008, "learning_rate": 8.201051296311462e-06, "loss": 0.4144, "step": 111 }, { "epoch": 1.2173913043478262, "grad_norm": 0.01961583109753067, "learning_rate": 8.014095333542548e-06, "loss": 0.4169, "step": 112 }, { "epoch": 1.2282608695652173, "grad_norm": 0.01828151965518523, "learning_rate": 7.827859278994924e-06, "loss": 0.3989, "step": 113 }, { "epoch": 1.2391304347826086, "grad_norm": 0.01958662355054947, "learning_rate": 7.642410644905726e-06, "loss": 0.4166, "step": 114 }, { "epoch": 1.25, "grad_norm": 0.018324698258002987, "learning_rate": 7.4578166580651335e-06, "loss": 0.4191, "step": 115 }, { "epoch": 1.2608695652173914, "grad_norm": 0.01711860737234338, "learning_rate": 7.274144235446024e-06, "loss": 0.3976, "step": 116 }, { "epoch": 1.2717391304347827, "grad_norm": 0.015977161100683545, "learning_rate": 7.0914599599460095e-06, "loss": 0.4301, "step": 117 }, { "epoch": 1.2826086956521738, "grad_norm": 0.017423189335602548, "learning_rate": 6.909830056250527e-06, "loss": 0.3972, "step": 118 }, { "epoch": 1.2934782608695652, "grad_norm": 0.020183820621289872, "learning_rate": 6.729320366825785e-06, "loss": 0.406, "step": 119 }, { "epoch": 1.3043478260869565, "grad_norm": 0.0188473400999686, "learning_rate": 6.549996328050296e-06, "loss": 0.3886, "step": 120 }, { "epoch": 1.315217391304348, "grad_norm": 0.02064149059917863, "learning_rate": 6.3719229464935915e-06, "loss": 0.456, "step": 121 }, { "epoch": 1.3260869565217392, "grad_norm": 0.02178374309057221, "learning_rate": 6.19516477535077e-06, "loss": 0.4171, "step": 122 }, { "epoch": 1.3369565217391304, "grad_norm": 0.02027707601043217, "learning_rate": 6.019785891041381e-06, "loss": 0.3642, "step": 123 }, { "epoch": 1.3478260869565217, "grad_norm": 0.021269660172408174, "learning_rate": 5.845849869981137e-06, "loss": 0.4017, "step": 124 }, { "epoch": 1.358695652173913, "grad_norm": 0.022411495134838297, "learning_rate": 5.673419765534915e-06, "loss": 0.4071, "step": 125 }, { "epoch": 1.3695652173913042, "grad_norm": 0.01951721173355996, "learning_rate": 5.502558085159344e-06, "loss": 0.3481, "step": 126 }, { "epoch": 1.3804347826086958, "grad_norm": 0.024116070335566953, "learning_rate": 5.333326767743263e-06, "loss": 0.4188, "step": 127 }, { "epoch": 1.391304347826087, "grad_norm": 0.026359215985631845, "learning_rate": 5.165787161154361e-06, "loss": 0.4169, "step": 128 }, { "epoch": 
1.4021739130434783, "grad_norm": 0.022217706817923774, "learning_rate": 5.000000000000003e-06, "loss": 0.4236, "step": 129 }, { "epoch": 1.4130434782608696, "grad_norm": 0.021753073317964423, "learning_rate": 4.836025383610382e-06, "loss": 0.432, "step": 130 }, { "epoch": 1.4239130434782608, "grad_norm": 0.022436739303883135, "learning_rate": 4.673922754252001e-06, "loss": 0.4187, "step": 131 }, { "epoch": 1.434782608695652, "grad_norm": 0.021898203612681105, "learning_rate": 4.513750875579303e-06, "loss": 0.4006, "step": 132 }, { "epoch": 1.4456521739130435, "grad_norm": 0.022105558271218153, "learning_rate": 4.355567811332311e-06, "loss": 0.428, "step": 133 }, { "epoch": 1.4565217391304348, "grad_norm": 0.02033202844138333, "learning_rate": 4.19943090428802e-06, "loss": 0.3973, "step": 134 }, { "epoch": 1.4673913043478262, "grad_norm": 0.01963872298504158, "learning_rate": 4.045396755473121e-06, "loss": 0.4008, "step": 135 }, { "epoch": 1.4782608695652173, "grad_norm": 0.020345281605454676, "learning_rate": 3.893521203645618e-06, "loss": 0.4147, "step": 136 }, { "epoch": 1.4891304347826086, "grad_norm": 0.021452868059861192, "learning_rate": 3.743859305052785e-06, "loss": 0.4149, "step": 137 }, { "epoch": 1.5, "grad_norm": 0.020207806262184335, "learning_rate": 3.596465313472778e-06, "loss": 0.3836, "step": 138 }, { "epoch": 1.5108695652173914, "grad_norm": 0.02088539870362614, "learning_rate": 3.4513926605471504e-06, "loss": 0.4138, "step": 139 }, { "epoch": 1.5217391304347827, "grad_norm": 0.019077086049604102, "learning_rate": 3.308693936411421e-06, "loss": 0.3832, "step": 140 }, { "epoch": 1.5326086956521738, "grad_norm": 0.020912600263192083, "learning_rate": 3.1684208706306572e-06, "loss": 0.4136, "step": 141 }, { "epoch": 1.5434782608695652, "grad_norm": 0.02070907606152171, "learning_rate": 3.0306243134470668e-06, "loss": 0.4311, "step": 142 }, { "epoch": 1.5543478260869565, "grad_norm": 0.02054030797884719, "learning_rate": 2.8953542173463133e-06, "loss": 0.3938, "step": 143 }, { "epoch": 1.5652173913043477, "grad_norm": 0.022068383731631152, "learning_rate": 2.7626596189492983e-06, "loss": 0.3869, "step": 144 }, { "epoch": 1.5760869565217392, "grad_norm": 0.022095291706298183, "learning_rate": 2.6325886212359496e-06, "loss": 0.4151, "step": 145 }, { "epoch": 1.5869565217391304, "grad_norm": 0.021111713836599773, "learning_rate": 2.5051883761074613e-06, "loss": 0.4354, "step": 146 }, { "epoch": 1.5978260869565217, "grad_norm": 0.02237517232258718, "learning_rate": 2.380505067293293e-06, "loss": 0.4146, "step": 147 }, { "epoch": 1.608695652173913, "grad_norm": 0.019623457979118124, "learning_rate": 2.2585838936091753e-06, "loss": 0.4119, "step": 148 }, { "epoch": 1.6195652173913042, "grad_norm": 0.021829275784838623, "learning_rate": 2.1394690525721275e-06, "loss": 0.4135, "step": 149 }, { "epoch": 1.6304347826086958, "grad_norm": 0.021180171763894674, "learning_rate": 2.0232037243784475e-06, "loss": 0.423, "step": 150 }, { "epoch": 1.641304347826087, "grad_norm": 0.02703521598184206, "learning_rate": 1.9098300562505266e-06, "loss": 0.4279, "step": 151 }, { "epoch": 1.6521739130434783, "grad_norm": 0.020030953141784397, "learning_rate": 1.7993891471580894e-06, "loss": 0.3998, "step": 152 }, { "epoch": 1.6630434782608696, "grad_norm": 0.02096005243951767, "learning_rate": 1.6919210329194535e-06, "loss": 0.4117, "step": 153 }, { "epoch": 1.6739130434782608, "grad_norm": 0.02114720153165263, "learning_rate": 1.587464671688187e-06, "loss": 0.4267, "step": 154 }, { "epoch": 
1.6847826086956523, "grad_norm": 0.025390809807677033, "learning_rate": 1.4860579298304311e-06, "loss": 0.415, "step": 155 }, { "epoch": 1.6956521739130435, "grad_norm": 0.021410687428214302, "learning_rate": 1.3877375681979944e-06, "loss": 0.4043, "step": 156 }, { "epoch": 1.7065217391304348, "grad_norm": 0.023909950655856917, "learning_rate": 1.2925392288022299e-06, "loss": 0.4445, "step": 157 }, { "epoch": 1.7173913043478262, "grad_norm": 0.021428679509317893, "learning_rate": 1.2004974218934695e-06, "loss": 0.4238, "step": 158 }, { "epoch": 1.7282608695652173, "grad_norm": 0.02137946805669467, "learning_rate": 1.1116455134507665e-06, "loss": 0.3674, "step": 159 }, { "epoch": 1.7391304347826086, "grad_norm": 0.022243431881806403, "learning_rate": 1.0260157130864178e-06, "loss": 0.426, "step": 160 }, { "epoch": 1.75, "grad_norm": 0.024681458739084422, "learning_rate": 9.436390623696911e-07, "loss": 0.4313, "step": 161 }, { "epoch": 1.7608695652173914, "grad_norm": 0.020315997518542663, "learning_rate": 8.645454235739903e-07, "loss": 0.3921, "step": 162 }, { "epoch": 1.7717391304347827, "grad_norm": 0.02332809992280282, "learning_rate": 7.887634688515e-07, "loss": 0.4273, "step": 163 }, { "epoch": 1.7826086956521738, "grad_norm": 0.020023620557189287, "learning_rate": 7.163206698392744e-07, "loss": 0.3738, "step": 164 }, { "epoch": 1.7934782608695652, "grad_norm": 0.022892114041331832, "learning_rate": 6.472432877005341e-07, "loss": 0.404, "step": 165 }, { "epoch": 1.8043478260869565, "grad_norm": 0.021905535119289433, "learning_rate": 5.815563636047539e-07, "loss": 0.4049, "step": 166 }, { "epoch": 1.8152173913043477, "grad_norm": 0.02074267534357053, "learning_rate": 5.192837096500058e-07, "loss": 0.4166, "step": 167 }, { "epoch": 1.8260869565217392, "grad_norm": 0.01843268176253824, "learning_rate": 4.6044790023087373e-07, "loss": 0.357, "step": 168 }, { "epoch": 1.8369565217391304, "grad_norm": 0.02079361540838812, "learning_rate": 4.0507026385502747e-07, "loss": 0.4344, "step": 169 }, { "epoch": 1.8478260869565217, "grad_norm": 0.021623517427470682, "learning_rate": 3.531708754114438e-07, "loss": 0.3878, "step": 170 }, { "epoch": 1.858695652173913, "grad_norm": 0.02038214952768136, "learning_rate": 3.0476854889308737e-07, "loss": 0.3856, "step": 171 }, { "epoch": 1.8695652173913042, "grad_norm": 0.023803627119720294, "learning_rate": 2.5988083057666534e-07, "loss": 0.416, "step": 172 }, { "epoch": 1.8804347826086958, "grad_norm": 0.02284511042725229, "learning_rate": 2.1852399266194312e-07, "loss": 0.4268, "step": 173 }, { "epoch": 1.891304347826087, "grad_norm": 0.022105485087235137, "learning_rate": 1.8071302737293294e-07, "loss": 0.3996, "step": 174 }, { "epoch": 1.9021739130434783, "grad_norm": 0.02196082349655336, "learning_rate": 1.464616415230702e-07, "loss": 0.3977, "step": 175 }, { "epoch": 1.9130434782608696, "grad_norm": 0.02582111983328924, "learning_rate": 1.1578225154637579e-07, "loss": 0.4167, "step": 176 }, { "epoch": 1.9239130434782608, "grad_norm": 0.020467066579464847, "learning_rate": 8.868597899638897e-08, "loss": 0.4022, "step": 177 }, { "epoch": 1.9347826086956523, "grad_norm": 0.018578643308574946, "learning_rate": 6.51826465144978e-08, "loss": 0.4305, "step": 178 }, { "epoch": 1.9456521739130435, "grad_norm": 0.020351166995274374, "learning_rate": 4.528077426915412e-08, "loss": 0.4323, "step": 179 }, { "epoch": 1.9565217391304348, "grad_norm": 0.01983648675971413, "learning_rate": 2.898757686722542e-08, "loss": 0.3953, "step": 180 }, { "epoch": 
1.9673913043478262, "grad_norm": 0.01924389485648581, "learning_rate": 1.630896073864352e-08, "loss": 0.4024, "step": 181 }, { "epoch": 1.9782608695652173, "grad_norm": 0.021496764470333395, "learning_rate": 7.2495219952639636e-09, "loss": 0.4284, "step": 182 }, { "epoch": 1.9891304347826086, "grad_norm": 0.02172124469681476, "learning_rate": 1.8125447647421302e-09, "loss": 0.3954, "step": 183 }, { "epoch": 2.0, "grad_norm": 0.021341261717375007, "learning_rate": 0.0, "loss": 0.3989, "step": 184 }, { "epoch": 2.0, "eval_loss": 0.481020450592041, "eval_runtime": 2.8131, "eval_samples_per_second": 0.355, "eval_steps_per_second": 0.355, "step": 184 }, { "epoch": 2.0, "step": 184, "total_flos": 2780892130967552.0, "train_loss": 0.43346462998053303, "train_runtime": 8632.6044, "train_samples_per_second": 0.675, "train_steps_per_second": 0.021 } ], "logging_steps": 1, "max_steps": 184, "num_input_tokens_seen": 0, "num_train_epochs": 2, "save_steps": 500, "stateful_callbacks": { "TrainerControl": { "args": { "should_epoch_stop": false, "should_evaluate": false, "should_log": false, "should_save": true, "should_training_stop": true }, "attributes": {} } }, "total_flos": 2780892130967552.0, "train_batch_size": 4, "trial_name": null, "trial_params": null }