{ "best_metric": null, "best_model_checkpoint": null, "epoch": 6.0, "eval_steps": 500, "global_step": 1680, "is_hyper_param_search": false, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 0.03571428571428571, "grad_norm": 0.2544403374195099, "learning_rate": 9.970149253731344e-05, "loss": 1.4916, "step": 10 }, { "epoch": 0.07142857142857142, "grad_norm": 0.17301955819129944, "learning_rate": 9.91044776119403e-05, "loss": 0.908, "step": 20 }, { "epoch": 0.10714285714285714, "grad_norm": 0.11398938298225403, "learning_rate": 9.850746268656717e-05, "loss": 0.4285, "step": 30 }, { "epoch": 0.14285714285714285, "grad_norm": 0.08889368921518326, "learning_rate": 9.791044776119404e-05, "loss": 0.3305, "step": 40 }, { "epoch": 0.17857142857142858, "grad_norm": 0.07145170122385025, "learning_rate": 9.731343283582089e-05, "loss": 0.3709, "step": 50 }, { "epoch": 0.21428571428571427, "grad_norm": 0.08652878552675247, "learning_rate": 9.671641791044777e-05, "loss": 0.3822, "step": 60 }, { "epoch": 0.25, "grad_norm": 0.0885363519191742, "learning_rate": 9.611940298507464e-05, "loss": 0.3799, "step": 70 }, { "epoch": 0.2857142857142857, "grad_norm": 0.07026822865009308, "learning_rate": 9.552238805970149e-05, "loss": 0.4085, "step": 80 }, { "epoch": 0.32142857142857145, "grad_norm": 0.07123386859893799, "learning_rate": 9.492537313432837e-05, "loss": 0.2925, "step": 90 }, { "epoch": 0.35714285714285715, "grad_norm": 0.07839447259902954, "learning_rate": 9.432835820895522e-05, "loss": 0.3455, "step": 100 }, { "epoch": 0.39285714285714285, "grad_norm": 0.0755082443356514, "learning_rate": 9.373134328358209e-05, "loss": 0.3406, "step": 110 }, { "epoch": 0.42857142857142855, "grad_norm": 0.0823201835155487, "learning_rate": 9.313432835820896e-05, "loss": 0.3259, "step": 120 }, { "epoch": 0.4642857142857143, "grad_norm": 0.08403510600328445, "learning_rate": 9.253731343283582e-05, "loss": 0.3017, "step": 130 }, { "epoch": 0.5, "grad_norm": 0.10145727545022964, "learning_rate": 9.194029850746269e-05, "loss": 0.3473, "step": 140 }, { "epoch": 0.5357142857142857, "grad_norm": 0.07516448199748993, "learning_rate": 9.134328358208956e-05, "loss": 0.2865, "step": 150 }, { "epoch": 0.5714285714285714, "grad_norm": 0.07223515212535858, "learning_rate": 9.074626865671642e-05, "loss": 0.3045, "step": 160 }, { "epoch": 0.6071428571428571, "grad_norm": 0.07799769937992096, "learning_rate": 9.014925373134329e-05, "loss": 0.3306, "step": 170 }, { "epoch": 0.6428571428571429, "grad_norm": 0.08388201892375946, "learning_rate": 8.955223880597016e-05, "loss": 0.309, "step": 180 }, { "epoch": 0.6785714285714286, "grad_norm": 0.0838930755853653, "learning_rate": 8.895522388059702e-05, "loss": 0.3356, "step": 190 }, { "epoch": 0.7142857142857143, "grad_norm": 0.07891852408647537, "learning_rate": 8.835820895522389e-05, "loss": 0.3048, "step": 200 }, { "epoch": 0.75, "grad_norm": 0.07239601761102676, "learning_rate": 8.776119402985074e-05, "loss": 0.3601, "step": 210 }, { "epoch": 0.7857142857142857, "grad_norm": 0.07549150288105011, "learning_rate": 8.716417910447762e-05, "loss": 0.3415, "step": 220 }, { "epoch": 0.8214285714285714, "grad_norm": 0.08595260232686996, "learning_rate": 8.656716417910447e-05, "loss": 0.3521, "step": 230 }, { "epoch": 0.8571428571428571, "grad_norm": 0.08356776833534241, "learning_rate": 8.597014925373134e-05, "loss": 0.2898, "step": 240 }, { "epoch": 0.8928571428571429, "grad_norm": 0.07699938863515854, "learning_rate": 8.537313432835822e-05, "loss": 0.3038, 
"step": 250 }, { "epoch": 0.9285714285714286, "grad_norm": 0.07691926509141922, "learning_rate": 8.477611940298507e-05, "loss": 0.3403, "step": 260 }, { "epoch": 0.9642857142857143, "grad_norm": 0.07808340340852737, "learning_rate": 8.417910447761194e-05, "loss": 0.3164, "step": 270 }, { "epoch": 1.0, "grad_norm": 0.07639653235673904, "learning_rate": 8.358208955223881e-05, "loss": 0.2872, "step": 280 }, { "epoch": 1.0357142857142858, "grad_norm": 0.07973363250494003, "learning_rate": 8.298507462686567e-05, "loss": 0.3111, "step": 290 }, { "epoch": 1.0714285714285714, "grad_norm": 0.08878948539495468, "learning_rate": 8.238805970149254e-05, "loss": 0.317, "step": 300 }, { "epoch": 1.1071428571428572, "grad_norm": 0.07322458922863007, "learning_rate": 8.179104477611941e-05, "loss": 0.2999, "step": 310 }, { "epoch": 1.1428571428571428, "grad_norm": 0.08571141958236694, "learning_rate": 8.119402985074627e-05, "loss": 0.2642, "step": 320 }, { "epoch": 1.1785714285714286, "grad_norm": 0.10807636380195618, "learning_rate": 8.059701492537314e-05, "loss": 0.3024, "step": 330 }, { "epoch": 1.2142857142857142, "grad_norm": 0.1146746426820755, "learning_rate": 8e-05, "loss": 0.2788, "step": 340 }, { "epoch": 1.25, "grad_norm": 0.10096397995948792, "learning_rate": 7.940298507462687e-05, "loss": 0.278, "step": 350 }, { "epoch": 1.2857142857142856, "grad_norm": 0.08820641040802002, "learning_rate": 7.880597014925374e-05, "loss": 0.3123, "step": 360 }, { "epoch": 1.3214285714285714, "grad_norm": 0.09731289744377136, "learning_rate": 7.820895522388059e-05, "loss": 0.2962, "step": 370 }, { "epoch": 1.3571428571428572, "grad_norm": 0.09726134687662125, "learning_rate": 7.761194029850747e-05, "loss": 0.2723, "step": 380 }, { "epoch": 1.3928571428571428, "grad_norm": 0.1366291046142578, "learning_rate": 7.701492537313433e-05, "loss": 0.2549, "step": 390 }, { "epoch": 1.4285714285714286, "grad_norm": 0.09392824023962021, "learning_rate": 7.641791044776119e-05, "loss": 0.2785, "step": 400 }, { "epoch": 1.4642857142857144, "grad_norm": 0.10537943243980408, "learning_rate": 7.582089552238806e-05, "loss": 0.2692, "step": 410 }, { "epoch": 1.5, "grad_norm": 0.11430277675390244, "learning_rate": 7.522388059701493e-05, "loss": 0.2787, "step": 420 }, { "epoch": 1.5357142857142856, "grad_norm": 0.1093386635184288, "learning_rate": 7.46268656716418e-05, "loss": 0.2953, "step": 430 }, { "epoch": 1.5714285714285714, "grad_norm": 0.10960441827774048, "learning_rate": 7.402985074626866e-05, "loss": 0.2906, "step": 440 }, { "epoch": 1.6071428571428572, "grad_norm": 0.11768423765897751, "learning_rate": 7.343283582089552e-05, "loss": 0.3433, "step": 450 }, { "epoch": 1.6428571428571428, "grad_norm": 0.10420206189155579, "learning_rate": 7.283582089552239e-05, "loss": 0.2769, "step": 460 }, { "epoch": 1.6785714285714286, "grad_norm": 0.090525321662426, "learning_rate": 7.223880597014926e-05, "loss": 0.2859, "step": 470 }, { "epoch": 1.7142857142857144, "grad_norm": 0.11874176561832428, "learning_rate": 7.164179104477612e-05, "loss": 0.3243, "step": 480 }, { "epoch": 1.75, "grad_norm": 0.08634401112794876, "learning_rate": 7.104477611940299e-05, "loss": 0.2398, "step": 490 }, { "epoch": 1.7857142857142856, "grad_norm": 0.10205753147602081, "learning_rate": 7.044776119402984e-05, "loss": 0.3725, "step": 500 }, { "epoch": 1.8214285714285714, "grad_norm": 0.09119073301553726, "learning_rate": 6.985074626865672e-05, "loss": 0.2374, "step": 510 }, { "epoch": 1.8571428571428572, "grad_norm": 0.11212314665317535, "learning_rate": 
6.925373134328359e-05, "loss": 0.2958, "step": 520 }, { "epoch": 1.8928571428571428, "grad_norm": 0.11491134017705917, "learning_rate": 6.865671641791044e-05, "loss": 0.291, "step": 530 }, { "epoch": 1.9285714285714286, "grad_norm": 0.11586255580186844, "learning_rate": 6.805970149253732e-05, "loss": 0.3222, "step": 540 }, { "epoch": 1.9642857142857144, "grad_norm": 0.10049410909414291, "learning_rate": 6.746268656716418e-05, "loss": 0.2914, "step": 550 }, { "epoch": 2.0, "grad_norm": 0.08521942794322968, "learning_rate": 6.686567164179106e-05, "loss": 0.2999, "step": 560 }, { "epoch": 2.0357142857142856, "grad_norm": 0.11586008220911026, "learning_rate": 6.626865671641791e-05, "loss": 0.2531, "step": 570 }, { "epoch": 2.0714285714285716, "grad_norm": 0.1241249069571495, "learning_rate": 6.567164179104478e-05, "loss": 0.2422, "step": 580 }, { "epoch": 2.107142857142857, "grad_norm": 0.15100309252738953, "learning_rate": 6.507462686567164e-05, "loss": 0.2177, "step": 590 }, { "epoch": 2.142857142857143, "grad_norm": 0.1219455674290657, "learning_rate": 6.447761194029851e-05, "loss": 0.2349, "step": 600 }, { "epoch": 2.1785714285714284, "grad_norm": 0.1460845172405243, "learning_rate": 6.388059701492538e-05, "loss": 0.2407, "step": 610 }, { "epoch": 2.2142857142857144, "grad_norm": 0.1512862741947174, "learning_rate": 6.328358208955224e-05, "loss": 0.284, "step": 620 }, { "epoch": 2.25, "grad_norm": 0.14087745547294617, "learning_rate": 6.268656716417911e-05, "loss": 0.2561, "step": 630 }, { "epoch": 2.2857142857142856, "grad_norm": 0.11492959409952164, "learning_rate": 6.208955223880598e-05, "loss": 0.2708, "step": 640 }, { "epoch": 2.3214285714285716, "grad_norm": 0.15090124309062958, "learning_rate": 6.149253731343284e-05, "loss": 0.2092, "step": 650 }, { "epoch": 2.357142857142857, "grad_norm": 0.1459421068429947, "learning_rate": 6.08955223880597e-05, "loss": 0.2442, "step": 660 }, { "epoch": 2.392857142857143, "grad_norm": 0.1471620351076126, "learning_rate": 6.029850746268657e-05, "loss": 0.267, "step": 670 }, { "epoch": 2.4285714285714284, "grad_norm": 0.1476091742515564, "learning_rate": 5.970149253731343e-05, "loss": 0.2867, "step": 680 }, { "epoch": 2.4642857142857144, "grad_norm": 0.15003158152103424, "learning_rate": 5.91044776119403e-05, "loss": 0.2711, "step": 690 }, { "epoch": 2.5, "grad_norm": 0.16726762056350708, "learning_rate": 5.8507462686567175e-05, "loss": 0.2398, "step": 700 }, { "epoch": 2.5357142857142856, "grad_norm": 0.1285712569952011, "learning_rate": 5.7910447761194034e-05, "loss": 0.2768, "step": 710 }, { "epoch": 2.571428571428571, "grad_norm": 0.13130241632461548, "learning_rate": 5.73134328358209e-05, "loss": 0.2486, "step": 720 }, { "epoch": 2.607142857142857, "grad_norm": 0.15794238448143005, "learning_rate": 5.671641791044776e-05, "loss": 0.2367, "step": 730 }, { "epoch": 2.642857142857143, "grad_norm": 0.16475516557693481, "learning_rate": 5.6119402985074634e-05, "loss": 0.2593, "step": 740 }, { "epoch": 2.678571428571429, "grad_norm": 0.14723050594329834, "learning_rate": 5.5522388059701494e-05, "loss": 0.2398, "step": 750 }, { "epoch": 2.7142857142857144, "grad_norm": 0.14579030871391296, "learning_rate": 5.492537313432836e-05, "loss": 0.2603, "step": 760 }, { "epoch": 2.75, "grad_norm": 0.14156433939933777, "learning_rate": 5.432835820895522e-05, "loss": 0.2241, "step": 770 }, { "epoch": 2.7857142857142856, "grad_norm": 0.1483439803123474, "learning_rate": 5.373134328358209e-05, "loss": 0.2514, "step": 780 }, { "epoch": 2.821428571428571, 
"grad_norm": 0.1641162484884262, "learning_rate": 5.313432835820896e-05, "loss": 0.2479, "step": 790 }, { "epoch": 2.857142857142857, "grad_norm": 0.1514202207326889, "learning_rate": 5.253731343283582e-05, "loss": 0.2317, "step": 800 }, { "epoch": 2.892857142857143, "grad_norm": 0.13422024250030518, "learning_rate": 5.194029850746269e-05, "loss": 0.2639, "step": 810 }, { "epoch": 2.928571428571429, "grad_norm": 0.12607942521572113, "learning_rate": 5.134328358208955e-05, "loss": 0.2111, "step": 820 }, { "epoch": 2.9642857142857144, "grad_norm": 0.13523127138614655, "learning_rate": 5.074626865671642e-05, "loss": 0.2522, "step": 830 }, { "epoch": 3.0, "grad_norm": 0.17928624153137207, "learning_rate": 5.014925373134328e-05, "loss": 0.2688, "step": 840 }, { "epoch": 3.0357142857142856, "grad_norm": 0.15228058397769928, "learning_rate": 4.955223880597015e-05, "loss": 0.1855, "step": 850 }, { "epoch": 3.0714285714285716, "grad_norm": 0.15598702430725098, "learning_rate": 4.895522388059702e-05, "loss": 0.2103, "step": 860 }, { "epoch": 3.107142857142857, "grad_norm": 0.16875101625919342, "learning_rate": 4.8358208955223885e-05, "loss": 0.2006, "step": 870 }, { "epoch": 3.142857142857143, "grad_norm": 0.16867318749427795, "learning_rate": 4.7761194029850745e-05, "loss": 0.1751, "step": 880 }, { "epoch": 3.1785714285714284, "grad_norm": 0.17668938636779785, "learning_rate": 4.716417910447761e-05, "loss": 0.1818, "step": 890 }, { "epoch": 3.2142857142857144, "grad_norm": 0.19076865911483765, "learning_rate": 4.656716417910448e-05, "loss": 0.2115, "step": 900 }, { "epoch": 3.25, "grad_norm": 0.21991585195064545, "learning_rate": 4.5970149253731345e-05, "loss": 0.2076, "step": 910 }, { "epoch": 3.2857142857142856, "grad_norm": 0.1979696899652481, "learning_rate": 4.537313432835821e-05, "loss": 0.2359, "step": 920 }, { "epoch": 3.3214285714285716, "grad_norm": 0.20029081404209137, "learning_rate": 4.477611940298508e-05, "loss": 0.1868, "step": 930 }, { "epoch": 3.357142857142857, "grad_norm": 0.20280781388282776, "learning_rate": 4.4179104477611944e-05, "loss": 0.2117, "step": 940 }, { "epoch": 3.392857142857143, "grad_norm": 0.22452795505523682, "learning_rate": 4.358208955223881e-05, "loss": 0.2021, "step": 950 }, { "epoch": 3.4285714285714284, "grad_norm": 0.1844116449356079, "learning_rate": 4.298507462686567e-05, "loss": 0.2223, "step": 960 }, { "epoch": 3.4642857142857144, "grad_norm": 0.19978521764278412, "learning_rate": 4.238805970149254e-05, "loss": 0.1995, "step": 970 }, { "epoch": 3.5, "grad_norm": 0.19762808084487915, "learning_rate": 4.1791044776119404e-05, "loss": 0.1821, "step": 980 }, { "epoch": 3.5357142857142856, "grad_norm": 0.19516855478286743, "learning_rate": 4.119402985074627e-05, "loss": 0.1961, "step": 990 }, { "epoch": 3.571428571428571, "grad_norm": 0.1498711109161377, "learning_rate": 4.059701492537314e-05, "loss": 0.1688, "step": 1000 }, { "epoch": 3.607142857142857, "grad_norm": 0.2100873589515686, "learning_rate": 4e-05, "loss": 0.2069, "step": 1010 }, { "epoch": 3.642857142857143, "grad_norm": 0.19105233252048492, "learning_rate": 3.940298507462687e-05, "loss": 0.1784, "step": 1020 }, { "epoch": 3.678571428571429, "grad_norm": 0.20961785316467285, "learning_rate": 3.8805970149253736e-05, "loss": 0.2109, "step": 1030 }, { "epoch": 3.7142857142857144, "grad_norm": 0.2236211895942688, "learning_rate": 3.8208955223880596e-05, "loss": 0.1971, "step": 1040 }, { "epoch": 3.75, "grad_norm": 0.2324950248003006, "learning_rate": 3.761194029850746e-05, "loss": 0.2296, "step": 
1050 }, { "epoch": 3.7857142857142856, "grad_norm": 0.1860949546098709, "learning_rate": 3.701492537313433e-05, "loss": 0.1956, "step": 1060 }, { "epoch": 3.821428571428571, "grad_norm": 0.18874789774417877, "learning_rate": 3.6417910447761196e-05, "loss": 0.1876, "step": 1070 }, { "epoch": 3.857142857142857, "grad_norm": 0.20682772994041443, "learning_rate": 3.582089552238806e-05, "loss": 0.1775, "step": 1080 }, { "epoch": 3.892857142857143, "grad_norm": 0.1853661686182022, "learning_rate": 3.522388059701492e-05, "loss": 0.2158, "step": 1090 }, { "epoch": 3.928571428571429, "grad_norm": 0.2258283495903015, "learning_rate": 3.4626865671641795e-05, "loss": 0.2124, "step": 1100 }, { "epoch": 3.9642857142857144, "grad_norm": 0.22331808507442474, "learning_rate": 3.402985074626866e-05, "loss": 0.197, "step": 1110 }, { "epoch": 4.0, "grad_norm": 0.19635970890522003, "learning_rate": 3.343283582089553e-05, "loss": 0.2208, "step": 1120 }, { "epoch": 4.035714285714286, "grad_norm": 0.23908133804798126, "learning_rate": 3.283582089552239e-05, "loss": 0.1415, "step": 1130 }, { "epoch": 4.071428571428571, "grad_norm": 0.18970990180969238, "learning_rate": 3.2238805970149255e-05, "loss": 0.1568, "step": 1140 }, { "epoch": 4.107142857142857, "grad_norm": 0.19420242309570312, "learning_rate": 3.164179104477612e-05, "loss": 0.1341, "step": 1150 }, { "epoch": 4.142857142857143, "grad_norm": 0.23171567916870117, "learning_rate": 3.104477611940299e-05, "loss": 0.1501, "step": 1160 }, { "epoch": 4.178571428571429, "grad_norm": 0.2723298668861389, "learning_rate": 3.044776119402985e-05, "loss": 0.1318, "step": 1170 }, { "epoch": 4.214285714285714, "grad_norm": 0.23267976939678192, "learning_rate": 2.9850746268656714e-05, "loss": 0.1461, "step": 1180 }, { "epoch": 4.25, "grad_norm": 0.1981896311044693, "learning_rate": 2.9253731343283587e-05, "loss": 0.1456, "step": 1190 }, { "epoch": 4.285714285714286, "grad_norm": 0.23760183155536652, "learning_rate": 2.865671641791045e-05, "loss": 0.1363, "step": 1200 }, { "epoch": 4.321428571428571, "grad_norm": 0.2675766348838806, "learning_rate": 2.8059701492537317e-05, "loss": 0.1682, "step": 1210 }, { "epoch": 4.357142857142857, "grad_norm": 0.28035709261894226, "learning_rate": 2.746268656716418e-05, "loss": 0.1655, "step": 1220 }, { "epoch": 4.392857142857143, "grad_norm": 0.25161004066467285, "learning_rate": 2.6865671641791047e-05, "loss": 0.1716, "step": 1230 }, { "epoch": 4.428571428571429, "grad_norm": 0.22732681035995483, "learning_rate": 2.626865671641791e-05, "loss": 0.1834, "step": 1240 }, { "epoch": 4.464285714285714, "grad_norm": 0.27060192823410034, "learning_rate": 2.5671641791044776e-05, "loss": 0.1746, "step": 1250 }, { "epoch": 4.5, "grad_norm": 0.2511671185493469, "learning_rate": 2.507462686567164e-05, "loss": 0.138, "step": 1260 }, { "epoch": 4.535714285714286, "grad_norm": 0.2796226441860199, "learning_rate": 2.447761194029851e-05, "loss": 0.1491, "step": 1270 }, { "epoch": 4.571428571428571, "grad_norm": 0.2223314791917801, "learning_rate": 2.3880597014925373e-05, "loss": 0.1608, "step": 1280 }, { "epoch": 4.607142857142857, "grad_norm": 0.36256590485572815, "learning_rate": 2.328358208955224e-05, "loss": 0.1617, "step": 1290 }, { "epoch": 4.642857142857143, "grad_norm": 0.2692199647426605, "learning_rate": 2.2686567164179106e-05, "loss": 0.1569, "step": 1300 }, { "epoch": 4.678571428571429, "grad_norm": 0.24237553775310516, "learning_rate": 2.2089552238805972e-05, "loss": 0.1464, "step": 1310 }, { "epoch": 4.714285714285714, "grad_norm": 
0.3302353024482727, "learning_rate": 2.1492537313432835e-05, "loss": 0.152, "step": 1320 }, { "epoch": 4.75, "grad_norm": 0.30900838971138, "learning_rate": 2.0895522388059702e-05, "loss": 0.1583, "step": 1330 }, { "epoch": 4.785714285714286, "grad_norm": 0.24671396613121033, "learning_rate": 2.029850746268657e-05, "loss": 0.173, "step": 1340 }, { "epoch": 4.821428571428571, "grad_norm": 0.24723851680755615, "learning_rate": 1.9701492537313435e-05, "loss": 0.1596, "step": 1350 }, { "epoch": 4.857142857142857, "grad_norm": 0.25336161255836487, "learning_rate": 1.9104477611940298e-05, "loss": 0.1502, "step": 1360 }, { "epoch": 4.892857142857143, "grad_norm": 0.31974342465400696, "learning_rate": 1.8507462686567165e-05, "loss": 0.168, "step": 1370 }, { "epoch": 4.928571428571429, "grad_norm": 0.26496830582618713, "learning_rate": 1.791044776119403e-05, "loss": 0.1679, "step": 1380 }, { "epoch": 4.964285714285714, "grad_norm": 0.258620947599411, "learning_rate": 1.7313432835820898e-05, "loss": 0.147, "step": 1390 }, { "epoch": 5.0, "grad_norm": 0.23998712003231049, "learning_rate": 1.6716417910447764e-05, "loss": 0.1307, "step": 1400 }, { "epoch": 5.035714285714286, "grad_norm": 0.23837468028068542, "learning_rate": 1.6119402985074627e-05, "loss": 0.1111, "step": 1410 }, { "epoch": 5.071428571428571, "grad_norm": 0.25923416018486023, "learning_rate": 1.5522388059701494e-05, "loss": 0.1247, "step": 1420 }, { "epoch": 5.107142857142857, "grad_norm": 0.28286945819854736, "learning_rate": 1.4925373134328357e-05, "loss": 0.1243, "step": 1430 }, { "epoch": 5.142857142857143, "grad_norm": 0.2576877772808075, "learning_rate": 1.4328358208955225e-05, "loss": 0.1196, "step": 1440 }, { "epoch": 5.178571428571429, "grad_norm": 0.25924208760261536, "learning_rate": 1.373134328358209e-05, "loss": 0.1039, "step": 1450 }, { "epoch": 5.214285714285714, "grad_norm": 0.24316276609897614, "learning_rate": 1.3134328358208955e-05, "loss": 0.1308, "step": 1460 }, { "epoch": 5.25, "grad_norm": 0.43592455983161926, "learning_rate": 1.253731343283582e-05, "loss": 0.1185, "step": 1470 }, { "epoch": 5.285714285714286, "grad_norm": 0.23961405456066132, "learning_rate": 1.1940298507462686e-05, "loss": 0.1278, "step": 1480 }, { "epoch": 5.321428571428571, "grad_norm": 0.3024062514305115, "learning_rate": 1.1343283582089553e-05, "loss": 0.1082, "step": 1490 }, { "epoch": 5.357142857142857, "grad_norm": 0.22534772753715515, "learning_rate": 1.0746268656716418e-05, "loss": 0.1055, "step": 1500 }, { "epoch": 5.392857142857143, "grad_norm": 0.27198487520217896, "learning_rate": 1.0149253731343284e-05, "loss": 0.1244, "step": 1510 }, { "epoch": 5.428571428571429, "grad_norm": 0.3203181326389313, "learning_rate": 9.552238805970149e-06, "loss": 0.152, "step": 1520 }, { "epoch": 5.464285714285714, "grad_norm": 0.26531946659088135, "learning_rate": 8.955223880597016e-06, "loss": 0.1159, "step": 1530 }, { "epoch": 5.5, "grad_norm": 0.3129872977733612, "learning_rate": 8.358208955223882e-06, "loss": 0.0963, "step": 1540 }, { "epoch": 5.535714285714286, "grad_norm": 0.25911077857017517, "learning_rate": 7.761194029850747e-06, "loss": 0.1187, "step": 1550 }, { "epoch": 5.571428571428571, "grad_norm": 0.2714295983314514, "learning_rate": 7.164179104477613e-06, "loss": 0.1168, "step": 1560 }, { "epoch": 5.607142857142857, "grad_norm": 0.3102964460849762, "learning_rate": 6.5671641791044775e-06, "loss": 0.1029, "step": 1570 }, { "epoch": 5.642857142857143, "grad_norm": 0.25025418400764465, "learning_rate": 5.970149253731343e-06, "loss": 
0.1193, "step": 1580 }, { "epoch": 5.678571428571429, "grad_norm": 0.30030593276023865, "learning_rate": 5.373134328358209e-06, "loss": 0.1278, "step": 1590 }, { "epoch": 5.714285714285714, "grad_norm": 0.2856218218803406, "learning_rate": 4.7761194029850745e-06, "loss": 0.1197, "step": 1600 }, { "epoch": 5.75, "grad_norm": 0.29614830017089844, "learning_rate": 4.179104477611941e-06, "loss": 0.1144, "step": 1610 }, { "epoch": 5.785714285714286, "grad_norm": 0.2907536029815674, "learning_rate": 3.5820895522388063e-06, "loss": 0.1026, "step": 1620 }, { "epoch": 5.821428571428571, "grad_norm": 0.29087764024734497, "learning_rate": 2.9850746268656716e-06, "loss": 0.1133, "step": 1630 }, { "epoch": 5.857142857142857, "grad_norm": 0.29417696595191956, "learning_rate": 2.3880597014925373e-06, "loss": 0.1063, "step": 1640 }, { "epoch": 5.892857142857143, "grad_norm": 0.2516196370124817, "learning_rate": 1.7910447761194032e-06, "loss": 0.1214, "step": 1650 }, { "epoch": 5.928571428571429, "grad_norm": 0.2914963364601135, "learning_rate": 1.1940298507462686e-06, "loss": 0.1352, "step": 1660 }, { "epoch": 5.964285714285714, "grad_norm": 0.28471362590789795, "learning_rate": 5.970149253731343e-07, "loss": 0.1096, "step": 1670 }, { "epoch": 6.0, "grad_norm": 0.26661351323127747, "learning_rate": 0.0, "loss": 0.108, "step": 1680 } ], "logging_steps": 10, "max_steps": 1680, "num_input_tokens_seen": 0, "num_train_epochs": 6, "save_steps": 500, "stateful_callbacks": { "TrainerControl": { "args": { "should_epoch_stop": false, "should_evaluate": false, "should_log": false, "should_save": true, "should_training_stop": true }, "attributes": {} } }, "total_flos": 3.58674567892992e+17, "train_batch_size": 5, "trial_name": null, "trial_params": null }