Training in progress, step 200, checkpoint
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.2944559466298597,
"eval_steps": 50,
"global_step": 200,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0014722797331492984,
"grad_norm": 0.26644960045814514,
"learning_rate": 7.499999999999999e-06,
"loss": 0.296,
"step": 1
},
{
"epoch": 0.0014722797331492984,
"eval_loss": 0.4772031009197235,
"eval_runtime": 847.2862,
"eval_samples_per_second": 2.7,
"eval_steps_per_second": 1.35,
"step": 1
},
{
"epoch": 0.0029445594662985968,
"grad_norm": 0.4351513385772705,
"learning_rate": 1.4999999999999999e-05,
"loss": 0.408,
"step": 2
},
{
"epoch": 0.004416839199447895,
"grad_norm": 0.3042258024215698,
"learning_rate": 2.2499999999999998e-05,
"loss": 0.393,
"step": 3
},
{
"epoch": 0.0058891189325971935,
"grad_norm": 0.3164433538913727,
"learning_rate": 2.9999999999999997e-05,
"loss": 0.409,
"step": 4
},
{
"epoch": 0.007361398665746492,
"grad_norm": 0.37835752964019775,
"learning_rate": 3.75e-05,
"loss": 0.2635,
"step": 5
},
{
"epoch": 0.00883367839889579,
"grad_norm": 0.22859500348567963,
"learning_rate": 4.4999999999999996e-05,
"loss": 0.2615,
"step": 6
},
{
"epoch": 0.010305958132045089,
"grad_norm": 0.37722575664520264,
"learning_rate": 5.2499999999999995e-05,
"loss": 0.296,
"step": 7
},
{
"epoch": 0.011778237865194387,
"grad_norm": 0.8506933450698853,
"learning_rate": 5.9999999999999995e-05,
"loss": 0.2655,
"step": 8
},
{
"epoch": 0.013250517598343685,
"grad_norm": 0.45279720425605774,
"learning_rate": 6.75e-05,
"loss": 0.259,
"step": 9
},
{
"epoch": 0.014722797331492983,
"grad_norm": 0.18526464700698853,
"learning_rate": 7.5e-05,
"loss": 0.2053,
"step": 10
},
{
"epoch": 0.016195077064642283,
"grad_norm": 0.20497167110443115,
"learning_rate": 8.25e-05,
"loss": 0.1554,
"step": 11
},
{
"epoch": 0.01766735679779158,
"grad_norm": 0.14627239108085632,
"learning_rate": 8.999999999999999e-05,
"loss": 0.1306,
"step": 12
},
{
"epoch": 0.01913963653094088,
"grad_norm": 0.240932434797287,
"learning_rate": 9.75e-05,
"loss": 0.1564,
"step": 13
},
{
"epoch": 0.020611916264090178,
"grad_norm": 0.3040555715560913,
"learning_rate": 0.00010499999999999999,
"loss": 0.1677,
"step": 14
},
{
"epoch": 0.022084195997239476,
"grad_norm": 0.4959651231765747,
"learning_rate": 0.0001125,
"loss": 0.3867,
"step": 15
},
{
"epoch": 0.023556475730388774,
"grad_norm": 0.20291835069656372,
"learning_rate": 0.00011999999999999999,
"loss": 0.2207,
"step": 16
},
{
"epoch": 0.025028755463538072,
"grad_norm": 0.16518566012382507,
"learning_rate": 0.00012749999999999998,
"loss": 0.1309,
"step": 17
},
{
"epoch": 0.02650103519668737,
"grad_norm": 0.18677189946174622,
"learning_rate": 0.000135,
"loss": 0.181,
"step": 18
},
{
"epoch": 0.02797331492983667,
"grad_norm": 0.1576640009880066,
"learning_rate": 0.0001425,
"loss": 0.1562,
"step": 19
},
{
"epoch": 0.029445594662985967,
"grad_norm": 0.19666102528572083,
"learning_rate": 0.00015,
"loss": 0.2439,
"step": 20
},
{
"epoch": 0.030917874396135265,
"grad_norm": 0.12512515485286713,
"learning_rate": 0.00014998857713672935,
"loss": 0.144,
"step": 21
},
{
"epoch": 0.03239015412928457,
"grad_norm": 0.1219751164317131,
"learning_rate": 0.00014995431202643217,
"loss": 0.1047,
"step": 22
},
{
"epoch": 0.033862433862433865,
"grad_norm": 0.1390693038702011,
"learning_rate": 0.000149897215106593,
"loss": 0.1022,
"step": 23
},
{
"epoch": 0.03533471359558316,
"grad_norm": 0.19809921085834503,
"learning_rate": 0.0001498173037694868,
"loss": 0.24,
"step": 24
},
{
"epoch": 0.03680699332873246,
"grad_norm": 0.17452572286128998,
"learning_rate": 0.0001497146023568809,
"loss": 0.2497,
"step": 25
},
{
"epoch": 0.03827927306188176,
"grad_norm": 0.10941721498966217,
"learning_rate": 0.00014958914215262048,
"loss": 0.0914,
"step": 26
},
{
"epoch": 0.03975155279503106,
"grad_norm": 0.12793776392936707,
"learning_rate": 0.00014944096137309914,
"loss": 0.114,
"step": 27
},
{
"epoch": 0.041223832528180356,
"grad_norm": 0.14925755560398102,
"learning_rate": 0.00014927010515561776,
"loss": 0.1931,
"step": 28
},
{
"epoch": 0.042696112261329654,
"grad_norm": 0.12749770283699036,
"learning_rate": 0.00014907662554463532,
"loss": 0.1342,
"step": 29
},
{
"epoch": 0.04416839199447895,
"grad_norm": 0.12666098773479462,
"learning_rate": 0.0001488605814759156,
"loss": 0.1404,
"step": 30
},
{
"epoch": 0.04564067172762825,
"grad_norm": 0.12451935559511185,
"learning_rate": 0.00014862203875857477,
"loss": 0.1297,
"step": 31
},
{
"epoch": 0.04711295146077755,
"grad_norm": 0.12354013323783875,
"learning_rate": 0.0001483610700550354,
"loss": 0.0667,
"step": 32
},
{
"epoch": 0.048585231193926846,
"grad_norm": 0.11861127614974976,
"learning_rate": 0.00014807775485889264,
"loss": 0.1057,
"step": 33
},
{
"epoch": 0.050057510927076145,
"grad_norm": 0.11489235609769821,
"learning_rate": 0.0001477721794706997,
"loss": 0.0717,
"step": 34
},
{
"epoch": 0.05152979066022544,
"grad_norm": 0.15129899978637695,
"learning_rate": 0.0001474444369716801,
"loss": 0.1103,
"step": 35
},
{
"epoch": 0.05300207039337474,
"grad_norm": 0.14446967840194702,
"learning_rate": 0.0001470946271953739,
"loss": 0.1674,
"step": 36
},
{
"epoch": 0.05447435012652404,
"grad_norm": 0.10094312578439713,
"learning_rate": 0.00014672285669722765,
"loss": 0.0696,
"step": 37
},
{
"epoch": 0.05594662985967334,
"grad_norm": 0.17120350897312164,
"learning_rate": 0.00014632923872213652,
"loss": 0.2139,
"step": 38
},
{
"epoch": 0.057418909592822635,
"grad_norm": 0.14146435260772705,
"learning_rate": 0.00014591389316994876,
"loss": 0.0925,
"step": 39
},
{
"epoch": 0.058891189325971934,
"grad_norm": 0.14251448214054108,
"learning_rate": 0.0001454769465589431,
"loss": 0.1002,
"step": 40
},
{
"epoch": 0.06036346905912123,
"grad_norm": 0.07004090398550034,
"learning_rate": 0.00014501853198729012,
"loss": 0.0538,
"step": 41
},
{
"epoch": 0.06183574879227053,
"grad_norm": 0.14318852126598358,
"learning_rate": 0.00014453878909250904,
"loss": 0.1316,
"step": 42
},
{
"epoch": 0.06330802852541983,
"grad_norm": 0.10623105615377426,
"learning_rate": 0.00014403786400893302,
"loss": 0.0866,
"step": 43
},
{
"epoch": 0.06478030825856913,
"grad_norm": 0.10893028974533081,
"learning_rate": 0.00014351590932319504,
"loss": 0.0518,
"step": 44
},
{
"epoch": 0.06625258799171843,
"grad_norm": 0.1411529928445816,
"learning_rate": 0.00014297308402774875,
"loss": 0.1357,
"step": 45
},
{
"epoch": 0.06772486772486773,
"grad_norm": 0.10105417668819427,
"learning_rate": 0.0001424095534724375,
"loss": 0.0654,
"step": 46
},
{
"epoch": 0.06919714745801703,
"grad_norm": 0.14420634508132935,
"learning_rate": 0.00014182548931412757,
"loss": 0.0935,
"step": 47
},
{
"epoch": 0.07066942719116633,
"grad_norm": 0.12569449841976166,
"learning_rate": 0.0001412210694644195,
"loss": 0.0848,
"step": 48
},
{
"epoch": 0.07214170692431562,
"grad_norm": 0.09209802001714706,
"learning_rate": 0.00014059647803545467,
"loss": 0.0473,
"step": 49
},
{
"epoch": 0.07361398665746492,
"grad_norm": 0.12560804188251495,
"learning_rate": 0.0001399519052838329,
"loss": 0.0785,
"step": 50
},
{
"epoch": 0.07361398665746492,
"eval_loss": 0.08997488021850586,
"eval_runtime": 784.5444,
"eval_samples_per_second": 2.916,
"eval_steps_per_second": 1.458,
"step": 50
},
{
"epoch": 0.07508626639061422,
"grad_norm": 0.15386323630809784,
"learning_rate": 0.00013928754755265842,
"loss": 0.1427,
"step": 51
},
{
"epoch": 0.07655854612376352,
"grad_norm": 0.12584620714187622,
"learning_rate": 0.00013860360721173193,
"loss": 0.0863,
"step": 52
},
{
"epoch": 0.07803082585691282,
"grad_norm": 0.11394777148962021,
"learning_rate": 0.0001379002925959068,
"loss": 0.0691,
"step": 53
},
{
"epoch": 0.07950310559006211,
"grad_norm": 0.10938312113285065,
"learning_rate": 0.0001371778179416281,
"loss": 0.0826,
"step": 54
},
{
"epoch": 0.08097538532321141,
"grad_norm": 0.12558647990226746,
"learning_rate": 0.00013643640332167438,
"loss": 0.0766,
"step": 55
},
{
"epoch": 0.08244766505636071,
"grad_norm": 0.10897130519151688,
"learning_rate": 0.00013567627457812106,
"loss": 0.062,
"step": 56
},
{
"epoch": 0.08391994478951001,
"grad_norm": 0.09030541032552719,
"learning_rate": 0.00013489766325354695,
"loss": 0.0393,
"step": 57
},
{
"epoch": 0.08539222452265931,
"grad_norm": 0.1143849790096283,
"learning_rate": 0.00013410080652050412,
"loss": 0.1219,
"step": 58
},
{
"epoch": 0.0868645042558086,
"grad_norm": 0.1283547431230545,
"learning_rate": 0.0001332859471092728,
"loss": 0.1195,
"step": 59
},
{
"epoch": 0.0883367839889579,
"grad_norm": 0.11828132718801498,
"learning_rate": 0.00013245333323392333,
"loss": 0.0925,
"step": 60
},
{
"epoch": 0.0898090637221072,
"grad_norm": 0.10878538340330124,
"learning_rate": 0.0001316032185167079,
"loss": 0.0542,
"step": 61
},
{
"epoch": 0.0912813434552565,
"grad_norm": 0.1320784091949463,
"learning_rate": 0.00013073586191080457,
"loss": 0.1005,
"step": 62
},
{
"epoch": 0.0927536231884058,
"grad_norm": 0.10936389863491058,
"learning_rate": 0.00012985152762143778,
"loss": 0.0526,
"step": 63
},
{
"epoch": 0.0942259029215551,
"grad_norm": 0.13538584113121033,
"learning_rate": 0.00012895048502539882,
"loss": 0.0956,
"step": 64
},
{
"epoch": 0.0956981826547044,
"grad_norm": 0.13087764382362366,
"learning_rate": 0.00012803300858899104,
"loss": 0.0795,
"step": 65
},
{
"epoch": 0.09717046238785369,
"grad_norm": 0.1554757058620453,
"learning_rate": 0.0001270993777844248,
"loss": 0.1455,
"step": 66
},
{
"epoch": 0.09864274212100299,
"grad_norm": 0.12131679803133011,
"learning_rate": 0.0001261498770046874,
"loss": 0.0625,
"step": 67
},
{
"epoch": 0.10011502185415229,
"grad_norm": 0.11470583826303482,
"learning_rate": 0.00012518479547691435,
"loss": 0.0903,
"step": 68
},
{
"epoch": 0.10158730158730159,
"grad_norm": 0.1023801937699318,
"learning_rate": 0.00012420442717428804,
"loss": 0.0845,
"step": 69
},
{
"epoch": 0.10305958132045089,
"grad_norm": 0.10249310731887817,
"learning_rate": 0.00012320907072649044,
"loss": 0.0539,
"step": 70
},
{
"epoch": 0.10453186105360018,
"grad_norm": 0.10111914575099945,
"learning_rate": 0.0001221990293287378,
"loss": 0.0424,
"step": 71
},
{
"epoch": 0.10600414078674948,
"grad_norm": 0.16136892139911652,
"learning_rate": 0.00012117461064942435,
"loss": 0.1277,
"step": 72
},
{
"epoch": 0.10747642051989878,
"grad_norm": 0.13022761046886444,
"learning_rate": 0.00012013612673640363,
"loss": 0.118,
"step": 73
},
{
"epoch": 0.10894870025304808,
"grad_norm": 0.10115568339824677,
"learning_rate": 0.00011908389392193547,
"loss": 0.0554,
"step": 74
},
{
"epoch": 0.11042097998619738,
"grad_norm": 0.1352306455373764,
"learning_rate": 0.00011801823272632844,
"loss": 0.0683,
"step": 75
},
{
"epoch": 0.11189325971934667,
"grad_norm": 0.11654029786586761,
"learning_rate": 0.00011693946776030599,
"loss": 0.0656,
"step": 76
},
{
"epoch": 0.11336553945249597,
"grad_norm": 0.1405310332775116,
"learning_rate": 0.00011584792762612703,
"loss": 0.0681,
"step": 77
},
{
"epoch": 0.11483781918564527,
"grad_norm": 0.19620081782341003,
"learning_rate": 0.00011474394481749035,
"loss": 0.1183,
"step": 78
},
{
"epoch": 0.11631009891879457,
"grad_norm": 0.09413562715053558,
"learning_rate": 0.00011362785561825406,
"loss": 0.0377,
"step": 79
},
{
"epoch": 0.11778237865194387,
"grad_norm": 0.10567747801542282,
"learning_rate": 0.0001125,
"loss": 0.0776,
"step": 80
},
{
"epoch": 0.11925465838509317,
"grad_norm": 0.15690375864505768,
"learning_rate": 0.00011136072151847529,
"loss": 0.0366,
"step": 81
},
{
"epoch": 0.12072693811824246,
"grad_norm": 0.09006724506616592,
"learning_rate": 0.00011021036720894179,
"loss": 0.0319,
"step": 82
},
{
"epoch": 0.12219921785139176,
"grad_norm": 0.1135464459657669,
"learning_rate": 0.00010904928748046599,
"loss": 0.0482,
"step": 83
},
{
"epoch": 0.12367149758454106,
"grad_norm": 0.09596288949251175,
"learning_rate": 0.0001078778360091808,
"loss": 0.0454,
"step": 84
},
{
"epoch": 0.12514377731769036,
"grad_norm": 0.17637494206428528,
"learning_rate": 0.00010669636963055245,
"loss": 0.1116,
"step": 85
},
{
"epoch": 0.12661605705083967,
"grad_norm": 0.1744721382856369,
"learning_rate": 0.00010550524823068502,
"loss": 0.0638,
"step": 86
},
{
"epoch": 0.12808833678398895,
"grad_norm": 0.13608935475349426,
"learning_rate": 0.00010430483463669551,
"loss": 0.0788,
"step": 87
},
{
"epoch": 0.12956061651713827,
"grad_norm": 0.12516197562217712,
"learning_rate": 0.0001030954945061934,
"loss": 0.0565,
"step": 88
},
{
"epoch": 0.13103289625028755,
"grad_norm": 0.09498213976621628,
"learning_rate": 0.0001018775962158975,
"loss": 0.0372,
"step": 89
},
{
"epoch": 0.13250517598343686,
"grad_norm": 0.09999972581863403,
"learning_rate": 0.00010065151074942516,
"loss": 0.0393,
"step": 90
},
{
"epoch": 0.13397745571658615,
"grad_norm": 0.09636418521404266,
"learning_rate": 9.941761158428674e-05,
"loss": 0.0314,
"step": 91
},
{
"epoch": 0.13544973544973546,
"grad_norm": 0.0872374102473259,
"learning_rate": 9.817627457812105e-05,
"loss": 0.0322,
"step": 92
},
{
"epoch": 0.13692201518288474,
"grad_norm": 0.11897428333759308,
"learning_rate": 9.692787785420525e-05,
"loss": 0.0726,
"step": 93
},
{
"epoch": 0.13839429491603406,
"grad_norm": 0.1489570438861847,
"learning_rate": 9.567280168627493e-05,
"loss": 0.0753,
"step": 94
},
{
"epoch": 0.13986657464918334,
"grad_norm": 0.1237846091389656,
"learning_rate": 9.441142838268905e-05,
"loss": 0.0304,
"step": 95
},
{
"epoch": 0.14133885438233265,
"grad_norm": 0.11417360603809357,
"learning_rate": 9.314414216997507e-05,
"loss": 0.0466,
"step": 96
},
{
"epoch": 0.14281113411548194,
"grad_norm": 0.13448922336101532,
"learning_rate": 9.187132907578987e-05,
"loss": 0.0676,
"step": 97
},
{
"epoch": 0.14428341384863125,
"grad_norm": 0.12376630306243896,
"learning_rate": 9.059337681133192e-05,
"loss": 0.0653,
"step": 98
},
{
"epoch": 0.14575569358178053,
"grad_norm": 0.13168717920780182,
"learning_rate": 8.931067465324085e-05,
"loss": 0.0414,
"step": 99
},
{
"epoch": 0.14722797331492984,
"grad_norm": 0.12176292389631271,
"learning_rate": 8.802361332501978e-05,
"loss": 0.0545,
"step": 100
},
{
"epoch": 0.14722797331492984,
"eval_loss": 0.05667497217655182,
"eval_runtime": 573.8343,
"eval_samples_per_second": 3.987,
"eval_steps_per_second": 1.994,
"step": 100
},
{
"epoch": 0.14870025304807913,
"grad_norm": 0.11033546179533005,
"learning_rate": 8.673258487801731e-05,
"loss": 0.0368,
"step": 101
},
{
"epoch": 0.15017253278122844,
"grad_norm": 0.14303916692733765,
"learning_rate": 8.54379825720049e-05,
"loss": 0.0527,
"step": 102
},
{
"epoch": 0.15164481251437772,
"grad_norm": 0.10324705392122269,
"learning_rate": 8.414020075538605e-05,
"loss": 0.0217,
"step": 103
},
{
"epoch": 0.15311709224752704,
"grad_norm": 0.11965189129114151,
"learning_rate": 8.2839634745074e-05,
"loss": 0.0518,
"step": 104
},
{
"epoch": 0.15458937198067632,
"grad_norm": 0.14544668793678284,
"learning_rate": 8.153668070607437e-05,
"loss": 0.0936,
"step": 105
},
{
"epoch": 0.15606165171382563,
"grad_norm": 0.11407126486301422,
"learning_rate": 8.023173553080938e-05,
"loss": 0.0279,
"step": 106
},
{
"epoch": 0.15753393144697492,
"grad_norm": 0.11697705090045929,
"learning_rate": 7.89251967182208e-05,
"loss": 0.044,
"step": 107
},
{
"epoch": 0.15900621118012423,
"grad_norm": 0.18119023740291595,
"learning_rate": 7.761746225268758e-05,
"loss": 0.1192,
"step": 108
},
{
"epoch": 0.1604784909132735,
"grad_norm": 0.19359427690505981,
"learning_rate": 7.630893048279627e-05,
"loss": 0.1415,
"step": 109
},
{
"epoch": 0.16195077064642283,
"grad_norm": 0.11051613837480545,
"learning_rate": 7.5e-05,
"loss": 0.0614,
"step": 110
},
{
"epoch": 0.1634230503795721,
"grad_norm": 0.11084026098251343,
"learning_rate": 7.369106951720373e-05,
"loss": 0.0506,
"step": 111
},
{
"epoch": 0.16489533011272142,
"grad_norm": 0.08058993518352509,
"learning_rate": 7.238253774731244e-05,
"loss": 0.0232,
"step": 112
},
{
"epoch": 0.1663676098458707,
"grad_norm": 0.11398748308420181,
"learning_rate": 7.10748032817792e-05,
"loss": 0.077,
"step": 113
},
{
"epoch": 0.16783988957902002,
"grad_norm": 0.10695286840200424,
"learning_rate": 6.976826446919059e-05,
"loss": 0.0586,
"step": 114
},
{
"epoch": 0.1693121693121693,
"grad_norm": 0.06323719769716263,
"learning_rate": 6.846331929392562e-05,
"loss": 0.0096,
"step": 115
},
{
"epoch": 0.17078444904531861,
"grad_norm": 0.09416350722312927,
"learning_rate": 6.7160365254926e-05,
"loss": 0.0552,
"step": 116
},
{
"epoch": 0.1722567287784679,
"grad_norm": 0.10175611078739166,
"learning_rate": 6.585979924461394e-05,
"loss": 0.0447,
"step": 117
},
{
"epoch": 0.1737290085116172,
"grad_norm": 0.0726943388581276,
"learning_rate": 6.45620174279951e-05,
"loss": 0.0183,
"step": 118
},
{
"epoch": 0.1752012882447665,
"grad_norm": 0.12314460426568985,
"learning_rate": 6.326741512198266e-05,
"loss": 0.0912,
"step": 119
},
{
"epoch": 0.1766735679779158,
"grad_norm": 0.11144654452800751,
"learning_rate": 6.197638667498022e-05,
"loss": 0.0336,
"step": 120
},
{
"epoch": 0.1781458477110651,
"grad_norm": 0.1129511296749115,
"learning_rate": 6.068932534675913e-05,
"loss": 0.0605,
"step": 121
},
{
"epoch": 0.1796181274442144,
"grad_norm": 0.10868009179830551,
"learning_rate": 5.9406623188668055e-05,
"loss": 0.0592,
"step": 122
},
{
"epoch": 0.1810904071773637,
"grad_norm": 0.11240071803331375,
"learning_rate": 5.812867092421013e-05,
"loss": 0.0422,
"step": 123
},
{
"epoch": 0.182562686910513,
"grad_norm": 0.08504848182201385,
"learning_rate": 5.685585783002493e-05,
"loss": 0.0281,
"step": 124
},
{
"epoch": 0.18403496664366228,
"grad_norm": 0.11616303771734238,
"learning_rate": 5.558857161731093e-05,
"loss": 0.0606,
"step": 125
},
{
"epoch": 0.1855072463768116,
"grad_norm": 0.09546354413032532,
"learning_rate": 5.4327198313725064e-05,
"loss": 0.0295,
"step": 126
},
{
"epoch": 0.18697952610996088,
"grad_norm": 0.16614162921905518,
"learning_rate": 5.307212214579474e-05,
"loss": 0.0679,
"step": 127
},
{
"epoch": 0.1884518058431102,
"grad_norm": 0.1870947629213333,
"learning_rate": 5.182372542187895e-05,
"loss": 0.1084,
"step": 128
},
{
"epoch": 0.18992408557625948,
"grad_norm": 0.12429474294185638,
"learning_rate": 5.058238841571326e-05,
"loss": 0.0768,
"step": 129
},
{
"epoch": 0.1913963653094088,
"grad_norm": 0.10866294801235199,
"learning_rate": 4.934848925057484e-05,
"loss": 0.0293,
"step": 130
},
{
"epoch": 0.19286864504255807,
"grad_norm": 0.09623893350362778,
"learning_rate": 4.812240378410248e-05,
"loss": 0.0279,
"step": 131
},
{
"epoch": 0.19434092477570739,
"grad_norm": 0.12153147906064987,
"learning_rate": 4.690450549380659e-05,
"loss": 0.062,
"step": 132
},
{
"epoch": 0.19581320450885667,
"grad_norm": 0.14355087280273438,
"learning_rate": 4.569516536330447e-05,
"loss": 0.0639,
"step": 133
},
{
"epoch": 0.19728548424200598,
"grad_norm": 0.13181179761886597,
"learning_rate": 4.449475176931499e-05,
"loss": 0.0764,
"step": 134
},
{
"epoch": 0.19875776397515527,
"grad_norm": 0.16418027877807617,
"learning_rate": 4.3303630369447554e-05,
"loss": 0.088,
"step": 135
},
{
"epoch": 0.20023004370830458,
"grad_norm": 0.10785653442144394,
"learning_rate": 4.212216399081918e-05,
"loss": 0.0341,
"step": 136
},
{
"epoch": 0.20170232344145386,
"grad_norm": 0.18119709193706512,
"learning_rate": 4.095071251953399e-05,
"loss": 0.0831,
"step": 137
},
{
"epoch": 0.20317460317460317,
"grad_norm": 0.09372559934854507,
"learning_rate": 3.978963279105821e-05,
"loss": 0.0261,
"step": 138
},
{
"epoch": 0.2046468829077525,
"grad_norm": 0.08984406292438507,
"learning_rate": 3.863927848152472e-05,
"loss": 0.0399,
"step": 139
},
{
"epoch": 0.20611916264090177,
"grad_norm": 0.0921633318066597,
"learning_rate": 3.750000000000001e-05,
"loss": 0.0189,
"step": 140
},
{
"epoch": 0.20759144237405108,
"grad_norm": 0.11086931824684143,
"learning_rate": 3.637214438174593e-05,
"loss": 0.0431,
"step": 141
},
{
"epoch": 0.20906372210720037,
"grad_norm": 0.1010395959019661,
"learning_rate": 3.525605518250964e-05,
"loss": 0.0696,
"step": 142
},
{
"epoch": 0.21053600184034968,
"grad_norm": 0.07738685607910156,
"learning_rate": 3.415207237387297e-05,
"loss": 0.0134,
"step": 143
},
{
"epoch": 0.21200828157349896,
"grad_norm": 0.08697827160358429,
"learning_rate": 3.3060532239693994e-05,
"loss": 0.0294,
"step": 144
},
{
"epoch": 0.21348056130664828,
"grad_norm": 0.13390277326107025,
"learning_rate": 3.198176727367156e-05,
"loss": 0.064,
"step": 145
},
{
"epoch": 0.21495284103979756,
"grad_norm": 0.12035319209098816,
"learning_rate": 3.091610607806452e-05,
"loss": 0.0378,
"step": 146
},
{
"epoch": 0.21642512077294687,
"grad_norm": 0.09978077560663223,
"learning_rate": 2.986387326359637e-05,
"loss": 0.0356,
"step": 147
},
{
"epoch": 0.21789740050609616,
"grad_norm": 0.09956356137990952,
"learning_rate": 2.8825389350575624e-05,
"loss": 0.0476,
"step": 148
},
{
"epoch": 0.21936968023924547,
"grad_norm": 0.09759137779474258,
"learning_rate": 2.78009706712622e-05,
"loss": 0.0383,
"step": 149
},
{
"epoch": 0.22084195997239475,
"grad_norm": 0.06408429890871048,
"learning_rate": 2.6790929273509545e-05,
"loss": 0.0172,
"step": 150
},
{
"epoch": 0.22084195997239475,
"eval_loss": 0.04416579380631447,
"eval_runtime": 1045.846,
"eval_samples_per_second": 2.188,
"eval_steps_per_second": 1.094,
"step": 150
},
{
"epoch": 0.22231423970554406,
"grad_norm": 0.07642810791730881,
"learning_rate": 2.579557282571196e-05,
"loss": 0.0281,
"step": 151
},
{
"epoch": 0.22378651943869335,
"grad_norm": 0.06974484771490097,
"learning_rate": 2.4815204523085654e-05,
"loss": 0.0204,
"step": 152
},
{
"epoch": 0.22525879917184266,
"grad_norm": 0.0952039286494255,
"learning_rate": 2.385012299531262e-05,
"loss": 0.0515,
"step": 153
},
{
"epoch": 0.22673107890499195,
"grad_norm": 0.10095140337944031,
"learning_rate": 2.2900622215575197e-05,
"loss": 0.0577,
"step": 154
},
{
"epoch": 0.22820335863814126,
"grad_norm": 0.09335016459226608,
"learning_rate": 2.1966991411008938e-05,
"loss": 0.0334,
"step": 155
},
{
"epoch": 0.22967563837129054,
"grad_norm": 0.14227519929409027,
"learning_rate": 2.1049514974601175e-05,
"loss": 0.078,
"step": 156
},
{
"epoch": 0.23114791810443985,
"grad_norm": 0.09830557554960251,
"learning_rate": 2.0148472378562215e-05,
"loss": 0.0481,
"step": 157
},
{
"epoch": 0.23262019783758914,
"grad_norm": 0.10622940212488174,
"learning_rate": 1.926413808919542e-05,
"loss": 0.0451,
"step": 158
},
{
"epoch": 0.23409247757073845,
"grad_norm": 0.12101616710424423,
"learning_rate": 1.8396781483292098e-05,
"loss": 0.0713,
"step": 159
},
{
"epoch": 0.23556475730388773,
"grad_norm": 0.08884954452514648,
"learning_rate": 1.7546666766076655e-05,
"loss": 0.0447,
"step": 160
},
{
"epoch": 0.23703703703703705,
"grad_norm": 0.08267998695373535,
"learning_rate": 1.671405289072718e-05,
"loss": 0.0157,
"step": 161
},
{
"epoch": 0.23850931677018633,
"grad_norm": 0.13504156470298767,
"learning_rate": 1.5899193479495857e-05,
"loss": 0.1002,
"step": 162
},
{
"epoch": 0.23998159650333564,
"grad_norm": 0.09033524990081787,
"learning_rate": 1.5102336746453053e-05,
"loss": 0.0555,
"step": 163
},
{
"epoch": 0.24145387623648493,
"grad_norm": 0.11912113428115845,
"learning_rate": 1.4323725421878949e-05,
"loss": 0.0801,
"step": 164
},
{
"epoch": 0.24292615596963424,
"grad_norm": 0.10221156477928162,
"learning_rate": 1.3563596678325606e-05,
"loss": 0.0453,
"step": 165
},
{
"epoch": 0.24439843570278352,
"grad_norm": 0.10165137052536011,
"learning_rate": 1.2822182058371878e-05,
"loss": 0.0465,
"step": 166
},
{
"epoch": 0.24587071543593284,
"grad_norm": 0.07042307406663895,
"learning_rate": 1.2099707404093203e-05,
"loss": 0.0156,
"step": 167
},
{
"epoch": 0.24734299516908212,
"grad_norm": 0.13063064217567444,
"learning_rate": 1.1396392788268052e-05,
"loss": 0.0635,
"step": 168
},
{
"epoch": 0.24881527490223143,
"grad_norm": 0.12741073966026306,
"learning_rate": 1.0712452447341582e-05,
"loss": 0.0275,
"step": 169
},
{
"epoch": 0.2502875546353807,
"grad_norm": 0.05055376887321472,
"learning_rate": 1.0048094716167095e-05,
"loss": 0.0101,
"step": 170
},
{
"epoch": 0.25175983436853,
"grad_norm": 0.11024050414562225,
"learning_rate": 9.40352196454532e-06,
"loss": 0.0496,
"step": 171
},
{
"epoch": 0.25323211410167934,
"grad_norm": 0.09290501475334167,
"learning_rate": 8.778930535580474e-06,
"loss": 0.042,
"step": 172
},
{
"epoch": 0.2547043938348286,
"grad_norm": 0.10324755311012268,
"learning_rate": 8.174510685872415e-06,
"loss": 0.0466,
"step": 173
},
{
"epoch": 0.2561766735679779,
"grad_norm": 0.1255979835987091,
"learning_rate": 7.5904465275624884e-06,
"loss": 0.0731,
"step": 174
},
{
"epoch": 0.2576489533011272,
"grad_norm": 0.14712050557136536,
"learning_rate": 7.026915972251254e-06,
"loss": 0.0501,
"step": 175
},
{
"epoch": 0.25912123303427653,
"grad_norm": 0.10622108727693558,
"learning_rate": 6.484090676804926e-06,
"loss": 0.0465,
"step": 176
},
{
"epoch": 0.2605935127674258,
"grad_norm": 0.08225951343774796,
"learning_rate": 5.962135991066971e-06,
"loss": 0.0322,
"step": 177
},
{
"epoch": 0.2620657925005751,
"grad_norm": 0.06552782654762268,
"learning_rate": 5.461210907490951e-06,
"loss": 0.0156,
"step": 178
},
{
"epoch": 0.2635380722337244,
"grad_norm": 0.09848985821008682,
"learning_rate": 4.981468012709877e-06,
"loss": 0.0319,
"step": 179
},
{
"epoch": 0.2650103519668737,
"grad_norm": 0.11006014794111252,
"learning_rate": 4.523053441056876e-06,
"loss": 0.0384,
"step": 180
},
{
"epoch": 0.266482631700023,
"grad_norm": 0.07918134331703186,
"learning_rate": 4.086106830051236e-06,
"loss": 0.0424,
"step": 181
},
{
"epoch": 0.2679549114331723,
"grad_norm": 0.043903008103370667,
"learning_rate": 3.670761277863485e-06,
"loss": 0.0097,
"step": 182
},
{
"epoch": 0.2694271911663216,
"grad_norm": 0.08103958517313004,
"learning_rate": 3.277143302772342e-06,
"loss": 0.049,
"step": 183
},
{
"epoch": 0.2708994708994709,
"grad_norm": 0.12223482131958008,
"learning_rate": 2.9053728046260825e-06,
"loss": 0.0317,
"step": 184
},
{
"epoch": 0.2723717506326202,
"grad_norm": 0.04852719232439995,
"learning_rate": 2.555563028319885e-06,
"loss": 0.0094,
"step": 185
},
{
"epoch": 0.2738440303657695,
"grad_norm": 0.0753386989235878,
"learning_rate": 2.227820529300264e-06,
"loss": 0.0332,
"step": 186
},
{
"epoch": 0.27531631009891877,
"grad_norm": 0.11057372391223907,
"learning_rate": 1.9222451411073645e-06,
"loss": 0.0736,
"step": 187
},
{
"epoch": 0.2767885898320681,
"grad_norm": 0.0846346840262413,
"learning_rate": 1.6389299449645733e-06,
"loss": 0.029,
"step": 188
},
{
"epoch": 0.2782608695652174,
"grad_norm": 0.1014692485332489,
"learning_rate": 1.3779612414252017e-06,
"loss": 0.0394,
"step": 189
},
{
"epoch": 0.2797331492983667,
"grad_norm": 0.1289111077785492,
"learning_rate": 1.1394185240843983e-06,
"loss": 0.0617,
"step": 190
},
{
"epoch": 0.28120542903151596,
"grad_norm": 0.11205513775348663,
"learning_rate": 9.233744553646754e-07,
"loss": 0.0493,
"step": 191
},
{
"epoch": 0.2826777087646653,
"grad_norm": 0.07097858935594559,
"learning_rate": 7.298948443822228e-07,
"loss": 0.0197,
"step": 192
},
{
"epoch": 0.2841499884978146,
"grad_norm": 0.0789109319448471,
"learning_rate": 5.590386269008512e-07,
"loss": 0.0231,
"step": 193
},
{
"epoch": 0.28562226823096387,
"grad_norm": 0.12673795223236084,
"learning_rate": 4.108578473795032e-07,
"loss": 0.0552,
"step": 194
},
{
"epoch": 0.28709454796411316,
"grad_norm": 0.10274315625429153,
"learning_rate": 2.8539764311908407e-07,
"loss": 0.0666,
"step": 195
},
{
"epoch": 0.2885668276972625,
"grad_norm": 0.10355023294687271,
"learning_rate": 1.8269623051318515e-07,
"loss": 0.0383,
"step": 196
},
{
"epoch": 0.2900391074304118,
"grad_norm": 0.0999678373336792,
"learning_rate": 1.027848934069625e-07,
"loss": 0.0521,
"step": 197
},
{
"epoch": 0.29151138716356106,
"grad_norm": 0.1064368337392807,
"learning_rate": 4.568797356781784e-08,
"loss": 0.0569,
"step": 198
},
{
"epoch": 0.29298366689671035,
"grad_norm": 0.09704666584730148,
"learning_rate": 1.142286327065478e-08,
"loss": 0.0554,
"step": 199
},
{
"epoch": 0.2944559466298597,
"grad_norm": 0.12633852660655975,
"learning_rate": 0.0,
"loss": 0.0479,
"step": 200
},
{
"epoch": 0.2944559466298597,
"eval_loss": 0.04169866442680359,
"eval_runtime": 1047.6995,
"eval_samples_per_second": 2.184,
"eval_steps_per_second": 1.092,
"step": 200
}
],
"logging_steps": 1,
"max_steps": 200,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 17,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 2.1106500717867172e+18,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}