Training in progress, step 1100, checkpoint (commit 7753f62)
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.1554621848739495,
"eval_steps": 50,
"global_step": 1100,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.01050420168067227,
"grad_norm": 2.5582146644592285,
"learning_rate": 4.98249299719888e-05,
"loss": 1.6787,
"step": 10
},
{
"epoch": 0.02100840336134454,
"grad_norm": 0.9345070719718933,
"learning_rate": 4.96498599439776e-05,
"loss": 0.518,
"step": 20
},
{
"epoch": 0.031512605042016806,
"grad_norm": 1.6358414888381958,
"learning_rate": 4.947478991596639e-05,
"loss": 0.4604,
"step": 30
},
{
"epoch": 0.04201680672268908,
"grad_norm": 0.7778844237327576,
"learning_rate": 4.9299719887955186e-05,
"loss": 0.3771,
"step": 40
},
{
"epoch": 0.052521008403361345,
"grad_norm": 0.7006077766418457,
"learning_rate": 4.912464985994398e-05,
"loss": 0.3842,
"step": 50
},
{
"epoch": 0.052521008403361345,
"eval_loss": 0.42603224515914917,
"eval_runtime": 13.5673,
"eval_samples_per_second": 35.379,
"eval_steps_per_second": 2.211,
"step": 50
},
{
"epoch": 0.06302521008403361,
"grad_norm": 0.6415153741836548,
"learning_rate": 4.8949579831932775e-05,
"loss": 0.3399,
"step": 60
},
{
"epoch": 0.07352941176470588,
"grad_norm": 0.6030780076980591,
"learning_rate": 4.877450980392157e-05,
"loss": 0.3447,
"step": 70
},
{
"epoch": 0.08403361344537816,
"grad_norm": 0.688852071762085,
"learning_rate": 4.859943977591036e-05,
"loss": 0.3219,
"step": 80
},
{
"epoch": 0.09453781512605042,
"grad_norm": 0.6371557712554932,
"learning_rate": 4.8424369747899164e-05,
"loss": 0.3379,
"step": 90
},
{
"epoch": 0.10504201680672269,
"grad_norm": 0.7739270329475403,
"learning_rate": 4.824929971988796e-05,
"loss": 0.3177,
"step": 100
},
{
"epoch": 0.10504201680672269,
"eval_loss": 0.3801896274089813,
"eval_runtime": 13.6107,
"eval_samples_per_second": 35.266,
"eval_steps_per_second": 2.204,
"step": 100
},
{
"epoch": 0.11554621848739496,
"grad_norm": 0.649507462978363,
"learning_rate": 4.807422969187675e-05,
"loss": 0.3415,
"step": 110
},
{
"epoch": 0.12605042016806722,
"grad_norm": 0.594717264175415,
"learning_rate": 4.7899159663865554e-05,
"loss": 0.3325,
"step": 120
},
{
"epoch": 0.13655462184873948,
"grad_norm": 0.627918541431427,
"learning_rate": 4.772408963585435e-05,
"loss": 0.3222,
"step": 130
},
{
"epoch": 0.14705882352941177,
"grad_norm": 0.5384674668312073,
"learning_rate": 4.7549019607843135e-05,
"loss": 0.3426,
"step": 140
},
{
"epoch": 0.15756302521008403,
"grad_norm": 0.5673420429229736,
"learning_rate": 4.7373949579831936e-05,
"loss": 0.3061,
"step": 150
},
{
"epoch": 0.15756302521008403,
"eval_loss": 0.3653399348258972,
"eval_runtime": 13.5947,
"eval_samples_per_second": 35.308,
"eval_steps_per_second": 2.207,
"step": 150
},
{
"epoch": 0.16806722689075632,
"grad_norm": 0.6111018657684326,
"learning_rate": 4.719887955182073e-05,
"loss": 0.3271,
"step": 160
},
{
"epoch": 0.17857142857142858,
"grad_norm": 0.7422594428062439,
"learning_rate": 4.7023809523809525e-05,
"loss": 0.315,
"step": 170
},
{
"epoch": 0.18907563025210083,
"grad_norm": 0.7226534485816956,
"learning_rate": 4.684873949579832e-05,
"loss": 0.3031,
"step": 180
},
{
"epoch": 0.19957983193277312,
"grad_norm": 0.6302976012229919,
"learning_rate": 4.667366946778712e-05,
"loss": 0.3161,
"step": 190
},
{
"epoch": 0.21008403361344538,
"grad_norm": 0.6225076913833618,
"learning_rate": 4.6498599439775914e-05,
"loss": 0.3038,
"step": 200
},
{
"epoch": 0.21008403361344538,
"eval_loss": 0.35061606764793396,
"eval_runtime": 13.5616,
"eval_samples_per_second": 35.394,
"eval_steps_per_second": 2.212,
"step": 200
},
{
"epoch": 0.22058823529411764,
"grad_norm": 0.6001319885253906,
"learning_rate": 4.632352941176471e-05,
"loss": 0.3129,
"step": 210
},
{
"epoch": 0.23109243697478993,
"grad_norm": 0.5385990142822266,
"learning_rate": 4.61484593837535e-05,
"loss": 0.2991,
"step": 220
},
{
"epoch": 0.2415966386554622,
"grad_norm": 0.4513624906539917,
"learning_rate": 4.59733893557423e-05,
"loss": 0.2896,
"step": 230
},
{
"epoch": 0.25210084033613445,
"grad_norm": 0.6142160892486572,
"learning_rate": 4.579831932773109e-05,
"loss": 0.3059,
"step": 240
},
{
"epoch": 0.26260504201680673,
"grad_norm": 0.6714802384376526,
"learning_rate": 4.562324929971989e-05,
"loss": 0.2897,
"step": 250
},
{
"epoch": 0.26260504201680673,
"eval_loss": 0.3456435203552246,
"eval_runtime": 13.5552,
"eval_samples_per_second": 35.411,
"eval_steps_per_second": 2.213,
"step": 250
},
{
"epoch": 0.27310924369747897,
"grad_norm": 0.6518235206604004,
"learning_rate": 4.5448179271708687e-05,
"loss": 0.312,
"step": 260
},
{
"epoch": 0.28361344537815125,
"grad_norm": 0.6250632405281067,
"learning_rate": 4.527310924369748e-05,
"loss": 0.2959,
"step": 270
},
{
"epoch": 0.29411764705882354,
"grad_norm": 0.5683826804161072,
"learning_rate": 4.5098039215686275e-05,
"loss": 0.3027,
"step": 280
},
{
"epoch": 0.30462184873949577,
"grad_norm": 0.560312807559967,
"learning_rate": 4.4922969187675076e-05,
"loss": 0.3002,
"step": 290
},
{
"epoch": 0.31512605042016806,
"grad_norm": 0.66291743516922,
"learning_rate": 4.474789915966387e-05,
"loss": 0.2925,
"step": 300
},
{
"epoch": 0.31512605042016806,
"eval_loss": 0.3431606888771057,
"eval_runtime": 13.5629,
"eval_samples_per_second": 35.391,
"eval_steps_per_second": 2.212,
"step": 300
},
{
"epoch": 0.32563025210084034,
"grad_norm": 0.6478439569473267,
"learning_rate": 4.4572829131652665e-05,
"loss": 0.2893,
"step": 310
},
{
"epoch": 0.33613445378151263,
"grad_norm": 0.5832348465919495,
"learning_rate": 4.439775910364146e-05,
"loss": 0.2842,
"step": 320
},
{
"epoch": 0.34663865546218486,
"grad_norm": 0.525932252407074,
"learning_rate": 4.422268907563025e-05,
"loss": 0.2837,
"step": 330
},
{
"epoch": 0.35714285714285715,
"grad_norm": 0.5487508177757263,
"learning_rate": 4.404761904761905e-05,
"loss": 0.2706,
"step": 340
},
{
"epoch": 0.36764705882352944,
"grad_norm": 0.5392388701438904,
"learning_rate": 4.387254901960784e-05,
"loss": 0.2835,
"step": 350
},
{
"epoch": 0.36764705882352944,
"eval_loss": 0.33528536558151245,
"eval_runtime": 13.5508,
"eval_samples_per_second": 35.422,
"eval_steps_per_second": 2.214,
"step": 350
},
{
"epoch": 0.37815126050420167,
"grad_norm": 0.6706260442733765,
"learning_rate": 4.369747899159664e-05,
"loss": 0.2844,
"step": 360
},
{
"epoch": 0.38865546218487396,
"grad_norm": 0.6042625904083252,
"learning_rate": 4.352240896358544e-05,
"loss": 0.2758,
"step": 370
},
{
"epoch": 0.39915966386554624,
"grad_norm": 0.534008264541626,
"learning_rate": 4.334733893557423e-05,
"loss": 0.2918,
"step": 380
},
{
"epoch": 0.4096638655462185,
"grad_norm": 0.48162588477134705,
"learning_rate": 4.317226890756303e-05,
"loss": 0.273,
"step": 390
},
{
"epoch": 0.42016806722689076,
"grad_norm": 0.5669644474983215,
"learning_rate": 4.2997198879551826e-05,
"loss": 0.285,
"step": 400
},
{
"epoch": 0.42016806722689076,
"eval_loss": 0.3348632752895355,
"eval_runtime": 13.5507,
"eval_samples_per_second": 35.423,
"eval_steps_per_second": 2.214,
"step": 400
},
{
"epoch": 0.43067226890756305,
"grad_norm": 0.6257824897766113,
"learning_rate": 4.2822128851540614e-05,
"loss": 0.299,
"step": 410
},
{
"epoch": 0.4411764705882353,
"grad_norm": 0.5430576205253601,
"learning_rate": 4.2647058823529415e-05,
"loss": 0.2868,
"step": 420
},
{
"epoch": 0.45168067226890757,
"grad_norm": 0.5633955597877502,
"learning_rate": 4.247198879551821e-05,
"loss": 0.2589,
"step": 430
},
{
"epoch": 0.46218487394957986,
"grad_norm": 0.5294789671897888,
"learning_rate": 4.2296918767507e-05,
"loss": 0.2777,
"step": 440
},
{
"epoch": 0.4726890756302521,
"grad_norm": 0.5480856895446777,
"learning_rate": 4.21218487394958e-05,
"loss": 0.2704,
"step": 450
},
{
"epoch": 0.4726890756302521,
"eval_loss": 0.329515278339386,
"eval_runtime": 13.5423,
"eval_samples_per_second": 35.445,
"eval_steps_per_second": 2.215,
"step": 450
},
{
"epoch": 0.4831932773109244,
"grad_norm": 0.5051332116127014,
"learning_rate": 4.19467787114846e-05,
"loss": 0.2438,
"step": 460
},
{
"epoch": 0.49369747899159666,
"grad_norm": 0.6251511573791504,
"learning_rate": 4.177170868347339e-05,
"loss": 0.2748,
"step": 470
},
{
"epoch": 0.5042016806722689,
"grad_norm": 0.4729413092136383,
"learning_rate": 4.159663865546219e-05,
"loss": 0.2689,
"step": 480
},
{
"epoch": 0.5147058823529411,
"grad_norm": 0.5220003724098206,
"learning_rate": 4.142156862745099e-05,
"loss": 0.2899,
"step": 490
},
{
"epoch": 0.5252100840336135,
"grad_norm": 0.54283207654953,
"learning_rate": 4.1246498599439776e-05,
"loss": 0.272,
"step": 500
},
{
"epoch": 0.5252100840336135,
"eval_loss": 0.32714489102363586,
"eval_runtime": 13.5497,
"eval_samples_per_second": 35.425,
"eval_steps_per_second": 2.214,
"step": 500
},
{
"epoch": 0.5357142857142857,
"grad_norm": 0.5851682424545288,
"learning_rate": 4.107142857142857e-05,
"loss": 0.2691,
"step": 510
},
{
"epoch": 0.5462184873949579,
"grad_norm": 0.6026607751846313,
"learning_rate": 4.089635854341737e-05,
"loss": 0.2716,
"step": 520
},
{
"epoch": 0.5567226890756303,
"grad_norm": 0.522422730922699,
"learning_rate": 4.0721288515406165e-05,
"loss": 0.2774,
"step": 530
},
{
"epoch": 0.5672268907563025,
"grad_norm": 0.516901433467865,
"learning_rate": 4.054621848739496e-05,
"loss": 0.2726,
"step": 540
},
{
"epoch": 0.5777310924369747,
"grad_norm": 0.667030394077301,
"learning_rate": 4.0371148459383754e-05,
"loss": 0.2622,
"step": 550
},
{
"epoch": 0.5777310924369747,
"eval_loss": 0.3262839615345001,
"eval_runtime": 13.5448,
"eval_samples_per_second": 35.438,
"eval_steps_per_second": 2.215,
"step": 550
},
{
"epoch": 0.5882352941176471,
"grad_norm": 0.542658269405365,
"learning_rate": 4.0196078431372555e-05,
"loss": 0.2572,
"step": 560
},
{
"epoch": 0.5987394957983193,
"grad_norm": 0.5408573746681213,
"learning_rate": 4.002100840336135e-05,
"loss": 0.2636,
"step": 570
},
{
"epoch": 0.6092436974789915,
"grad_norm": 0.5691037774085999,
"learning_rate": 3.984593837535014e-05,
"loss": 0.268,
"step": 580
},
{
"epoch": 0.6197478991596639,
"grad_norm": 0.5530794858932495,
"learning_rate": 3.967086834733894e-05,
"loss": 0.2583,
"step": 590
},
{
"epoch": 0.6302521008403361,
"grad_norm": 0.546229362487793,
"learning_rate": 3.949579831932773e-05,
"loss": 0.2622,
"step": 600
},
{
"epoch": 0.6302521008403361,
"eval_loss": 0.3219989836215973,
"eval_runtime": 13.5524,
"eval_samples_per_second": 35.418,
"eval_steps_per_second": 2.214,
"step": 600
},
{
"epoch": 0.6407563025210085,
"grad_norm": 0.5098925232887268,
"learning_rate": 3.9320728291316526e-05,
"loss": 0.2553,
"step": 610
},
{
"epoch": 0.6512605042016807,
"grad_norm": 0.5201871991157532,
"learning_rate": 3.914565826330533e-05,
"loss": 0.2584,
"step": 620
},
{
"epoch": 0.6617647058823529,
"grad_norm": 0.47408100962638855,
"learning_rate": 3.897058823529412e-05,
"loss": 0.2686,
"step": 630
},
{
"epoch": 0.6722689075630253,
"grad_norm": 0.5591098666191101,
"learning_rate": 3.8795518207282915e-05,
"loss": 0.2772,
"step": 640
},
{
"epoch": 0.6827731092436975,
"grad_norm": 0.5344163179397583,
"learning_rate": 3.862044817927171e-05,
"loss": 0.263,
"step": 650
},
{
"epoch": 0.6827731092436975,
"eval_loss": 0.31990015506744385,
"eval_runtime": 13.5501,
"eval_samples_per_second": 35.424,
"eval_steps_per_second": 2.214,
"step": 650
},
{
"epoch": 0.6932773109243697,
"grad_norm": 0.6538853049278259,
"learning_rate": 3.844537815126051e-05,
"loss": 0.2605,
"step": 660
},
{
"epoch": 0.7037815126050421,
"grad_norm": 0.43679994344711304,
"learning_rate": 3.82703081232493e-05,
"loss": 0.2486,
"step": 670
},
{
"epoch": 0.7142857142857143,
"grad_norm": 0.5602915287017822,
"learning_rate": 3.809523809523809e-05,
"loss": 0.2359,
"step": 680
},
{
"epoch": 0.7247899159663865,
"grad_norm": 0.5354353189468384,
"learning_rate": 3.792016806722689e-05,
"loss": 0.2541,
"step": 690
},
{
"epoch": 0.7352941176470589,
"grad_norm": 0.5954485535621643,
"learning_rate": 3.774509803921569e-05,
"loss": 0.2649,
"step": 700
},
{
"epoch": 0.7352941176470589,
"eval_loss": 0.3204093873500824,
"eval_runtime": 13.5523,
"eval_samples_per_second": 35.418,
"eval_steps_per_second": 2.214,
"step": 700
},
{
"epoch": 0.7457983193277311,
"grad_norm": 0.6093761324882507,
"learning_rate": 3.757002801120448e-05,
"loss": 0.2456,
"step": 710
},
{
"epoch": 0.7563025210084033,
"grad_norm": 0.4796586334705353,
"learning_rate": 3.739495798319328e-05,
"loss": 0.253,
"step": 720
},
{
"epoch": 0.7668067226890757,
"grad_norm": 0.5846813917160034,
"learning_rate": 3.721988795518208e-05,
"loss": 0.2442,
"step": 730
},
{
"epoch": 0.7773109243697479,
"grad_norm": 0.4811939597129822,
"learning_rate": 3.704481792717087e-05,
"loss": 0.2522,
"step": 740
},
{
"epoch": 0.7878151260504201,
"grad_norm": 0.5718042850494385,
"learning_rate": 3.6869747899159665e-05,
"loss": 0.2562,
"step": 750
},
{
"epoch": 0.7878151260504201,
"eval_loss": 0.32099905610084534,
"eval_runtime": 13.5408,
"eval_samples_per_second": 35.448,
"eval_steps_per_second": 2.216,
"step": 750
},
{
"epoch": 0.7983193277310925,
"grad_norm": 0.5630698204040527,
"learning_rate": 3.669467787114846e-05,
"loss": 0.2476,
"step": 760
},
{
"epoch": 0.8088235294117647,
"grad_norm": 0.6513442397117615,
"learning_rate": 3.6519607843137254e-05,
"loss": 0.2544,
"step": 770
},
{
"epoch": 0.819327731092437,
"grad_norm": 0.6139647960662842,
"learning_rate": 3.634453781512605e-05,
"loss": 0.258,
"step": 780
},
{
"epoch": 0.8298319327731093,
"grad_norm": 0.5916554927825928,
"learning_rate": 3.616946778711485e-05,
"loss": 0.2415,
"step": 790
},
{
"epoch": 0.8403361344537815,
"grad_norm": 0.5163634419441223,
"learning_rate": 3.5994397759103643e-05,
"loss": 0.252,
"step": 800
},
{
"epoch": 0.8403361344537815,
"eval_loss": 0.3215568959712982,
"eval_runtime": 13.5293,
"eval_samples_per_second": 35.479,
"eval_steps_per_second": 2.217,
"step": 800
},
{
"epoch": 0.8508403361344538,
"grad_norm": 0.5768859386444092,
"learning_rate": 3.581932773109244e-05,
"loss": 0.2421,
"step": 810
},
{
"epoch": 0.8613445378151261,
"grad_norm": 0.6197952032089233,
"learning_rate": 3.564425770308123e-05,
"loss": 0.2672,
"step": 820
},
{
"epoch": 0.8718487394957983,
"grad_norm": 0.5396980047225952,
"learning_rate": 3.546918767507003e-05,
"loss": 0.2393,
"step": 830
},
{
"epoch": 0.8823529411764706,
"grad_norm": 0.5783377885818481,
"learning_rate": 3.529411764705883e-05,
"loss": 0.2374,
"step": 840
},
{
"epoch": 0.8928571428571429,
"grad_norm": 0.5808666944503784,
"learning_rate": 3.511904761904762e-05,
"loss": 0.2405,
"step": 850
},
{
"epoch": 0.8928571428571429,
"eval_loss": 0.3207303583621979,
"eval_runtime": 13.5299,
"eval_samples_per_second": 35.477,
"eval_steps_per_second": 2.217,
"step": 850
},
{
"epoch": 0.9033613445378151,
"grad_norm": 0.4931146204471588,
"learning_rate": 3.4943977591036416e-05,
"loss": 0.2382,
"step": 860
},
{
"epoch": 0.9138655462184874,
"grad_norm": 0.6456460952758789,
"learning_rate": 3.476890756302521e-05,
"loss": 0.2416,
"step": 870
},
{
"epoch": 0.9243697478991597,
"grad_norm": 0.5459381937980652,
"learning_rate": 3.4593837535014004e-05,
"loss": 0.2611,
"step": 880
},
{
"epoch": 0.9348739495798319,
"grad_norm": 0.5317162275314331,
"learning_rate": 3.4418767507002805e-05,
"loss": 0.2383,
"step": 890
},
{
"epoch": 0.9453781512605042,
"grad_norm": 0.5790566205978394,
"learning_rate": 3.42436974789916e-05,
"loss": 0.2455,
"step": 900
},
{
"epoch": 0.9453781512605042,
"eval_loss": 0.31993839144706726,
"eval_runtime": 13.5323,
"eval_samples_per_second": 35.471,
"eval_steps_per_second": 2.217,
"step": 900
},
{
"epoch": 0.9558823529411765,
"grad_norm": 0.5805277228355408,
"learning_rate": 3.4068627450980394e-05,
"loss": 0.2393,
"step": 910
},
{
"epoch": 0.9663865546218487,
"grad_norm": 0.6085871458053589,
"learning_rate": 3.389355742296919e-05,
"loss": 0.2505,
"step": 920
},
{
"epoch": 0.976890756302521,
"grad_norm": 0.6251375079154968,
"learning_rate": 3.371848739495799e-05,
"loss": 0.2378,
"step": 930
},
{
"epoch": 0.9873949579831933,
"grad_norm": 0.6169071197509766,
"learning_rate": 3.3543417366946776e-05,
"loss": 0.239,
"step": 940
},
{
"epoch": 0.9978991596638656,
"grad_norm": 0.5361204147338867,
"learning_rate": 3.336834733893557e-05,
"loss": 0.2484,
"step": 950
},
{
"epoch": 0.9978991596638656,
"eval_loss": 0.31845951080322266,
"eval_runtime": 13.5483,
"eval_samples_per_second": 35.429,
"eval_steps_per_second": 2.214,
"step": 950
},
{
"epoch": 1.0084033613445378,
"grad_norm": 0.47690704464912415,
"learning_rate": 3.319327731092437e-05,
"loss": 0.2006,
"step": 960
},
{
"epoch": 1.01890756302521,
"grad_norm": 0.7460448741912842,
"learning_rate": 3.3018207282913166e-05,
"loss": 0.1983,
"step": 970
},
{
"epoch": 1.0294117647058822,
"grad_norm": 0.5729458332061768,
"learning_rate": 3.284313725490196e-05,
"loss": 0.1989,
"step": 980
},
{
"epoch": 1.0399159663865547,
"grad_norm": 0.5524929761886597,
"learning_rate": 3.266806722689076e-05,
"loss": 0.2039,
"step": 990
},
{
"epoch": 1.050420168067227,
"grad_norm": 0.6426274180412292,
"learning_rate": 3.2492997198879555e-05,
"loss": 0.2184,
"step": 1000
},
{
"epoch": 1.050420168067227,
"eval_loss": 0.3305407762527466,
"eval_runtime": 13.5592,
"eval_samples_per_second": 35.4,
"eval_steps_per_second": 2.213,
"step": 1000
},
{
"epoch": 1.0609243697478992,
"grad_norm": 0.4944634437561035,
"learning_rate": 3.231792717086835e-05,
"loss": 0.2042,
"step": 1010
},
{
"epoch": 1.0714285714285714,
"grad_norm": 0.5576530694961548,
"learning_rate": 3.2142857142857144e-05,
"loss": 0.2029,
"step": 1020
},
{
"epoch": 1.0819327731092436,
"grad_norm": 0.674849271774292,
"learning_rate": 3.196778711484594e-05,
"loss": 0.2026,
"step": 1030
},
{
"epoch": 1.092436974789916,
"grad_norm": 0.5424471497535706,
"learning_rate": 3.179271708683473e-05,
"loss": 0.2027,
"step": 1040
},
{
"epoch": 1.1029411764705883,
"grad_norm": 0.6491550207138062,
"learning_rate": 3.161764705882353e-05,
"loss": 0.2029,
"step": 1050
},
{
"epoch": 1.1029411764705883,
"eval_loss": 0.33292290568351746,
"eval_runtime": 13.5611,
"eval_samples_per_second": 35.395,
"eval_steps_per_second": 2.212,
"step": 1050
},
{
"epoch": 1.1134453781512605,
"grad_norm": 0.6113711595535278,
"learning_rate": 3.144257703081233e-05,
"loss": 0.2,
"step": 1060
},
{
"epoch": 1.1239495798319328,
"grad_norm": 0.5068053603172302,
"learning_rate": 3.126750700280112e-05,
"loss": 0.1903,
"step": 1070
},
{
"epoch": 1.134453781512605,
"grad_norm": 0.6518192291259766,
"learning_rate": 3.1092436974789916e-05,
"loss": 0.2006,
"step": 1080
},
{
"epoch": 1.1449579831932772,
"grad_norm": 0.6932762861251831,
"learning_rate": 3.091736694677872e-05,
"loss": 0.2051,
"step": 1090
},
{
"epoch": 1.1554621848739495,
"grad_norm": 0.5372537970542908,
"learning_rate": 3.074229691876751e-05,
"loss": 0.2037,
"step": 1100
},
{
"epoch": 1.1554621848739495,
"eval_loss": 0.33518460392951965,
"eval_runtime": 13.5628,
"eval_samples_per_second": 35.391,
"eval_steps_per_second": 2.212,
"step": 1100
}
],
"logging_steps": 10,
"max_steps": 2856,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 100,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 1.3173860578020557e+17,
"train_batch_size": 16,
"trial_name": null,
"trial_params": null
}
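
The checkpoint state above follows the Hugging Face Transformers `trainer_state.json` layout: a `log_history` list of training records (with `loss`, `grad_norm`, `learning_rate`) interleaved with evaluation records (with `eval_loss` and throughput fields), plus run-level fields such as `global_step`, `max_steps`, and `train_batch_size`. A minimal sketch of how this log could be inspected is below; it assumes the JSON has been saved locally as `trainer_state.json` (the path is an assumption, not part of the file) and uses only the Python standard library.

```python
import json

# Assumed local path; point this at the actual checkpoint directory.
STATE_PATH = "trainer_state.json"

with open(STATE_PATH, "r", encoding="utf-8") as f:
    state = json.load(f)

# Each log_history entry is either a training record (has "loss")
# or an evaluation record (has "eval_loss"); both carry "step".
train_records = [r for r in state["log_history"] if "loss" in r]
eval_records = [r for r in state["log_history"] if "eval_loss" in r]

print(f"global_step={state['global_step']}, epoch={state['epoch']:.4f}")
print(f"{len(train_records)} training records, {len(eval_records)} eval records")

# best_metric is null in this state, so recover the best eval loss manually.
best = min(eval_records, key=lambda r: r["eval_loss"])
print(f"lowest eval_loss so far: {best['eval_loss']:.4f} at step {best['step']}")

# Print the evaluation curve.
for r in eval_records:
    print(f"step {r['step']:>5}: eval_loss={r['eval_loss']:.4f}")
```

On the data above, this reports the lowest eval_loss of roughly 0.3185 at step 950 and makes the slight eval-loss rise after epoch 1 (steps 1000 through 1100) easy to see.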