{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.11533193973906149,
"eval_steps": 100,
"global_step": 400,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0002883298493476537,
"eval_loss": 0.8066914081573486,
"eval_runtime": 230.0868,
"eval_samples_per_second": 6.35,
"eval_steps_per_second": 3.177,
"step": 1
},
{
"epoch": 0.0014416492467382686,
"grad_norm": 2.8629841804504395,
"learning_rate": 1.6666666666666667e-05,
"loss": 3.7011,
"step": 5
},
{
"epoch": 0.0028832984934765373,
"grad_norm": 2.9777753353118896,
"learning_rate": 3.3333333333333335e-05,
"loss": 2.9463,
"step": 10
},
{
"epoch": 0.004324947740214806,
"grad_norm": 2.402409553527832,
"learning_rate": 5e-05,
"loss": 2.9867,
"step": 15
},
{
"epoch": 0.0057665969869530745,
"grad_norm": 5.7486252784729,
"learning_rate": 6.666666666666667e-05,
"loss": 2.6301,
"step": 20
},
{
"epoch": 0.007208246233691343,
"grad_norm": 2.579139232635498,
"learning_rate": 8.333333333333334e-05,
"loss": 2.6941,
"step": 25
},
{
"epoch": 0.008649895480429612,
"grad_norm": 2.965322256088257,
"learning_rate": 0.0001,
"loss": 2.5461,
"step": 30
},
{
"epoch": 0.01009154472716788,
"grad_norm": 2.260119915008545,
"learning_rate": 9.995494831023409e-05,
"loss": 2.8797,
"step": 35
},
{
"epoch": 0.011533193973906149,
"grad_norm": 2.610872745513916,
"learning_rate": 9.981987442712633e-05,
"loss": 2.6554,
"step": 40
},
{
"epoch": 0.012974843220644417,
"grad_norm": 2.4740097522735596,
"learning_rate": 9.959502176294383e-05,
"loss": 2.8629,
"step": 45
},
{
"epoch": 0.014416492467382686,
"grad_norm": 2.1658761501312256,
"learning_rate": 9.928079551738543e-05,
"loss": 2.5093,
"step": 50
},
{
"epoch": 0.015858141714120956,
"grad_norm": 2.2830867767333984,
"learning_rate": 9.887776194738432e-05,
"loss": 2.6503,
"step": 55
},
{
"epoch": 0.017299790960859224,
"grad_norm": 2.0611510276794434,
"learning_rate": 9.838664734667495e-05,
"loss": 2.7094,
"step": 60
},
{
"epoch": 0.018741440207597493,
"grad_norm": 2.929940700531006,
"learning_rate": 9.780833673696254e-05,
"loss": 2.7529,
"step": 65
},
{
"epoch": 0.02018308945433576,
"grad_norm": 2.0682194232940674,
"learning_rate": 9.714387227305422e-05,
"loss": 3.08,
"step": 70
},
{
"epoch": 0.02162473870107403,
"grad_norm": 2.8449878692626953,
"learning_rate": 9.639445136482548e-05,
"loss": 2.7717,
"step": 75
},
{
"epoch": 0.023066387947812298,
"grad_norm": 3.2146430015563965,
"learning_rate": 9.55614245194068e-05,
"loss": 2.6265,
"step": 80
},
{
"epoch": 0.024508037194550567,
"grad_norm": 2.071394443511963,
"learning_rate": 9.464629290747842e-05,
"loss": 2.795,
"step": 85
},
{
"epoch": 0.025949686441288835,
"grad_norm": 1.8428181409835815,
"learning_rate": 9.365070565805941e-05,
"loss": 2.4166,
"step": 90
},
{
"epoch": 0.027391335688027103,
"grad_norm": 2.301480770111084,
"learning_rate": 9.257645688666556e-05,
"loss": 2.5041,
"step": 95
},
{
"epoch": 0.028832984934765372,
"grad_norm": 2.6278109550476074,
"learning_rate": 9.142548246219212e-05,
"loss": 2.3521,
"step": 100
},
{
"epoch": 0.028832984934765372,
"eval_loss": 0.6489478945732117,
"eval_runtime": 232.2501,
"eval_samples_per_second": 6.291,
"eval_steps_per_second": 3.147,
"step": 100
},
{
"epoch": 0.03027463418150364,
"grad_norm": 3.011148452758789,
"learning_rate": 9.019985651834703e-05,
"loss": 2.8982,
"step": 105
},
{
"epoch": 0.03171628342824191,
"grad_norm": 2.338167428970337,
"learning_rate": 8.890178771592199e-05,
"loss": 2.7675,
"step": 110
},
{
"epoch": 0.03315793267498018,
"grad_norm": 1.683278203010559,
"learning_rate": 8.753361526263621e-05,
"loss": 2.0754,
"step": 115
},
{
"epoch": 0.03459958192171845,
"grad_norm": 2.0816845893859863,
"learning_rate": 8.609780469772623e-05,
"loss": 2.6878,
"step": 120
},
{
"epoch": 0.036041231168456714,
"grad_norm": 1.9449472427368164,
"learning_rate": 8.459694344887732e-05,
"loss": 2.5088,
"step": 125
},
{
"epoch": 0.037482880415194986,
"grad_norm": 3.2148945331573486,
"learning_rate": 8.303373616950408e-05,
"loss": 2.3728,
"step": 130
},
{
"epoch": 0.03892452966193325,
"grad_norm": 2.0440514087677,
"learning_rate": 8.141099986478212e-05,
"loss": 2.7105,
"step": 135
},
{
"epoch": 0.04036617890867152,
"grad_norm": 2.3115408420562744,
"learning_rate": 7.973165881521434e-05,
"loss": 2.7877,
"step": 140
},
{
"epoch": 0.04180782815540979,
"grad_norm": 2.4646363258361816,
"learning_rate": 7.799873930687978e-05,
"loss": 2.3042,
"step": 145
},
{
"epoch": 0.04324947740214806,
"grad_norm": 2.0529026985168457,
"learning_rate": 7.621536417786159e-05,
"loss": 2.1666,
"step": 150
},
{
"epoch": 0.044691126648886324,
"grad_norm": 2.708082914352417,
"learning_rate": 7.438474719068173e-05,
"loss": 2.4795,
"step": 155
},
{
"epoch": 0.046132775895624596,
"grad_norm": 2.6128344535827637,
"learning_rate": 7.251018724088367e-05,
"loss": 2.6347,
"step": 160
},
{
"epoch": 0.04757442514236286,
"grad_norm": 2.3222455978393555,
"learning_rate": 7.059506241219965e-05,
"loss": 2.5229,
"step": 165
},
{
"epoch": 0.04901607438910113,
"grad_norm": 2.3687586784362793,
"learning_rate": 6.864282388901544e-05,
"loss": 2.1619,
"step": 170
},
{
"epoch": 0.0504577236358394,
"grad_norm": 2.1631596088409424,
"learning_rate": 6.665698973710288e-05,
"loss": 2.608,
"step": 175
},
{
"epoch": 0.05189937288257767,
"grad_norm": 2.542854070663452,
"learning_rate": 6.464113856382752e-05,
"loss": 2.4161,
"step": 180
},
{
"epoch": 0.053341022129315935,
"grad_norm": 1.9485701322555542,
"learning_rate": 6.259890306925627e-05,
"loss": 2.7054,
"step": 185
},
{
"epoch": 0.05478267137605421,
"grad_norm": 2.0567824840545654,
"learning_rate": 6.0533963499786314e-05,
"loss": 2.5283,
"step": 190
},
{
"epoch": 0.05622432062279247,
"grad_norm": 2.237706184387207,
"learning_rate": 5.8450041016092464e-05,
"loss": 2.5936,
"step": 195
},
{
"epoch": 0.057665969869530743,
"grad_norm": 2.4279816150665283,
"learning_rate": 5.6350890987343944e-05,
"loss": 2.2478,
"step": 200
},
{
"epoch": 0.057665969869530743,
"eval_loss": 0.6299009323120117,
"eval_runtime": 232.2791,
"eval_samples_per_second": 6.29,
"eval_steps_per_second": 3.147,
"step": 200
},
{
"epoch": 0.05910761911626901,
"grad_norm": 2.3157238960266113,
"learning_rate": 5.4240296223775465e-05,
"loss": 2.5239,
"step": 205
},
{
"epoch": 0.06054926836300728,
"grad_norm": 2.1027748584747314,
"learning_rate": 5.212206015980742e-05,
"loss": 2.5472,
"step": 210
},
{
"epoch": 0.06199091760974555,
"grad_norm": 2.006582736968994,
"learning_rate": 5e-05,
"loss": 2.4515,
"step": 215
},
{
"epoch": 0.06343256685648382,
"grad_norm": 2.642357587814331,
"learning_rate": 4.78779398401926e-05,
"loss": 2.4863,
"step": 220
},
{
"epoch": 0.06487421610322208,
"grad_norm": 2.00481915473938,
"learning_rate": 4.575970377622456e-05,
"loss": 2.2922,
"step": 225
},
{
"epoch": 0.06631586534996035,
"grad_norm": 2.7545015811920166,
"learning_rate": 4.364910901265606e-05,
"loss": 2.6815,
"step": 230
},
{
"epoch": 0.06775751459669863,
"grad_norm": 1.8313714265823364,
"learning_rate": 4.1549958983907555e-05,
"loss": 2.5887,
"step": 235
},
{
"epoch": 0.0691991638434369,
"grad_norm": 2.634171724319458,
"learning_rate": 3.94660365002137e-05,
"loss": 2.6743,
"step": 240
},
{
"epoch": 0.07064081309017516,
"grad_norm": 2.0860755443573,
"learning_rate": 3.740109693074375e-05,
"loss": 2.4406,
"step": 245
},
{
"epoch": 0.07208246233691343,
"grad_norm": 1.9648572206497192,
"learning_rate": 3.5358861436172485e-05,
"loss": 2.2171,
"step": 250
},
{
"epoch": 0.0735241115836517,
"grad_norm": 2.553926467895508,
"learning_rate": 3.334301026289712e-05,
"loss": 2.6093,
"step": 255
},
{
"epoch": 0.07496576083038997,
"grad_norm": 2.15262770652771,
"learning_rate": 3.135717611098458e-05,
"loss": 2.5061,
"step": 260
},
{
"epoch": 0.07640741007712823,
"grad_norm": 2.1403212547302246,
"learning_rate": 2.9404937587800375e-05,
"loss": 2.4626,
"step": 265
},
{
"epoch": 0.0778490593238665,
"grad_norm": 2.3066139221191406,
"learning_rate": 2.748981275911633e-05,
"loss": 2.5797,
"step": 270
},
{
"epoch": 0.07929070857060477,
"grad_norm": 2.4364964962005615,
"learning_rate": 2.5615252809318284e-05,
"loss": 2.3479,
"step": 275
},
{
"epoch": 0.08073235781734305,
"grad_norm": 1.7531434297561646,
"learning_rate": 2.3784635822138424e-05,
"loss": 2.1783,
"step": 280
},
{
"epoch": 0.0821740070640813,
"grad_norm": 2.306757688522339,
"learning_rate": 2.2001260693120233e-05,
"loss": 2.6474,
"step": 285
},
{
"epoch": 0.08361565631081957,
"grad_norm": 2.356346368789673,
"learning_rate": 2.026834118478567e-05,
"loss": 2.6784,
"step": 290
},
{
"epoch": 0.08505730555755785,
"grad_norm": 2.021139144897461,
"learning_rate": 1.858900013521788e-05,
"loss": 2.7808,
"step": 295
},
{
"epoch": 0.08649895480429612,
"grad_norm": 1.925753116607666,
"learning_rate": 1.6966263830495936e-05,
"loss": 2.0863,
"step": 300
},
{
"epoch": 0.08649895480429612,
"eval_loss": 0.6235827207565308,
"eval_runtime": 232.2728,
"eval_samples_per_second": 6.29,
"eval_steps_per_second": 3.147,
"step": 300
},
{
"epoch": 0.08794060405103438,
"grad_norm": 1.981512427330017,
"learning_rate": 1.5403056551122697e-05,
"loss": 2.3868,
"step": 305
},
{
"epoch": 0.08938225329777265,
"grad_norm": 2.808793544769287,
"learning_rate": 1.3902195302273779e-05,
"loss": 2.6536,
"step": 310
},
{
"epoch": 0.09082390254451092,
"grad_norm": 1.8927634954452515,
"learning_rate": 1.246638473736378e-05,
"loss": 2.4549,
"step": 315
},
{
"epoch": 0.09226555179124919,
"grad_norm": 1.5640909671783447,
"learning_rate": 1.1098212284078036e-05,
"loss": 2.3759,
"step": 320
},
{
"epoch": 0.09370720103798746,
"grad_norm": 2.3104989528656006,
"learning_rate": 9.800143481652979e-06,
"loss": 2.48,
"step": 325
},
{
"epoch": 0.09514885028472572,
"grad_norm": 2.1639902591705322,
"learning_rate": 8.574517537807897e-06,
"loss": 2.7076,
"step": 330
},
{
"epoch": 0.096590499531464,
"grad_norm": 2.3075921535491943,
"learning_rate": 7.423543113334436e-06,
"loss": 2.8997,
"step": 335
},
{
"epoch": 0.09803214877820227,
"grad_norm": 2.313678503036499,
"learning_rate": 6.349294341940593e-06,
"loss": 2.7808,
"step": 340
},
{
"epoch": 0.09947379802494054,
"grad_norm": 2.103289842605591,
"learning_rate": 5.353707092521582e-06,
"loss": 2.6406,
"step": 345
},
{
"epoch": 0.1009154472716788,
"grad_norm": 2.329012870788574,
"learning_rate": 4.43857548059321e-06,
"loss": 2.4586,
"step": 350
},
{
"epoch": 0.10235709651841707,
"grad_norm": 1.8864246606826782,
"learning_rate": 3.605548635174533e-06,
"loss": 2.6252,
"step": 355
},
{
"epoch": 0.10379874576515534,
"grad_norm": 2.0503783226013184,
"learning_rate": 2.85612772694579e-06,
"loss": 2.7678,
"step": 360
},
{
"epoch": 0.10524039501189361,
"grad_norm": 1.6099926233291626,
"learning_rate": 2.191663263037458e-06,
"loss": 2.3372,
"step": 365
},
{
"epoch": 0.10668204425863187,
"grad_norm": 1.768552541732788,
"learning_rate": 1.6133526533250565e-06,
"loss": 2.5533,
"step": 370
},
{
"epoch": 0.10812369350537014,
"grad_norm": 2.1435041427612305,
"learning_rate": 1.1222380526156928e-06,
"loss": 2.5535,
"step": 375
},
{
"epoch": 0.10956534275210841,
"grad_norm": 1.6894549131393433,
"learning_rate": 7.192044826145771e-07,
"loss": 2.5319,
"step": 380
},
{
"epoch": 0.11100699199884669,
"grad_norm": 1.8937331438064575,
"learning_rate": 4.049782370561583e-07,
"loss": 2.5789,
"step": 385
},
{
"epoch": 0.11244864124558494,
"grad_norm": 2.080510139465332,
"learning_rate": 1.8012557287367392e-07,
"loss": 2.4853,
"step": 390
},
{
"epoch": 0.11389029049232322,
"grad_norm": 2.1903302669525146,
"learning_rate": 4.5051689765929214e-08,
"loss": 2.4425,
"step": 395
},
{
"epoch": 0.11533193973906149,
"grad_norm": 2.601141929626465,
"learning_rate": 0.0,
"loss": 1.9008,
"step": 400
},
{
"epoch": 0.11533193973906149,
"eval_loss": 0.6221744418144226,
"eval_runtime": 232.3152,
"eval_samples_per_second": 6.289,
"eval_steps_per_second": 3.147,
"step": 400
}
],
"logging_steps": 5,
"max_steps": 400,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 2.804271657517056e+17,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}
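
The JSON above is a standard Hugging Face `transformers` Trainer state (`trainer_state.json`): training records in `log_history` carry `loss`, `grad_norm`, and `learning_rate`, while evaluation records carry `eval_loss` and runtime statistics. A minimal sketch for extracting the train and eval loss curves, assuming the file is saved locally as `trainer_state.json` (the path and the use of the standard-library `json` module are assumptions for illustration, not part of the checkpoint):

```python
import json

# Load the trainer state shown above (local path is an assumption).
with open("trainer_state.json") as f:
    state = json.load(f)

# log_history mixes training records ("loss") and evaluation records ("eval_loss").
train_points = [(e["step"], e["loss"]) for e in state["log_history"] if "loss" in e]
eval_points = [(e["step"], e["eval_loss"]) for e in state["log_history"] if "eval_loss" in e]

print(f"trained for {state['global_step']} of {state['max_steps']} steps "
      f"({state['epoch']:.4f} epochs)")
for step, loss in eval_points:
    print(f"step {step:>4}: eval_loss = {loss:.4f}")
```

For this checkpoint, that loop would report the four evaluation points recorded at steps 1, 100, 200, 300, and 400, showing eval loss falling from roughly 0.81 to 0.62 over the 400 training steps.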