{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.04962455109369899,
"eval_steps": 76,
"global_step": 76,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.000652954619653934,
"grad_norm": 4.757781028747559,
"learning_rate": 4.000000000000001e-06,
"loss": 4.2047,
"step": 1
},
{
"epoch": 0.000652954619653934,
"eval_loss": null,
"eval_runtime": 704.7675,
"eval_samples_per_second": 3.661,
"eval_steps_per_second": 0.915,
"step": 1
},
{
"epoch": 0.001305909239307868,
"grad_norm": 4.874630928039551,
"learning_rate": 8.000000000000001e-06,
"loss": 4.1653,
"step": 2
},
{
"epoch": 0.0019588638589618022,
"grad_norm": 5.5883331298828125,
"learning_rate": 1.2e-05,
"loss": 4.1635,
"step": 3
},
{
"epoch": 0.002611818478615736,
"grad_norm": 5.3879313468933105,
"learning_rate": 1.6000000000000003e-05,
"loss": 3.8569,
"step": 4
},
{
"epoch": 0.00326477309826967,
"grad_norm": 4.848824501037598,
"learning_rate": 2e-05,
"loss": 4.0147,
"step": 5
},
{
"epoch": 0.0039177277179236044,
"grad_norm": 5.3052978515625,
"learning_rate": 2.4e-05,
"loss": 3.7956,
"step": 6
},
{
"epoch": 0.004570682337577538,
"grad_norm": 6.782866954803467,
"learning_rate": 2.8000000000000003e-05,
"loss": 4.6771,
"step": 7
},
{
"epoch": 0.005223636957231472,
"grad_norm": 6.1247735023498535,
"learning_rate": 3.2000000000000005e-05,
"loss": 3.8201,
"step": 8
},
{
"epoch": 0.005876591576885406,
"grad_norm": 5.305412769317627,
"learning_rate": 3.6e-05,
"loss": 3.2011,
"step": 9
},
{
"epoch": 0.00652954619653934,
"grad_norm": 4.974228858947754,
"learning_rate": 4e-05,
"loss": 3.2488,
"step": 10
},
{
"epoch": 0.007182500816193275,
"grad_norm": 4.17997407913208,
"learning_rate": 4.4000000000000006e-05,
"loss": 2.8667,
"step": 11
},
{
"epoch": 0.007835455435847209,
"grad_norm": 5.494494915008545,
"learning_rate": 4.8e-05,
"loss": 3.0199,
"step": 12
},
{
"epoch": 0.008488410055501142,
"grad_norm": 3.663966178894043,
"learning_rate": 5.2000000000000004e-05,
"loss": 2.5652,
"step": 13
},
{
"epoch": 0.009141364675155077,
"grad_norm": 3.196805477142334,
"learning_rate": 5.6000000000000006e-05,
"loss": 2.6467,
"step": 14
},
{
"epoch": 0.009794319294809012,
"grad_norm": 3.161735773086548,
"learning_rate": 6e-05,
"loss": 2.4477,
"step": 15
},
{
"epoch": 0.010447273914462945,
"grad_norm": 4.473456859588623,
"learning_rate": 6.400000000000001e-05,
"loss": 2.4943,
"step": 16
},
{
"epoch": 0.01110022853411688,
"grad_norm": 5.138904094696045,
"learning_rate": 6.800000000000001e-05,
"loss": 2.51,
"step": 17
},
{
"epoch": 0.011753183153770812,
"grad_norm": 5.232083320617676,
"learning_rate": 7.2e-05,
"loss": 2.3384,
"step": 18
},
{
"epoch": 0.012406137773424747,
"grad_norm": 5.67042875289917,
"learning_rate": 7.6e-05,
"loss": 2.3158,
"step": 19
},
{
"epoch": 0.01305909239307868,
"grad_norm": 6.414549350738525,
"learning_rate": 8e-05,
"loss": 2.2479,
"step": 20
},
{
"epoch": 0.013712047012732615,
"grad_norm": 6.064276218414307,
"learning_rate": 8.4e-05,
"loss": 2.689,
"step": 21
},
{
"epoch": 0.01436500163238655,
"grad_norm": 4.989445686340332,
"learning_rate": 8.800000000000001e-05,
"loss": 1.9927,
"step": 22
},
{
"epoch": 0.015017956252040483,
"grad_norm": 6.970681667327881,
"learning_rate": 9.200000000000001e-05,
"loss": 2.0652,
"step": 23
},
{
"epoch": 0.015670910871694418,
"grad_norm": 5.712936878204346,
"learning_rate": 9.6e-05,
"loss": 1.7749,
"step": 24
},
{
"epoch": 0.01632386549134835,
"grad_norm": 6.509803771972656,
"learning_rate": 0.0001,
"loss": 1.6381,
"step": 25
},
{
"epoch": 0.016976820111002284,
"grad_norm": 9.46315860748291,
"learning_rate": 0.00010400000000000001,
"loss": 2.9097,
"step": 26
},
{
"epoch": 0.01762977473065622,
"grad_norm": 7.60649299621582,
"learning_rate": 0.00010800000000000001,
"loss": 2.3185,
"step": 27
},
{
"epoch": 0.018282729350310153,
"grad_norm": 4.304985046386719,
"learning_rate": 0.00011200000000000001,
"loss": 1.8866,
"step": 28
},
{
"epoch": 0.018935683969964087,
"grad_norm": 4.42153787612915,
"learning_rate": 0.000116,
"loss": 1.9317,
"step": 29
},
{
"epoch": 0.019588638589618023,
"grad_norm": 4.993203163146973,
"learning_rate": 0.00012,
"loss": 1.963,
"step": 30
},
{
"epoch": 0.020241593209271956,
"grad_norm": 5.630634784698486,
"learning_rate": 0.000124,
"loss": 2.0134,
"step": 31
},
{
"epoch": 0.02089454782892589,
"grad_norm": 4.011257648468018,
"learning_rate": 0.00012800000000000002,
"loss": 1.5838,
"step": 32
},
{
"epoch": 0.021547502448579822,
"grad_norm": 4.430102348327637,
"learning_rate": 0.000132,
"loss": 1.567,
"step": 33
},
{
"epoch": 0.02220045706823376,
"grad_norm": 4.285562038421631,
"learning_rate": 0.00013600000000000003,
"loss": 1.5668,
"step": 34
},
{
"epoch": 0.022853411687887692,
"grad_norm": 4.76671028137207,
"learning_rate": 0.00014,
"loss": 1.4504,
"step": 35
},
{
"epoch": 0.023506366307541625,
"grad_norm": 3.6335926055908203,
"learning_rate": 0.000144,
"loss": 1.7653,
"step": 36
},
{
"epoch": 0.02415932092719556,
"grad_norm": 3.863640546798706,
"learning_rate": 0.000148,
"loss": 1.7619,
"step": 37
},
{
"epoch": 0.024812275546849494,
"grad_norm": 3.339837074279785,
"learning_rate": 0.000152,
"loss": 1.9767,
"step": 38
},
{
"epoch": 0.025465230166503428,
"grad_norm": 3.004544734954834,
"learning_rate": 0.00015600000000000002,
"loss": 1.954,
"step": 39
},
{
"epoch": 0.02611818478615736,
"grad_norm": 3.1160991191864014,
"learning_rate": 0.00016,
"loss": 2.2944,
"step": 40
},
{
"epoch": 0.026771139405811297,
"grad_norm": 2.9257686138153076,
"learning_rate": 0.000164,
"loss": 2.1756,
"step": 41
},
{
"epoch": 0.02742409402546523,
"grad_norm": 3.0116686820983887,
"learning_rate": 0.000168,
"loss": 2.2031,
"step": 42
},
{
"epoch": 0.028077048645119163,
"grad_norm": 3.1789779663085938,
"learning_rate": 0.000172,
"loss": 1.71,
"step": 43
},
{
"epoch": 0.0287300032647731,
"grad_norm": 3.2652573585510254,
"learning_rate": 0.00017600000000000002,
"loss": 2.108,
"step": 44
},
{
"epoch": 0.029382957884427033,
"grad_norm": 3.3591766357421875,
"learning_rate": 0.00018,
"loss": 2.252,
"step": 45
},
{
"epoch": 0.030035912504080966,
"grad_norm": 4.020256519317627,
"learning_rate": 0.00018400000000000003,
"loss": 2.0209,
"step": 46
},
{
"epoch": 0.0306888671237349,
"grad_norm": 3.1844100952148438,
"learning_rate": 0.000188,
"loss": 1.7205,
"step": 47
},
{
"epoch": 0.031341821743388835,
"grad_norm": 4.041150093078613,
"learning_rate": 0.000192,
"loss": 2.0322,
"step": 48
},
{
"epoch": 0.031994776363042765,
"grad_norm": 3.976959228515625,
"learning_rate": 0.000196,
"loss": 1.5729,
"step": 49
},
{
"epoch": 0.0326477309826967,
"grad_norm": 3.8643813133239746,
"learning_rate": 0.0002,
"loss": 1.3921,
"step": 50
},
{
"epoch": 0.03330068560235064,
"grad_norm": 4.523630142211914,
"learning_rate": 0.0001999922905547776,
"loss": 2.0141,
"step": 51
},
{
"epoch": 0.03395364022200457,
"grad_norm": 3.308136463165283,
"learning_rate": 0.0001999691634078213,
"loss": 1.6375,
"step": 52
},
{
"epoch": 0.034606594841658504,
"grad_norm": 3.70528244972229,
"learning_rate": 0.00019993062212508053,
"loss": 2.1218,
"step": 53
},
{
"epoch": 0.03525954946131244,
"grad_norm": 3.909076690673828,
"learning_rate": 0.0001998766726491935,
"loss": 1.7935,
"step": 54
},
{
"epoch": 0.03591250408096637,
"grad_norm": 3.2719497680664062,
"learning_rate": 0.00019980732329857076,
"loss": 1.4998,
"step": 55
},
{
"epoch": 0.03656545870062031,
"grad_norm": 4.357359886169434,
"learning_rate": 0.0001997225847661127,
"loss": 1.7119,
"step": 56
},
{
"epoch": 0.03721841332027424,
"grad_norm": 3.111414909362793,
"learning_rate": 0.00019962247011756081,
"loss": 1.3332,
"step": 57
},
{
"epoch": 0.03787136793992817,
"grad_norm": 3.3698601722717285,
"learning_rate": 0.00019950699478948309,
"loss": 1.3576,
"step": 58
},
{
"epoch": 0.03852432255958211,
"grad_norm": 4.0696024894714355,
"learning_rate": 0.00019937617658689384,
"loss": 1.2196,
"step": 59
},
{
"epoch": 0.039177277179236046,
"grad_norm": 3.984119176864624,
"learning_rate": 0.00019923003568050844,
"loss": 1.4652,
"step": 60
},
{
"epoch": 0.039830231798889976,
"grad_norm": 3.909024238586426,
"learning_rate": 0.00019906859460363307,
"loss": 1.2951,
"step": 61
},
{
"epoch": 0.04048318641854391,
"grad_norm": 3.550440788269043,
"learning_rate": 0.0001988918782486906,
"loss": 1.4699,
"step": 62
},
{
"epoch": 0.04113614103819784,
"grad_norm": 3.341071367263794,
"learning_rate": 0.0001986999138633821,
"loss": 2.1781,
"step": 63
},
{
"epoch": 0.04178909565785178,
"grad_norm": 2.938025951385498,
"learning_rate": 0.00019849273104648592,
"loss": 1.7088,
"step": 64
},
{
"epoch": 0.042442050277505715,
"grad_norm": 2.956080913543701,
"learning_rate": 0.00019827036174329353,
"loss": 2.0676,
"step": 65
},
{
"epoch": 0.043095004897159644,
"grad_norm": 2.349592924118042,
"learning_rate": 0.00019803284024068427,
"loss": 1.7783,
"step": 66
},
{
"epoch": 0.04374795951681358,
"grad_norm": 2.7854583263397217,
"learning_rate": 0.0001977802031618383,
"loss": 2.222,
"step": 67
},
{
"epoch": 0.04440091413646752,
"grad_norm": 3.6111695766448975,
"learning_rate": 0.00019751248946059014,
"loss": 2.0701,
"step": 68
},
{
"epoch": 0.04505386875612145,
"grad_norm": 3.226724863052368,
"learning_rate": 0.00019722974041542203,
"loss": 2.2502,
"step": 69
},
{
"epoch": 0.045706823375775384,
"grad_norm": 3.4755945205688477,
"learning_rate": 0.0001969319996230995,
"loss": 2.0837,
"step": 70
},
{
"epoch": 0.04635977799542932,
"grad_norm": 3.1673951148986816,
"learning_rate": 0.0001966193129919491,
"loss": 1.8692,
"step": 71
},
{
"epoch": 0.04701273261508325,
"grad_norm": 3.5966238975524902,
"learning_rate": 0.00019629172873477995,
"loss": 2.0036,
"step": 72
},
{
"epoch": 0.047665687234737186,
"grad_norm": 4.344339370727539,
"learning_rate": 0.00019594929736144976,
"loss": 1.963,
"step": 73
},
{
"epoch": 0.04831864185439112,
"grad_norm": 3.6855549812316895,
"learning_rate": 0.00019559207167107684,
"loss": 1.5932,
"step": 74
},
{
"epoch": 0.04897159647404505,
"grad_norm": 3.7931909561157227,
"learning_rate": 0.000195220106743899,
"loss": 1.272,
"step": 75
},
{
"epoch": 0.04962455109369899,
"grad_norm": 3.422001361846924,
"learning_rate": 0.00019483345993278093,
"loss": 1.8281,
"step": 76
},
{
"epoch": 0.04962455109369899,
"eval_loss": null,
"eval_runtime": 704.1079,
"eval_samples_per_second": 3.664,
"eval_steps_per_second": 0.916,
"step": 76
}
],
"logging_steps": 1,
"max_steps": 303,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 76,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 2.331358757084201e+17,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}