Training in progress, step 75, checkpoint
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.1111934766493699,
"eval_steps": 9,
"global_step": 75,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0014825796886582653,
"grad_norm": 2.8474559783935547,
"learning_rate": 1e-05,
"loss": 5.225,
"step": 1
},
{
"epoch": 0.0014825796886582653,
"eval_loss": 2.508056402206421,
"eval_runtime": 69.5324,
"eval_samples_per_second": 8.169,
"eval_steps_per_second": 1.021,
"step": 1
},
{
"epoch": 0.0029651593773165306,
"grad_norm": 2.3535349369049072,
"learning_rate": 2e-05,
"loss": 4.6645,
"step": 2
},
{
"epoch": 0.004447739065974796,
"grad_norm": 2.6361703872680664,
"learning_rate": 3e-05,
"loss": 4.8725,
"step": 3
},
{
"epoch": 0.005930318754633061,
"grad_norm": 4.400317192077637,
"learning_rate": 4e-05,
"loss": 4.7542,
"step": 4
},
{
"epoch": 0.007412898443291327,
"grad_norm": 2.6915769577026367,
"learning_rate": 5e-05,
"loss": 5.1234,
"step": 5
},
{
"epoch": 0.008895478131949592,
"grad_norm": 3.3075225353240967,
"learning_rate": 6e-05,
"loss": 4.5994,
"step": 6
},
{
"epoch": 0.010378057820607857,
"grad_norm": 3.946103572845459,
"learning_rate": 7e-05,
"loss": 4.6332,
"step": 7
},
{
"epoch": 0.011860637509266123,
"grad_norm": 4.162266731262207,
"learning_rate": 8e-05,
"loss": 4.7407,
"step": 8
},
{
"epoch": 0.013343217197924388,
"grad_norm": 5.167053699493408,
"learning_rate": 9e-05,
"loss": 4.3724,
"step": 9
},
{
"epoch": 0.013343217197924388,
"eval_loss": 1.8550039529800415,
"eval_runtime": 69.5376,
"eval_samples_per_second": 8.168,
"eval_steps_per_second": 1.021,
"step": 9
},
{
"epoch": 0.014825796886582653,
"grad_norm": 4.388719081878662,
"learning_rate": 0.0001,
"loss": 3.382,
"step": 10
},
{
"epoch": 0.01630837657524092,
"grad_norm": 5.396074295043945,
"learning_rate": 9.99695413509548e-05,
"loss": 3.0693,
"step": 11
},
{
"epoch": 0.017790956263899184,
"grad_norm": 5.097604751586914,
"learning_rate": 9.987820251299122e-05,
"loss": 2.345,
"step": 12
},
{
"epoch": 0.01927353595255745,
"grad_norm": 5.280440807342529,
"learning_rate": 9.972609476841367e-05,
"loss": 2.0353,
"step": 13
},
{
"epoch": 0.020756115641215715,
"grad_norm": 3.470097780227661,
"learning_rate": 9.951340343707852e-05,
"loss": 1.1178,
"step": 14
},
{
"epoch": 0.02223869532987398,
"grad_norm": 4.099720478057861,
"learning_rate": 9.924038765061042e-05,
"loss": 1.1429,
"step": 15
},
{
"epoch": 0.023721275018532245,
"grad_norm": 3.006298065185547,
"learning_rate": 9.890738003669029e-05,
"loss": 1.0927,
"step": 16
},
{
"epoch": 0.025203854707190512,
"grad_norm": 1.9103775024414062,
"learning_rate": 9.851478631379982e-05,
"loss": 0.6789,
"step": 17
},
{
"epoch": 0.026686434395848776,
"grad_norm": 3.037099599838257,
"learning_rate": 9.806308479691595e-05,
"loss": 0.3045,
"step": 18
},
{
"epoch": 0.026686434395848776,
"eval_loss": 0.32767146825790405,
"eval_runtime": 69.5401,
"eval_samples_per_second": 8.168,
"eval_steps_per_second": 1.021,
"step": 18
},
{
"epoch": 0.028169014084507043,
"grad_norm": 4.88580322265625,
"learning_rate": 9.755282581475769e-05,
"loss": 0.8711,
"step": 19
},
{
"epoch": 0.029651593773165306,
"grad_norm": 14.325489044189453,
"learning_rate": 9.698463103929542e-05,
"loss": 0.6723,
"step": 20
},
{
"epoch": 0.031134173461823574,
"grad_norm": 1.8659063577651978,
"learning_rate": 9.635919272833938e-05,
"loss": 0.3444,
"step": 21
},
{
"epoch": 0.03261675315048184,
"grad_norm": 1.1169908046722412,
"learning_rate": 9.567727288213005e-05,
"loss": 0.1008,
"step": 22
},
{
"epoch": 0.0340993328391401,
"grad_norm": 3.31902813911438,
"learning_rate": 9.493970231495835e-05,
"loss": 0.5448,
"step": 23
},
{
"epoch": 0.03558191252779837,
"grad_norm": 2.234715700149536,
"learning_rate": 9.414737964294636e-05,
"loss": 0.2588,
"step": 24
},
{
"epoch": 0.037064492216456635,
"grad_norm": 1.7870392799377441,
"learning_rate": 9.330127018922194e-05,
"loss": 0.219,
"step": 25
},
{
"epoch": 0.0385470719051149,
"grad_norm": 2.5575437545776367,
"learning_rate": 9.24024048078213e-05,
"loss": 0.5638,
"step": 26
},
{
"epoch": 0.04002965159377316,
"grad_norm": 3.1489202976226807,
"learning_rate": 9.145187862775209e-05,
"loss": 0.1793,
"step": 27
},
{
"epoch": 0.04002965159377316,
"eval_loss": 0.14556753635406494,
"eval_runtime": 69.5326,
"eval_samples_per_second": 8.169,
"eval_steps_per_second": 1.021,
"step": 27
},
{
"epoch": 0.04151223128243143,
"grad_norm": 2.429274559020996,
"learning_rate": 9.045084971874738e-05,
"loss": 0.4793,
"step": 28
},
{
"epoch": 0.042994810971089696,
"grad_norm": 1.9509632587432861,
"learning_rate": 8.940053768033609e-05,
"loss": 0.4278,
"step": 29
},
{
"epoch": 0.04447739065974796,
"grad_norm": 1.8806370496749878,
"learning_rate": 8.83022221559489e-05,
"loss": 0.3179,
"step": 30
},
{
"epoch": 0.04595997034840623,
"grad_norm": 1.5672978162765503,
"learning_rate": 8.715724127386972e-05,
"loss": 0.2368,
"step": 31
},
{
"epoch": 0.04744255003706449,
"grad_norm": 1.8994029760360718,
"learning_rate": 8.596699001693255e-05,
"loss": 0.3258,
"step": 32
},
{
"epoch": 0.04892512972572276,
"grad_norm": 0.3584083914756775,
"learning_rate": 8.473291852294987e-05,
"loss": 0.0261,
"step": 33
},
{
"epoch": 0.050407709414381024,
"grad_norm": 20.641138076782227,
"learning_rate": 8.345653031794292e-05,
"loss": 0.2844,
"step": 34
},
{
"epoch": 0.05189028910303929,
"grad_norm": 1.0512652397155762,
"learning_rate": 8.213938048432697e-05,
"loss": 0.078,
"step": 35
},
{
"epoch": 0.05337286879169755,
"grad_norm": 1.5341689586639404,
"learning_rate": 8.07830737662829e-05,
"loss": 0.1862,
"step": 36
},
{
"epoch": 0.05337286879169755,
"eval_loss": 0.10944854468107224,
"eval_runtime": 69.5263,
"eval_samples_per_second": 8.17,
"eval_steps_per_second": 1.021,
"step": 36
},
{
"epoch": 0.05485544848035582,
"grad_norm": 1.0907577276229858,
"learning_rate": 7.938926261462366e-05,
"loss": 0.0845,
"step": 37
},
{
"epoch": 0.056338028169014086,
"grad_norm": 3.018357515335083,
"learning_rate": 7.795964517353735e-05,
"loss": 0.2438,
"step": 38
},
{
"epoch": 0.05782060785767235,
"grad_norm": 2.455247640609741,
"learning_rate": 7.649596321166024e-05,
"loss": 0.2814,
"step": 39
},
{
"epoch": 0.05930318754633061,
"grad_norm": 1.3131052255630493,
"learning_rate": 7.500000000000001e-05,
"loss": 0.15,
"step": 40
},
{
"epoch": 0.06078576723498888,
"grad_norm": 3.73819899559021,
"learning_rate": 7.347357813929454e-05,
"loss": 0.5374,
"step": 41
},
{
"epoch": 0.06226834692364715,
"grad_norm": 2.3663792610168457,
"learning_rate": 7.191855733945387e-05,
"loss": 0.2676,
"step": 42
},
{
"epoch": 0.06375092661230541,
"grad_norm": 1.835417628288269,
"learning_rate": 7.033683215379002e-05,
"loss": 0.1357,
"step": 43
},
{
"epoch": 0.06523350630096368,
"grad_norm": 2.896899461746216,
"learning_rate": 6.873032967079561e-05,
"loss": 0.1411,
"step": 44
},
{
"epoch": 0.06671608598962195,
"grad_norm": 1.5244169235229492,
"learning_rate": 6.710100716628344e-05,
"loss": 0.2247,
"step": 45
},
{
"epoch": 0.06671608598962195,
"eval_loss": 0.08920716494321823,
"eval_runtime": 69.558,
"eval_samples_per_second": 8.166,
"eval_steps_per_second": 1.021,
"step": 45
},
{
"epoch": 0.0681986656782802,
"grad_norm": 1.3107813596725464,
"learning_rate": 6.545084971874738e-05,
"loss": 0.3675,
"step": 46
},
{
"epoch": 0.06968124536693847,
"grad_norm": 1.982515573501587,
"learning_rate": 6.378186779084995e-05,
"loss": 0.2806,
"step": 47
},
{
"epoch": 0.07116382505559674,
"grad_norm": 0.5991136431694031,
"learning_rate": 6.209609477998338e-05,
"loss": 0.0344,
"step": 48
},
{
"epoch": 0.072646404744255,
"grad_norm": 0.9610239267349243,
"learning_rate": 6.0395584540887963e-05,
"loss": 0.183,
"step": 49
},
{
"epoch": 0.07412898443291327,
"grad_norm": 6.47637939453125,
"learning_rate": 5.868240888334653e-05,
"loss": 0.0722,
"step": 50
},
{
"epoch": 0.07561156412157154,
"grad_norm": 1.6755069494247437,
"learning_rate": 5.695865504800327e-05,
"loss": 0.1886,
"step": 51
},
{
"epoch": 0.0770941438102298,
"grad_norm": 1.4019275903701782,
"learning_rate": 5.522642316338268e-05,
"loss": 0.2802,
"step": 52
},
{
"epoch": 0.07857672349888807,
"grad_norm": 1.2601383924484253,
"learning_rate": 5.348782368720626e-05,
"loss": 0.1966,
"step": 53
},
{
"epoch": 0.08005930318754632,
"grad_norm": 2.310203790664673,
"learning_rate": 5.174497483512506e-05,
"loss": 0.0976,
"step": 54
},
{
"epoch": 0.08005930318754632,
"eval_loss": 0.0861150324344635,
"eval_runtime": 69.5538,
"eval_samples_per_second": 8.166,
"eval_steps_per_second": 1.021,
"step": 54
},
{
"epoch": 0.08154188287620459,
"grad_norm": 1.8496018648147583,
"learning_rate": 5e-05,
"loss": 0.1013,
"step": 55
},
{
"epoch": 0.08302446256486286,
"grad_norm": 2.3585057258605957,
"learning_rate": 4.825502516487497e-05,
"loss": 0.4208,
"step": 56
},
{
"epoch": 0.08450704225352113,
"grad_norm": 1.9495047330856323,
"learning_rate": 4.6512176312793736e-05,
"loss": 0.1957,
"step": 57
},
{
"epoch": 0.08598962194217939,
"grad_norm": 0.7716205716133118,
"learning_rate": 4.477357683661734e-05,
"loss": 0.2906,
"step": 58
},
{
"epoch": 0.08747220163083766,
"grad_norm": 0.6261329054832458,
"learning_rate": 4.3041344951996746e-05,
"loss": 0.0633,
"step": 59
},
{
"epoch": 0.08895478131949593,
"grad_norm": 0.622874915599823,
"learning_rate": 4.131759111665349e-05,
"loss": 0.1441,
"step": 60
},
{
"epoch": 0.0904373610081542,
"grad_norm": 0.31516921520233154,
"learning_rate": 3.960441545911204e-05,
"loss": 0.0166,
"step": 61
},
{
"epoch": 0.09191994069681246,
"grad_norm": 1.9802504777908325,
"learning_rate": 3.790390522001662e-05,
"loss": 0.2408,
"step": 62
},
{
"epoch": 0.09340252038547071,
"grad_norm": 0.5901920795440674,
"learning_rate": 3.6218132209150045e-05,
"loss": 0.0406,
"step": 63
},
{
"epoch": 0.09340252038547071,
"eval_loss": 0.07249511033296585,
"eval_runtime": 69.5376,
"eval_samples_per_second": 8.168,
"eval_steps_per_second": 1.021,
"step": 63
},
{
"epoch": 0.09488510007412898,
"grad_norm": 0.49623680114746094,
"learning_rate": 3.4549150281252636e-05,
"loss": 0.0985,
"step": 64
},
{
"epoch": 0.09636767976278725,
"grad_norm": 1.2704252004623413,
"learning_rate": 3.289899283371657e-05,
"loss": 0.2287,
"step": 65
},
{
"epoch": 0.09785025945144551,
"grad_norm": 0.3415655195713043,
"learning_rate": 3.12696703292044e-05,
"loss": 0.0341,
"step": 66
},
{
"epoch": 0.09933283914010378,
"grad_norm": 1.9959161281585693,
"learning_rate": 2.9663167846209998e-05,
"loss": 0.2905,
"step": 67
},
{
"epoch": 0.10081541882876205,
"grad_norm": 1.1924718618392944,
"learning_rate": 2.8081442660546125e-05,
"loss": 0.2025,
"step": 68
},
{
"epoch": 0.10229799851742032,
"grad_norm": 0.9648506045341492,
"learning_rate": 2.6526421860705473e-05,
"loss": 0.224,
"step": 69
},
{
"epoch": 0.10378057820607858,
"grad_norm": 0.41112014651298523,
"learning_rate": 2.500000000000001e-05,
"loss": 0.1015,
"step": 70
},
{
"epoch": 0.10526315789473684,
"grad_norm": 0.44839367270469666,
"learning_rate": 2.350403678833976e-05,
"loss": 0.0422,
"step": 71
},
{
"epoch": 0.1067457375833951,
"grad_norm": 0.6787695288658142,
"learning_rate": 2.2040354826462668e-05,
"loss": 0.0655,
"step": 72
},
{
"epoch": 0.1067457375833951,
"eval_loss": 0.06948262453079224,
"eval_runtime": 69.544,
"eval_samples_per_second": 8.167,
"eval_steps_per_second": 1.021,
"step": 72
},
{
"epoch": 0.10822831727205337,
"grad_norm": 0.8753631114959717,
"learning_rate": 2.061073738537635e-05,
"loss": 0.1026,
"step": 73
},
{
"epoch": 0.10971089696071164,
"grad_norm": 0.4623323082923889,
"learning_rate": 1.9216926233717085e-05,
"loss": 0.1358,
"step": 74
},
{
"epoch": 0.1111934766493699,
"grad_norm": 2.5765066146850586,
"learning_rate": 1.7860619515673033e-05,
"loss": 0.4581,
"step": 75
}
],
"logging_steps": 1,
"max_steps": 100,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 25,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 4.90098483265536e+16,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}
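
A minimal sketch (Python, standard library only) of how a trainer state file like the one above could be inspected offline. The path checkpoint-75/trainer_state.json is an assumption about where the Trainer wrote this file; the field names (log_history, loss, eval_loss, step) are taken directly from the JSON shown here.

import json

# Load the checkpoint's trainer state (the path is an assumed example).
with open("checkpoint-75/trainer_state.json") as f:
    state = json.load(f)

# log_history mixes training records (which carry "loss") and evaluation
# records (which carry "eval_loss"); split them by the key that is present.
train_log = [e for e in state["log_history"] if "loss" in e]
eval_log = [e for e in state["log_history"] if "eval_loss" in e]

print(f"train records: {len(train_log)}, eval records: {len(eval_log)}")
print("last training loss:", train_log[-1]["loss"])
print("lowest eval loss so far:", min(e["eval_loss"] for e in eval_log))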