{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.01156737998843262,
"eval_steps": 9,
"global_step": 100,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0001156737998843262,
"grad_norm": 1.395166277885437,
"learning_rate": 1e-05,
"loss": 1.6313,
"step": 1
},
{
"epoch": 0.0001156737998843262,
"eval_loss": 1.9891488552093506,
"eval_runtime": 738.5884,
"eval_samples_per_second": 9.857,
"eval_steps_per_second": 1.232,
"step": 1
},
{
"epoch": 0.0002313475997686524,
"grad_norm": 1.2996492385864258,
"learning_rate": 2e-05,
"loss": 1.6748,
"step": 2
},
{
"epoch": 0.0003470213996529786,
"grad_norm": 1.7481492757797241,
"learning_rate": 3e-05,
"loss": 1.7503,
"step": 3
},
{
"epoch": 0.0004626951995373048,
"grad_norm": 3.627359390258789,
"learning_rate": 4e-05,
"loss": 2.5117,
"step": 4
},
{
"epoch": 0.000578368999421631,
"grad_norm": 2.383563995361328,
"learning_rate": 5e-05,
"loss": 2.2055,
"step": 5
},
{
"epoch": 0.0006940427993059572,
"grad_norm": 1.7713170051574707,
"learning_rate": 6e-05,
"loss": 1.9405,
"step": 6
},
{
"epoch": 0.0008097165991902834,
"grad_norm": 2.0939180850982666,
"learning_rate": 7e-05,
"loss": 2.1283,
"step": 7
},
{
"epoch": 0.0009253903990746096,
"grad_norm": 0.9817687273025513,
"learning_rate": 8e-05,
"loss": 1.6677,
"step": 8
},
{
"epoch": 0.0010410641989589359,
"grad_norm": 0.5597569346427917,
"learning_rate": 9e-05,
"loss": 1.2373,
"step": 9
},
{
"epoch": 0.0010410641989589359,
"eval_loss": 1.639083743095398,
"eval_runtime": 740.2556,
"eval_samples_per_second": 9.834,
"eval_steps_per_second": 1.229,
"step": 9
},
{
"epoch": 0.001156737998843262,
"grad_norm": 0.9529999494552612,
"learning_rate": 0.0001,
"loss": 1.668,
"step": 10
},
{
"epoch": 0.0012724117987275882,
"grad_norm": 1.0801961421966553,
"learning_rate": 9.99695413509548e-05,
"loss": 1.7986,
"step": 11
},
{
"epoch": 0.0013880855986119144,
"grad_norm": 0.5361855030059814,
"learning_rate": 9.987820251299122e-05,
"loss": 1.3109,
"step": 12
},
{
"epoch": 0.0015037593984962407,
"grad_norm": 0.6420459747314453,
"learning_rate": 9.972609476841367e-05,
"loss": 1.4781,
"step": 13
},
{
"epoch": 0.0016194331983805667,
"grad_norm": 0.8224722146987915,
"learning_rate": 9.951340343707852e-05,
"loss": 1.5962,
"step": 14
},
{
"epoch": 0.001735106998264893,
"grad_norm": 0.7396243810653687,
"learning_rate": 9.924038765061042e-05,
"loss": 1.5408,
"step": 15
},
{
"epoch": 0.0018507807981492192,
"grad_norm": 1.3103138208389282,
"learning_rate": 9.890738003669029e-05,
"loss": 1.545,
"step": 16
},
{
"epoch": 0.0019664545980335453,
"grad_norm": 0.8075879812240601,
"learning_rate": 9.851478631379982e-05,
"loss": 1.4319,
"step": 17
},
{
"epoch": 0.0020821283979178718,
"grad_norm": 0.7657693028450012,
"learning_rate": 9.806308479691595e-05,
"loss": 1.4987,
"step": 18
},
{
"epoch": 0.0020821283979178718,
"eval_loss": 1.4760798215866089,
"eval_runtime": 740.2741,
"eval_samples_per_second": 9.834,
"eval_steps_per_second": 1.229,
"step": 18
},
{
"epoch": 0.002197802197802198,
"grad_norm": 0.6409154534339905,
"learning_rate": 9.755282581475769e-05,
"loss": 1.4464,
"step": 19
},
{
"epoch": 0.002313475997686524,
"grad_norm": 1.1145334243774414,
"learning_rate": 9.698463103929542e-05,
"loss": 1.6525,
"step": 20
},
{
"epoch": 0.0024291497975708503,
"grad_norm": 0.5951794385910034,
"learning_rate": 9.635919272833938e-05,
"loss": 1.4047,
"step": 21
},
{
"epoch": 0.0025448235974551764,
"grad_norm": 0.9459831118583679,
"learning_rate": 9.567727288213005e-05,
"loss": 1.8246,
"step": 22
},
{
"epoch": 0.0026604973973395024,
"grad_norm": 0.5552104115486145,
"learning_rate": 9.493970231495835e-05,
"loss": 1.3021,
"step": 23
},
{
"epoch": 0.002776171197223829,
"grad_norm": 0.6463938355445862,
"learning_rate": 9.414737964294636e-05,
"loss": 1.2261,
"step": 24
},
{
"epoch": 0.002891844997108155,
"grad_norm": 0.5082288980484009,
"learning_rate": 9.330127018922194e-05,
"loss": 1.3508,
"step": 25
},
{
"epoch": 0.0030075187969924814,
"grad_norm": 0.6465054154396057,
"learning_rate": 9.24024048078213e-05,
"loss": 1.2657,
"step": 26
},
{
"epoch": 0.0031231925968768074,
"grad_norm": 0.6023719906806946,
"learning_rate": 9.145187862775209e-05,
"loss": 1.4483,
"step": 27
},
{
"epoch": 0.0031231925968768074,
"eval_loss": 1.3892802000045776,
"eval_runtime": 740.4541,
"eval_samples_per_second": 9.832,
"eval_steps_per_second": 1.229,
"step": 27
},
{
"epoch": 0.0032388663967611335,
"grad_norm": 0.46956098079681396,
"learning_rate": 9.045084971874738e-05,
"loss": 1.2895,
"step": 28
},
{
"epoch": 0.00335454019664546,
"grad_norm": 0.5599877834320068,
"learning_rate": 8.940053768033609e-05,
"loss": 1.3907,
"step": 29
},
{
"epoch": 0.003470213996529786,
"grad_norm": 0.6060083508491516,
"learning_rate": 8.83022221559489e-05,
"loss": 1.398,
"step": 30
},
{
"epoch": 0.003585887796414112,
"grad_norm": 0.9557406902313232,
"learning_rate": 8.715724127386972e-05,
"loss": 1.4978,
"step": 31
},
{
"epoch": 0.0037015615962984385,
"grad_norm": 0.575370728969574,
"learning_rate": 8.596699001693255e-05,
"loss": 1.4165,
"step": 32
},
{
"epoch": 0.0038172353961827645,
"grad_norm": 0.7410251498222351,
"learning_rate": 8.473291852294987e-05,
"loss": 1.3919,
"step": 33
},
{
"epoch": 0.003932909196067091,
"grad_norm": 0.5126181244850159,
"learning_rate": 8.345653031794292e-05,
"loss": 1.3076,
"step": 34
},
{
"epoch": 0.004048582995951417,
"grad_norm": 0.5956414937973022,
"learning_rate": 8.213938048432697e-05,
"loss": 1.3227,
"step": 35
},
{
"epoch": 0.0041642567958357435,
"grad_norm": 0.6212967038154602,
"learning_rate": 8.07830737662829e-05,
"loss": 1.2832,
"step": 36
},
{
"epoch": 0.0041642567958357435,
"eval_loss": 1.3439499139785767,
"eval_runtime": 744.255,
"eval_samples_per_second": 9.782,
"eval_steps_per_second": 1.223,
"step": 36
},
{
"epoch": 0.004279930595720069,
"grad_norm": 0.5324627757072449,
"learning_rate": 7.938926261462366e-05,
"loss": 1.318,
"step": 37
},
{
"epoch": 0.004395604395604396,
"grad_norm": 0.5703225135803223,
"learning_rate": 7.795964517353735e-05,
"loss": 1.5839,
"step": 38
},
{
"epoch": 0.004511278195488722,
"grad_norm": 0.7628889679908752,
"learning_rate": 7.649596321166024e-05,
"loss": 1.517,
"step": 39
},
{
"epoch": 0.004626951995373048,
"grad_norm": 1.1482807397842407,
"learning_rate": 7.500000000000001e-05,
"loss": 1.7088,
"step": 40
},
{
"epoch": 0.004742625795257374,
"grad_norm": 0.4733085036277771,
"learning_rate": 7.347357813929454e-05,
"loss": 1.3973,
"step": 41
},
{
"epoch": 0.004858299595141701,
"grad_norm": 0.7030467391014099,
"learning_rate": 7.191855733945387e-05,
"loss": 1.3371,
"step": 42
},
{
"epoch": 0.004973973395026026,
"grad_norm": 0.46287447214126587,
"learning_rate": 7.033683215379002e-05,
"loss": 1.2644,
"step": 43
},
{
"epoch": 0.005089647194910353,
"grad_norm": 0.482746958732605,
"learning_rate": 6.873032967079561e-05,
"loss": 1.3177,
"step": 44
},
{
"epoch": 0.005205320994794679,
"grad_norm": 0.4805569350719452,
"learning_rate": 6.710100716628344e-05,
"loss": 1.4233,
"step": 45
},
{
"epoch": 0.005205320994794679,
"eval_loss": 1.320542812347412,
"eval_runtime": 742.5308,
"eval_samples_per_second": 9.804,
"eval_steps_per_second": 1.226,
"step": 45
},
{
"epoch": 0.005320994794679005,
"grad_norm": 0.6398974657058716,
"learning_rate": 6.545084971874738e-05,
"loss": 1.3809,
"step": 46
},
{
"epoch": 0.005436668594563331,
"grad_norm": 0.7115899920463562,
"learning_rate": 6.378186779084995e-05,
"loss": 1.4922,
"step": 47
},
{
"epoch": 0.005552342394447658,
"grad_norm": 0.539852499961853,
"learning_rate": 6.209609477998338e-05,
"loss": 1.1924,
"step": 48
},
{
"epoch": 0.005668016194331984,
"grad_norm": 0.47398844361305237,
"learning_rate": 6.0395584540887963e-05,
"loss": 1.2039,
"step": 49
},
{
"epoch": 0.00578368999421631,
"grad_norm": 0.6044884324073792,
"learning_rate": 5.868240888334653e-05,
"loss": 1.3744,
"step": 50
},
{
"epoch": 0.005899363794100636,
"grad_norm": 0.5805677175521851,
"learning_rate": 5.695865504800327e-05,
"loss": 1.5171,
"step": 51
},
{
"epoch": 0.006015037593984963,
"grad_norm": 0.7673519849777222,
"learning_rate": 5.522642316338268e-05,
"loss": 1.539,
"step": 52
},
{
"epoch": 0.006130711393869288,
"grad_norm": 0.6361537575721741,
"learning_rate": 5.348782368720626e-05,
"loss": 1.3872,
"step": 53
},
{
"epoch": 0.006246385193753615,
"grad_norm": 0.6733700037002563,
"learning_rate": 5.174497483512506e-05,
"loss": 1.3519,
"step": 54
},
{
"epoch": 0.006246385193753615,
"eval_loss": 1.3062171936035156,
"eval_runtime": 740.203,
"eval_samples_per_second": 9.835,
"eval_steps_per_second": 1.229,
"step": 54
},
{
"epoch": 0.006362058993637941,
"grad_norm": 0.7923408150672913,
"learning_rate": 5e-05,
"loss": 1.551,
"step": 55
},
{
"epoch": 0.006477732793522267,
"grad_norm": 0.5155686140060425,
"learning_rate": 4.825502516487497e-05,
"loss": 1.3645,
"step": 56
},
{
"epoch": 0.006593406593406593,
"grad_norm": 0.5228220820426941,
"learning_rate": 4.6512176312793736e-05,
"loss": 1.1924,
"step": 57
},
{
"epoch": 0.00670908039329092,
"grad_norm": 0.532507598400116,
"learning_rate": 4.477357683661734e-05,
"loss": 1.3401,
"step": 58
},
{
"epoch": 0.0068247541931752455,
"grad_norm": 0.502334713935852,
"learning_rate": 4.3041344951996746e-05,
"loss": 1.4,
"step": 59
},
{
"epoch": 0.006940427993059572,
"grad_norm": 0.5452474355697632,
"learning_rate": 4.131759111665349e-05,
"loss": 1.3153,
"step": 60
},
{
"epoch": 0.007056101792943898,
"grad_norm": 0.5242115259170532,
"learning_rate": 3.960441545911204e-05,
"loss": 1.2159,
"step": 61
},
{
"epoch": 0.007171775592828224,
"grad_norm": 0.5378577709197998,
"learning_rate": 3.790390522001662e-05,
"loss": 1.4083,
"step": 62
},
{
"epoch": 0.0072874493927125505,
"grad_norm": 0.6914247870445251,
"learning_rate": 3.6218132209150045e-05,
"loss": 1.4672,
"step": 63
},
{
"epoch": 0.0072874493927125505,
"eval_loss": 1.2972004413604736,
"eval_runtime": 740.0228,
"eval_samples_per_second": 9.838,
"eval_steps_per_second": 1.23,
"step": 63
},
{
"epoch": 0.007403123192596877,
"grad_norm": 0.44368940591812134,
"learning_rate": 3.4549150281252636e-05,
"loss": 1.2916,
"step": 64
},
{
"epoch": 0.007518796992481203,
"grad_norm": 0.9511928558349609,
"learning_rate": 3.289899283371657e-05,
"loss": 1.263,
"step": 65
},
{
"epoch": 0.007634470792365529,
"grad_norm": 0.5774746537208557,
"learning_rate": 3.12696703292044e-05,
"loss": 1.3052,
"step": 66
},
{
"epoch": 0.0077501445922498555,
"grad_norm": 0.46265479922294617,
"learning_rate": 2.9663167846209998e-05,
"loss": 0.9235,
"step": 67
},
{
"epoch": 0.007865818392134181,
"grad_norm": 0.41311484575271606,
"learning_rate": 2.8081442660546125e-05,
"loss": 1.2072,
"step": 68
},
{
"epoch": 0.007981492192018508,
"grad_norm": 0.7407188415527344,
"learning_rate": 2.6526421860705473e-05,
"loss": 1.4064,
"step": 69
},
{
"epoch": 0.008097165991902834,
"grad_norm": 0.6462542414665222,
"learning_rate": 2.500000000000001e-05,
"loss": 1.581,
"step": 70
},
{
"epoch": 0.00821283979178716,
"grad_norm": 0.7185479998588562,
"learning_rate": 2.350403678833976e-05,
"loss": 1.3954,
"step": 71
},
{
"epoch": 0.008328513591671487,
"grad_norm": 0.5311028957366943,
"learning_rate": 2.2040354826462668e-05,
"loss": 1.4774,
"step": 72
},
{
"epoch": 0.008328513591671487,
"eval_loss": 1.2917875051498413,
"eval_runtime": 740.0809,
"eval_samples_per_second": 9.837,
"eval_steps_per_second": 1.23,
"step": 72
},
{
"epoch": 0.008444187391555813,
"grad_norm": 0.576357364654541,
"learning_rate": 2.061073738537635e-05,
"loss": 1.3201,
"step": 73
},
{
"epoch": 0.008559861191440138,
"grad_norm": 0.4944629669189453,
"learning_rate": 1.9216926233717085e-05,
"loss": 1.2015,
"step": 74
},
{
"epoch": 0.008675534991324466,
"grad_norm": 0.5077426433563232,
"learning_rate": 1.7860619515673033e-05,
"loss": 1.0517,
"step": 75
},
{
"epoch": 0.008791208791208791,
"grad_norm": 0.7473317980766296,
"learning_rate": 1.6543469682057106e-05,
"loss": 1.4491,
"step": 76
},
{
"epoch": 0.008906882591093117,
"grad_norm": 0.5357276797294617,
"learning_rate": 1.526708147705013e-05,
"loss": 1.4757,
"step": 77
},
{
"epoch": 0.009022556390977444,
"grad_norm": 0.6168134212493896,
"learning_rate": 1.4033009983067452e-05,
"loss": 1.2008,
"step": 78
},
{
"epoch": 0.00913823019086177,
"grad_norm": 0.44112443923950195,
"learning_rate": 1.2842758726130283e-05,
"loss": 1.2055,
"step": 79
},
{
"epoch": 0.009253903990746095,
"grad_norm": 0.8096681833267212,
"learning_rate": 1.1697777844051105e-05,
"loss": 1.4336,
"step": 80
},
{
"epoch": 0.009369577790630423,
"grad_norm": 0.645187258720398,
"learning_rate": 1.0599462319663905e-05,
"loss": 1.4662,
"step": 81
},
{
"epoch": 0.009369577790630423,
"eval_loss": 1.2884397506713867,
"eval_runtime": 740.1668,
"eval_samples_per_second": 9.836,
"eval_steps_per_second": 1.229,
"step": 81
},
{
"epoch": 0.009485251590514748,
"grad_norm": 0.5497397184371948,
"learning_rate": 9.549150281252633e-06,
"loss": 1.3035,
"step": 82
},
{
"epoch": 0.009600925390399074,
"grad_norm": 0.6364536285400391,
"learning_rate": 8.548121372247918e-06,
"loss": 1.3105,
"step": 83
},
{
"epoch": 0.009716599190283401,
"grad_norm": 0.5434615015983582,
"learning_rate": 7.597595192178702e-06,
"loss": 1.637,
"step": 84
},
{
"epoch": 0.009832272990167727,
"grad_norm": 0.4715433716773987,
"learning_rate": 6.698729810778065e-06,
"loss": 1.2279,
"step": 85
},
{
"epoch": 0.009947946790052052,
"grad_norm": 0.5800156593322754,
"learning_rate": 5.852620357053651e-06,
"loss": 1.3831,
"step": 86
},
{
"epoch": 0.01006362058993638,
"grad_norm": 0.6520781517028809,
"learning_rate": 5.060297685041659e-06,
"loss": 1.2524,
"step": 87
},
{
"epoch": 0.010179294389820705,
"grad_norm": 0.5186251997947693,
"learning_rate": 4.322727117869951e-06,
"loss": 1.1758,
"step": 88
},
{
"epoch": 0.010294968189705031,
"grad_norm": 0.5043338537216187,
"learning_rate": 3.6408072716606346e-06,
"loss": 1.4261,
"step": 89
},
{
"epoch": 0.010410641989589358,
"grad_norm": 0.6483629941940308,
"learning_rate": 3.0153689607045845e-06,
"loss": 1.2404,
"step": 90
},
{
"epoch": 0.010410641989589358,
"eval_loss": 1.2871201038360596,
"eval_runtime": 739.8525,
"eval_samples_per_second": 9.84,
"eval_steps_per_second": 1.23,
"step": 90
},
{
"epoch": 0.010526315789473684,
"grad_norm": 0.5623393654823303,
"learning_rate": 2.4471741852423237e-06,
"loss": 1.2984,
"step": 91
},
{
"epoch": 0.01064198958935801,
"grad_norm": 0.49533864855766296,
"learning_rate": 1.9369152030840556e-06,
"loss": 1.3793,
"step": 92
},
{
"epoch": 0.010757663389242337,
"grad_norm": 0.6281312108039856,
"learning_rate": 1.4852136862001764e-06,
"loss": 1.2025,
"step": 93
},
{
"epoch": 0.010873337189126663,
"grad_norm": 0.5476338267326355,
"learning_rate": 1.0926199633097157e-06,
"loss": 1.338,
"step": 94
},
{
"epoch": 0.01098901098901099,
"grad_norm": 0.6564503312110901,
"learning_rate": 7.596123493895991e-07,
"loss": 1.283,
"step": 95
},
{
"epoch": 0.011104684788895315,
"grad_norm": 0.6445186138153076,
"learning_rate": 4.865965629214819e-07,
"loss": 1.4782,
"step": 96
},
{
"epoch": 0.011220358588779641,
"grad_norm": 0.6720985174179077,
"learning_rate": 2.7390523158633554e-07,
"loss": 1.194,
"step": 97
},
{
"epoch": 0.011336032388663968,
"grad_norm": 0.6252644062042236,
"learning_rate": 1.2179748700879012e-07,
"loss": 1.1304,
"step": 98
},
{
"epoch": 0.011451706188548294,
"grad_norm": 0.5523411631584167,
"learning_rate": 3.04586490452119e-08,
"loss": 1.3266,
"step": 99
},
{
"epoch": 0.011451706188548294,
"eval_loss": 1.286765217781067,
"eval_runtime": 739.8084,
"eval_samples_per_second": 9.84,
"eval_steps_per_second": 1.23,
"step": 99
},
{
"epoch": 0.01156737998843262,
"grad_norm": 0.42987060546875,
"learning_rate": 0.0,
"loss": 1.151,
"step": 100
}
],
"logging_steps": 1,
"max_steps": 100,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 25,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 6.6109411688448e+16,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}