Training in progress, step 100, checkpoint
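The JSON below appears to be the trainer_state.json written alongside a Hugging Face Trainer checkpoint: log_history holds one entry per training step (loss, grad_norm, learning_rate) plus a periodic eval entry every eval_steps=9 steps. A minimal sketch for inspecting it, assuming the file has been downloaded locally as trainer_state.json and that matplotlib is installed (both are assumptions, not implied by the checkpoint itself):

# Sketch: load this trainer_state.json and plot the logged loss curves.
import json

import matplotlib.pyplot as plt

with open("trainer_state.json") as f:
    state = json.load(f)

train_steps, train_loss = [], []
eval_steps, eval_loss = [], []
for entry in state["log_history"]:
    if "loss" in entry:          # per-step training log
        train_steps.append(entry["step"])
        train_loss.append(entry["loss"])
    elif "eval_loss" in entry:   # evaluation log (every eval_steps steps)
        eval_steps.append(entry["step"])
        eval_loss.append(entry["eval_loss"])

plt.plot(train_steps, train_loss, label="train loss")
plt.plot(eval_steps, eval_loss, marker="o", label="eval loss")
plt.xlabel("step")
plt.ylabel("loss")
plt.legend()
plt.savefig("loss_curves.png")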
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.1029336078229542,
"eval_steps": 9,
"global_step": 100,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.001029336078229542,
"grad_norm": 0.23270708322525024,
"learning_rate": 1e-05,
"loss": 4.8778,
"step": 1
},
{
"epoch": 0.001029336078229542,
"eval_loss": 2.449375629425049,
"eval_runtime": 226.7141,
"eval_samples_per_second": 3.608,
"eval_steps_per_second": 0.454,
"step": 1
},
{
"epoch": 0.002058672156459084,
"grad_norm": 0.2403886765241623,
"learning_rate": 2e-05,
"loss": 4.6643,
"step": 2
},
{
"epoch": 0.003088008234688626,
"grad_norm": 0.21336641907691956,
"learning_rate": 3e-05,
"loss": 4.8765,
"step": 3
},
{
"epoch": 0.004117344312918168,
"grad_norm": 0.24259218573570251,
"learning_rate": 4e-05,
"loss": 5.0207,
"step": 4
},
{
"epoch": 0.0051466803911477095,
"grad_norm": 0.2876662611961365,
"learning_rate": 5e-05,
"loss": 5.0594,
"step": 5
},
{
"epoch": 0.006176016469377252,
"grad_norm": 0.3250068724155426,
"learning_rate": 6e-05,
"loss": 4.8713,
"step": 6
},
{
"epoch": 0.007205352547606794,
"grad_norm": 0.2040358930826187,
"learning_rate": 7e-05,
"loss": 4.711,
"step": 7
},
{
"epoch": 0.008234688625836336,
"grad_norm": 0.34615465998649597,
"learning_rate": 8e-05,
"loss": 4.8104,
"step": 8
},
{
"epoch": 0.009264024704065878,
"grad_norm": 0.3927066922187805,
"learning_rate": 9e-05,
"loss": 5.0474,
"step": 9
},
{
"epoch": 0.009264024704065878,
"eval_loss": 2.4394149780273438,
"eval_runtime": 226.8989,
"eval_samples_per_second": 3.605,
"eval_steps_per_second": 0.454,
"step": 9
},
{
"epoch": 0.010293360782295419,
"grad_norm": 0.2759287655353546,
"learning_rate": 0.0001,
"loss": 4.8251,
"step": 10
},
{
"epoch": 0.011322696860524962,
"grad_norm": 0.35022222995758057,
"learning_rate": 9.99695413509548e-05,
"loss": 5.2115,
"step": 11
},
{
"epoch": 0.012352032938754504,
"grad_norm": 0.2925807535648346,
"learning_rate": 9.987820251299122e-05,
"loss": 4.8341,
"step": 12
},
{
"epoch": 0.013381369016984045,
"grad_norm": 0.27655553817749023,
"learning_rate": 9.972609476841367e-05,
"loss": 4.5449,
"step": 13
},
{
"epoch": 0.014410705095213588,
"grad_norm": 0.3062187135219574,
"learning_rate": 9.951340343707852e-05,
"loss": 4.8675,
"step": 14
},
{
"epoch": 0.01544004117344313,
"grad_norm": 0.3353738486766815,
"learning_rate": 9.924038765061042e-05,
"loss": 4.7423,
"step": 15
},
{
"epoch": 0.016469377251672673,
"grad_norm": 0.3210652470588684,
"learning_rate": 9.890738003669029e-05,
"loss": 5.0688,
"step": 16
},
{
"epoch": 0.017498713329902212,
"grad_norm": 0.36584898829460144,
"learning_rate": 9.851478631379982e-05,
"loss": 4.8991,
"step": 17
},
{
"epoch": 0.018528049408131755,
"grad_norm": 0.31348302960395813,
"learning_rate": 9.806308479691595e-05,
"loss": 4.7511,
"step": 18
},
{
"epoch": 0.018528049408131755,
"eval_loss": 2.4040560722351074,
"eval_runtime": 226.9454,
"eval_samples_per_second": 3.604,
"eval_steps_per_second": 0.454,
"step": 18
},
{
"epoch": 0.0195573854863613,
"grad_norm": 0.31781822443008423,
"learning_rate": 9.755282581475769e-05,
"loss": 4.818,
"step": 19
},
{
"epoch": 0.020586721564590838,
"grad_norm": 0.27385666966438293,
"learning_rate": 9.698463103929542e-05,
"loss": 4.8567,
"step": 20
},
{
"epoch": 0.02161605764282038,
"grad_norm": 0.3134090304374695,
"learning_rate": 9.635919272833938e-05,
"loss": 4.8306,
"step": 21
},
{
"epoch": 0.022645393721049924,
"grad_norm": 0.32903534173965454,
"learning_rate": 9.567727288213005e-05,
"loss": 4.6411,
"step": 22
},
{
"epoch": 0.023674729799279464,
"grad_norm": 0.5250512361526489,
"learning_rate": 9.493970231495835e-05,
"loss": 4.9593,
"step": 23
},
{
"epoch": 0.024704065877509007,
"grad_norm": 0.31589871644973755,
"learning_rate": 9.414737964294636e-05,
"loss": 4.7378,
"step": 24
},
{
"epoch": 0.02573340195573855,
"grad_norm": 0.2427978217601776,
"learning_rate": 9.330127018922194e-05,
"loss": 4.471,
"step": 25
},
{
"epoch": 0.02676273803396809,
"grad_norm": 0.3135339617729187,
"learning_rate": 9.24024048078213e-05,
"loss": 4.7955,
"step": 26
},
{
"epoch": 0.027792074112197633,
"grad_norm": 0.3900616466999054,
"learning_rate": 9.145187862775209e-05,
"loss": 5.0081,
"step": 27
},
{
"epoch": 0.027792074112197633,
"eval_loss": 2.386911153793335,
"eval_runtime": 227.018,
"eval_samples_per_second": 3.603,
"eval_steps_per_second": 0.454,
"step": 27
},
{
"epoch": 0.028821410190427176,
"grad_norm": 0.26265034079551697,
"learning_rate": 9.045084971874738e-05,
"loss": 4.799,
"step": 28
},
{
"epoch": 0.029850746268656716,
"grad_norm": 0.30579325556755066,
"learning_rate": 8.940053768033609e-05,
"loss": 4.5961,
"step": 29
},
{
"epoch": 0.03088008234688626,
"grad_norm": 0.42424359917640686,
"learning_rate": 8.83022221559489e-05,
"loss": 4.8045,
"step": 30
},
{
"epoch": 0.0319094184251158,
"grad_norm": 0.3576556444168091,
"learning_rate": 8.715724127386972e-05,
"loss": 4.8802,
"step": 31
},
{
"epoch": 0.032938754503345345,
"grad_norm": 0.3096839189529419,
"learning_rate": 8.596699001693255e-05,
"loss": 4.7263,
"step": 32
},
{
"epoch": 0.033968090581574885,
"grad_norm": 0.425644189119339,
"learning_rate": 8.473291852294987e-05,
"loss": 4.6257,
"step": 33
},
{
"epoch": 0.034997426659804425,
"grad_norm": 0.3519304394721985,
"learning_rate": 8.345653031794292e-05,
"loss": 4.7576,
"step": 34
},
{
"epoch": 0.03602676273803397,
"grad_norm": 0.38826555013656616,
"learning_rate": 8.213938048432697e-05,
"loss": 4.7818,
"step": 35
},
{
"epoch": 0.03705609881626351,
"grad_norm": 0.33783453702926636,
"learning_rate": 8.07830737662829e-05,
"loss": 4.7301,
"step": 36
},
{
"epoch": 0.03705609881626351,
"eval_loss": 2.3819499015808105,
"eval_runtime": 226.8579,
"eval_samples_per_second": 3.606,
"eval_steps_per_second": 0.454,
"step": 36
},
{
"epoch": 0.03808543489449305,
"grad_norm": 0.30728089809417725,
"learning_rate": 7.938926261462366e-05,
"loss": 4.6269,
"step": 37
},
{
"epoch": 0.0391147709727226,
"grad_norm": 0.26561239361763,
"learning_rate": 7.795964517353735e-05,
"loss": 4.8023,
"step": 38
},
{
"epoch": 0.04014410705095214,
"grad_norm": 0.333359956741333,
"learning_rate": 7.649596321166024e-05,
"loss": 4.6354,
"step": 39
},
{
"epoch": 0.041173443129181676,
"grad_norm": 0.280263215303421,
"learning_rate": 7.500000000000001e-05,
"loss": 4.9582,
"step": 40
},
{
"epoch": 0.04220277920741122,
"grad_norm": 0.3637993633747101,
"learning_rate": 7.347357813929454e-05,
"loss": 5.0495,
"step": 41
},
{
"epoch": 0.04323211528564076,
"grad_norm": 0.3035355806350708,
"learning_rate": 7.191855733945387e-05,
"loss": 4.8531,
"step": 42
},
{
"epoch": 0.0442614513638703,
"grad_norm": 0.2905413806438446,
"learning_rate": 7.033683215379002e-05,
"loss": 4.8934,
"step": 43
},
{
"epoch": 0.04529078744209985,
"grad_norm": 0.2680681049823761,
"learning_rate": 6.873032967079561e-05,
"loss": 4.8269,
"step": 44
},
{
"epoch": 0.04632012352032939,
"grad_norm": 0.2697928547859192,
"learning_rate": 6.710100716628344e-05,
"loss": 4.5012,
"step": 45
},
{
"epoch": 0.04632012352032939,
"eval_loss": 2.379274845123291,
"eval_runtime": 226.8561,
"eval_samples_per_second": 3.606,
"eval_steps_per_second": 0.454,
"step": 45
},
{
"epoch": 0.04734945959855893,
"grad_norm": 0.34642162919044495,
"learning_rate": 6.545084971874738e-05,
"loss": 4.8511,
"step": 46
},
{
"epoch": 0.048378795676788475,
"grad_norm": 0.3296074867248535,
"learning_rate": 6.378186779084995e-05,
"loss": 4.8204,
"step": 47
},
{
"epoch": 0.049408131755018014,
"grad_norm": 0.27953025698661804,
"learning_rate": 6.209609477998338e-05,
"loss": 5.0973,
"step": 48
},
{
"epoch": 0.050437467833247554,
"grad_norm": 0.3788706660270691,
"learning_rate": 6.0395584540887963e-05,
"loss": 5.0161,
"step": 49
},
{
"epoch": 0.0514668039114771,
"grad_norm": 0.32087740302085876,
"learning_rate": 5.868240888334653e-05,
"loss": 4.8925,
"step": 50
},
{
"epoch": 0.05249613998970664,
"grad_norm": 0.2698979675769806,
"learning_rate": 5.695865504800327e-05,
"loss": 4.6946,
"step": 51
},
{
"epoch": 0.05352547606793618,
"grad_norm": 0.27260327339172363,
"learning_rate": 5.522642316338268e-05,
"loss": 4.601,
"step": 52
},
{
"epoch": 0.054554812146165726,
"grad_norm": 0.3249811828136444,
"learning_rate": 5.348782368720626e-05,
"loss": 4.6817,
"step": 53
},
{
"epoch": 0.055584148224395266,
"grad_norm": 0.4115317463874817,
"learning_rate": 5.174497483512506e-05,
"loss": 4.8402,
"step": 54
},
{
"epoch": 0.055584148224395266,
"eval_loss": 2.3773033618927,
"eval_runtime": 227.1671,
"eval_samples_per_second": 3.601,
"eval_steps_per_second": 0.453,
"step": 54
},
{
"epoch": 0.056613484302624806,
"grad_norm": 0.31797289848327637,
"learning_rate": 5e-05,
"loss": 4.866,
"step": 55
},
{
"epoch": 0.05764282038085435,
"grad_norm": 0.32362496852874756,
"learning_rate": 4.825502516487497e-05,
"loss": 4.5958,
"step": 56
},
{
"epoch": 0.05867215645908389,
"grad_norm": 0.4479215741157532,
"learning_rate": 4.6512176312793736e-05,
"loss": 4.8453,
"step": 57
},
{
"epoch": 0.05970149253731343,
"grad_norm": 0.25469648838043213,
"learning_rate": 4.477357683661734e-05,
"loss": 4.6875,
"step": 58
},
{
"epoch": 0.06073082861554297,
"grad_norm": 0.2602284550666809,
"learning_rate": 4.3041344951996746e-05,
"loss": 4.6748,
"step": 59
},
{
"epoch": 0.06176016469377252,
"grad_norm": 0.309254914522171,
"learning_rate": 4.131759111665349e-05,
"loss": 4.8994,
"step": 60
},
{
"epoch": 0.06278950077200206,
"grad_norm": 0.352872759103775,
"learning_rate": 3.960441545911204e-05,
"loss": 4.8684,
"step": 61
},
{
"epoch": 0.0638188368502316,
"grad_norm": 0.33318430185317993,
"learning_rate": 3.790390522001662e-05,
"loss": 4.6973,
"step": 62
},
{
"epoch": 0.06484817292846114,
"grad_norm": 0.4939478039741516,
"learning_rate": 3.6218132209150045e-05,
"loss": 4.7247,
"step": 63
},
{
"epoch": 0.06484817292846114,
"eval_loss": 2.3757121562957764,
"eval_runtime": 226.8372,
"eval_samples_per_second": 3.606,
"eval_steps_per_second": 0.454,
"step": 63
},
{
"epoch": 0.06587750900669069,
"grad_norm": 0.27650734782218933,
"learning_rate": 3.4549150281252636e-05,
"loss": 4.8668,
"step": 64
},
{
"epoch": 0.06690684508492023,
"grad_norm": 0.2447676956653595,
"learning_rate": 3.289899283371657e-05,
"loss": 4.6292,
"step": 65
},
{
"epoch": 0.06793618116314977,
"grad_norm": 0.40559127926826477,
"learning_rate": 3.12696703292044e-05,
"loss": 4.6037,
"step": 66
},
{
"epoch": 0.06896551724137931,
"grad_norm": 0.32476454973220825,
"learning_rate": 2.9663167846209998e-05,
"loss": 4.8826,
"step": 67
},
{
"epoch": 0.06999485331960885,
"grad_norm": 0.25857582688331604,
"learning_rate": 2.8081442660546125e-05,
"loss": 4.8219,
"step": 68
},
{
"epoch": 0.07102418939783839,
"grad_norm": 0.3146672546863556,
"learning_rate": 2.6526421860705473e-05,
"loss": 4.9364,
"step": 69
},
{
"epoch": 0.07205352547606794,
"grad_norm": 0.3049579858779907,
"learning_rate": 2.500000000000001e-05,
"loss": 4.7155,
"step": 70
},
{
"epoch": 0.07308286155429748,
"grad_norm": 0.27620747685432434,
"learning_rate": 2.350403678833976e-05,
"loss": 4.8771,
"step": 71
},
{
"epoch": 0.07411219763252702,
"grad_norm": 0.33244985342025757,
"learning_rate": 2.2040354826462668e-05,
"loss": 4.8509,
"step": 72
},
{
"epoch": 0.07411219763252702,
"eval_loss": 2.3749494552612305,
"eval_runtime": 226.9106,
"eval_samples_per_second": 3.605,
"eval_steps_per_second": 0.454,
"step": 72
},
{
"epoch": 0.07514153371075656,
"grad_norm": 0.2920321524143219,
"learning_rate": 2.061073738537635e-05,
"loss": 4.8178,
"step": 73
},
{
"epoch": 0.0761708697889861,
"grad_norm": 0.2564603388309479,
"learning_rate": 1.9216926233717085e-05,
"loss": 4.6999,
"step": 74
},
{
"epoch": 0.07720020586721564,
"grad_norm": 0.3616389036178589,
"learning_rate": 1.7860619515673033e-05,
"loss": 4.5835,
"step": 75
},
{
"epoch": 0.0782295419454452,
"grad_norm": 0.3594980537891388,
"learning_rate": 1.6543469682057106e-05,
"loss": 4.6926,
"step": 76
},
{
"epoch": 0.07925887802367473,
"grad_norm": 0.30547279119491577,
"learning_rate": 1.526708147705013e-05,
"loss": 4.7796,
"step": 77
},
{
"epoch": 0.08028821410190427,
"grad_norm": 0.35963600873947144,
"learning_rate": 1.4033009983067452e-05,
"loss": 4.5412,
"step": 78
},
{
"epoch": 0.08131755018013381,
"grad_norm": 0.26262137293815613,
"learning_rate": 1.2842758726130283e-05,
"loss": 4.5385,
"step": 79
},
{
"epoch": 0.08234688625836335,
"grad_norm": 0.3326081931591034,
"learning_rate": 1.1697777844051105e-05,
"loss": 4.7268,
"step": 80
},
{
"epoch": 0.08337622233659289,
"grad_norm": 0.29198020696640015,
"learning_rate": 1.0599462319663905e-05,
"loss": 4.7795,
"step": 81
},
{
"epoch": 0.08337622233659289,
"eval_loss": 2.3745617866516113,
"eval_runtime": 226.9222,
"eval_samples_per_second": 3.605,
"eval_steps_per_second": 0.454,
"step": 81
},
{
"epoch": 0.08440555841482245,
"grad_norm": 0.36414095759391785,
"learning_rate": 9.549150281252633e-06,
"loss": 4.7554,
"step": 82
},
{
"epoch": 0.08543489449305199,
"grad_norm": 0.3461471199989319,
"learning_rate": 8.548121372247918e-06,
"loss": 4.788,
"step": 83
},
{
"epoch": 0.08646423057128153,
"grad_norm": 0.36194470524787903,
"learning_rate": 7.597595192178702e-06,
"loss": 5.0131,
"step": 84
},
{
"epoch": 0.08749356664951106,
"grad_norm": 0.3344295918941498,
"learning_rate": 6.698729810778065e-06,
"loss": 4.9249,
"step": 85
},
{
"epoch": 0.0885229027277406,
"grad_norm": 0.29325899481773376,
"learning_rate": 5.852620357053651e-06,
"loss": 4.4008,
"step": 86
},
{
"epoch": 0.08955223880597014,
"grad_norm": 0.31407222151756287,
"learning_rate": 5.060297685041659e-06,
"loss": 4.6769,
"step": 87
},
{
"epoch": 0.0905815748841997,
"grad_norm": 0.44068512320518494,
"learning_rate": 4.322727117869951e-06,
"loss": 4.9277,
"step": 88
},
{
"epoch": 0.09161091096242924,
"grad_norm": 0.3176312744617462,
"learning_rate": 3.6408072716606346e-06,
"loss": 4.8676,
"step": 89
},
{
"epoch": 0.09264024704065878,
"grad_norm": 0.3227611780166626,
"learning_rate": 3.0153689607045845e-06,
"loss": 4.7992,
"step": 90
},
{
"epoch": 0.09264024704065878,
"eval_loss": 2.3742470741271973,
"eval_runtime": 226.8864,
"eval_samples_per_second": 3.605,
"eval_steps_per_second": 0.454,
"step": 90
},
{
"epoch": 0.09366958311888832,
"grad_norm": 0.3470034897327423,
"learning_rate": 2.4471741852423237e-06,
"loss": 4.8775,
"step": 91
},
{
"epoch": 0.09469891919711786,
"grad_norm": 0.3345381021499634,
"learning_rate": 1.9369152030840556e-06,
"loss": 4.6053,
"step": 92
},
{
"epoch": 0.0957282552753474,
"grad_norm": 0.2730475962162018,
"learning_rate": 1.4852136862001764e-06,
"loss": 4.6352,
"step": 93
},
{
"epoch": 0.09675759135357695,
"grad_norm": 0.3089372515678406,
"learning_rate": 1.0926199633097157e-06,
"loss": 4.7786,
"step": 94
},
{
"epoch": 0.09778692743180649,
"grad_norm": 0.3256579339504242,
"learning_rate": 7.596123493895991e-07,
"loss": 4.6533,
"step": 95
},
{
"epoch": 0.09881626351003603,
"grad_norm": 0.4511566460132599,
"learning_rate": 4.865965629214819e-07,
"loss": 4.9058,
"step": 96
},
{
"epoch": 0.09984559958826557,
"grad_norm": 0.2858843207359314,
"learning_rate": 2.7390523158633554e-07,
"loss": 4.4558,
"step": 97
},
{
"epoch": 0.10087493566649511,
"grad_norm": 0.38588446378707886,
"learning_rate": 1.2179748700879012e-07,
"loss": 4.9628,
"step": 98
},
{
"epoch": 0.10190427174472465,
"grad_norm": 0.29587432742118835,
"learning_rate": 3.04586490452119e-08,
"loss": 4.683,
"step": 99
},
{
"epoch": 0.10190427174472465,
"eval_loss": 2.3742330074310303,
"eval_runtime": 226.8851,
"eval_samples_per_second": 3.605,
"eval_steps_per_second": 0.454,
"step": 99
},
{
"epoch": 0.1029336078229542,
"grad_norm": 0.36477580666542053,
"learning_rate": 0.0,
"loss": 4.9429,
"step": 100
}
],
"logging_steps": 1,
"max_steps": 100,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 25,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 1.1925729759461376e+17,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}