{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.2535885167464115,
"eval_steps": 500,
"global_step": 53,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.004784688995215311,
"grad_norm": 0.14709724485874176,
"learning_rate": 2e-05,
"loss": 1.2491,
"step": 1
},
{
"epoch": 0.009569377990430622,
"grad_norm": 0.17019398510456085,
"learning_rate": 4e-05,
"loss": 1.4909,
"step": 2
},
{
"epoch": 0.014354066985645933,
"grad_norm": 0.17654545605182648,
"learning_rate": 6e-05,
"loss": 1.5363,
"step": 3
},
{
"epoch": 0.019138755980861243,
"grad_norm": 0.1930554211139679,
"learning_rate": 8e-05,
"loss": 1.6395,
"step": 4
},
{
"epoch": 0.023923444976076555,
"grad_norm": 0.2117353081703186,
"learning_rate": 0.0001,
"loss": 1.7391,
"step": 5
},
{
"epoch": 0.028708133971291867,
"grad_norm": 0.20174962282180786,
"learning_rate": 9.999407114490384e-05,
"loss": 1.6012,
"step": 6
},
{
"epoch": 0.03349282296650718,
"grad_norm": 0.21089714765548706,
"learning_rate": 9.99762859856683e-05,
"loss": 1.5543,
"step": 7
},
{
"epoch": 0.03827751196172249,
"grad_norm": 0.21190762519836426,
"learning_rate": 9.994664874011863e-05,
"loss": 1.4511,
"step": 8
},
{
"epoch": 0.0430622009569378,
"grad_norm": 0.22505144774913788,
"learning_rate": 9.990516643685222e-05,
"loss": 1.4619,
"step": 9
},
{
"epoch": 0.04784688995215311,
"grad_norm": 0.23618264496326447,
"learning_rate": 9.985184891357164e-05,
"loss": 1.5113,
"step": 10
},
{
"epoch": 0.05263157894736842,
"grad_norm": 0.23488004505634308,
"learning_rate": 9.978670881475172e-05,
"loss": 1.502,
"step": 11
},
{
"epoch": 0.05741626794258373,
"grad_norm": 0.24843598902225494,
"learning_rate": 9.970976158864073e-05,
"loss": 1.5901,
"step": 12
},
{
"epoch": 0.06220095693779904,
"grad_norm": 0.25334247946739197,
"learning_rate": 9.96210254835968e-05,
"loss": 1.4698,
"step": 13
},
{
"epoch": 0.06698564593301436,
"grad_norm": 0.2642665505409241,
"learning_rate": 9.952052154376026e-05,
"loss": 1.5525,
"step": 14
},
{
"epoch": 0.07177033492822966,
"grad_norm": 0.2637348175048828,
"learning_rate": 9.940827360406297e-05,
"loss": 1.4046,
"step": 15
},
{
"epoch": 0.07655502392344497,
"grad_norm": 0.28210246562957764,
"learning_rate": 9.928430828457572e-05,
"loss": 1.3994,
"step": 16
},
{
"epoch": 0.08133971291866028,
"grad_norm": 0.3060890734195709,
"learning_rate": 9.91486549841951e-05,
"loss": 1.6673,
"step": 17
},
{
"epoch": 0.0861244019138756,
"grad_norm": 0.2893538475036621,
"learning_rate": 9.90013458736716e-05,
"loss": 1.5797,
"step": 18
},
{
"epoch": 0.09090909090909091,
"grad_norm": 0.29651427268981934,
"learning_rate": 9.884241588798005e-05,
"loss": 1.4553,
"step": 19
},
{
"epoch": 0.09569377990430622,
"grad_norm": 0.311667263507843,
"learning_rate": 9.867190271803465e-05,
"loss": 1.4259,
"step": 20
},
{
"epoch": 0.10047846889952153,
"grad_norm": 0.32705116271972656,
"learning_rate": 9.848984680175049e-05,
"loss": 1.5493,
"step": 21
},
{
"epoch": 0.10526315789473684,
"grad_norm": 0.33383113145828247,
"learning_rate": 9.829629131445342e-05,
"loss": 1.5034,
"step": 22
},
{
"epoch": 0.11004784688995216,
"grad_norm": 0.3618900179862976,
"learning_rate": 9.809128215864097e-05,
"loss": 1.7059,
"step": 23
},
{
"epoch": 0.11483253588516747,
"grad_norm": 0.36600548028945923,
"learning_rate": 9.787486795309621e-05,
"loss": 1.7853,
"step": 24
},
{
"epoch": 0.11961722488038277,
"grad_norm": 0.3755670189857483,
"learning_rate": 9.764710002135784e-05,
"loss": 1.5303,
"step": 25
},
{
"epoch": 0.12440191387559808,
"grad_norm": 0.3820343613624573,
"learning_rate": 9.74080323795483e-05,
"loss": 1.687,
"step": 26
},
{
"epoch": 0.1291866028708134,
"grad_norm": 0.3974711000919342,
"learning_rate": 9.715772172356388e-05,
"loss": 1.7679,
"step": 27
},
{
"epoch": 0.1339712918660287,
"grad_norm": 0.4264979064464569,
"learning_rate": 9.689622741562892e-05,
"loss": 1.6876,
"step": 28
},
{
"epoch": 0.13875598086124402,
"grad_norm": 0.4498402178287506,
"learning_rate": 9.662361147021779e-05,
"loss": 1.7944,
"step": 29
},
{
"epoch": 0.14354066985645933,
"grad_norm": 0.4634323716163635,
"learning_rate": 9.633993853934803e-05,
"loss": 2.0416,
"step": 30
},
{
"epoch": 0.14832535885167464,
"grad_norm": 0.467655211687088,
"learning_rate": 9.60452758972477e-05,
"loss": 1.9583,
"step": 31
},
{
"epoch": 0.15311004784688995,
"grad_norm": 0.4708121716976166,
"learning_rate": 9.573969342440106e-05,
"loss": 1.7343,
"step": 32
},
{
"epoch": 0.15789473684210525,
"grad_norm": 0.49835681915283203,
"learning_rate": 9.542326359097619e-05,
"loss": 1.7304,
"step": 33
},
{
"epoch": 0.16267942583732056,
"grad_norm": 0.4945959150791168,
"learning_rate": 9.509606143963832e-05,
"loss": 1.8515,
"step": 34
},
{
"epoch": 0.1674641148325359,
"grad_norm": 0.532650887966156,
"learning_rate": 9.475816456775313e-05,
"loss": 1.955,
"step": 35
},
{
"epoch": 0.1722488038277512,
"grad_norm": 0.5782840847969055,
"learning_rate": 9.440965310898424e-05,
"loss": 1.9727,
"step": 36
},
{
"epoch": 0.17703349282296652,
"grad_norm": 0.6085786819458008,
"learning_rate": 9.405060971428923e-05,
"loss": 1.9417,
"step": 37
},
{
"epoch": 0.18181818181818182,
"grad_norm": 0.5588628053665161,
"learning_rate": 9.368111953231848e-05,
"loss": 1.9105,
"step": 38
},
{
"epoch": 0.18660287081339713,
"grad_norm": 0.6089487671852112,
"learning_rate": 9.330127018922194e-05,
"loss": 1.9613,
"step": 39
},
{
"epoch": 0.19138755980861244,
"grad_norm": 0.6214700937271118,
"learning_rate": 9.291115176786814e-05,
"loss": 2.0116,
"step": 40
},
{
"epoch": 0.19617224880382775,
"grad_norm": 0.6787968277931213,
"learning_rate": 9.251085678648072e-05,
"loss": 1.9211,
"step": 41
},
{
"epoch": 0.20095693779904306,
"grad_norm": 0.8012039065361023,
"learning_rate": 9.210048017669726e-05,
"loss": 2.0415,
"step": 42
},
{
"epoch": 0.20574162679425836,
"grad_norm": 0.7107148766517639,
"learning_rate": 9.168011926105598e-05,
"loss": 1.9067,
"step": 43
},
{
"epoch": 0.21052631578947367,
"grad_norm": 0.8137962818145752,
"learning_rate": 9.124987372991511e-05,
"loss": 1.9049,
"step": 44
},
{
"epoch": 0.215311004784689,
"grad_norm": 0.8808148503303528,
"learning_rate": 9.08098456178111e-05,
"loss": 2.0898,
"step": 45
},
{
"epoch": 0.22009569377990432,
"grad_norm": 1.1123590469360352,
"learning_rate": 9.036013927926048e-05,
"loss": 2.2945,
"step": 46
},
{
"epoch": 0.22488038277511962,
"grad_norm": 1.2723033428192139,
"learning_rate": 8.9900861364012e-05,
"loss": 2.0564,
"step": 47
},
{
"epoch": 0.22966507177033493,
"grad_norm": 1.601427674293518,
"learning_rate": 8.943212079175391e-05,
"loss": 1.8703,
"step": 48
},
{
"epoch": 0.23444976076555024,
"grad_norm": 2.2030014991760254,
"learning_rate": 8.895402872628352e-05,
"loss": 2.0263,
"step": 49
},
{
"epoch": 0.23923444976076555,
"grad_norm": 3.696323871612549,
"learning_rate": 8.846669854914396e-05,
"loss": 2.6416,
"step": 50
},
{
"epoch": 0.24401913875598086,
"grad_norm": 0.3211597800254822,
"learning_rate": 8.797024583273537e-05,
"loss": 1.5625,
"step": 51
},
{
"epoch": 0.24880382775119617,
"grad_norm": 0.36418232321739197,
"learning_rate": 8.746478831290648e-05,
"loss": 1.6543,
"step": 52
},
{
"epoch": 0.2535885167464115,
"grad_norm": 0.4147177040576935,
"learning_rate": 8.695044586103296e-05,
"loss": 1.5142,
"step": 53
}
],
"logging_steps": 1,
"max_steps": 209,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 53,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 7102131861454848.0,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}
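A minimal sketch (not part of the checkpoint itself) of how a state file like the one above can be read back and summarized; the path "checkpoint-53/trainer_state.json" is an assumption based on the usual 🤗 Trainer checkpoint layout, and any other name would work the same way.

# Assumed location of the file shown above; adjust as needed.
import json

with open("checkpoint-53/trainer_state.json") as f:
    state = json.load(f)

# Pull the logged training loss out of log_history (each entry here logs a loss,
# since logging_steps is 1 and no eval entries are present).
steps = [e["step"] for e in state["log_history"] if "loss" in e]
losses = [e["loss"] for e in state["log_history"] if "loss" in e]

print(f"global_step: {state['global_step']} / max_steps: {state['max_steps']}")
print(f"first logged loss: {losses[0]:.4f} (step {steps[0]})")
print(f"last logged loss:  {losses[-1]:.4f} (step {steps[-1]})")

Run against this checkpoint, the sketch would report global_step 53 of 209 and the loss values logged at steps 1 and 53.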