Training in progress, step 25, checkpoint (commit cf9d1a8)
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.052328623757195186,
"eval_steps": 3,
"global_step": 25,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0020931449502878076,
"grad_norm": 2.9366672039031982,
"learning_rate": 2e-05,
"loss": 5.1134,
"step": 1
},
{
"epoch": 0.0020931449502878076,
"eval_loss": 1.3874133825302124,
"eval_runtime": 28.3528,
"eval_samples_per_second": 7.125,
"eval_steps_per_second": 3.562,
"step": 1
},
{
"epoch": 0.004186289900575615,
"grad_norm": 3.305058479309082,
"learning_rate": 4e-05,
"loss": 5.432,
"step": 2
},
{
"epoch": 0.006279434850863423,
"grad_norm": 2.9360413551330566,
"learning_rate": 6e-05,
"loss": 5.7705,
"step": 3
},
{
"epoch": 0.006279434850863423,
"eval_loss": 1.3774594068527222,
"eval_runtime": 28.6485,
"eval_samples_per_second": 7.051,
"eval_steps_per_second": 3.525,
"step": 3
},
{
"epoch": 0.00837257980115123,
"grad_norm": 3.4692842960357666,
"learning_rate": 8e-05,
"loss": 6.4354,
"step": 4
},
{
"epoch": 0.010465724751439037,
"grad_norm": 3.079684019088745,
"learning_rate": 0.0001,
"loss": 4.955,
"step": 5
},
{
"epoch": 0.012558869701726845,
"grad_norm": 3.631309986114502,
"learning_rate": 9.938441702975689e-05,
"loss": 4.4486,
"step": 6
},
{
"epoch": 0.012558869701726845,
"eval_loss": 1.2356057167053223,
"eval_runtime": 28.6368,
"eval_samples_per_second": 7.054,
"eval_steps_per_second": 3.527,
"step": 6
},
{
"epoch": 0.014652014652014652,
"grad_norm": 4.206442356109619,
"learning_rate": 9.755282581475769e-05,
"loss": 4.5265,
"step": 7
},
{
"epoch": 0.01674515960230246,
"grad_norm": 2.819859504699707,
"learning_rate": 9.45503262094184e-05,
"loss": 3.6465,
"step": 8
},
{
"epoch": 0.018838304552590265,
"grad_norm": 3.166618824005127,
"learning_rate": 9.045084971874738e-05,
"loss": 3.6821,
"step": 9
},
{
"epoch": 0.018838304552590265,
"eval_loss": 0.971888542175293,
"eval_runtime": 28.6342,
"eval_samples_per_second": 7.054,
"eval_steps_per_second": 3.527,
"step": 9
},
{
"epoch": 0.020931449502878074,
"grad_norm": 3.5012850761413574,
"learning_rate": 8.535533905932738e-05,
"loss": 3.6575,
"step": 10
},
{
"epoch": 0.023024594453165882,
"grad_norm": 5.032680988311768,
"learning_rate": 7.938926261462366e-05,
"loss": 4.096,
"step": 11
},
{
"epoch": 0.02511773940345369,
"grad_norm": 3.810659646987915,
"learning_rate": 7.269952498697734e-05,
"loss": 2.9131,
"step": 12
},
{
"epoch": 0.02511773940345369,
"eval_loss": 0.7286272644996643,
"eval_runtime": 28.6298,
"eval_samples_per_second": 7.056,
"eval_steps_per_second": 3.528,
"step": 12
},
{
"epoch": 0.027210884353741496,
"grad_norm": 2.648770570755005,
"learning_rate": 6.545084971874738e-05,
"loss": 2.9433,
"step": 13
},
{
"epoch": 0.029304029304029304,
"grad_norm": 2.9842631816864014,
"learning_rate": 5.782172325201155e-05,
"loss": 2.2262,
"step": 14
},
{
"epoch": 0.03139717425431711,
"grad_norm": 3.5351529121398926,
"learning_rate": 5e-05,
"loss": 2.8287,
"step": 15
},
{
"epoch": 0.03139717425431711,
"eval_loss": 0.5874171257019043,
"eval_runtime": 28.604,
"eval_samples_per_second": 7.062,
"eval_steps_per_second": 3.531,
"step": 15
},
{
"epoch": 0.03349031920460492,
"grad_norm": 2.774341344833374,
"learning_rate": 4.2178276747988446e-05,
"loss": 2.1482,
"step": 16
},
{
"epoch": 0.035583464154892726,
"grad_norm": 2.640618324279785,
"learning_rate": 3.4549150281252636e-05,
"loss": 2.0841,
"step": 17
},
{
"epoch": 0.03767660910518053,
"grad_norm": 3.8104114532470703,
"learning_rate": 2.7300475013022663e-05,
"loss": 2.4072,
"step": 18
},
{
"epoch": 0.03767660910518053,
"eval_loss": 0.510881245136261,
"eval_runtime": 28.714,
"eval_samples_per_second": 7.035,
"eval_steps_per_second": 3.517,
"step": 18
},
{
"epoch": 0.03976975405546834,
"grad_norm": 2.4819905757904053,
"learning_rate": 2.061073738537635e-05,
"loss": 1.8718,
"step": 19
},
{
"epoch": 0.04186289900575615,
"grad_norm": 3.231020212173462,
"learning_rate": 1.4644660940672627e-05,
"loss": 2.3639,
"step": 20
},
{
"epoch": 0.04395604395604396,
"grad_norm": 2.7575583457946777,
"learning_rate": 9.549150281252633e-06,
"loss": 1.8185,
"step": 21
},
{
"epoch": 0.04395604395604396,
"eval_loss": 0.4724537432193756,
"eval_runtime": 28.7756,
"eval_samples_per_second": 7.02,
"eval_steps_per_second": 3.51,
"step": 21
},
{
"epoch": 0.046049188906331764,
"grad_norm": 2.319027900695801,
"learning_rate": 5.449673790581611e-06,
"loss": 1.7463,
"step": 22
},
{
"epoch": 0.04814233385661957,
"grad_norm": 3.020266056060791,
"learning_rate": 2.4471741852423237e-06,
"loss": 1.9595,
"step": 23
},
{
"epoch": 0.05023547880690738,
"grad_norm": 2.4685897827148438,
"learning_rate": 6.15582970243117e-07,
"loss": 1.7662,
"step": 24
},
{
"epoch": 0.05023547880690738,
"eval_loss": 0.4614010155200958,
"eval_runtime": 28.736,
"eval_samples_per_second": 7.03,
"eval_steps_per_second": 3.515,
"step": 24
},
{
"epoch": 0.052328623757195186,
"grad_norm": 4.819024085998535,
"learning_rate": 0.0,
"loss": 2.0695,
"step": 25
}
],
"logging_steps": 1,
"max_steps": 25,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 5,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 8143746210201600.0,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}
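
The JSON above matches the trainer state that the Hugging Face transformers Trainer writes into each checkpoint directory: per-step training entries (loss, learning_rate, grad_norm) interleaved with evaluation entries every eval_steps (3) steps. A minimal sketch for inspecting it, assuming the file is saved as checkpoint-25/trainer_state.json (that path is an assumption, not confirmed by the page):

# Minimal sketch: parse the checkpoint's trainer state and print the
# training and evaluation loss curves. The file path is hypothetical.
import json

with open("checkpoint-25/trainer_state.json") as f:  # assumed location
    state = json.load(f)

# Entries with "loss" are per-step training logs; entries with
# "eval_loss" are the evaluations run every `eval_steps` (3) steps.
train_logs = [e for e in state["log_history"] if "loss" in e]
eval_logs = [e for e in state["log_history"] if "eval_loss" in e]

print(f"global_step={state['global_step']}, epoch={state['epoch']:.4f}")
for e in train_logs:
    print(f"step {e['step']:>2}: loss={e['loss']:.4f}  "
          f"lr={e['learning_rate']:.2e}  grad_norm={e['grad_norm']:.2f}")
for e in eval_logs:
    print(f"step {e['step']:>2}: eval_loss={e['eval_loss']:.4f}  "
          f"({e['eval_samples_per_second']:.2f} samples/s)")

Reading the log this way shows the run at a glance: over 25 steps the training loss falls from roughly 5.1 to about 2.1 and eval_loss from 1.39 to 0.46, while the learning rate warms up to 1e-4 by step 5 and then decays to 0 by the final step.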