{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.009525327207617857,
"eval_steps": 9,
"global_step": 99,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 9.621542633957431e-05,
"eval_loss": 3.1601955890655518,
"eval_runtime": 13080.0915,
"eval_samples_per_second": 2.677,
"eval_steps_per_second": 1.338,
"step": 1
},
{
"epoch": 0.00028864627901872294,
"grad_norm": 88.87578582763672,
"learning_rate": 3e-05,
"loss": 100.51,
"step": 3
},
{
"epoch": 0.0005772925580374459,
"grad_norm": 85.21216583251953,
"learning_rate": 6e-05,
"loss": 96.5347,
"step": 6
},
{
"epoch": 0.0008659388370561688,
"grad_norm": 77.13072967529297,
"learning_rate": 9e-05,
"loss": 80.7471,
"step": 9
},
{
"epoch": 0.0008659388370561688,
"eval_loss": 2.1278481483459473,
"eval_runtime": 13090.1143,
"eval_samples_per_second": 2.675,
"eval_steps_per_second": 1.337,
"step": 9
},
{
"epoch": 0.0011545851160748917,
"grad_norm": 68.87947845458984,
"learning_rate": 0.00012,
"loss": 64.7623,
"step": 12
},
{
"epoch": 0.0014432313950936146,
"grad_norm": 43.136959075927734,
"learning_rate": 0.00015000000000000001,
"loss": 44.3736,
"step": 15
},
{
"epoch": 0.0017318776741123375,
"grad_norm": 41.73539352416992,
"learning_rate": 0.00018,
"loss": 31.1364,
"step": 18
},
{
"epoch": 0.0017318776741123375,
"eval_loss": 0.844988226890564,
"eval_runtime": 13063.393,
"eval_samples_per_second": 2.68,
"eval_steps_per_second": 1.34,
"step": 18
},
{
"epoch": 0.0020205239531310604,
"grad_norm": 37.0665397644043,
"learning_rate": 0.0001999229036240723,
"loss": 25.9659,
"step": 21
},
{
"epoch": 0.0023091702321497835,
"grad_norm": 32.10556411743164,
"learning_rate": 0.00019876883405951377,
"loss": 19.8184,
"step": 24
},
{
"epoch": 0.002597816511168506,
"grad_norm": 39.13405990600586,
"learning_rate": 0.00019624552364536473,
"loss": 15.5343,
"step": 27
},
{
"epoch": 0.002597816511168506,
"eval_loss": 0.4686388671398163,
"eval_runtime": 11313.8601,
"eval_samples_per_second": 3.094,
"eval_steps_per_second": 1.547,
"step": 27
},
{
"epoch": 0.0028864627901872292,
"grad_norm": 29.395793914794922,
"learning_rate": 0.0001923879532511287,
"loss": 14.8443,
"step": 30
},
{
"epoch": 0.003175109069205952,
"grad_norm": 27.154170989990234,
"learning_rate": 0.00018724960070727972,
"loss": 13.3753,
"step": 33
},
{
"epoch": 0.003463755348224675,
"grad_norm": 23.787752151489258,
"learning_rate": 0.00018090169943749476,
"loss": 10.3941,
"step": 36
},
{
"epoch": 0.003463755348224675,
"eval_loss": 0.3380902111530304,
"eval_runtime": 10045.2845,
"eval_samples_per_second": 3.485,
"eval_steps_per_second": 1.743,
"step": 36
},
{
"epoch": 0.003752401627243398,
"grad_norm": 32.828285217285156,
"learning_rate": 0.00017343225094356855,
"loss": 11.2033,
"step": 39
},
{
"epoch": 0.004041047906262121,
"grad_norm": 46.57306671142578,
"learning_rate": 0.00016494480483301836,
"loss": 10.001,
"step": 42
},
{
"epoch": 0.004329694185280844,
"grad_norm": 33.11753845214844,
"learning_rate": 0.00015555702330196023,
"loss": 8.5669,
"step": 45
},
{
"epoch": 0.004329694185280844,
"eval_loss": 0.25203973054885864,
"eval_runtime": 11473.7667,
"eval_samples_per_second": 3.051,
"eval_steps_per_second": 1.526,
"step": 45
},
{
"epoch": 0.004618340464299567,
"grad_norm": 21.155969619750977,
"learning_rate": 0.00014539904997395468,
"loss": 8.103,
"step": 48
},
{
"epoch": 0.00490698674331829,
"grad_norm": 25.17514419555664,
"learning_rate": 0.0001346117057077493,
"loss": 7.4075,
"step": 51
},
{
"epoch": 0.005195633022337012,
"grad_norm": 21.17397117614746,
"learning_rate": 0.00012334453638559057,
"loss": 6.9151,
"step": 54
},
{
"epoch": 0.005195633022337012,
"eval_loss": 0.21289654076099396,
"eval_runtime": 12548.5942,
"eval_samples_per_second": 2.79,
"eval_steps_per_second": 1.395,
"step": 54
},
{
"epoch": 0.005484279301355735,
"grad_norm": 21.539554595947266,
"learning_rate": 0.00011175373974578378,
"loss": 6.7601,
"step": 57
},
{
"epoch": 0.0057729255803744585,
"grad_norm": 20.63874626159668,
"learning_rate": 0.0001,
"loss": 6.994,
"step": 60
},
{
"epoch": 0.006061571859393182,
"grad_norm": 16.417226791381836,
"learning_rate": 8.824626025421626e-05,
"loss": 5.5389,
"step": 63
},
{
"epoch": 0.006061571859393182,
"eval_loss": 0.17797139286994934,
"eval_runtime": 9174.8,
"eval_samples_per_second": 3.816,
"eval_steps_per_second": 1.908,
"step": 63
},
{
"epoch": 0.006350218138411904,
"grad_norm": 17.829113006591797,
"learning_rate": 7.66554636144095e-05,
"loss": 5.8211,
"step": 66
},
{
"epoch": 0.006638864417430627,
"grad_norm": 18.48779296875,
"learning_rate": 6.538829429225069e-05,
"loss": 5.1794,
"step": 69
},
{
"epoch": 0.00692751069644935,
"grad_norm": 17.563810348510742,
"learning_rate": 5.4600950026045326e-05,
"loss": 5.6267,
"step": 72
},
{
"epoch": 0.00692751069644935,
"eval_loss": 0.16187982261180878,
"eval_runtime": 9520.9589,
"eval_samples_per_second": 3.677,
"eval_steps_per_second": 1.839,
"step": 72
},
{
"epoch": 0.007216156975468073,
"grad_norm": 19.880619049072266,
"learning_rate": 4.444297669803981e-05,
"loss": 5.2772,
"step": 75
},
{
"epoch": 0.007504803254486796,
"grad_norm": 21.604888916015625,
"learning_rate": 3.5055195166981645e-05,
"loss": 4.9052,
"step": 78
},
{
"epoch": 0.0077934495335055184,
"grad_norm": 19.327678680419922,
"learning_rate": 2.6567749056431467e-05,
"loss": 5.1425,
"step": 81
},
{
"epoch": 0.0077934495335055184,
"eval_loss": 0.14949661493301392,
"eval_runtime": 12097.8181,
"eval_samples_per_second": 2.894,
"eval_steps_per_second": 1.447,
"step": 81
},
{
"epoch": 0.008082095812524242,
"grad_norm": 17.062747955322266,
"learning_rate": 1.9098300562505266e-05,
"loss": 5.306,
"step": 84
},
{
"epoch": 0.008370742091542965,
"grad_norm": 15.015271186828613,
"learning_rate": 1.2750399292720283e-05,
"loss": 4.3013,
"step": 87
},
{
"epoch": 0.008659388370561688,
"grad_norm": 12.879692077636719,
"learning_rate": 7.612046748871327e-06,
"loss": 4.3707,
"step": 90
},
{
"epoch": 0.008659388370561688,
"eval_loss": 0.1438611000776291,
"eval_runtime": 12295.3446,
"eval_samples_per_second": 2.847,
"eval_steps_per_second": 1.424,
"step": 90
},
{
"epoch": 0.00894803464958041,
"grad_norm": 14.83832836151123,
"learning_rate": 3.7544763546352834e-06,
"loss": 4.7525,
"step": 93
},
{
"epoch": 0.009236680928599134,
"grad_norm": 12.520251274108887,
"learning_rate": 1.231165940486234e-06,
"loss": 4.5588,
"step": 96
},
{
"epoch": 0.009525327207617857,
"grad_norm": 17.722375869750977,
"learning_rate": 7.709637592770991e-08,
"loss": 4.3476,
"step": 99
},
{
"epoch": 0.009525327207617857,
"eval_loss": 0.14178016781806946,
"eval_runtime": 9365.6187,
"eval_samples_per_second": 3.738,
"eval_steps_per_second": 1.869,
"step": 99
}
],
"logging_steps": 3,
"max_steps": 100,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 9,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 5.206564752284713e+17,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}
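
A minimal sketch of how this trainer_state.json could be read back with the Python standard library; the checkpoint path below is an assumption for illustration. Each entry in log_history carries either a training "loss" or an "eval_loss", keyed by the shared "step" counter, so the two curves can be separated with a simple filter:

import json

# Path is illustrative; point it at wherever this checkpoint was saved.
with open("checkpoint-99/trainer_state.json") as f:
    state = json.load(f)

# Training logs contain "loss"; evaluation logs contain "eval_loss".
train_log = [(e["step"], e["loss"]) for e in state["log_history"] if "loss" in e]
eval_log = [(e["step"], e["eval_loss"]) for e in state["log_history"] if "eval_loss" in e]

print(f"global_step={state['global_step']}, epoch={state['epoch']:.6f}")
print("last train loss:", train_log[-1])  # e.g. (99, 4.3476)
print("last eval loss:", eval_log[-1])    # e.g. (99, 0.1418...)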