{
"best_metric": 0.44632911682128906,
"best_model_checkpoint": "miner_id_24/checkpoint-50",
"epoch": 0.04105933073290905,
"eval_steps": 50,
"global_step": 50,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.000821186614658181,
"grad_norm": 2.0305542945861816,
"learning_rate": 1e-05,
"loss": 0.7261,
"step": 1
},
{
"epoch": 0.000821186614658181,
"eval_loss": 1.465772271156311,
"eval_runtime": 288.2262,
"eval_samples_per_second": 7.116,
"eval_steps_per_second": 1.78,
"step": 1
},
{
"epoch": 0.001642373229316362,
"grad_norm": 2.114654541015625,
"learning_rate": 2e-05,
"loss": 0.7726,
"step": 2
},
{
"epoch": 0.002463559843974543,
"grad_norm": 2.2381458282470703,
"learning_rate": 3e-05,
"loss": 0.8371,
"step": 3
},
{
"epoch": 0.003284746458632724,
"grad_norm": 1.9415267705917358,
"learning_rate": 4e-05,
"loss": 0.7885,
"step": 4
},
{
"epoch": 0.0041059330732909054,
"grad_norm": 1.484900712966919,
"learning_rate": 5e-05,
"loss": 0.7522,
"step": 5
},
{
"epoch": 0.004927119687949086,
"grad_norm": 1.259929895401001,
"learning_rate": 6e-05,
"loss": 0.7248,
"step": 6
},
{
"epoch": 0.005748306302607267,
"grad_norm": 0.9762012958526611,
"learning_rate": 7e-05,
"loss": 0.5922,
"step": 7
},
{
"epoch": 0.006569492917265448,
"grad_norm": 1.0204635858535767,
"learning_rate": 8e-05,
"loss": 0.5452,
"step": 8
},
{
"epoch": 0.00739067953192363,
"grad_norm": 0.8520223498344421,
"learning_rate": 9e-05,
"loss": 0.4675,
"step": 9
},
{
"epoch": 0.008211866146581811,
"grad_norm": 1.470017433166504,
"learning_rate": 0.0001,
"loss": 0.4164,
"step": 10
},
{
"epoch": 0.009033052761239991,
"grad_norm": 0.8858131766319275,
"learning_rate": 9.99983777858264e-05,
"loss": 0.3847,
"step": 11
},
{
"epoch": 0.009854239375898173,
"grad_norm": 1.0039604902267456,
"learning_rate": 9.999351124856874e-05,
"loss": 0.3836,
"step": 12
},
{
"epoch": 0.010675425990556354,
"grad_norm": 0.6317289471626282,
"learning_rate": 9.998540070400966e-05,
"loss": 0.3325,
"step": 13
},
{
"epoch": 0.011496612605214535,
"grad_norm": 0.680807888507843,
"learning_rate": 9.997404667843075e-05,
"loss": 0.3493,
"step": 14
},
{
"epoch": 0.012317799219872716,
"grad_norm": 0.6124119758605957,
"learning_rate": 9.995944990857849e-05,
"loss": 0.3204,
"step": 15
},
{
"epoch": 0.013138985834530896,
"grad_norm": 0.6300580501556396,
"learning_rate": 9.994161134161634e-05,
"loss": 0.3341,
"step": 16
},
{
"epoch": 0.013960172449189078,
"grad_norm": 0.608344554901123,
"learning_rate": 9.992053213506334e-05,
"loss": 0.3386,
"step": 17
},
{
"epoch": 0.01478135906384726,
"grad_norm": 0.5785073041915894,
"learning_rate": 9.989621365671902e-05,
"loss": 0.3035,
"step": 18
},
{
"epoch": 0.01560254567850544,
"grad_norm": 0.6188136339187622,
"learning_rate": 9.986865748457457e-05,
"loss": 0.3162,
"step": 19
},
{
"epoch": 0.016423732293163622,
"grad_norm": 0.5587109923362732,
"learning_rate": 9.983786540671051e-05,
"loss": 0.273,
"step": 20
},
{
"epoch": 0.017244918907821802,
"grad_norm": 0.5472216606140137,
"learning_rate": 9.980383942118066e-05,
"loss": 0.3215,
"step": 21
},
{
"epoch": 0.018066105522479982,
"grad_norm": 0.4810258150100708,
"learning_rate": 9.976658173588244e-05,
"loss": 0.307,
"step": 22
},
{
"epoch": 0.018887292137138165,
"grad_norm": 0.5424895882606506,
"learning_rate": 9.972609476841367e-05,
"loss": 0.3065,
"step": 23
},
{
"epoch": 0.019708478751796345,
"grad_norm": 0.5074129700660706,
"learning_rate": 9.968238114591566e-05,
"loss": 0.2774,
"step": 24
},
{
"epoch": 0.020529665366454525,
"grad_norm": 0.5599948167800903,
"learning_rate": 9.96354437049027e-05,
"loss": 0.2691,
"step": 25
},
{
"epoch": 0.02135085198111271,
"grad_norm": 0.6742061972618103,
"learning_rate": 9.95852854910781e-05,
"loss": 0.2898,
"step": 26
},
{
"epoch": 0.02217203859577089,
"grad_norm": 0.5526089072227478,
"learning_rate": 9.953190975913647e-05,
"loss": 0.3071,
"step": 27
},
{
"epoch": 0.02299322521042907,
"grad_norm": 0.5390534996986389,
"learning_rate": 9.947531997255256e-05,
"loss": 0.2906,
"step": 28
},
{
"epoch": 0.023814411825087253,
"grad_norm": 0.504539430141449,
"learning_rate": 9.941551980335652e-05,
"loss": 0.2688,
"step": 29
},
{
"epoch": 0.024635598439745433,
"grad_norm": 0.4898300766944885,
"learning_rate": 9.935251313189564e-05,
"loss": 0.2845,
"step": 30
},
{
"epoch": 0.025456785054403613,
"grad_norm": 0.49974775314331055,
"learning_rate": 9.928630404658255e-05,
"loss": 0.2702,
"step": 31
},
{
"epoch": 0.026277971669061793,
"grad_norm": 0.5819835662841797,
"learning_rate": 9.921689684362989e-05,
"loss": 0.2699,
"step": 32
},
{
"epoch": 0.027099158283719976,
"grad_norm": 0.6171815395355225,
"learning_rate": 9.914429602677162e-05,
"loss": 0.3477,
"step": 33
},
{
"epoch": 0.027920344898378156,
"grad_norm": 0.6020647883415222,
"learning_rate": 9.906850630697068e-05,
"loss": 0.2901,
"step": 34
},
{
"epoch": 0.028741531513036336,
"grad_norm": 0.7458943724632263,
"learning_rate": 9.898953260211338e-05,
"loss": 0.2498,
"step": 35
},
{
"epoch": 0.02956271812769452,
"grad_norm": 0.6588975787162781,
"learning_rate": 9.890738003669029e-05,
"loss": 0.2599,
"step": 36
},
{
"epoch": 0.0303839047423527,
"grad_norm": 0.6839740872383118,
"learning_rate": 9.882205394146361e-05,
"loss": 0.337,
"step": 37
},
{
"epoch": 0.03120509135701088,
"grad_norm": 0.6753020882606506,
"learning_rate": 9.87335598531214e-05,
"loss": 0.3069,
"step": 38
},
{
"epoch": 0.032026277971669063,
"grad_norm": 0.8709951043128967,
"learning_rate": 9.864190351391822e-05,
"loss": 0.3396,
"step": 39
},
{
"epoch": 0.032847464586327244,
"grad_norm": 0.6819374561309814,
"learning_rate": 9.85470908713026e-05,
"loss": 0.2963,
"step": 40
},
{
"epoch": 0.033668651200985424,
"grad_norm": 0.856820821762085,
"learning_rate": 9.844912807753104e-05,
"loss": 0.289,
"step": 41
},
{
"epoch": 0.034489837815643604,
"grad_norm": 0.7196516990661621,
"learning_rate": 9.834802148926882e-05,
"loss": 0.2858,
"step": 42
},
{
"epoch": 0.035311024430301784,
"grad_norm": 0.7757695913314819,
"learning_rate": 9.824377766717759e-05,
"loss": 0.2882,
"step": 43
},
{
"epoch": 0.036132211044959964,
"grad_norm": 0.7751405835151672,
"learning_rate": 9.813640337548954e-05,
"loss": 0.3174,
"step": 44
},
{
"epoch": 0.03695339765961815,
"grad_norm": 0.8659068942070007,
"learning_rate": 9.802590558156862e-05,
"loss": 0.2818,
"step": 45
},
{
"epoch": 0.03777458427427633,
"grad_norm": 1.336848258972168,
"learning_rate": 9.791229145545831e-05,
"loss": 0.3209,
"step": 46
},
{
"epoch": 0.03859577088893451,
"grad_norm": 0.9144354462623596,
"learning_rate": 9.779556836941645e-05,
"loss": 0.28,
"step": 47
},
{
"epoch": 0.03941695750359269,
"grad_norm": 0.8315229415893555,
"learning_rate": 9.767574389743682e-05,
"loss": 0.276,
"step": 48
},
{
"epoch": 0.04023814411825087,
"grad_norm": 0.967755913734436,
"learning_rate": 9.755282581475769e-05,
"loss": 0.3298,
"step": 49
},
{
"epoch": 0.04105933073290905,
"grad_norm": 1.0207139253616333,
"learning_rate": 9.742682209735727e-05,
"loss": 0.3058,
"step": 50
},
{
"epoch": 0.04105933073290905,
"eval_loss": 0.44632911682128906,
"eval_runtime": 291.2748,
"eval_samples_per_second": 7.041,
"eval_steps_per_second": 1.761,
"step": 50
}
],
"logging_steps": 1,
"max_steps": 400,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 50,
"stateful_callbacks": {
"EarlyStoppingCallback": {
"args": {
"early_stopping_patience": 5,
"early_stopping_threshold": 0.0
},
"attributes": {
"early_stopping_patience_counter": 0
}
},
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 8.722470612133478e+16,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}
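For reference, a minimal sketch of how a trainer_state.json like the one above can be inspected offline. The path "checkpoint-50/trainer_state.json" is an assumption (adjust it to wherever the Trainer wrote the checkpoint); the keys used below (best_metric, best_model_checkpoint, log_history, loss, eval_loss, step, learning_rate) all appear in the state shown here.

import json

# Assumed path to the saved checkpoint state; change as needed.
with open("checkpoint-50/trainer_state.json") as f:
    state = json.load(f)

# log_history mixes training-loss entries and evaluation entries;
# split them by which loss key each record carries.
train_logs = [e for e in state["log_history"] if "loss" in e]
eval_logs = [e for e in state["log_history"] if "eval_loss" in e]

print(f"best eval_loss: {state['best_metric']} at {state['best_model_checkpoint']}")
print(f"logged {len(train_logs)} training steps, {len(eval_logs)} evaluations")

# Show the most recent training steps with their loss and learning rate.
for entry in train_logs[-5:]:
    print(f"step {entry['step']:>3}  loss={entry['loss']:.4f}  lr={entry['learning_rate']:.2e}")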