Training in progress, step 100, checkpoint (commit 168ff33)
{
"best_metric": 2.4987945556640625,
"best_model_checkpoint": "miner_id_24/checkpoint-100",
"epoch": 1.5686274509803921,
"eval_steps": 50,
"global_step": 100,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.01568627450980392,
"grad_norm": 0.9535642266273499,
"learning_rate": 1e-05,
"loss": 2.8736,
"step": 1
},
{
"epoch": 0.01568627450980392,
"eval_loss": 3.324666738510132,
"eval_runtime": 7.9996,
"eval_samples_per_second": 13.501,
"eval_steps_per_second": 3.375,
"step": 1
},
{
"epoch": 0.03137254901960784,
"grad_norm": 1.0703654289245605,
"learning_rate": 2e-05,
"loss": 2.8739,
"step": 2
},
{
"epoch": 0.047058823529411764,
"grad_norm": 1.0498156547546387,
"learning_rate": 3e-05,
"loss": 2.9074,
"step": 3
},
{
"epoch": 0.06274509803921569,
"grad_norm": 0.9599528312683105,
"learning_rate": 4e-05,
"loss": 2.9035,
"step": 4
},
{
"epoch": 0.0784313725490196,
"grad_norm": 0.9831591248512268,
"learning_rate": 5e-05,
"loss": 2.8063,
"step": 5
},
{
"epoch": 0.09411764705882353,
"grad_norm": 0.9782329201698303,
"learning_rate": 6e-05,
"loss": 2.9952,
"step": 6
},
{
"epoch": 0.10980392156862745,
"grad_norm": 0.859535813331604,
"learning_rate": 7e-05,
"loss": 2.9002,
"step": 7
},
{
"epoch": 0.12549019607843137,
"grad_norm": 0.9193878769874573,
"learning_rate": 8e-05,
"loss": 2.7132,
"step": 8
},
{
"epoch": 0.1411764705882353,
"grad_norm": 0.8184499740600586,
"learning_rate": 9e-05,
"loss": 2.7317,
"step": 9
},
{
"epoch": 0.1568627450980392,
"grad_norm": 0.8140602707862854,
"learning_rate": 0.0001,
"loss": 2.6723,
"step": 10
},
{
"epoch": 0.17254901960784313,
"grad_norm": 0.9426817893981934,
"learning_rate": 9.999255120204248e-05,
"loss": 2.6702,
"step": 11
},
{
"epoch": 0.18823529411764706,
"grad_norm": 1.0090997219085693,
"learning_rate": 9.997020702755353e-05,
"loss": 2.7346,
"step": 12
},
{
"epoch": 0.20392156862745098,
"grad_norm": 1.0597916841506958,
"learning_rate": 9.99329741340228e-05,
"loss": 2.678,
"step": 13
},
{
"epoch": 0.2196078431372549,
"grad_norm": 1.2650080919265747,
"learning_rate": 9.98808636150624e-05,
"loss": 2.6331,
"step": 14
},
{
"epoch": 0.23529411764705882,
"grad_norm": 1.5050358772277832,
"learning_rate": 9.981389099710132e-05,
"loss": 3.0748,
"step": 15
},
{
"epoch": 0.25098039215686274,
"grad_norm": 0.516484797000885,
"learning_rate": 9.973207623475965e-05,
"loss": 2.4093,
"step": 16
},
{
"epoch": 0.26666666666666666,
"grad_norm": 0.6125854849815369,
"learning_rate": 9.96354437049027e-05,
"loss": 2.5081,
"step": 17
},
{
"epoch": 0.2823529411764706,
"grad_norm": 0.5779858827590942,
"learning_rate": 9.952402219937816e-05,
"loss": 2.5021,
"step": 18
},
{
"epoch": 0.2980392156862745,
"grad_norm": 0.5545258522033691,
"learning_rate": 9.939784491643734e-05,
"loss": 2.4604,
"step": 19
},
{
"epoch": 0.3137254901960784,
"grad_norm": 0.5096269249916077,
"learning_rate": 9.92569494508437e-05,
"loss": 2.626,
"step": 20
},
{
"epoch": 0.32941176470588235,
"grad_norm": 0.4844023287296295,
"learning_rate": 9.910137778267152e-05,
"loss": 2.5395,
"step": 21
},
{
"epoch": 0.34509803921568627,
"grad_norm": 0.4641485810279846,
"learning_rate": 9.893117626479777e-05,
"loss": 2.5221,
"step": 22
},
{
"epoch": 0.3607843137254902,
"grad_norm": 0.47447431087493896,
"learning_rate": 9.874639560909117e-05,
"loss": 2.4512,
"step": 23
},
{
"epoch": 0.3764705882352941,
"grad_norm": 0.6421774625778198,
"learning_rate": 9.85470908713026e-05,
"loss": 2.4789,
"step": 24
},
{
"epoch": 0.39215686274509803,
"grad_norm": 0.692217230796814,
"learning_rate": 9.833332143466099e-05,
"loss": 2.5332,
"step": 25
},
{
"epoch": 0.40784313725490196,
"grad_norm": 0.5684048533439636,
"learning_rate": 9.810515099218003e-05,
"loss": 2.7136,
"step": 26
},
{
"epoch": 0.4235294117647059,
"grad_norm": 0.6432080864906311,
"learning_rate": 9.78626475276808e-05,
"loss": 2.5727,
"step": 27
},
{
"epoch": 0.4392156862745098,
"grad_norm": 0.7890809774398804,
"learning_rate": 9.760588329553571e-05,
"loss": 2.6341,
"step": 28
},
{
"epoch": 0.4549019607843137,
"grad_norm": 0.8166062235832214,
"learning_rate": 9.73349347991403e-05,
"loss": 2.5955,
"step": 29
},
{
"epoch": 0.47058823529411764,
"grad_norm": 1.1982145309448242,
"learning_rate": 9.704988276811883e-05,
"loss": 2.9488,
"step": 30
},
{
"epoch": 0.48627450980392156,
"grad_norm": 0.30004727840423584,
"learning_rate": 9.675081213427076e-05,
"loss": 2.3561,
"step": 31
},
{
"epoch": 0.5019607843137255,
"grad_norm": 0.41050592064857483,
"learning_rate": 9.643781200626511e-05,
"loss": 2.425,
"step": 32
},
{
"epoch": 0.5176470588235295,
"grad_norm": 0.42230165004730225,
"learning_rate": 9.611097564309053e-05,
"loss": 2.5036,
"step": 33
},
{
"epoch": 0.5333333333333333,
"grad_norm": 0.4193224608898163,
"learning_rate": 9.577040042626833e-05,
"loss": 2.3797,
"step": 34
},
{
"epoch": 0.5490196078431373,
"grad_norm": 0.4057847857475281,
"learning_rate": 9.54161878308377e-05,
"loss": 2.4552,
"step": 35
},
{
"epoch": 0.5647058823529412,
"grad_norm": 0.43488991260528564,
"learning_rate": 9.504844339512095e-05,
"loss": 2.4372,
"step": 36
},
{
"epoch": 0.5803921568627451,
"grad_norm": 0.4593192934989929,
"learning_rate": 9.466727668927816e-05,
"loss": 2.5748,
"step": 37
},
{
"epoch": 0.596078431372549,
"grad_norm": 0.4425160884857178,
"learning_rate": 9.42728012826605e-05,
"loss": 2.404,
"step": 38
},
{
"epoch": 0.611764705882353,
"grad_norm": 0.43048760294914246,
"learning_rate": 9.38651347099721e-05,
"loss": 2.3847,
"step": 39
},
{
"epoch": 0.6274509803921569,
"grad_norm": 0.4540267884731293,
"learning_rate": 9.344439843625034e-05,
"loss": 2.4953,
"step": 40
},
{
"epoch": 0.6431372549019608,
"grad_norm": 0.4934435486793518,
"learning_rate": 9.301071782067504e-05,
"loss": 2.479,
"step": 41
},
{
"epoch": 0.6588235294117647,
"grad_norm": 0.6172159910202026,
"learning_rate": 9.256422207921757e-05,
"loss": 2.5236,
"step": 42
},
{
"epoch": 0.6745098039215687,
"grad_norm": 0.5735173225402832,
"learning_rate": 9.210504424614059e-05,
"loss": 2.452,
"step": 43
},
{
"epoch": 0.6901960784313725,
"grad_norm": 0.692180335521698,
"learning_rate": 9.163332113436032e-05,
"loss": 2.5363,
"step": 44
},
{
"epoch": 0.7058823529411765,
"grad_norm": 1.1689708232879639,
"learning_rate": 9.114919329468282e-05,
"loss": 2.8587,
"step": 45
},
{
"epoch": 0.7215686274509804,
"grad_norm": 0.3823428750038147,
"learning_rate": 9.065280497392663e-05,
"loss": 2.4599,
"step": 46
},
{
"epoch": 0.7372549019607844,
"grad_norm": 0.37304720282554626,
"learning_rate": 9.014430407194413e-05,
"loss": 2.4085,
"step": 47
},
{
"epoch": 0.7529411764705882,
"grad_norm": 0.354853093624115,
"learning_rate": 8.962384209755452e-05,
"loss": 2.5411,
"step": 48
},
{
"epoch": 0.7686274509803922,
"grad_norm": 0.3872363269329071,
"learning_rate": 8.90915741234015e-05,
"loss": 2.4203,
"step": 49
},
{
"epoch": 0.7843137254901961,
"grad_norm": 0.40209847688674927,
"learning_rate": 8.854765873974898e-05,
"loss": 2.5972,
"step": 50
},
{
"epoch": 0.7843137254901961,
"eval_loss": 2.5110836029052734,
"eval_runtime": 8.1325,
"eval_samples_per_second": 13.28,
"eval_steps_per_second": 3.32,
"step": 50
},
{
"epoch": 0.8,
"grad_norm": 0.38388505578041077,
"learning_rate": 8.799225800722895e-05,
"loss": 2.4428,
"step": 51
},
{
"epoch": 0.8156862745098039,
"grad_norm": 0.4438699185848236,
"learning_rate": 8.742553740855506e-05,
"loss": 2.4849,
"step": 52
},
{
"epoch": 0.8313725490196079,
"grad_norm": 0.4089072644710541,
"learning_rate": 8.684766579921684e-05,
"loss": 2.4612,
"step": 53
},
{
"epoch": 0.8470588235294118,
"grad_norm": 0.45029300451278687,
"learning_rate": 8.625881535716883e-05,
"loss": 2.4742,
"step": 54
},
{
"epoch": 0.8627450980392157,
"grad_norm": 0.4340762197971344,
"learning_rate": 8.565916153152983e-05,
"loss": 2.4731,
"step": 55
},
{
"epoch": 0.8784313725490196,
"grad_norm": 0.4764128625392914,
"learning_rate": 8.504888299030747e-05,
"loss": 2.6086,
"step": 56
},
{
"epoch": 0.8941176470588236,
"grad_norm": 0.5077619552612305,
"learning_rate": 8.442816156716385e-05,
"loss": 2.5386,
"step": 57
},
{
"epoch": 0.9098039215686274,
"grad_norm": 0.5658111572265625,
"learning_rate": 8.379718220723773e-05,
"loss": 2.5659,
"step": 58
},
{
"epoch": 0.9254901960784314,
"grad_norm": 0.6542505621910095,
"learning_rate": 8.315613291203976e-05,
"loss": 2.5815,
"step": 59
},
{
"epoch": 0.9411764705882353,
"grad_norm": 0.9077744483947754,
"learning_rate": 8.250520468343722e-05,
"loss": 2.9127,
"step": 60
},
{
"epoch": 0.9568627450980393,
"grad_norm": 0.3028375208377838,
"learning_rate": 8.184459146674446e-05,
"loss": 2.3544,
"step": 61
},
{
"epoch": 0.9725490196078431,
"grad_norm": 0.37583601474761963,
"learning_rate": 8.117449009293668e-05,
"loss": 2.3619,
"step": 62
},
{
"epoch": 0.9882352941176471,
"grad_norm": 0.5018683075904846,
"learning_rate": 8.049510022000364e-05,
"loss": 2.5821,
"step": 63
},
{
"epoch": 1.003921568627451,
"grad_norm": 0.824658989906311,
"learning_rate": 7.980662427346127e-05,
"loss": 3.6075,
"step": 64
},
{
"epoch": 1.0196078431372548,
"grad_norm": 0.2513439357280731,
"learning_rate": 7.910926738603854e-05,
"loss": 2.1654,
"step": 65
},
{
"epoch": 1.035294117647059,
"grad_norm": 0.2941010296344757,
"learning_rate": 7.840323733655778e-05,
"loss": 2.2546,
"step": 66
},
{
"epoch": 1.0509803921568628,
"grad_norm": 0.3366384506225586,
"learning_rate": 7.768874448802665e-05,
"loss": 2.41,
"step": 67
},
{
"epoch": 1.0666666666666667,
"grad_norm": 0.32259663939476013,
"learning_rate": 7.696600172495997e-05,
"loss": 2.2745,
"step": 68
},
{
"epoch": 1.0823529411764705,
"grad_norm": 0.34342584013938904,
"learning_rate": 7.62352243899504e-05,
"loss": 2.3813,
"step": 69
},
{
"epoch": 1.0980392156862746,
"grad_norm": 0.3747313916683197,
"learning_rate": 7.54966302195068e-05,
"loss": 2.4037,
"step": 70
},
{
"epoch": 1.1137254901960785,
"grad_norm": 0.3675015866756439,
"learning_rate": 7.475043927917907e-05,
"loss": 2.3497,
"step": 71
},
{
"epoch": 1.1294117647058823,
"grad_norm": 0.36925432085990906,
"learning_rate": 7.399687389798933e-05,
"loss": 2.2603,
"step": 72
},
{
"epoch": 1.1450980392156862,
"grad_norm": 0.4180779457092285,
"learning_rate": 7.323615860218843e-05,
"loss": 2.3582,
"step": 73
},
{
"epoch": 1.1607843137254903,
"grad_norm": 0.47406819462776184,
"learning_rate": 7.246852004835807e-05,
"loss": 2.3733,
"step": 74
},
{
"epoch": 1.1764705882352942,
"grad_norm": 0.485302597284317,
"learning_rate": 7.169418695587791e-05,
"loss": 2.3337,
"step": 75
},
{
"epoch": 1.192156862745098,
"grad_norm": 0.539759635925293,
"learning_rate": 7.091339003877826e-05,
"loss": 2.3197,
"step": 76
},
{
"epoch": 1.2078431372549019,
"grad_norm": 0.6110227108001709,
"learning_rate": 7.012636193699837e-05,
"loss": 2.109,
"step": 77
},
{
"epoch": 1.223529411764706,
"grad_norm": 0.7330251336097717,
"learning_rate": 6.933333714707094e-05,
"loss": 2.3911,
"step": 78
},
{
"epoch": 1.2392156862745098,
"grad_norm": 0.8158701062202454,
"learning_rate": 6.853455195225338e-05,
"loss": 2.6754,
"step": 79
},
{
"epoch": 1.2549019607843137,
"grad_norm": 0.3269573748111725,
"learning_rate": 6.773024435212678e-05,
"loss": 2.1136,
"step": 80
},
{
"epoch": 1.2705882352941176,
"grad_norm": 0.4158848226070404,
"learning_rate": 6.692065399168352e-05,
"loss": 2.3167,
"step": 81
},
{
"epoch": 1.2862745098039214,
"grad_norm": 0.4102843999862671,
"learning_rate": 6.610602208992454e-05,
"loss": 2.2767,
"step": 82
},
{
"epoch": 1.3019607843137255,
"grad_norm": 0.41636908054351807,
"learning_rate": 6.528659136798764e-05,
"loss": 2.2742,
"step": 83
},
{
"epoch": 1.3176470588235294,
"grad_norm": 0.4526170492172241,
"learning_rate": 6.446260597682839e-05,
"loss": 2.3794,
"step": 84
},
{
"epoch": 1.3333333333333333,
"grad_norm": 0.4637283384799957,
"learning_rate": 6.363431142447469e-05,
"loss": 2.3691,
"step": 85
},
{
"epoch": 1.3490196078431373,
"grad_norm": 0.48547840118408203,
"learning_rate": 6.280195450287736e-05,
"loss": 2.344,
"step": 86
},
{
"epoch": 1.3647058823529412,
"grad_norm": 0.48103803396224976,
"learning_rate": 6.19657832143779e-05,
"loss": 2.435,
"step": 87
},
{
"epoch": 1.380392156862745,
"grad_norm": 0.4679640531539917,
"learning_rate": 6.112604669781572e-05,
"loss": 2.2715,
"step": 88
},
{
"epoch": 1.396078431372549,
"grad_norm": 0.5256341695785522,
"learning_rate": 6.028299515429683e-05,
"loss": 2.3161,
"step": 89
},
{
"epoch": 1.4117647058823528,
"grad_norm": 0.5306299328804016,
"learning_rate": 5.943687977264584e-05,
"loss": 2.3116,
"step": 90
},
{
"epoch": 1.427450980392157,
"grad_norm": 0.6107631921768188,
"learning_rate": 5.8587952654563817e-05,
"loss": 2.3921,
"step": 91
},
{
"epoch": 1.4431372549019608,
"grad_norm": 0.6549443006515503,
"learning_rate": 5.773646673951406e-05,
"loss": 2.2533,
"step": 92
},
{
"epoch": 1.4588235294117646,
"grad_norm": 0.8716562986373901,
"learning_rate": 5.688267572935842e-05,
"loss": 2.5249,
"step": 93
},
{
"epoch": 1.4745098039215687,
"grad_norm": 0.8429364562034607,
"learning_rate": 5.602683401276615e-05,
"loss": 2.5472,
"step": 94
},
{
"epoch": 1.4901960784313726,
"grad_norm": 0.3356582224369049,
"learning_rate": 5.5169196589418504e-05,
"loss": 2.0268,
"step": 95
},
{
"epoch": 1.5058823529411764,
"grad_norm": 0.4084802269935608,
"learning_rate": 5.431001899403098e-05,
"loss": 2.2999,
"step": 96
},
{
"epoch": 1.5215686274509803,
"grad_norm": 0.4267803430557251,
"learning_rate": 5.344955722021624e-05,
"loss": 2.3227,
"step": 97
},
{
"epoch": 1.5372549019607842,
"grad_norm": 0.4320634603500366,
"learning_rate": 5.258806764421048e-05,
"loss": 2.246,
"step": 98
},
{
"epoch": 1.5529411764705883,
"grad_norm": 0.46851688623428345,
"learning_rate": 5.172580694848541e-05,
"loss": 2.3576,
"step": 99
},
{
"epoch": 1.5686274509803921,
"grad_norm": 0.4941369593143463,
"learning_rate": 5.086303204526943e-05,
"loss": 2.3498,
"step": 100
},
{
"epoch": 1.5686274509803921,
"eval_loss": 2.4987945556640625,
"eval_runtime": 8.1314,
"eval_samples_per_second": 13.282,
"eval_steps_per_second": 3.32,
"step": 100
}
],
"logging_steps": 1,
"max_steps": 192,
"num_input_tokens_seen": 0,
"num_train_epochs": 4,
"save_steps": 50,
"stateful_callbacks": {
"EarlyStoppingCallback": {
"args": {
"early_stopping_patience": 5,
"early_stopping_threshold": 0.0
},
"attributes": {
"early_stopping_patience_counter": 0
}
},
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 1.36553356591104e+17,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}
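
The JSON above is the Trainer state saved with the checkpoint at step 100. The short Python sketch below is not part of the checkpoint itself; it assumes the file has been downloaded locally as trainer_state.json and shows one way to read log_history to pull out the eval-loss trajectory and the best checkpoint recorded so far.

# Minimal sketch: inspect a trainer_state.json like the one above.
# Assumes the file was saved locally as "trainer_state.json"; the path and
# summary logic are illustrative, not taken from the repository.
import json

with open("trainer_state.json") as f:
    state = json.load(f)

# log_history mixes per-step training entries ("loss") and evaluation entries ("eval_loss").
train_logs = [e for e in state["log_history"] if "loss" in e]
eval_logs = [e for e in state["log_history"] if "eval_loss" in e]

print(f"global_step: {state['global_step']}, epoch: {state['epoch']:.3f}")
print(f"best_metric (eval_loss): {state['best_metric']}")
print(f"best_model_checkpoint: {state['best_model_checkpoint']}")

# Eval loss at each evaluation point (every eval_steps = 50 steps here).
for e in eval_logs:
    print(f"step {e['step']:>4}: eval_loss = {e['eval_loss']:.4f}")

# Last logged training step, loss, and learning rate.
last = train_logs[-1]
print(f"last logged step {last['step']}: loss = {last['loss']}, lr = {last['learning_rate']}")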