{
"best_metric": 0.4549524486064911,
"best_model_checkpoint": "miner_id_24/checkpoint-50",
"epoch": 0.0392156862745098,
"eval_steps": 50,
"global_step": 50,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.000784313725490196,
"grad_norm": 12.951228141784668,
"learning_rate": 9e-06,
"loss": 4.2676,
"step": 1
},
{
"epoch": 0.000784313725490196,
"eval_loss": 1.2241708040237427,
"eval_runtime": 170.1958,
"eval_samples_per_second": 12.621,
"eval_steps_per_second": 3.155,
"step": 1
},
{
"epoch": 0.001568627450980392,
"grad_norm": 15.065213203430176,
"learning_rate": 1.8e-05,
"loss": 2.7549,
"step": 2
},
{
"epoch": 0.002352941176470588,
"grad_norm": 13.755888938903809,
"learning_rate": 2.7000000000000002e-05,
"loss": 2.7658,
"step": 3
},
{
"epoch": 0.003137254901960784,
"grad_norm": 11.931817054748535,
"learning_rate": 3.6e-05,
"loss": 2.2277,
"step": 4
},
{
"epoch": 0.00392156862745098,
"grad_norm": 8.542208671569824,
"learning_rate": 4.5e-05,
"loss": 1.8974,
"step": 5
},
{
"epoch": 0.004705882352941176,
"grad_norm": 7.989168643951416,
"learning_rate": 5.4000000000000005e-05,
"loss": 2.0913,
"step": 6
},
{
"epoch": 0.005490196078431373,
"grad_norm": 6.630977630615234,
"learning_rate": 6.3e-05,
"loss": 1.6821,
"step": 7
},
{
"epoch": 0.006274509803921568,
"grad_norm": 6.496756553649902,
"learning_rate": 7.2e-05,
"loss": 1.6665,
"step": 8
},
{
"epoch": 0.007058823529411765,
"grad_norm": 7.982497215270996,
"learning_rate": 8.1e-05,
"loss": 2.0694,
"step": 9
},
{
"epoch": 0.00784313725490196,
"grad_norm": 6.226460933685303,
"learning_rate": 9e-05,
"loss": 1.6821,
"step": 10
},
{
"epoch": 0.008627450980392156,
"grad_norm": 6.819662570953369,
"learning_rate": 8.999384872466111e-05,
"loss": 1.73,
"step": 11
},
{
"epoch": 0.009411764705882352,
"grad_norm": 8.317466735839844,
"learning_rate": 8.997539658034168e-05,
"loss": 1.7702,
"step": 12
},
{
"epoch": 0.01019607843137255,
"grad_norm": 7.111891269683838,
"learning_rate": 8.994464861167372e-05,
"loss": 1.6366,
"step": 13
},
{
"epoch": 0.010980392156862745,
"grad_norm": 7.476611614227295,
"learning_rate": 8.990161322484486e-05,
"loss": 1.6355,
"step": 14
},
{
"epoch": 0.011764705882352941,
"grad_norm": 6.031744480133057,
"learning_rate": 8.984630218530015e-05,
"loss": 1.4807,
"step": 15
},
{
"epoch": 0.012549019607843137,
"grad_norm": 6.812473773956299,
"learning_rate": 8.977873061452552e-05,
"loss": 1.6878,
"step": 16
},
{
"epoch": 0.013333333333333334,
"grad_norm": 8.05068588256836,
"learning_rate": 8.969891698591372e-05,
"loss": 1.828,
"step": 17
},
{
"epoch": 0.01411764705882353,
"grad_norm": 12.39442253112793,
"learning_rate": 8.96068831197139e-05,
"loss": 1.5053,
"step": 18
},
{
"epoch": 0.014901960784313726,
"grad_norm": 10.175419807434082,
"learning_rate": 8.950265417706609e-05,
"loss": 1.7489,
"step": 19
},
{
"epoch": 0.01568627450980392,
"grad_norm": 8.073866844177246,
"learning_rate": 8.938625865312251e-05,
"loss": 1.9391,
"step": 20
},
{
"epoch": 0.01647058823529412,
"grad_norm": 7.202723026275635,
"learning_rate": 8.925772836925722e-05,
"loss": 1.3958,
"step": 21
},
{
"epoch": 0.017254901960784313,
"grad_norm": 8.959539413452148,
"learning_rate": 8.911709846436643e-05,
"loss": 1.6471,
"step": 22
},
{
"epoch": 0.01803921568627451,
"grad_norm": 9.780714988708496,
"learning_rate": 8.896440738526198e-05,
"loss": 1.3311,
"step": 23
},
{
"epoch": 0.018823529411764704,
"grad_norm": 9.363273620605469,
"learning_rate": 8.879969687616027e-05,
"loss": 1.5693,
"step": 24
},
{
"epoch": 0.0196078431372549,
"grad_norm": 11.106522560119629,
"learning_rate": 8.862301196726988e-05,
"loss": 1.3139,
"step": 25
},
{
"epoch": 0.0203921568627451,
"grad_norm": 9.613574028015137,
"learning_rate": 8.84344009624807e-05,
"loss": 1.0847,
"step": 26
},
{
"epoch": 0.021176470588235293,
"grad_norm": 9.658738136291504,
"learning_rate": 8.823391542615818e-05,
"loss": 1.432,
"step": 27
},
{
"epoch": 0.02196078431372549,
"grad_norm": 7.206670761108398,
"learning_rate": 8.80216101690461e-05,
"loss": 1.0293,
"step": 28
},
{
"epoch": 0.022745098039215685,
"grad_norm": 11.575990676879883,
"learning_rate": 8.779754323328193e-05,
"loss": 1.3784,
"step": 29
},
{
"epoch": 0.023529411764705882,
"grad_norm": 9.105690956115723,
"learning_rate": 8.756177587652856e-05,
"loss": 0.9299,
"step": 30
},
{
"epoch": 0.02431372549019608,
"grad_norm": 11.146190643310547,
"learning_rate": 8.731437255522727e-05,
"loss": 1.127,
"step": 31
},
{
"epoch": 0.025098039215686273,
"grad_norm": 13.225046157836914,
"learning_rate": 8.705540090697575e-05,
"loss": 1.3228,
"step": 32
},
{
"epoch": 0.02588235294117647,
"grad_norm": 12.464346885681152,
"learning_rate": 8.678493173203682e-05,
"loss": 1.9641,
"step": 33
},
{
"epoch": 0.02666666666666667,
"grad_norm": 10.996861457824707,
"learning_rate": 8.650303897398232e-05,
"loss": 1.6458,
"step": 34
},
{
"epoch": 0.027450980392156862,
"grad_norm": 11.745558738708496,
"learning_rate": 8.620979969947759e-05,
"loss": 1.6805,
"step": 35
},
{
"epoch": 0.02823529411764706,
"grad_norm": 10.623291015625,
"learning_rate": 8.590529407721231e-05,
"loss": 1.594,
"step": 36
},
{
"epoch": 0.029019607843137254,
"grad_norm": 9.937420845031738,
"learning_rate": 8.558960535598317e-05,
"loss": 1.4287,
"step": 37
},
{
"epoch": 0.02980392156862745,
"grad_norm": 12.139042854309082,
"learning_rate": 8.526281984193436e-05,
"loss": 1.7026,
"step": 38
},
{
"epoch": 0.03058823529411765,
"grad_norm": 12.166768074035645,
"learning_rate": 8.492502687496253e-05,
"loss": 2.1068,
"step": 39
},
{
"epoch": 0.03137254901960784,
"grad_norm": 12.726622581481934,
"learning_rate": 8.4576318804292e-05,
"loss": 1.583,
"step": 40
},
{
"epoch": 0.03215686274509804,
"grad_norm": 21.52069854736328,
"learning_rate": 8.421679096322747e-05,
"loss": 1.7145,
"step": 41
},
{
"epoch": 0.03294117647058824,
"grad_norm": 13.835341453552246,
"learning_rate": 8.384654164309084e-05,
"loss": 2.445,
"step": 42
},
{
"epoch": 0.03372549019607843,
"grad_norm": 16.390380859375,
"learning_rate": 8.346567206634926e-05,
"loss": 1.6284,
"step": 43
},
{
"epoch": 0.034509803921568626,
"grad_norm": 19.605005264282227,
"learning_rate": 8.307428635894209e-05,
"loss": 2.1787,
"step": 44
},
{
"epoch": 0.03529411764705882,
"grad_norm": 12.839860916137695,
"learning_rate": 8.26724915218138e-05,
"loss": 1.6432,
"step": 45
},
{
"epoch": 0.03607843137254902,
"grad_norm": 17.19524383544922,
"learning_rate": 8.226039740166091e-05,
"loss": 2.6786,
"step": 46
},
{
"epoch": 0.03686274509803922,
"grad_norm": 22.435400009155273,
"learning_rate": 8.183811666090117e-05,
"loss": 2.043,
"step": 47
},
{
"epoch": 0.03764705882352941,
"grad_norm": 52.507537841796875,
"learning_rate": 8.140576474687264e-05,
"loss": 2.563,
"step": 48
},
{
"epoch": 0.038431372549019606,
"grad_norm": 33.50967025756836,
"learning_rate": 8.096345986027161e-05,
"loss": 2.8917,
"step": 49
},
{
"epoch": 0.0392156862745098,
"grad_norm": 47.0520133972168,
"learning_rate": 8.051132292283772e-05,
"loss": 3.6867,
"step": 50
},
{
"epoch": 0.0392156862745098,
"eval_loss": 0.4549524486064911,
"eval_runtime": 171.8457,
"eval_samples_per_second": 12.5,
"eval_steps_per_second": 3.125,
"step": 50
}
],
"logging_steps": 1,
"max_steps": 200,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 50,
"stateful_callbacks": {
"EarlyStoppingCallback": {
"args": {
"early_stopping_patience": 4,
"early_stopping_threshold": 0.0
},
"attributes": {
"early_stopping_patience_counter": 0
}
},
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 7.548522993392026e+16,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}
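
The JSON above is the raw `trainer_state.json` written by the Hugging Face `Trainer` when it saved this checkpoint at step 50. Below is a minimal sketch of how the log could be inspected offline; the file path is an assumption for illustration (the state file normally sits inside the checkpoint folder it was downloaded with), while the field names used (`log_history`, `best_metric`, `best_model_checkpoint`, `loss`, `eval_loss`, `step`) are taken directly from the state shown here.

```python
import json

# Assumed local path for illustration; adjust to wherever the checkpoint
# folder was downloaded (here it is checkpoint-50).
STATE_PATH = "checkpoint-50/trainer_state.json"

with open(STATE_PATH) as f:
    state = json.load(f)

# Training-loss entries carry "loss"; evaluation entries carry "eval_loss".
train_log = [e for e in state["log_history"] if "loss" in e]
eval_log = [e for e in state["log_history"] if "eval_loss" in e]

print(f"best eval_loss: {state['best_metric']:.4f} "
      f"(checkpoint: {state['best_model_checkpoint']})")
for entry in eval_log:
    print(f"step {entry['step']:>3}: eval_loss={entry['eval_loss']:.4f}")

# Rough sanity check on the training curve: mean loss over the first and
# last ten logged training steps.
first = [e["loss"] for e in train_log[:10]]
last = [e["loss"] for e in train_log[-10:]]
print(f"mean train loss, first 10 logged steps: {sum(first)/len(first):.3f}")
print(f"mean train loss, last 10 logged steps:  {sum(last)/len(last):.3f}")
```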