{
"best_metric": 0.875745415687561,
"best_model_checkpoint": "miner_id_24/checkpoint-500",
"epoch": 0.03259133722256624,
"eval_steps": 50,
"global_step": 500,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 6.518267444513249e-05,
"eval_loss": 2.850663900375366,
"eval_runtime": 443.5942,
"eval_samples_per_second": 14.563,
"eval_steps_per_second": 3.641,
"step": 1
},
{
"epoch": 0.0006518267444513248,
"grad_norm": 3.971789598464966,
"learning_rate": 4.16e-05,
"loss": 2.0815,
"step": 10
},
{
"epoch": 0.0013036534889026496,
"grad_norm": 2.8578591346740723,
"learning_rate": 8.32e-05,
"loss": 1.8557,
"step": 20
},
{
"epoch": 0.0019554802333539745,
"grad_norm": 3.0689802169799805,
"learning_rate": 0.0001248,
"loss": 1.5229,
"step": 30
},
{
"epoch": 0.002607306977805299,
"grad_norm": 4.3151397705078125,
"learning_rate": 0.0001664,
"loss": 1.3099,
"step": 40
},
{
"epoch": 0.0032591337222566243,
"grad_norm": 4.729147434234619,
"learning_rate": 0.000208,
"loss": 1.6873,
"step": 50
},
{
"epoch": 0.0032591337222566243,
"eval_loss": 1.273123025894165,
"eval_runtime": 445.0677,
"eval_samples_per_second": 14.515,
"eval_steps_per_second": 3.629,
"step": 50
},
{
"epoch": 0.003910960466707949,
"grad_norm": 2.3496737480163574,
"learning_rate": 0.0002077466612270217,
"loss": 1.0568,
"step": 60
},
{
"epoch": 0.004562787211159274,
"grad_norm": 2.499488353729248,
"learning_rate": 0.0002069878791491233,
"loss": 1.1359,
"step": 70
},
{
"epoch": 0.005214613955610598,
"grad_norm": 3.1299209594726562,
"learning_rate": 0.00020572735047631578,
"loss": 1.2264,
"step": 80
},
{
"epoch": 0.005866440700061924,
"grad_norm": 3.3837058544158936,
"learning_rate": 0.00020397121637758515,
"loss": 1.2004,
"step": 90
},
{
"epoch": 0.006518267444513249,
"grad_norm": 2.973175287246704,
"learning_rate": 0.00020172803256173445,
"loss": 1.545,
"step": 100
},
{
"epoch": 0.006518267444513249,
"eval_loss": 1.275679111480713,
"eval_runtime": 446.3022,
"eval_samples_per_second": 14.474,
"eval_steps_per_second": 3.619,
"step": 100
},
{
"epoch": 0.007170094188964573,
"grad_norm": 2.3844895362854004,
"learning_rate": 0.00019900872759483047,
"loss": 1.0244,
"step": 110
},
{
"epoch": 0.007821920933415898,
"grad_norm": 2.4352855682373047,
"learning_rate": 0.0001958265496573284,
"loss": 1.1891,
"step": 120
},
{
"epoch": 0.008473747677867223,
"grad_norm": 3.231959819793701,
"learning_rate": 0.00019219700200026827,
"loss": 1.2308,
"step": 130
},
{
"epoch": 0.009125574422318547,
"grad_norm": 2.5154852867126465,
"learning_rate": 0.0001881377674149945,
"loss": 1.2077,
"step": 140
},
{
"epoch": 0.009777401166769872,
"grad_norm": 3.0093817710876465,
"learning_rate": 0.00018366862208437368,
"loss": 1.3988,
"step": 150
},
{
"epoch": 0.009777401166769872,
"eval_loss": 1.1874332427978516,
"eval_runtime": 447.657,
"eval_samples_per_second": 14.431,
"eval_steps_per_second": 3.608,
"step": 150
},
{
"epoch": 0.010429227911221197,
"grad_norm": 2.5995352268218994,
"learning_rate": 0.00017881133923521971,
"loss": 1.0265,
"step": 160
},
{
"epoch": 0.011081054655672521,
"grad_norm": 2.5440680980682373,
"learning_rate": 0.00017358958306132124,
"loss": 0.9651,
"step": 170
},
{
"epoch": 0.011732881400123848,
"grad_norm": 2.6204676628112793,
"learning_rate": 0.00016802879343386844,
"loss": 1.0322,
"step": 180
},
{
"epoch": 0.012384708144575172,
"grad_norm": 2.2899601459503174,
"learning_rate": 0.00016215606196095766,
"loss": 1.1977,
"step": 190
},
{
"epoch": 0.013036534889026497,
"grad_norm": 3.2127535343170166,
"learning_rate": 0.000156,
"loss": 1.6102,
"step": 200
},
{
"epoch": 0.013036534889026497,
"eval_loss": 1.1086701154708862,
"eval_runtime": 446.4333,
"eval_samples_per_second": 14.47,
"eval_steps_per_second": 3.618,
"step": 200
},
{
"epoch": 0.013688361633477822,
"grad_norm": 1.3613508939743042,
"learning_rate": 0.00014959059926606403,
"loss": 0.838,
"step": 210
},
{
"epoch": 0.014340188377929147,
"grad_norm": 1.6068763732910156,
"learning_rate": 0.00014295908571525487,
"loss": 0.9718,
"step": 220
},
{
"epoch": 0.014992015122380471,
"grad_norm": 2.977311849594116,
"learning_rate": 0.00013613776741499452,
"loss": 1.0673,
"step": 230
},
{
"epoch": 0.015643841866831796,
"grad_norm": 2.3362767696380615,
"learning_rate": 0.00012915987714236542,
"loss": 1.1417,
"step": 240
},
{
"epoch": 0.01629566861128312,
"grad_norm": 5.354211807250977,
"learning_rate": 0.00012205941047736077,
"loss": 1.3597,
"step": 250
},
{
"epoch": 0.01629566861128312,
"eval_loss": 1.0507456064224243,
"eval_runtime": 446.5699,
"eval_samples_per_second": 14.466,
"eval_steps_per_second": 3.616,
"step": 250
},
{
"epoch": 0.016947495355734445,
"grad_norm": 2.0789878368377686,
"learning_rate": 0.00011487096017983597,
"loss": 0.8857,
"step": 260
},
{
"epoch": 0.01759932210018577,
"grad_norm": 1.4197766780853271,
"learning_rate": 0.00010762954765706012,
"loss": 0.903,
"step": 270
},
{
"epoch": 0.018251148844637095,
"grad_norm": 2.561424732208252,
"learning_rate": 0.00010037045234293992,
"loss": 0.9548,
"step": 280
},
{
"epoch": 0.01890297558908842,
"grad_norm": 1.98018217086792,
"learning_rate": 9.312903982016405e-05,
"loss": 1.1046,
"step": 290
},
{
"epoch": 0.019554802333539744,
"grad_norm": 5.7000837326049805,
"learning_rate": 8.594058952263925e-05,
"loss": 1.3285,
"step": 300
},
{
"epoch": 0.019554802333539744,
"eval_loss": 0.9808639287948608,
"eval_runtime": 446.8663,
"eval_samples_per_second": 14.456,
"eval_steps_per_second": 3.614,
"step": 300
},
{
"epoch": 0.02020662907799107,
"grad_norm": 1.3622760772705078,
"learning_rate": 7.884012285763457e-05,
"loss": 0.7478,
"step": 310
},
{
"epoch": 0.020858455822442393,
"grad_norm": 1.826834797859192,
"learning_rate": 7.186223258500548e-05,
"loss": 0.8517,
"step": 320
},
{
"epoch": 0.021510282566893718,
"grad_norm": 1.2498234510421753,
"learning_rate": 6.504091428474514e-05,
"loss": 1.0577,
"step": 330
},
{
"epoch": 0.022162109311345043,
"grad_norm": 2.1019372940063477,
"learning_rate": 5.840940073393593e-05,
"loss": 0.8884,
"step": 340
},
{
"epoch": 0.02281393605579637,
"grad_norm": 2.712697744369507,
"learning_rate": 5.200000000000002e-05,
"loss": 1.0915,
"step": 350
},
{
"epoch": 0.02281393605579637,
"eval_loss": 0.9416553974151611,
"eval_runtime": 449.3829,
"eval_samples_per_second": 14.375,
"eval_steps_per_second": 3.594,
"step": 350
},
{
"epoch": 0.023465762800247696,
"grad_norm": 1.5010788440704346,
"learning_rate": 4.5843938039042344e-05,
"loss": 0.829,
"step": 360
},
{
"epoch": 0.02411758954469902,
"grad_norm": 3.3226091861724854,
"learning_rate": 3.997120656613154e-05,
"loss": 0.9773,
"step": 370
},
{
"epoch": 0.024769416289150345,
"grad_norm": 1.6384238004684448,
"learning_rate": 3.441041693867878e-05,
"loss": 0.8725,
"step": 380
},
{
"epoch": 0.02542124303360167,
"grad_norm": 2.352992534637451,
"learning_rate": 2.9188660764780296e-05,
"loss": 0.9907,
"step": 390
},
{
"epoch": 0.026073069778052994,
"grad_norm": 3.1694495677948,
"learning_rate": 2.4331377915626298e-05,
"loss": 1.2449,
"step": 400
},
{
"epoch": 0.026073069778052994,
"eval_loss": 0.8955165147781372,
"eval_runtime": 446.7108,
"eval_samples_per_second": 14.461,
"eval_steps_per_second": 3.615,
"step": 400
},
{
"epoch": 0.02672489652250432,
"grad_norm": 2.0528738498687744,
"learning_rate": 1.9862232585005475e-05,
"loss": 0.7577,
"step": 410
},
{
"epoch": 0.027376723266955644,
"grad_norm": 1.3070178031921387,
"learning_rate": 1.58029979997317e-05,
"loss": 0.7595,
"step": 420
},
{
"epoch": 0.02802855001140697,
"grad_norm": 2.6197099685668945,
"learning_rate": 1.2173450342671593e-05,
"loss": 0.7412,
"step": 430
},
{
"epoch": 0.028680376755858293,
"grad_norm": 2.77477765083313,
"learning_rate": 8.991272405169498e-06,
"loss": 0.9327,
"step": 440
},
{
"epoch": 0.029332203500309618,
"grad_norm": 4.5697340965271,
"learning_rate": 6.271967438265535e-06,
"loss": 1.0734,
"step": 450
},
{
"epoch": 0.029332203500309618,
"eval_loss": 0.876220166683197,
"eval_runtime": 443.5926,
"eval_samples_per_second": 14.563,
"eval_steps_per_second": 3.641,
"step": 450
},
{
"epoch": 0.029984030244760942,
"grad_norm": 2.2378225326538086,
"learning_rate": 4.028783622414835e-06,
"loss": 0.6365,
"step": 460
},
{
"epoch": 0.030635856989212267,
"grad_norm": 2.819683074951172,
"learning_rate": 2.272649523684208e-06,
"loss": 0.8311,
"step": 470
},
{
"epoch": 0.03128768373366359,
"grad_norm": 1.4101600646972656,
"learning_rate": 1.0121208508766823e-06,
"loss": 0.9517,
"step": 480
},
{
"epoch": 0.03193951047811492,
"grad_norm": 1.9987350702285767,
"learning_rate": 2.533387729782834e-07,
"loss": 0.8854,
"step": 490
},
{
"epoch": 0.03259133722256624,
"grad_norm": 5.414979934692383,
"learning_rate": 0.0,
"loss": 1.0317,
"step": 500
},
{
"epoch": 0.03259133722256624,
"eval_loss": 0.875745415687561,
"eval_runtime": 444.3276,
"eval_samples_per_second": 14.539,
"eval_steps_per_second": 3.635,
"step": 500
}
],
"logging_steps": 10,
"max_steps": 500,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 50,
"stateful_callbacks": {
"EarlyStoppingCallback": {
"args": {
"early_stopping_patience": 3,
"early_stopping_threshold": 0.0
},
"attributes": {
"early_stopping_patience_counter": 0
}
},
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 1.0400815009234944e+17,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}
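
The state above follows the trainer_state.json layout that the Hugging Face transformers Trainer writes into each checkpoint directory: training records (loss, grad_norm, learning_rate) and evaluation records (eval_loss, eval_runtime, ...) are interleaved in log_history and keyed by step. Below is a minimal sketch, not part of the checkpoint itself, of reading the file back and listing the evaluation curve; the path is an assumption inferred from best_model_checkpoint above.

import json

# Assumed location, inferred from "best_model_checkpoint" in the state above;
# the Trainer normally saves this file inside each checkpoint directory.
STATE_PATH = "miner_id_24/checkpoint-500/trainer_state.json"

with open(STATE_PATH) as f:
    state = json.load(f)

# log_history interleaves training records (keyed by "loss") with
# evaluation records (keyed by "eval_loss"); split them by key.
train_log = [e for e in state["log_history"] if "loss" in e]
eval_log = [e for e in state["log_history"] if "eval_loss" in e]

print(f"best eval_loss {state['best_metric']:.4f} "
      f"at {state['best_model_checkpoint']}")
for e in eval_log:
    print(f"step {e['step']:>3}  eval_loss {e['eval_loss']:.4f}")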