{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.03576266594418857,
"eval_steps": 50,
"global_step": 99,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.00036123904994129867,
"grad_norm": 11.258881568908691,
"learning_rate": 5e-05,
"loss": 4.9805,
"step": 1
},
{
"epoch": 0.00036123904994129867,
"eval_loss": 1.3654987812042236,
"eval_runtime": 65.6871,
"eval_samples_per_second": 17.751,
"eval_steps_per_second": 8.875,
"step": 1
},
{
"epoch": 0.0007224780998825973,
"grad_norm": 11.392064094543457,
"learning_rate": 0.0001,
"loss": 5.6516,
"step": 2
},
{
"epoch": 0.001083717149823896,
"grad_norm": 10.067161560058594,
"learning_rate": 0.00015,
"loss": 5.1962,
"step": 3
},
{
"epoch": 0.0014449561997651947,
"grad_norm": 8.352842330932617,
"learning_rate": 0.0002,
"loss": 4.3996,
"step": 4
},
{
"epoch": 0.0018061952497064934,
"grad_norm": 6.142587661743164,
"learning_rate": 0.00025,
"loss": 3.9857,
"step": 5
},
{
"epoch": 0.002167434299647792,
"grad_norm": 12.07518196105957,
"learning_rate": 0.0003,
"loss": 5.3352,
"step": 6
},
{
"epoch": 0.0025286733495890907,
"grad_norm": 7.874082088470459,
"learning_rate": 0.00035,
"loss": 4.9328,
"step": 7
},
{
"epoch": 0.0028899123995303894,
"grad_norm": 4.830751895904541,
"learning_rate": 0.0004,
"loss": 3.4086,
"step": 8
},
{
"epoch": 0.003251151449471688,
"grad_norm": 4.93334436416626,
"learning_rate": 0.00045000000000000004,
"loss": 3.7905,
"step": 9
},
{
"epoch": 0.0036123904994129867,
"grad_norm": 5.134774684906006,
"learning_rate": 0.0005,
"loss": 4.1506,
"step": 10
},
{
"epoch": 0.003973629549354285,
"grad_norm": 5.855077266693115,
"learning_rate": 0.0004998442655654946,
"loss": 3.7257,
"step": 11
},
{
"epoch": 0.004334868599295584,
"grad_norm": 5.537413120269775,
"learning_rate": 0.0004993772562876909,
"loss": 4.3003,
"step": 12
},
{
"epoch": 0.004696107649236883,
"grad_norm": 5.094138145446777,
"learning_rate": 0.0004985995540019955,
"loss": 4.8794,
"step": 13
},
{
"epoch": 0.005057346699178181,
"grad_norm": 4.8982930183410645,
"learning_rate": 0.0004975121276286136,
"loss": 4.6231,
"step": 14
},
{
"epoch": 0.00541858574911948,
"grad_norm": 4.569957733154297,
"learning_rate": 0.0004961163319653958,
"loss": 4.0061,
"step": 15
},
{
"epoch": 0.005779824799060779,
"grad_norm": 7.189935207366943,
"learning_rate": 0.0004944139059999286,
"loss": 5.0499,
"step": 16
},
{
"epoch": 0.006141063849002077,
"grad_norm": 4.611532688140869,
"learning_rate": 0.000492406970742972,
"loss": 3.2843,
"step": 17
},
{
"epoch": 0.006502302898943376,
"grad_norm": 4.90513277053833,
"learning_rate": 0.0004900980265859448,
"loss": 3.4266,
"step": 18
},
{
"epoch": 0.006863541948884675,
"grad_norm": 6.561873435974121,
"learning_rate": 0.0004874899501857477,
"loss": 4.2261,
"step": 19
},
{
"epoch": 0.007224780998825973,
"grad_norm": 11.763420104980469,
"learning_rate": 0.00048458599088080736,
"loss": 3.9256,
"step": 20
},
{
"epoch": 0.007586020048767272,
"grad_norm": 5.913724422454834,
"learning_rate": 0.0004813897666428053,
"loss": 3.4543,
"step": 21
},
{
"epoch": 0.00794725909870857,
"grad_norm": 8.865498542785645,
"learning_rate": 0.00047790525956913543,
"loss": 5.0105,
"step": 22
},
{
"epoch": 0.00830849814864987,
"grad_norm": 6.202934265136719,
"learning_rate": 0.0004741368109217071,
"loss": 4.0654,
"step": 23
},
{
"epoch": 0.008669737198591168,
"grad_norm": 9.578519821166992,
"learning_rate": 0.00047008911571827283,
"loss": 5.3396,
"step": 24
},
{
"epoch": 0.009030976248532467,
"grad_norm": 7.616293430328369,
"learning_rate": 0.00046576721688302105,
"loss": 3.7428,
"step": 25
},
{
"epoch": 0.009392215298473765,
"grad_norm": 5.799994945526123,
"learning_rate": 0.0004611764989637205,
"loss": 4.0877,
"step": 26
},
{
"epoch": 0.009753454348415064,
"grad_norm": 5.58920955657959,
"learning_rate": 0.0004563226814232444,
"loss": 4.2916,
"step": 27
},
{
"epoch": 0.010114693398356363,
"grad_norm": 4.770939826965332,
"learning_rate": 0.0004512118115138315,
"loss": 4.2956,
"step": 28
},
{
"epoch": 0.010475932448297661,
"grad_norm": 4.9692063331604,
"learning_rate": 0.0004458502567429631,
"loss": 3.542,
"step": 29
},
{
"epoch": 0.01083717149823896,
"grad_norm": 4.753838539123535,
"learning_rate": 0.00044024469694024196,
"loss": 3.0264,
"step": 30
},
{
"epoch": 0.011198410548180259,
"grad_norm": 5.1347575187683105,
"learning_rate": 0.00043440211593515554,
"loss": 3.1049,
"step": 31
},
{
"epoch": 0.011559649598121557,
"grad_norm": 5.5637006759643555,
"learning_rate": 0.0004283297928560951,
"loss": 3.8163,
"step": 32
},
{
"epoch": 0.011920888648062856,
"grad_norm": 5.401363849639893,
"learning_rate": 0.0004220352930614672,
"loss": 3.8318,
"step": 33
},
{
"epoch": 0.012282127698004155,
"grad_norm": 5.22984504699707,
"learning_rate": 0.00041552645871420013,
"loss": 3.8126,
"step": 34
},
{
"epoch": 0.012643366747945453,
"grad_norm": 4.754082679748535,
"learning_rate": 0.00040881139901138467,
"loss": 4.0395,
"step": 35
},
{
"epoch": 0.013004605797886752,
"grad_norm": 4.820829391479492,
"learning_rate": 0.00040189848008122475,
"loss": 3.1665,
"step": 36
},
{
"epoch": 0.01336584484782805,
"grad_norm": 3.9806621074676514,
"learning_rate": 0.00039479631455988334,
"loss": 2.9394,
"step": 37
},
{
"epoch": 0.01372708389776935,
"grad_norm": 4.494911193847656,
"learning_rate": 0.0003875137508612103,
"loss": 3.1021,
"step": 38
},
{
"epoch": 0.014088322947710648,
"grad_norm": 4.835848331451416,
"learning_rate": 0.00038005986215272055,
"loss": 3.6138,
"step": 39
},
{
"epoch": 0.014449561997651947,
"grad_norm": 5.597693920135498,
"learning_rate": 0.0003724439350515571,
"loss": 4.5553,
"step": 40
},
{
"epoch": 0.014810801047593245,
"grad_norm": 4.290703773498535,
"learning_rate": 0.0003646754580545226,
"loss": 3.271,
"step": 41
},
{
"epoch": 0.015172040097534544,
"grad_norm": 4.479038238525391,
"learning_rate": 0.000356764109716594,
"loss": 3.1925,
"step": 42
},
{
"epoch": 0.015533279147475843,
"grad_norm": 4.6170654296875,
"learning_rate": 0.00034871974659264783,
"loss": 2.8589,
"step": 43
},
{
"epoch": 0.01589451819741714,
"grad_norm": 5.0329060554504395,
"learning_rate": 0.0003405523909574206,
"loss": 3.1158,
"step": 44
},
{
"epoch": 0.01625575724735844,
"grad_norm": 5.719956398010254,
"learning_rate": 0.0003322722183190025,
"loss": 3.856,
"step": 45
},
{
"epoch": 0.01661699629729974,
"grad_norm": 6.794640064239502,
"learning_rate": 0.0003238895447414211,
"loss": 4.4511,
"step": 46
},
{
"epoch": 0.016978235347241036,
"grad_norm": 3.9683847427368164,
"learning_rate": 0.0003154148139921102,
"loss": 2.8105,
"step": 47
},
{
"epoch": 0.017339474397182336,
"grad_norm": 5.173192024230957,
"learning_rate": 0.00030685858453027663,
"loss": 2.7872,
"step": 48
},
{
"epoch": 0.017700713447123633,
"grad_norm": 3.741673469543457,
"learning_rate": 0.0002982315163523742,
"loss": 2.6865,
"step": 49
},
{
"epoch": 0.018061952497064934,
"grad_norm": 3.9549710750579834,
"learning_rate": 0.000289544357711076,
"loss": 3.0372,
"step": 50
},
{
"epoch": 0.018061952497064934,
"eval_loss": 0.8259496092796326,
"eval_runtime": 66.5192,
"eval_samples_per_second": 17.529,
"eval_steps_per_second": 8.764,
"step": 50
},
{
"epoch": 0.01842319154700623,
"grad_norm": 5.567710876464844,
"learning_rate": 0.0002808079317242896,
"loss": 3.7515,
"step": 51
},
{
"epoch": 0.01878443059694753,
"grad_norm": 7.243776321411133,
"learning_rate": 0.0002720331228909005,
"loss": 2.6164,
"step": 52
},
{
"epoch": 0.019145669646888828,
"grad_norm": 4.8548431396484375,
"learning_rate": 0.00026323086353004075,
"loss": 3.2759,
"step": 53
},
{
"epoch": 0.019506908696830128,
"grad_norm": 3.919628858566284,
"learning_rate": 0.0002544121201607822,
"loss": 2.9809,
"step": 54
},
{
"epoch": 0.019868147746771425,
"grad_norm": 5.447530269622803,
"learning_rate": 0.00024558787983921783,
"loss": 3.5247,
"step": 55
},
{
"epoch": 0.020229386796712726,
"grad_norm": 5.87801456451416,
"learning_rate": 0.0002367691364699592,
"loss": 2.7521,
"step": 56
},
{
"epoch": 0.020590625846654022,
"grad_norm": 4.117478847503662,
"learning_rate": 0.00022796687710909964,
"loss": 3.7246,
"step": 57
},
{
"epoch": 0.020951864896595323,
"grad_norm": 3.548586845397949,
"learning_rate": 0.00021919206827571036,
"loss": 3.4873,
"step": 58
},
{
"epoch": 0.02131310394653662,
"grad_norm": 4.6662702560424805,
"learning_rate": 0.00021045564228892402,
"loss": 3.6514,
"step": 59
},
{
"epoch": 0.02167434299647792,
"grad_norm": 3.70032000541687,
"learning_rate": 0.00020176848364762578,
"loss": 3.4641,
"step": 60
},
{
"epoch": 0.022035582046419217,
"grad_norm": 5.118409633636475,
"learning_rate": 0.00019314141546972343,
"loss": 3.7131,
"step": 61
},
{
"epoch": 0.022396821096360518,
"grad_norm": 5.562695026397705,
"learning_rate": 0.00018458518600788986,
"loss": 3.2622,
"step": 62
},
{
"epoch": 0.022758060146301815,
"grad_norm": 6.675144195556641,
"learning_rate": 0.00017611045525857898,
"loss": 3.6492,
"step": 63
},
{
"epoch": 0.023119299196243115,
"grad_norm": 4.012448310852051,
"learning_rate": 0.0001677277816809975,
"loss": 2.7895,
"step": 64
},
{
"epoch": 0.023480538246184412,
"grad_norm": 3.4179670810699463,
"learning_rate": 0.00015944760904257942,
"loss": 2.6486,
"step": 65
},
{
"epoch": 0.023841777296125712,
"grad_norm": 5.845145225524902,
"learning_rate": 0.0001512802534073522,
"loss": 4.1475,
"step": 66
},
{
"epoch": 0.02420301634606701,
"grad_norm": 3.7966604232788086,
"learning_rate": 0.00014323589028340596,
"loss": 3.6578,
"step": 67
},
{
"epoch": 0.02456425539600831,
"grad_norm": 4.147678852081299,
"learning_rate": 0.00013532454194547733,
"loss": 3.165,
"step": 68
},
{
"epoch": 0.024925494445949607,
"grad_norm": 4.22127628326416,
"learning_rate": 0.00012755606494844294,
"loss": 3.6101,
"step": 69
},
{
"epoch": 0.025286733495890907,
"grad_norm": 3.980015516281128,
"learning_rate": 0.00011994013784727947,
"loss": 2.9809,
"step": 70
},
{
"epoch": 0.025647972545832204,
"grad_norm": 3.8896799087524414,
"learning_rate": 0.00011248624913878966,
"loss": 2.7593,
"step": 71
},
{
"epoch": 0.026009211595773504,
"grad_norm": 3.7454440593719482,
"learning_rate": 0.0001052036854401166,
"loss": 2.7478,
"step": 72
},
{
"epoch": 0.0263704506457148,
"grad_norm": 3.633699417114258,
"learning_rate": 9.810151991877531e-05,
"loss": 3.4857,
"step": 73
},
{
"epoch": 0.0267316896956561,
"grad_norm": 3.71138334274292,
"learning_rate": 9.118860098861537e-05,
"loss": 2.4003,
"step": 74
},
{
"epoch": 0.0270929287455974,
"grad_norm": 3.327180862426758,
"learning_rate": 8.44735412857999e-05,
"loss": 2.5128,
"step": 75
},
{
"epoch": 0.0274541677955387,
"grad_norm": 3.146794080734253,
"learning_rate": 7.79647069385328e-05,
"loss": 2.2593,
"step": 76
},
{
"epoch": 0.027815406845479996,
"grad_norm": 3.6162102222442627,
"learning_rate": 7.167020714390501e-05,
"loss": 2.586,
"step": 77
},
{
"epoch": 0.028176645895421296,
"grad_norm": 3.828462600708008,
"learning_rate": 6.559788406484446e-05,
"loss": 2.3276,
"step": 78
},
{
"epoch": 0.028537884945362593,
"grad_norm": 3.7188730239868164,
"learning_rate": 5.975530305975807e-05,
"loss": 3.0949,
"step": 79
},
{
"epoch": 0.028899123995303894,
"grad_norm": 4.314111709594727,
"learning_rate": 5.414974325703686e-05,
"loss": 2.537,
"step": 80
},
{
"epoch": 0.02926036304524519,
"grad_norm": 4.148589134216309,
"learning_rate": 4.8788188486168616e-05,
"loss": 2.833,
"step": 81
},
{
"epoch": 0.02962160209518649,
"grad_norm": 3.602301836013794,
"learning_rate": 4.367731857675569e-05,
"loss": 2.4284,
"step": 82
},
{
"epoch": 0.029982841145127788,
"grad_norm": 5.501028537750244,
"learning_rate": 3.882350103627952e-05,
"loss": 2.9707,
"step": 83
},
{
"epoch": 0.03034408019506909,
"grad_norm": 4.275007247924805,
"learning_rate": 3.423278311697897e-05,
"loss": 2.373,
"step": 84
},
{
"epoch": 0.030705319245010385,
"grad_norm": 3.906873941421509,
"learning_rate": 2.9910884281727225e-05,
"loss": 3.7784,
"step": 85
},
{
"epoch": 0.031066558294951686,
"grad_norm": 3.9153549671173096,
"learning_rate": 2.586318907829291e-05,
"loss": 3.0894,
"step": 86
},
{
"epoch": 0.03142779734489298,
"grad_norm": 4.450911521911621,
"learning_rate": 2.209474043086457e-05,
"loss": 3.2057,
"step": 87
},
{
"epoch": 0.03178903639483428,
"grad_norm": 3.1464052200317383,
"learning_rate": 1.861023335719475e-05,
"loss": 2.3528,
"step": 88
},
{
"epoch": 0.03215027544477558,
"grad_norm": 3.700227737426758,
"learning_rate": 1.5414009119192633e-05,
"loss": 2.8889,
"step": 89
},
{
"epoch": 0.03251151449471688,
"grad_norm": 3.8259425163269043,
"learning_rate": 1.25100498142523e-05,
"loss": 2.3349,
"step": 90
},
{
"epoch": 0.03287275354465818,
"grad_norm": 4.0107316970825195,
"learning_rate": 9.901973414055187e-06,
"loss": 2.3647,
"step": 91
},
{
"epoch": 0.03323399259459948,
"grad_norm": 3.5053553581237793,
"learning_rate": 7.593029257027956e-06,
"loss": 3.2543,
"step": 92
},
{
"epoch": 0.03359523164454078,
"grad_norm": 3.6908042430877686,
"learning_rate": 5.5860940000714015e-06,
"loss": 2.9373,
"step": 93
},
{
"epoch": 0.03395647069448207,
"grad_norm": 3.165818214416504,
"learning_rate": 3.8836680346041594e-06,
"loss": 2.6173,
"step": 94
},
{
"epoch": 0.03431770974442337,
"grad_norm": 4.046987056732178,
"learning_rate": 2.487872371386424e-06,
"loss": 3.5724,
"step": 95
},
{
"epoch": 0.03467894879436467,
"grad_norm": 4.770341873168945,
"learning_rate": 1.4004459980045125e-06,
"loss": 3.2451,
"step": 96
},
{
"epoch": 0.03504018784430597,
"grad_norm": 3.3098256587982178,
"learning_rate": 6.22743712309054e-07,
"loss": 2.3355,
"step": 97
},
{
"epoch": 0.035401426894247266,
"grad_norm": 3.6350321769714355,
"learning_rate": 1.557344345054501e-07,
"loss": 3.2009,
"step": 98
},
{
"epoch": 0.03576266594418857,
"grad_norm": 4.967100620269775,
"learning_rate": 0.0,
"loss": 3.182,
"step": 99
}
],
"logging_steps": 1,
"max_steps": 99,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 300,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 3.308168021016576e+16,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}