|
{
  "best_metric": 0.12525896728038788,
  "best_model_checkpoint": "miner_id_24/checkpoint-100",
  "epoch": 2.018264840182648,
  "eval_steps": 25,
  "global_step": 110,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0182648401826484,
      "grad_norm": 5.516948699951172,
      "learning_rate": 2.9999999999999997e-05,
      "loss": 6.9436,
      "step": 1
    },
    {
      "epoch": 0.0182648401826484,
      "eval_loss": 2.470696210861206,
      "eval_runtime": 3.3615,
      "eval_samples_per_second": 14.874,
      "eval_steps_per_second": 2.082,
      "step": 1
    },
    {
      "epoch": 0.0365296803652968,
      "grad_norm": 6.3923020362854,
      "learning_rate": 5.9999999999999995e-05,
      "loss": 8.2395,
      "step": 2
    },
    {
      "epoch": 0.0547945205479452,
      "grad_norm": 7.493215560913086,
      "learning_rate": 8.999999999999999e-05,
      "loss": 8.8171,
      "step": 3
    },
    {
      "epoch": 0.0730593607305936,
      "grad_norm": 6.273920059204102,
      "learning_rate": 0.00011999999999999999,
      "loss": 8.226,
      "step": 4
    },
    {
      "epoch": 0.091324200913242,
      "grad_norm": 10.592157363891602,
      "learning_rate": 0.00015,
      "loss": 7.53,
      "step": 5
    },
    {
      "epoch": 0.1095890410958904,
      "grad_norm": 9.920427322387695,
      "learning_rate": 0.00017999999999999998,
      "loss": 7.9559,
      "step": 6
    },
    {
      "epoch": 0.1278538812785388,
      "grad_norm": 7.29619836807251,
      "learning_rate": 0.00020999999999999998,
      "loss": 6.8911,
      "step": 7
    },
    {
      "epoch": 0.1461187214611872,
      "grad_norm": 7.143266201019287,
      "learning_rate": 0.00023999999999999998,
      "loss": 6.2679,
      "step": 8
    },
    {
      "epoch": 0.1643835616438356,
      "grad_norm": 9.101966857910156,
      "learning_rate": 0.00027,
      "loss": 5.7364,
      "step": 9
    },
    {
      "epoch": 0.182648401826484,
      "grad_norm": 9.976201057434082,
      "learning_rate": 0.0003,
      "loss": 5.0311,
      "step": 10
    },
    {
      "epoch": 0.2009132420091324,
      "grad_norm": 9.90187931060791,
      "learning_rate": 0.0002999259840548597,
      "loss": 3.6094,
      "step": 11
    },
    {
      "epoch": 0.2191780821917808,
      "grad_norm": 14.418813705444336,
      "learning_rate": 0.0002997040092642407,
      "loss": 2.6951,
      "step": 12
    },
    {
      "epoch": 0.2374429223744292,
      "grad_norm": 11.6867094039917,
      "learning_rate": 0.000299334294690462,
      "loss": 1.4153,
      "step": 13
    },
    {
      "epoch": 0.2557077625570776,
      "grad_norm": 19.670818328857422,
      "learning_rate": 0.0002988172051971717,
      "loss": 2.9618,
      "step": 14
    },
    {
      "epoch": 0.273972602739726,
      "grad_norm": 7.9418044090271,
      "learning_rate": 0.00029815325108927063,
      "loss": 1.7467,
      "step": 15
    },
    {
      "epoch": 0.2922374429223744,
      "grad_norm": 4.048381805419922,
      "learning_rate": 0.0002973430876093033,
      "loss": 1.1227,
      "step": 16
    },
    {
      "epoch": 0.3105022831050228,
      "grad_norm": 4.352411270141602,
      "learning_rate": 0.0002963875142908121,
      "loss": 0.887,
      "step": 17
    },
    {
      "epoch": 0.3287671232876712,
      "grad_norm": 3.2712793350219727,
      "learning_rate": 0.00029528747416929463,
      "loss": 0.7917,
      "step": 18
    },
    {
      "epoch": 0.3470319634703196,
      "grad_norm": 3.5658321380615234,
      "learning_rate": 0.0002940440528515414,
      "loss": 0.8631,
      "step": 19
    },
    {
      "epoch": 0.365296803652968,
      "grad_norm": 2.2600252628326416,
      "learning_rate": 0.00029265847744427303,
      "loss": 0.6248,
      "step": 20
    },
    {
      "epoch": 0.3835616438356164,
      "grad_norm": 3.6815826892852783,
      "learning_rate": 0.0002911321153431338,
      "loss": 0.7136,
      "step": 21
    },
    {
      "epoch": 0.4018264840182648,
      "grad_norm": 2.6013550758361816,
      "learning_rate": 0.00028946647288323766,
      "loss": 0.54,
      "step": 22
    },
    {
      "epoch": 0.4200913242009132,
      "grad_norm": 5.8350043296813965,
      "learning_rate": 0.00028766319385259713,
      "loss": 0.8035,
      "step": 23
    },
    {
      "epoch": 0.4383561643835616,
      "grad_norm": 29.41618537902832,
      "learning_rate": 0.00028572405786990294,
      "loss": 1.2182,
      "step": 24
    },
    {
      "epoch": 0.45662100456621,
      "grad_norm": 18.41824722290039,
      "learning_rate": 0.00028365097862825513,
      "loss": 1.031,
      "step": 25
    },
    {
      "epoch": 0.45662100456621,
      "eval_loss": 0.21980659663677216,
      "eval_runtime": 3.4137,
      "eval_samples_per_second": 14.647,
      "eval_steps_per_second": 2.051,
      "step": 25
    },
    {
      "epoch": 0.4748858447488584,
      "grad_norm": 8.772796630859375,
      "learning_rate": 0.0002814460020065795,
      "loss": 0.8542,
      "step": 26
    },
    {
      "epoch": 0.4931506849315068,
      "grad_norm": 12.31684398651123,
      "learning_rate": 0.0002791113040505915,
      "loss": 1.0956,
      "step": 27
    },
    {
      "epoch": 0.5114155251141552,
      "grad_norm": 5.33599853515625,
      "learning_rate": 0.00027664918882530225,
      "loss": 0.9314,
      "step": 28
    },
    {
      "epoch": 0.5296803652968036,
      "grad_norm": 2.71724534034729,
      "learning_rate": 0.00027406208614118424,
      "loss": 0.715,
      "step": 29
    },
    {
      "epoch": 0.547945205479452,
      "grad_norm": 3.253049850463867,
      "learning_rate": 0.0002713525491562421,
      "loss": 0.6381,
      "step": 30
    },
    {
      "epoch": 0.5662100456621004,
      "grad_norm": 1.2586455345153809,
      "learning_rate": 0.00026852325185635354,
      "loss": 0.54,
      "step": 31
    },
    {
      "epoch": 0.5844748858447488,
      "grad_norm": 1.543694257736206,
      "learning_rate": 0.00026557698641636835,
      "loss": 0.5199,
      "step": 32
    },
    {
      "epoch": 0.6027397260273972,
      "grad_norm": 1.3013904094696045,
      "learning_rate": 0.0002625166604445689,
      "loss": 0.5086,
      "step": 33
    },
    {
      "epoch": 0.6210045662100456,
      "grad_norm": 1.0573995113372803,
      "learning_rate": 0.0002593452941132117,
      "loss": 0.4737,
      "step": 34
    },
    {
      "epoch": 0.639269406392694,
      "grad_norm": 1.4775699377059937,
      "learning_rate": 0.00025606601717798207,
      "loss": 0.5643,
      "step": 35
    },
    {
      "epoch": 0.6575342465753424,
      "grad_norm": 3.0288829803466797,
      "learning_rate": 0.0002526820658893033,
      "loss": 0.6765,
      "step": 36
    },
    {
      "epoch": 0.6757990867579908,
      "grad_norm": 3.3613500595092773,
      "learning_rate": 0.00024919677979854776,
      "loss": 1.127,
      "step": 37
    },
    {
      "epoch": 0.6940639269406392,
      "grad_norm": 3.1534173488616943,
      "learning_rate": 0.0002456135984623034,
      "loss": 1.0773,
      "step": 38
    },
    {
      "epoch": 0.7123287671232876,
      "grad_norm": 3.1169755458831787,
      "learning_rate": 0.00024193605804794646,
      "loss": 0.7896,
      "step": 39
    },
    {
      "epoch": 0.730593607305936,
      "grad_norm": 1.863209843635559,
      "learning_rate": 0.00023816778784387094,
      "loss": 0.567,
      "step": 40
    },
    {
      "epoch": 0.7488584474885844,
      "grad_norm": 1.6382339000701904,
      "learning_rate": 0.00023431250667781958,
      "loss": 0.6118,
      "step": 41
    },
    {
      "epoch": 0.7671232876712328,
      "grad_norm": 1.7901109457015991,
      "learning_rate": 0.00023037401924684946,
      "loss": 0.5655,
      "step": 42
    },
    {
      "epoch": 0.7853881278538812,
      "grad_norm": 2.449873685836792,
      "learning_rate": 0.00022635621236255567,
      "loss": 0.5795,
      "step": 43
    },
    {
      "epoch": 0.8036529680365296,
      "grad_norm": 2.9242568016052246,
      "learning_rate": 0.00022226305111525726,
      "loss": 0.5427,
      "step": 44
    },
    {
      "epoch": 0.821917808219178,
      "grad_norm": 1.5985544919967651,
      "learning_rate": 0.00021809857496093199,
      "loss": 0.4842,
      "step": 45
    },
    {
      "epoch": 0.8401826484018264,
      "grad_norm": 1.5843843221664429,
      "learning_rate": 0.00021386689373476087,
      "loss": 0.5753,
      "step": 46
    },
    {
      "epoch": 0.8584474885844748,
      "grad_norm": 20.6035213470459,
      "learning_rate": 0.00020957218359521706,
      "loss": 0.6594,
      "step": 47
    },
    {
      "epoch": 0.8767123287671232,
      "grad_norm": 2.7331109046936035,
      "learning_rate": 0.0002052186829027017,
      "loss": 0.6428,
      "step": 48
    },
    {
      "epoch": 0.8949771689497716,
      "grad_norm": 2.2731661796569824,
      "learning_rate": 0.00020081068803679371,
      "loss": 0.6238,
      "step": 49
    },
    {
      "epoch": 0.91324200913242,
      "grad_norm": 2.2776832580566406,
      "learning_rate": 0.0001963525491562421,
      "loss": 0.6169,
      "step": 50
    },
    {
      "epoch": 0.91324200913242,
      "eval_loss": 0.1614273190498352,
      "eval_runtime": 3.4091,
      "eval_samples_per_second": 14.666,
      "eval_steps_per_second": 2.053,
      "step": 50
    },
    {
      "epoch": 0.9315068493150684,
      "grad_norm": 2.19893741607666,
      "learning_rate": 0.00019184866590588439,
      "loss": 0.7239,
      "step": 51
    },
    {
      "epoch": 0.9497716894977168,
      "grad_norm": 2.0520899295806885,
      "learning_rate": 0.00018730348307472824,
      "loss": 0.6,
      "step": 52
    },
    {
      "epoch": 0.9680365296803652,
      "grad_norm": 1.9806722402572632,
      "learning_rate": 0.0001827214862094814,
      "loss": 0.7017,
      "step": 53
    },
    {
      "epoch": 0.9863013698630136,
      "grad_norm": 3.993856906890869,
      "learning_rate": 0.0001781071971878587,
      "loss": 0.9841,
      "step": 54
    },
    {
      "epoch": 1.009132420091324,
      "grad_norm": 1.4181034564971924,
      "learning_rate": 0.00017346516975603462,
      "loss": 0.5512,
      "step": 55
    },
    {
      "epoch": 1.0273972602739727,
      "grad_norm": 1.4350292682647705,
      "learning_rate": 0.00016879998503464561,
      "loss": 0.5089,
      "step": 56
    },
    {
      "epoch": 1.045662100456621,
      "grad_norm": 4.159196853637695,
      "learning_rate": 0.00016411624699777717,
      "loss": 0.5528,
      "step": 57
    },
    {
      "epoch": 1.0639269406392695,
      "grad_norm": 1.3114010095596313,
      "learning_rate": 0.000159418577929397,
      "loss": 0.4571,
      "step": 58
    },
    {
      "epoch": 1.0821917808219177,
      "grad_norm": 1.4728926420211792,
      "learning_rate": 0.00015471161386171922,
      "loss": 0.5557,
      "step": 59
    },
    {
      "epoch": 1.1004566210045663,
      "grad_norm": 1.4644254446029663,
      "learning_rate": 0.00015,
      "loss": 0.442,
      "step": 60
    },
    {
      "epoch": 1.1187214611872145,
      "grad_norm": 1.3919850587844849,
      "learning_rate": 0.00014528838613828076,
      "loss": 0.5105,
      "step": 61
    },
    {
      "epoch": 1.1369863013698631,
      "grad_norm": 1.4340811967849731,
      "learning_rate": 0.000140581422070603,
      "loss": 0.4391,
      "step": 62
    },
    {
      "epoch": 1.1552511415525113,
      "grad_norm": 2.5181379318237305,
      "learning_rate": 0.00013588375300222283,
      "loss": 0.6491,
      "step": 63
    },
    {
      "epoch": 1.17351598173516,
      "grad_norm": 6.064093112945557,
      "learning_rate": 0.00013120001496535433,
      "loss": 0.8979,
      "step": 64
    },
    {
      "epoch": 1.191780821917808,
      "grad_norm": 2.5452799797058105,
      "learning_rate": 0.00012653483024396533,
      "loss": 0.7953,
      "step": 65
    },
    {
      "epoch": 1.2100456621004567,
      "grad_norm": 3.304936647415161,
      "learning_rate": 0.00012189280281214126,
      "loss": 0.925,
      "step": 66
    },
    {
      "epoch": 1.228310502283105,
      "grad_norm": 3.181483745574951,
      "learning_rate": 0.00011727851379051865,
      "loss": 0.9497,
      "step": 67
    },
    {
      "epoch": 1.2465753424657535,
      "grad_norm": 1.3087420463562012,
      "learning_rate": 0.0001126965169252718,
      "loss": 0.5897,
      "step": 68
    },
    {
      "epoch": 1.2648401826484017,
      "grad_norm": 0.7273179888725281,
      "learning_rate": 0.00010815133409411562,
      "loss": 0.3759,
      "step": 69
    },
    {
      "epoch": 1.2831050228310503,
      "grad_norm": 1.104803442955017,
      "learning_rate": 0.0001036474508437579,
      "loss": 0.4504,
      "step": 70
    },
    {
      "epoch": 1.3013698630136985,
      "grad_norm": 0.9511451125144958,
      "learning_rate": 9.918931196320629e-05,
      "loss": 0.4821,
      "step": 71
    },
    {
      "epoch": 1.3196347031963471,
      "grad_norm": 0.8015011548995972,
      "learning_rate": 9.47813170972983e-05,
      "loss": 0.5017,
      "step": 72
    },
    {
      "epoch": 1.3378995433789953,
      "grad_norm": 0.999840497970581,
      "learning_rate": 9.042781640478291e-05,
      "loss": 0.5061,
      "step": 73
    },
    {
      "epoch": 1.356164383561644,
      "grad_norm": 1.2065320014953613,
      "learning_rate": 8.613310626523909e-05,
      "loss": 0.5695,
      "step": 74
    },
    {
      "epoch": 1.374429223744292,
      "grad_norm": 0.9272903203964233,
      "learning_rate": 8.190142503906798e-05,
      "loss": 0.5145,
      "step": 75
    },
    {
      "epoch": 1.374429223744292,
      "eval_loss": 0.1285880208015442,
      "eval_runtime": 3.4101,
      "eval_samples_per_second": 14.662,
      "eval_steps_per_second": 2.053,
      "step": 75
    },
    {
      "epoch": 1.3926940639269407,
      "grad_norm": 1.0205509662628174,
      "learning_rate": 7.773694888474267e-05,
      "loss": 0.558,
      "step": 76
    },
    {
      "epoch": 1.410958904109589,
      "grad_norm": 0.7290596961975098,
      "learning_rate": 7.364378763744429e-05,
      "loss": 0.5247,
      "step": 77
    },
    {
      "epoch": 1.4292237442922375,
      "grad_norm": 1.1354948282241821,
      "learning_rate": 6.962598075315046e-05,
      "loss": 0.5803,
      "step": 78
    },
    {
      "epoch": 1.4474885844748857,
      "grad_norm": 1.6320542097091675,
      "learning_rate": 6.568749332218044e-05,
      "loss": 0.6953,
      "step": 79
    },
    {
      "epoch": 1.4657534246575343,
      "grad_norm": 2.266536235809326,
      "learning_rate": 6.183221215612904e-05,
      "loss": 0.8087,
      "step": 80
    },
    {
      "epoch": 1.4840182648401825,
      "grad_norm": 1.3406983613967896,
      "learning_rate": 5.806394195205356e-05,
      "loss": 0.5684,
      "step": 81
    },
    {
      "epoch": 1.5022831050228311,
      "grad_norm": 0.8310356736183167,
      "learning_rate": 5.4386401537696536e-05,
      "loss": 0.3706,
      "step": 82
    },
    {
      "epoch": 1.5205479452054793,
      "grad_norm": 0.6109088659286499,
      "learning_rate": 5.080322020145224e-05,
      "loss": 0.4452,
      "step": 83
    },
    {
      "epoch": 1.538812785388128,
      "grad_norm": 0.797944188117981,
      "learning_rate": 4.7317934110696685e-05,
      "loss": 0.4388,
      "step": 84
    },
    {
      "epoch": 1.5570776255707761,
      "grad_norm": 0.48338568210601807,
      "learning_rate": 4.3933982822017876e-05,
      "loss": 0.4493,
      "step": 85
    },
    {
      "epoch": 1.5753424657534247,
      "grad_norm": 0.8341657519340515,
      "learning_rate": 4.06547058867883e-05,
      "loss": 0.4436,
      "step": 86
    },
    {
      "epoch": 1.593607305936073,
      "grad_norm": 0.9702841639518738,
      "learning_rate": 3.7483339555431055e-05,
      "loss": 0.4903,
      "step": 87
    },
    {
      "epoch": 1.6118721461187215,
      "grad_norm": 0.7687260508537292,
      "learning_rate": 3.442301358363163e-05,
      "loss": 0.4799,
      "step": 88
    },
    {
      "epoch": 1.6301369863013697,
      "grad_norm": 0.8309193849563599,
      "learning_rate": 3.1476748143646435e-05,
      "loss": 0.5022,
      "step": 89
    },
    {
      "epoch": 1.6484018264840183,
      "grad_norm": 1.1130985021591187,
      "learning_rate": 2.8647450843757897e-05,
      "loss": 0.5636,
      "step": 90
    },
    {
      "epoch": 1.6666666666666665,
      "grad_norm": 0.946800708770752,
      "learning_rate": 2.5937913858815708e-05,
      "loss": 0.5647,
      "step": 91
    },
    {
      "epoch": 1.6849315068493151,
      "grad_norm": 1.5327059030532837,
      "learning_rate": 2.335081117469777e-05,
      "loss": 0.6487,
      "step": 92
    },
    {
      "epoch": 1.7031963470319633,
      "grad_norm": 1.5970087051391602,
      "learning_rate": 2.0888695949408468e-05,
      "loss": 0.6578,
      "step": 93
    },
    {
      "epoch": 1.721461187214612,
      "grad_norm": 0.8410404920578003,
      "learning_rate": 1.8553997993420495e-05,
      "loss": 0.4998,
      "step": 94
    },
    {
      "epoch": 1.7397260273972601,
      "grad_norm": 0.6193436980247498,
      "learning_rate": 1.634902137174483e-05,
      "loss": 0.3661,
      "step": 95
    },
    {
      "epoch": 1.7579908675799087,
      "grad_norm": 0.6613103747367859,
      "learning_rate": 1.4275942130097096e-05,
      "loss": 0.4159,
      "step": 96
    },
    {
      "epoch": 1.776255707762557,
      "grad_norm": 0.6441118717193604,
      "learning_rate": 1.2336806147402828e-05,
      "loss": 0.4134,
      "step": 97
    },
    {
      "epoch": 1.7945205479452055,
      "grad_norm": 0.4714259207248688,
      "learning_rate": 1.0533527116762296e-05,
      "loss": 0.4379,
      "step": 98
    },
    {
      "epoch": 1.8127853881278537,
      "grad_norm": 0.8632249236106873,
      "learning_rate": 8.867884656866181e-06,
      "loss": 0.453,
      "step": 99
    },
    {
      "epoch": 1.8310502283105023,
      "grad_norm": 0.6782150268554688,
      "learning_rate": 7.34152255572697e-06,
      "loss": 0.4734,
      "step": 100
    },
    {
      "epoch": 1.8310502283105023,
      "eval_loss": 0.12525896728038788,
      "eval_runtime": 3.4138,
      "eval_samples_per_second": 14.646,
      "eval_steps_per_second": 2.05,
      "step": 100
    },
    {
      "epoch": 1.8493150684931505,
      "grad_norm": 0.7789422273635864,
      "learning_rate": 5.95594714845854e-06,
      "loss": 0.4551,
      "step": 101
    },
    {
      "epoch": 1.8675799086757991,
      "grad_norm": 0.9181280732154846,
      "learning_rate": 4.712525830705338e-06,
      "loss": 0.4778,
      "step": 102
    },
    {
      "epoch": 1.8858447488584473,
      "grad_norm": 0.6837835311889648,
      "learning_rate": 3.6124857091878845e-06,
      "loss": 0.4939,
      "step": 103
    },
    {
      "epoch": 1.904109589041096,
      "grad_norm": 0.522508978843689,
      "learning_rate": 2.656912390696708e-06,
      "loss": 0.5255,
      "step": 104
    },
    {
      "epoch": 1.9223744292237441,
      "grad_norm": 1.1073154211044312,
      "learning_rate": 1.8467489107293509e-06,
      "loss": 0.6029,
      "step": 105
    },
    {
      "epoch": 1.9406392694063928,
      "grad_norm": 1.4917099475860596,
      "learning_rate": 1.1827948028283352e-06,
      "loss": 0.6545,
      "step": 106
    },
    {
      "epoch": 1.958904109589041,
      "grad_norm": 0.9225929379463196,
      "learning_rate": 6.657053095380005e-07,
      "loss": 0.5178,
      "step": 107
    },
    {
      "epoch": 1.9771689497716896,
      "grad_norm": 0.7463716864585876,
      "learning_rate": 2.959907357592661e-07,
      "loss": 0.4391,
      "step": 108
    },
    {
      "epoch": 1.9954337899543377,
      "grad_norm": 0.6585250496864319,
      "learning_rate": 7.401594514025999e-08,
      "loss": 0.5468,
      "step": 109
    },
    {
      "epoch": 2.018264840182648,
      "grad_norm": 0.6586726307868958,
      "learning_rate": 0.0,
      "loss": 0.3368,
      "step": 110
    }
  ],
  "logging_steps": 1,
  "max_steps": 110,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 50,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 1,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 1.5274585469485056e+17,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}
|
|