|
{ |
|
"best_metric": null, |
|
"best_model_checkpoint": null, |
|
"epoch": 0.9997382884061764, |
|
"eval_steps": 500, |
|
"global_step": 1910, |
|
"is_hyper_param_search": false, |
|
"is_local_process_zero": true, |
|
"is_world_process_zero": true, |
|
"log_history": [ |
|
{ |
|
"epoch": 0.005234231876472127, |
|
"grad_norm": 165.0, |
|
"learning_rate": 1.0416666666666667e-06, |
|
"logits/chosen": -0.5391381978988647, |
|
"logits/rejected": -0.5323623418807983, |
|
"logps/chosen": -304.4400329589844, |
|
"logps/rejected": -272.86676025390625, |
|
"loss": 0.6936, |
|
"rewards/accuracies": 0.3812499940395355, |
|
"rewards/chosen": -0.0019484901567921042, |
|
"rewards/margins": -0.0004195678629912436, |
|
"rewards/rejected": -0.0015289222355931997, |
|
"step": 10 |
|
}, |
|
{ |
|
"epoch": 0.010468463752944255, |
|
"grad_norm": 160.0, |
|
"learning_rate": 2.0833333333333334e-06, |
|
"logits/chosen": -0.4907230734825134, |
|
"logits/rejected": -0.4778883457183838, |
|
"logps/chosen": -327.8752136230469, |
|
"logps/rejected": -299.05804443359375, |
|
"loss": 0.6918, |
|
"rewards/accuracies": 0.5375000238418579, |
|
"rewards/chosen": -0.0012482209131121635, |
|
"rewards/margins": 0.003201314015313983, |
|
"rewards/rejected": -0.004449534695595503, |
|
"step": 20 |
|
}, |
|
{ |
|
"epoch": 0.015702695629416383, |
|
"grad_norm": 180.0, |
|
"learning_rate": 3.125e-06, |
|
"logits/chosen": -0.4592881202697754, |
|
"logits/rejected": -0.4383363723754883, |
|
"logps/chosen": -324.7940979003906, |
|
"logps/rejected": -300.43804931640625, |
|
"loss": 0.6918, |
|
"rewards/accuracies": 0.5093749761581421, |
|
"rewards/chosen": 0.00028852271498180926, |
|
"rewards/margins": 0.0034250388853251934, |
|
"rewards/rejected": -0.0031365156173706055, |
|
"step": 30 |
|
}, |
|
{ |
|
"epoch": 0.02093692750588851, |
|
"grad_norm": 169.0, |
|
"learning_rate": 4.166666666666667e-06, |
|
"logits/chosen": -0.4992567002773285, |
|
"logits/rejected": -0.48144620656967163, |
|
"logps/chosen": -293.142333984375, |
|
"logps/rejected": -259.19207763671875, |
|
"loss": 0.6902, |
|
"rewards/accuracies": 0.5687500238418579, |
|
"rewards/chosen": -0.001205235836096108, |
|
"rewards/margins": 0.0066679357551038265, |
|
"rewards/rejected": -0.0078731719404459, |
|
"step": 40 |
|
}, |
|
{ |
|
"epoch": 0.02617115938236064, |
|
"grad_norm": 152.0, |
|
"learning_rate": 5.208333333333334e-06, |
|
"logits/chosen": -0.5911587476730347, |
|
"logits/rejected": -0.5686948895454407, |
|
"logps/chosen": -294.60107421875, |
|
"logps/rejected": -268.81158447265625, |
|
"loss": 0.6846, |
|
"rewards/accuracies": 0.590624988079071, |
|
"rewards/chosen": 0.0019417991861701012, |
|
"rewards/margins": 0.01833486184477806, |
|
"rewards/rejected": -0.016393061727285385, |
|
"step": 50 |
|
}, |
|
{ |
|
"epoch": 0.031405391258832765, |
|
"grad_norm": 158.0, |
|
"learning_rate": 6.25e-06, |
|
"logits/chosen": -0.5607722997665405, |
|
"logits/rejected": -0.4761945605278015, |
|
"logps/chosen": -309.49395751953125, |
|
"logps/rejected": -276.85699462890625, |
|
"loss": 0.6775, |
|
"rewards/accuracies": 0.684374988079071, |
|
"rewards/chosen": 0.00560817401856184, |
|
"rewards/margins": 0.03384781628847122, |
|
"rewards/rejected": -0.028239641338586807, |
|
"step": 60 |
|
}, |
|
{ |
|
"epoch": 0.036639623135304895, |
|
"grad_norm": 144.0, |
|
"learning_rate": 7.291666666666667e-06, |
|
"logits/chosen": -0.5228131413459778, |
|
"logits/rejected": -0.4508543014526367, |
|
"logps/chosen": -294.2858581542969, |
|
"logps/rejected": -290.7100524902344, |
|
"loss": 0.6704, |
|
"rewards/accuracies": 0.6312500238418579, |
|
"rewards/chosen": 0.001448362017981708, |
|
"rewards/margins": 0.05140659213066101, |
|
"rewards/rejected": -0.049958229064941406, |
|
"step": 70 |
|
}, |
|
{ |
|
"epoch": 0.04187385501177702, |
|
"grad_norm": 139.0, |
|
"learning_rate": 8.333333333333334e-06, |
|
"logits/chosen": -0.5402103066444397, |
|
"logits/rejected": -0.4056556820869446, |
|
"logps/chosen": -282.7431945800781, |
|
"logps/rejected": -259.76861572265625, |
|
"loss": 0.663, |
|
"rewards/accuracies": 0.640625, |
|
"rewards/chosen": -0.007740585599094629, |
|
"rewards/margins": 0.07010926306247711, |
|
"rewards/rejected": -0.0778498649597168, |
|
"step": 80 |
|
}, |
|
{ |
|
"epoch": 0.04710808688824915, |
|
"grad_norm": 152.0, |
|
"learning_rate": 9.375000000000001e-06, |
|
"logits/chosen": -0.5043436884880066, |
|
"logits/rejected": -0.4741402268409729, |
|
"logps/chosen": -300.0230407714844, |
|
"logps/rejected": -258.2001037597656, |
|
"loss": 0.6516, |
|
"rewards/accuracies": 0.640625, |
|
"rewards/chosen": -0.013870301656425, |
|
"rewards/margins": 0.10374711453914642, |
|
"rewards/rejected": -0.11761742830276489, |
|
"step": 90 |
|
}, |
|
{ |
|
"epoch": 0.05234231876472128, |
|
"grad_norm": 141.0, |
|
"learning_rate": 9.999880027023295e-06, |
|
"logits/chosen": -0.6226400136947632, |
|
"logits/rejected": -0.5647040009498596, |
|
"logps/chosen": -299.897216796875, |
|
"logps/rejected": -277.0999755859375, |
|
"loss": 0.6251, |
|
"rewards/accuracies": 0.6781250238418579, |
|
"rewards/chosen": -0.03077390417456627, |
|
"rewards/margins": 0.17102494835853577, |
|
"rewards/rejected": -0.20179884135723114, |
|
"step": 100 |
|
}, |
|
{ |
|
"epoch": 0.05757655064119341, |
|
"grad_norm": 127.5, |
|
"learning_rate": 9.998530397154684e-06, |
|
"logits/chosen": -0.6127622127532959, |
|
"logits/rejected": -0.39612576365470886, |
|
"logps/chosen": -295.1545104980469, |
|
"logps/rejected": -289.87548828125, |
|
"loss": 0.6416, |
|
"rewards/accuracies": 0.628125011920929, |
|
"rewards/chosen": -0.04563731700181961, |
|
"rewards/margins": 0.15100470185279846, |
|
"rewards/rejected": -0.19664199650287628, |
|
"step": 110 |
|
}, |
|
{ |
|
"epoch": 0.06281078251766553, |
|
"grad_norm": 139.0, |
|
"learning_rate": 9.995681577335256e-06, |
|
"logits/chosen": -0.6091737747192383, |
|
"logits/rejected": -0.458709716796875, |
|
"logps/chosen": -304.33416748046875, |
|
"logps/rejected": -294.13800048828125, |
|
"loss": 0.6044, |
|
"rewards/accuracies": 0.699999988079071, |
|
"rewards/chosen": -0.05064667388796806, |
|
"rewards/margins": 0.25388482213020325, |
|
"rewards/rejected": -0.3045315146446228, |
|
"step": 120 |
|
}, |
|
{ |
|
"epoch": 0.06804501439413765, |
|
"grad_norm": 192.0, |
|
"learning_rate": 9.99133442200056e-06, |
|
"logits/chosen": -0.6719385981559753, |
|
"logits/rejected": -0.5869318246841431, |
|
"logps/chosen": -319.9844055175781, |
|
"logps/rejected": -284.01251220703125, |
|
"loss": 0.6128, |
|
"rewards/accuracies": 0.684374988079071, |
|
"rewards/chosen": -0.07997078448534012, |
|
"rewards/margins": 0.2446589469909668, |
|
"rewards/rejected": -0.3246297240257263, |
|
"step": 130 |
|
}, |
|
{ |
|
"epoch": 0.07327924627060979, |
|
"grad_norm": 215.0, |
|
"learning_rate": 9.985490234976132e-06, |
|
"logits/chosen": -0.6441094279289246, |
|
"logits/rejected": -0.6027123332023621, |
|
"logps/chosen": -309.62957763671875, |
|
"logps/rejected": -265.69049072265625, |
|
"loss": 0.5984, |
|
"rewards/accuracies": 0.6781250238418579, |
|
"rewards/chosen": -0.12067105621099472, |
|
"rewards/margins": 0.3122618794441223, |
|
"rewards/rejected": -0.43293294310569763, |
|
"step": 140 |
|
}, |
|
{ |
|
"epoch": 0.07851347814708191, |
|
"grad_norm": 139.0, |
|
"learning_rate": 9.978150769086457e-06, |
|
"logits/chosen": -0.6877347230911255, |
|
"logits/rejected": -0.667587399482727, |
|
"logps/chosen": -310.8448791503906, |
|
"logps/rejected": -281.82666015625, |
|
"loss": 0.6184, |
|
"rewards/accuracies": 0.6468750238418579, |
|
"rewards/chosen": -0.21131880581378937, |
|
"rewards/margins": 0.2875850796699524, |
|
"rewards/rejected": -0.4989038407802582, |
|
"step": 150 |
|
}, |
|
{ |
|
"epoch": 0.08374771002355404, |
|
"grad_norm": 159.0, |
|
"learning_rate": 9.96931822562924e-06, |
|
"logits/chosen": -0.6773428320884705, |
|
"logits/rejected": -0.6160544157028198, |
|
"logps/chosen": -321.5206604003906, |
|
"logps/rejected": -316.896240234375, |
|
"loss": 0.6165, |
|
"rewards/accuracies": 0.675000011920929, |
|
"rewards/chosen": -0.24355646967887878, |
|
"rewards/margins": 0.2874658405780792, |
|
"rewards/rejected": -0.531022310256958, |
|
"step": 160 |
|
}, |
|
{ |
|
"epoch": 0.08898194190002617, |
|
"grad_norm": 150.0, |
|
"learning_rate": 9.958995253715193e-06, |
|
"logits/chosen": -0.637015163898468, |
|
"logits/rejected": -0.6120925545692444, |
|
"logps/chosen": -328.5833740234375, |
|
"logps/rejected": -293.30133056640625, |
|
"loss": 0.6249, |
|
"rewards/accuracies": 0.653124988079071, |
|
"rewards/chosen": -0.20336680114269257, |
|
"rewards/margins": 0.2829468250274658, |
|
"rewards/rejected": -0.4863136410713196, |
|
"step": 170 |
|
}, |
|
{ |
|
"epoch": 0.0942161737764983, |
|
"grad_norm": 137.0, |
|
"learning_rate": 9.947184949473478e-06, |
|
"logits/chosen": -0.7623378038406372, |
|
"logits/rejected": -0.6125292778015137, |
|
"logps/chosen": -309.94439697265625, |
|
"logps/rejected": -275.16351318359375, |
|
"loss": 0.5756, |
|
"rewards/accuracies": 0.71875, |
|
"rewards/chosen": -0.17442843317985535, |
|
"rewards/margins": 0.3826545476913452, |
|
"rewards/rejected": -0.557083010673523, |
|
"step": 180 |
|
}, |
|
{ |
|
"epoch": 0.09945040565297043, |
|
"grad_norm": 179.0, |
|
"learning_rate": 9.933890855123114e-06, |
|
"logits/chosen": -0.7736495137214661, |
|
"logits/rejected": -0.7243441939353943, |
|
"logps/chosen": -332.80279541015625, |
|
"logps/rejected": -323.1473083496094, |
|
"loss": 0.608, |
|
"rewards/accuracies": 0.699999988079071, |
|
"rewards/chosen": -0.24678723514080048, |
|
"rewards/margins": 0.3317185640335083, |
|
"rewards/rejected": -0.5785057544708252, |
|
"step": 190 |
|
}, |
|
{ |
|
"epoch": 0.10468463752944256, |
|
"grad_norm": 177.0, |
|
"learning_rate": 9.919116957910566e-06, |
|
"logits/chosen": -0.7762905359268188, |
|
"logits/rejected": -0.7918479442596436, |
|
"logps/chosen": -313.58282470703125, |
|
"logps/rejected": -263.818603515625, |
|
"loss": 0.5988, |
|
"rewards/accuracies": 0.675000011920929, |
|
"rewards/chosen": -0.23899522423744202, |
|
"rewards/margins": 0.34633320569992065, |
|
"rewards/rejected": -0.5853284597396851, |
|
"step": 200 |
|
}, |
|
{ |
|
"epoch": 0.10991886940591468, |
|
"grad_norm": 140.0, |
|
"learning_rate": 9.902867688913869e-06, |
|
"logits/chosen": -0.7035849690437317, |
|
"logits/rejected": -0.6396470069885254, |
|
"logps/chosen": -325.70037841796875, |
|
"logps/rejected": -289.4329528808594, |
|
"loss": 0.5791, |
|
"rewards/accuracies": 0.6937500238418579, |
|
"rewards/chosen": -0.24025292694568634, |
|
"rewards/margins": 0.3910112977027893, |
|
"rewards/rejected": -0.6312642097473145, |
|
"step": 210 |
|
}, |
|
{ |
|
"epoch": 0.11515310128238682, |
|
"grad_norm": 167.0, |
|
"learning_rate": 9.885147921713621e-06, |
|
"logits/chosen": -0.8380182385444641, |
|
"logits/rejected": -0.7782719731330872, |
|
"logps/chosen": -304.9471740722656, |
|
"logps/rejected": -291.9976501464844, |
|
"loss": 0.5809, |
|
"rewards/accuracies": 0.7124999761581421, |
|
"rewards/chosen": -0.2873040437698364, |
|
"rewards/margins": 0.39733776450157166, |
|
"rewards/rejected": -0.6846417784690857, |
|
"step": 220 |
|
}, |
|
{ |
|
"epoch": 0.12038733315885894, |
|
"grad_norm": 145.0, |
|
"learning_rate": 9.865962970931287e-06, |
|
"logits/chosen": -0.6981593370437622, |
|
"logits/rejected": -0.6939986944198608, |
|
"logps/chosen": -330.7546081542969, |
|
"logps/rejected": -293.80291748046875, |
|
"loss": 0.5871, |
|
"rewards/accuracies": 0.690625011920929, |
|
"rewards/chosen": -0.33381906151771545, |
|
"rewards/margins": 0.39481496810913086, |
|
"rewards/rejected": -0.7286339998245239, |
|
"step": 230 |
|
}, |
|
{ |
|
"epoch": 0.12562156503533106, |
|
"grad_norm": 132.0, |
|
"learning_rate": 9.845318590635186e-06, |
|
"logits/chosen": -0.7120614647865295, |
|
"logits/rejected": -0.6218434572219849, |
|
"logps/chosen": -322.8934020996094, |
|
"logps/rejected": -290.45330810546875, |
|
"loss": 0.5698, |
|
"rewards/accuracies": 0.6937500238418579, |
|
"rewards/chosen": -0.3164657652378082, |
|
"rewards/margins": 0.4521929621696472, |
|
"rewards/rejected": -0.7686586976051331, |
|
"step": 240 |
|
}, |
|
{ |
|
"epoch": 0.13085579691180318, |
|
"grad_norm": 152.0, |
|
"learning_rate": 9.823220972614712e-06, |
|
"logits/chosen": -0.7498207688331604, |
|
"logits/rejected": -0.6882480382919312, |
|
"logps/chosen": -337.97503662109375, |
|
"logps/rejected": -276.526611328125, |
|
"loss": 0.6041, |
|
"rewards/accuracies": 0.6812499761581421, |
|
"rewards/chosen": -0.3191986083984375, |
|
"rewards/margins": 0.3876217007637024, |
|
"rewards/rejected": -0.7068203091621399, |
|
"step": 250 |
|
}, |
|
{ |
|
"epoch": 0.1360900287882753, |
|
"grad_norm": 168.0, |
|
"learning_rate": 9.79967674452324e-06, |
|
"logits/chosen": -0.7126627564430237, |
|
"logits/rejected": -0.6269676089286804, |
|
"logps/chosen": -317.61053466796875, |
|
"logps/rejected": -312.0860595703125, |
|
"loss": 0.5864, |
|
"rewards/accuracies": 0.668749988079071, |
|
"rewards/chosen": -0.27643534541130066, |
|
"rewards/margins": 0.45011717081069946, |
|
"rewards/rejected": -0.726552426815033, |
|
"step": 260 |
|
}, |
|
{ |
|
"epoch": 0.14132426066474746, |
|
"grad_norm": 170.0, |
|
"learning_rate": 9.774692967890332e-06, |
|
"logits/chosen": -0.7879582643508911, |
|
"logits/rejected": -0.7155620455741882, |
|
"logps/chosen": -326.31536865234375, |
|
"logps/rejected": -300.6862487792969, |
|
"loss": 0.5801, |
|
"rewards/accuracies": 0.690625011920929, |
|
"rewards/chosen": -0.22632746398448944, |
|
"rewards/margins": 0.436007022857666, |
|
"rewards/rejected": -0.6623345613479614, |
|
"step": 270 |
|
}, |
|
{ |
|
"epoch": 0.14655849254121958, |
|
"grad_norm": 169.0, |
|
"learning_rate": 9.74827713600379e-06, |
|
"logits/chosen": -0.6809985041618347, |
|
"logits/rejected": -0.6144469976425171, |
|
"logps/chosen": -285.4235534667969, |
|
"logps/rejected": -263.20086669921875, |
|
"loss": 0.6344, |
|
"rewards/accuracies": 0.643750011920929, |
|
"rewards/chosen": -0.33996862173080444, |
|
"rewards/margins": 0.2895575165748596, |
|
"rewards/rejected": -0.6295260787010193, |
|
"step": 280 |
|
}, |
|
{ |
|
"epoch": 0.1517927244176917, |
|
"grad_norm": 138.0, |
|
"learning_rate": 9.720437171662232e-06, |
|
"logits/chosen": -0.7260808944702148, |
|
"logits/rejected": -0.6358948945999146, |
|
"logps/chosen": -310.652587890625, |
|
"logps/rejected": -296.210693359375, |
|
"loss": 0.5904, |
|
"rewards/accuracies": 0.6968749761581421, |
|
"rewards/chosen": -0.260017067193985, |
|
"rewards/margins": 0.38266319036483765, |
|
"rewards/rejected": -0.642680287361145, |
|
"step": 290 |
|
}, |
|
{ |
|
"epoch": 0.15702695629416383, |
|
"grad_norm": 140.0, |
|
"learning_rate": 9.691181424798825e-06, |
|
"logits/chosen": -0.7846163511276245, |
|
"logits/rejected": -0.8230497241020203, |
|
"logps/chosen": -295.07891845703125, |
|
"logps/rejected": -277.966064453125, |
|
"loss": 0.5818, |
|
"rewards/accuracies": 0.703125, |
|
"rewards/chosen": -0.22248828411102295, |
|
"rewards/margins": 0.3829471468925476, |
|
"rewards/rejected": -0.6054354310035706, |
|
"step": 300 |
|
}, |
|
{ |
|
"epoch": 0.16226118817063595, |
|
"grad_norm": 134.0, |
|
"learning_rate": 9.660518669976936e-06, |
|
"logits/chosen": -0.7545520663261414, |
|
"logits/rejected": -0.7427884340286255, |
|
"logps/chosen": -323.29278564453125, |
|
"logps/rejected": -285.1950988769531, |
|
"loss": 0.5776, |
|
"rewards/accuracies": 0.731249988079071, |
|
"rewards/chosen": -0.23153206706047058, |
|
"rewards/margins": 0.4093509316444397, |
|
"rewards/rejected": -0.6408829689025879, |
|
"step": 310 |
|
}, |
|
{ |
|
"epoch": 0.16749542004710807, |
|
"grad_norm": 147.0, |
|
"learning_rate": 9.628458103758403e-06, |
|
"logits/chosen": -0.8146417737007141, |
|
"logits/rejected": -0.732585072517395, |
|
"logps/chosen": -337.4986877441406, |
|
"logps/rejected": -308.1067199707031, |
|
"loss": 0.5689, |
|
"rewards/accuracies": 0.6968749761581421, |
|
"rewards/chosen": -0.28897982835769653, |
|
"rewards/margins": 0.42633509635925293, |
|
"rewards/rejected": -0.7153149247169495, |
|
"step": 320 |
|
}, |
|
{ |
|
"epoch": 0.17272965192358022, |
|
"grad_norm": 152.0, |
|
"learning_rate": 9.595009341945246e-06, |
|
"logits/chosen": -0.8427789807319641, |
|
"logits/rejected": -0.8916895985603333, |
|
"logps/chosen": -296.01397705078125, |
|
"logps/rejected": -289.39495849609375, |
|
"loss": 0.6047, |
|
"rewards/accuracies": 0.6625000238418579, |
|
"rewards/chosen": -0.36485785245895386, |
|
"rewards/margins": 0.3738064169883728, |
|
"rewards/rejected": -0.7386642694473267, |
|
"step": 330 |
|
}, |
|
{ |
|
"epoch": 0.17796388380005235, |
|
"grad_norm": 162.0, |
|
"learning_rate": 9.560182416695639e-06, |
|
"logits/chosen": -0.850921630859375, |
|
"logits/rejected": -0.8088645935058594, |
|
"logps/chosen": -301.05859375, |
|
"logps/rejected": -300.81011962890625, |
|
"loss": 0.5755, |
|
"rewards/accuracies": 0.7093750238418579, |
|
"rewards/chosen": -0.3835056722164154, |
|
"rewards/margins": 0.4628046154975891, |
|
"rewards/rejected": -0.8463103175163269, |
|
"step": 340 |
|
}, |
|
{ |
|
"epoch": 0.18319811567652447, |
|
"grad_norm": 189.0, |
|
"learning_rate": 9.523987773514999e-06, |
|
"logits/chosen": -0.8792441487312317, |
|
"logits/rejected": -0.8965069055557251, |
|
"logps/chosen": -311.1455383300781, |
|
"logps/rejected": -278.04876708984375, |
|
"loss": 0.6134, |
|
"rewards/accuracies": 0.6781250238418579, |
|
"rewards/chosen": -0.37413641810417175, |
|
"rewards/margins": 0.32240813970565796, |
|
"rewards/rejected": -0.6965445280075073, |
|
"step": 350 |
|
}, |
|
{ |
|
"epoch": 0.1884323475529966, |
|
"grad_norm": 170.0, |
|
"learning_rate": 9.486436268123112e-06, |
|
"logits/chosen": -0.8645089268684387, |
|
"logits/rejected": -0.8401215672492981, |
|
"logps/chosen": -339.32781982421875, |
|
"logps/rejected": -317.1932678222656, |
|
"loss": 0.6002, |
|
"rewards/accuracies": 0.6656249761581421, |
|
"rewards/chosen": -0.3370912969112396, |
|
"rewards/margins": 0.38099169731140137, |
|
"rewards/rejected": -0.7180830240249634, |
|
"step": 360 |
|
}, |
|
{ |
|
"epoch": 0.19366657942946872, |
|
"grad_norm": 185.0, |
|
"learning_rate": 9.447539163198218e-06, |
|
"logits/chosen": -0.7942751049995422, |
|
"logits/rejected": -0.7996883988380432, |
|
"logps/chosen": -312.5692138671875, |
|
"logps/rejected": -282.26629638671875, |
|
"loss": 0.5814, |
|
"rewards/accuracies": 0.684374988079071, |
|
"rewards/chosen": -0.2877020835876465, |
|
"rewards/margins": 0.4375336170196533, |
|
"rewards/rejected": -0.7252357602119446, |
|
"step": 370 |
|
}, |
|
{ |
|
"epoch": 0.19890081130594087, |
|
"grad_norm": 129.0, |
|
"learning_rate": 9.407308124999031e-06, |
|
"logits/chosen": -0.8171411752700806, |
|
"logits/rejected": -0.7716284394264221, |
|
"logps/chosen": -320.85626220703125, |
|
"logps/rejected": -307.2262268066406, |
|
"loss": 0.6016, |
|
"rewards/accuracies": 0.684374988079071, |
|
"rewards/chosen": -0.29433757066726685, |
|
"rewards/margins": 0.39002522826194763, |
|
"rewards/rejected": -0.6843627691268921, |
|
"step": 380 |
|
}, |
|
{ |
|
"epoch": 0.204135043182413, |
|
"grad_norm": 136.0, |
|
"learning_rate": 9.365755219865733e-06, |
|
"logits/chosen": -0.8271177411079407, |
|
"logits/rejected": -0.7634648680686951, |
|
"logps/chosen": -323.30047607421875, |
|
"logps/rejected": -306.6474914550781, |
|
"loss": 0.5503, |
|
"rewards/accuracies": 0.684374988079071, |
|
"rewards/chosen": -0.24612458050251007, |
|
"rewards/margins": 0.5447290539741516, |
|
"rewards/rejected": -0.7908536195755005, |
|
"step": 390 |
|
}, |
|
{ |
|
"epoch": 0.2093692750588851, |
|
"grad_norm": 165.0, |
|
"learning_rate": 9.322892910600959e-06, |
|
"logits/chosen": -0.8124639391899109, |
|
"logits/rejected": -0.6246926188468933, |
|
"logps/chosen": -293.43975830078125, |
|
"logps/rejected": -269.2245178222656, |
|
"loss": 0.5831, |
|
"rewards/accuracies": 0.6875, |
|
"rewards/chosen": -0.27127283811569214, |
|
"rewards/margins": 0.4235779345035553, |
|
"rewards/rejected": -0.6948508024215698, |
|
"step": 400 |
|
}, |
|
{ |
|
"epoch": 0.21460350693535724, |
|
"grad_norm": 210.0, |
|
"learning_rate": 9.278734052731876e-06, |
|
"logits/chosen": -0.743418276309967, |
|
"logits/rejected": -0.7249549031257629, |
|
"logps/chosen": -316.4259033203125, |
|
"logps/rejected": -296.60369873046875, |
|
"loss": 0.5748, |
|
"rewards/accuracies": 0.703125, |
|
"rewards/chosen": -0.30628499388694763, |
|
"rewards/margins": 0.4629667401313782, |
|
"rewards/rejected": -0.7692517638206482, |
|
"step": 410 |
|
}, |
|
{ |
|
"epoch": 0.21983773881182936, |
|
"grad_norm": 181.0, |
|
"learning_rate": 9.233291890654477e-06, |
|
"logits/chosen": -0.8485832214355469, |
|
"logits/rejected": -0.8260629773139954, |
|
"logps/chosen": -319.9491882324219, |
|
"logps/rejected": -283.28875732421875, |
|
"loss": 0.5721, |
|
"rewards/accuracies": 0.690625011920929, |
|
"rewards/chosen": -0.31267648935317993, |
|
"rewards/margins": 0.48778074979782104, |
|
"rewards/rejected": -0.8004571795463562, |
|
"step": 420 |
|
}, |
|
{ |
|
"epoch": 0.22507197068830148, |
|
"grad_norm": 174.0, |
|
"learning_rate": 9.186580053661238e-06, |
|
"logits/chosen": -0.8275023698806763, |
|
"logits/rejected": -0.827506422996521, |
|
"logps/chosen": -313.1556396484375, |
|
"logps/rejected": -332.6429748535156, |
|
"loss": 0.5978, |
|
"rewards/accuracies": 0.6656249761581421, |
|
"rewards/chosen": -0.33983808755874634, |
|
"rewards/margins": 0.4332374036312103, |
|
"rewards/rejected": -0.7730754613876343, |
|
"step": 430 |
|
}, |
|
{ |
|
"epoch": 0.23030620256477363, |
|
"grad_norm": 128.0, |
|
"learning_rate": 9.138612551853334e-06, |
|
"logits/chosen": -0.8541017770767212, |
|
"logits/rejected": -0.783103883266449, |
|
"logps/chosen": -328.6156311035156, |
|
"logps/rejected": -290.98968505859375, |
|
"loss": 0.5545, |
|
"rewards/accuracies": 0.7093750238418579, |
|
"rewards/chosen": -0.3181619346141815, |
|
"rewards/margins": 0.5122355222702026, |
|
"rewards/rejected": -0.8303974866867065, |
|
"step": 440 |
|
}, |
|
{ |
|
"epoch": 0.23554043444124576, |
|
"grad_norm": 132.0, |
|
"learning_rate": 9.089403771938651e-06, |
|
"logits/chosen": -0.8417192697525024, |
|
"logits/rejected": -0.7470638155937195, |
|
"logps/chosen": -316.470458984375, |
|
"logps/rejected": -297.7726745605469, |
|
"loss": 0.557, |
|
"rewards/accuracies": 0.75, |
|
"rewards/chosen": -0.3102961480617523, |
|
"rewards/margins": 0.5439842939376831, |
|
"rewards/rejected": -0.8542804718017578, |
|
"step": 450 |
|
}, |
|
{ |
|
"epoch": 0.24077466631771788, |
|
"grad_norm": 142.0, |
|
"learning_rate": 9.038968472916831e-06, |
|
"logits/chosen": -0.9052237272262573, |
|
"logits/rejected": -0.7872729897499084, |
|
"logps/chosen": -341.5567932128906, |
|
"logps/rejected": -340.7992248535156, |
|
"loss": 0.5571, |
|
"rewards/accuracies": 0.71875, |
|
"rewards/chosen": -0.3277885317802429, |
|
"rewards/margins": 0.5650601983070374, |
|
"rewards/rejected": -0.8928486704826355, |
|
"step": 460 |
|
}, |
|
{ |
|
"epoch": 0.24600889819419, |
|
"grad_norm": 151.0, |
|
"learning_rate": 8.987321781652663e-06, |
|
"logits/chosen": -0.8001490831375122, |
|
"logits/rejected": -0.839720606803894, |
|
"logps/chosen": -300.9017639160156, |
|
"logps/rejected": -276.0289306640625, |
|
"loss": 0.5611, |
|
"rewards/accuracies": 0.71875, |
|
"rewards/chosen": -0.3725624680519104, |
|
"rewards/margins": 0.5345943570137024, |
|
"rewards/rejected": -0.907156765460968, |
|
"step": 470 |
|
}, |
|
{ |
|
"epoch": 0.2512431300706621, |
|
"grad_norm": 160.0, |
|
"learning_rate": 8.93447918833914e-06, |
|
"logits/chosen": -0.8046943545341492, |
|
"logits/rejected": -0.7836379408836365, |
|
"logps/chosen": -331.5021057128906, |
|
"logps/rejected": -284.837890625, |
|
"loss": 0.5885, |
|
"rewards/accuracies": 0.684374988079071, |
|
"rewards/chosen": -0.3665297329425812, |
|
"rewards/margins": 0.4427420496940613, |
|
"rewards/rejected": -0.8092718124389648, |
|
"step": 480 |
|
}, |
|
{ |
|
"epoch": 0.2564773619471343, |
|
"grad_norm": 157.0, |
|
"learning_rate": 8.880456541851544e-06, |
|
"logits/chosen": -0.8196650743484497, |
|
"logits/rejected": -0.7406765222549438, |
|
"logps/chosen": -361.732177734375, |
|
"logps/rejected": -315.06097412109375, |
|
"loss": 0.5242, |
|
"rewards/accuracies": 0.7562500238418579, |
|
"rewards/chosen": -0.38585495948791504, |
|
"rewards/margins": 0.6256832480430603, |
|
"rewards/rejected": -1.0115381479263306, |
|
"step": 490 |
|
}, |
|
{ |
|
"epoch": 0.26171159382360637, |
|
"grad_norm": 178.0, |
|
"learning_rate": 8.825270044993963e-06, |
|
"logits/chosen": -0.8560503721237183, |
|
"logits/rejected": -0.7577269077301025, |
|
"logps/chosen": -290.08197021484375, |
|
"logps/rejected": -300.7274169921875, |
|
"loss": 0.5874, |
|
"rewards/accuracies": 0.699999988079071, |
|
"rewards/chosen": -0.3812199532985687, |
|
"rewards/margins": 0.47264352440834045, |
|
"rewards/rejected": -0.853863537311554, |
|
"step": 500 |
|
}, |
|
{ |
|
"epoch": 0.2669458257000785, |
|
"grad_norm": 181.0, |
|
"learning_rate": 8.768936249639632e-06, |
|
"logits/chosen": -0.8684055209159851, |
|
"logits/rejected": -0.8107744455337524, |
|
"logps/chosen": -304.24237060546875, |
|
"logps/rejected": -300.67071533203125, |
|
"loss": 0.5942, |
|
"rewards/accuracies": 0.699999988079071, |
|
"rewards/chosen": -0.4187198281288147, |
|
"rewards/margins": 0.46279722452163696, |
|
"rewards/rejected": -0.8815170526504517, |
|
"step": 510 |
|
}, |
|
{ |
|
"epoch": 0.2721800575765506, |
|
"grad_norm": 167.0, |
|
"learning_rate": 8.711472051766606e-06, |
|
"logits/chosen": -0.8956015706062317, |
|
"logits/rejected": -0.8282148241996765, |
|
"logps/chosen": -320.9289855957031, |
|
"logps/rejected": -309.4191589355469, |
|
"loss": 0.5331, |
|
"rewards/accuracies": 0.7124999761581421, |
|
"rewards/chosen": -0.2678002715110779, |
|
"rewards/margins": 0.5917673707008362, |
|
"rewards/rejected": -0.8595676422119141, |
|
"step": 520 |
|
}, |
|
{ |
|
"epoch": 0.27741428945302277, |
|
"grad_norm": 144.0, |
|
"learning_rate": 8.652894686390205e-06, |
|
"logits/chosen": -0.8632998466491699, |
|
"logits/rejected": -0.8366777300834656, |
|
"logps/chosen": -322.1751708984375, |
|
"logps/rejected": -299.2674865722656, |
|
"loss": 0.5822, |
|
"rewards/accuracies": 0.668749988079071, |
|
"rewards/chosen": -0.3521856367588043, |
|
"rewards/margins": 0.5180864930152893, |
|
"rewards/rejected": -0.870272159576416, |
|
"step": 530 |
|
}, |
|
{ |
|
"epoch": 0.2826485213294949, |
|
"grad_norm": 170.0, |
|
"learning_rate": 8.593221722393789e-06, |
|
"logits/chosen": -0.8790092468261719, |
|
"logits/rejected": -0.8950101137161255, |
|
"logps/chosen": -321.2497253417969, |
|
"logps/rejected": -298.52685546875, |
|
"loss": 0.5664, |
|
"rewards/accuracies": 0.684374988079071, |
|
"rewards/chosen": -0.36168113350868225, |
|
"rewards/margins": 0.5366425514221191, |
|
"rewards/rejected": -0.8983237147331238, |
|
"step": 540 |
|
}, |
|
{ |
|
"epoch": 0.287882753205967, |
|
"grad_norm": 175.0, |
|
"learning_rate": 8.53247105725939e-06, |
|
"logits/chosen": -0.8421729207038879, |
|
"logits/rejected": -0.7963753938674927, |
|
"logps/chosen": -291.4583435058594, |
|
"logps/rejected": -274.41259765625, |
|
"loss": 0.5495, |
|
"rewards/accuracies": 0.734375, |
|
"rewards/chosen": -0.38794368505477905, |
|
"rewards/margins": 0.5787752866744995, |
|
"rewards/rejected": -0.9667190313339233, |
|
"step": 550 |
|
}, |
|
{ |
|
"epoch": 0.29311698508243916, |
|
"grad_norm": 156.0, |
|
"learning_rate": 8.470660911699783e-06, |
|
"logits/chosen": -0.9016990661621094, |
|
"logits/rejected": -0.8757674098014832, |
|
"logps/chosen": -309.50506591796875, |
|
"logps/rejected": -269.655029296875, |
|
"loss": 0.5767, |
|
"rewards/accuracies": 0.6812499761581421, |
|
"rewards/chosen": -0.38373273611068726, |
|
"rewards/margins": 0.5166894197463989, |
|
"rewards/rejected": -0.900422215461731, |
|
"step": 560 |
|
}, |
|
{ |
|
"epoch": 0.29835121695891126, |
|
"grad_norm": 161.0, |
|
"learning_rate": 8.407809824193624e-06, |
|
"logits/chosen": -0.8771435022354126, |
|
"logits/rejected": -0.8630800247192383, |
|
"logps/chosen": -339.09454345703125, |
|
"logps/rejected": -313.4652099609375, |
|
"loss": 0.5698, |
|
"rewards/accuracies": 0.721875011920929, |
|
"rewards/chosen": -0.4092799723148346, |
|
"rewards/margins": 0.5161824226379395, |
|
"rewards/rejected": -0.9254623651504517, |
|
"step": 570 |
|
}, |
|
{ |
|
"epoch": 0.3035854488353834, |
|
"grad_norm": 151.0, |
|
"learning_rate": 8.343936645425277e-06, |
|
"logits/chosen": -0.8246205449104309, |
|
"logits/rejected": -0.7505328059196472, |
|
"logps/chosen": -291.7579650878906, |
|
"logps/rejected": -280.5712890625, |
|
"loss": 0.5235, |
|
"rewards/accuracies": 0.7406250238418579, |
|
"rewards/chosen": -0.3271779716014862, |
|
"rewards/margins": 0.6309775710105896, |
|
"rewards/rejected": -0.9581555128097534, |
|
"step": 580 |
|
}, |
|
{ |
|
"epoch": 0.30881968071185556, |
|
"grad_norm": 146.0, |
|
"learning_rate": 8.279060532630991e-06, |
|
"logits/chosen": -0.8290132284164429, |
|
"logits/rejected": -0.7051485180854797, |
|
"logps/chosen": -324.5104064941406, |
|
"logps/rejected": -305.4620666503906, |
|
"loss": 0.5559, |
|
"rewards/accuracies": 0.7093750238418579, |
|
"rewards/chosen": -0.3787265419960022, |
|
"rewards/margins": 0.5293329358100891, |
|
"rewards/rejected": -0.9080594778060913, |
|
"step": 590 |
|
}, |
|
{ |
|
"epoch": 0.31405391258832765, |
|
"grad_norm": 153.0, |
|
"learning_rate": 8.21320094385316e-06, |
|
"logits/chosen": -0.8384712338447571, |
|
"logits/rejected": -0.8289518356323242, |
|
"logps/chosen": -342.91357421875, |
|
"logps/rejected": -313.59844970703125, |
|
"loss": 0.6357, |
|
"rewards/accuracies": 0.65625, |
|
"rewards/chosen": -0.46543198823928833, |
|
"rewards/margins": 0.41465091705322266, |
|
"rewards/rejected": -0.880082905292511, |
|
"step": 600 |
|
}, |
|
{ |
|
"epoch": 0.3192881444647998, |
|
"grad_norm": 140.0, |
|
"learning_rate": 8.146377632104328e-06, |
|
"logits/chosen": -0.9369222521781921, |
|
"logits/rejected": -0.7993670701980591, |
|
"logps/chosen": -349.84454345703125, |
|
"logps/rejected": -303.2465515136719, |
|
"loss": 0.5107, |
|
"rewards/accuracies": 0.746874988079071, |
|
"rewards/chosen": -0.36076098680496216, |
|
"rewards/margins": 0.6948108673095703, |
|
"rewards/rejected": -1.0555719137191772, |
|
"step": 610 |
|
}, |
|
{ |
|
"epoch": 0.3245223763412719, |
|
"grad_norm": 189.0, |
|
"learning_rate": 8.078610639442761e-06, |
|
"logits/chosen": -0.8361372947692871, |
|
"logits/rejected": -0.8584278225898743, |
|
"logps/chosen": -337.16400146484375, |
|
"logps/rejected": -290.342041015625, |
|
"loss": 0.5923, |
|
"rewards/accuracies": 0.715624988079071, |
|
"rewards/chosen": -0.4377032220363617, |
|
"rewards/margins": 0.4708578586578369, |
|
"rewards/rejected": -0.908561110496521, |
|
"step": 620 |
|
}, |
|
{ |
|
"epoch": 0.32975660821774405, |
|
"grad_norm": 177.0, |
|
"learning_rate": 8.009920290961302e-06, |
|
"logits/chosen": -0.8872283697128296, |
|
"logits/rejected": -0.9255092740058899, |
|
"logps/chosen": -316.68572998046875, |
|
"logps/rejected": -311.52178955078125, |
|
"loss": 0.561, |
|
"rewards/accuracies": 0.706250011920929, |
|
"rewards/chosen": -0.45245128870010376, |
|
"rewards/margins": 0.5874545574188232, |
|
"rewards/rejected": -1.0399057865142822, |
|
"step": 630 |
|
}, |
|
{ |
|
"epoch": 0.33499084009421615, |
|
"grad_norm": 174.0, |
|
"learning_rate": 7.94032718869134e-06, |
|
"logits/chosen": -0.8204278945922852, |
|
"logits/rejected": -0.8602889776229858, |
|
"logps/chosen": -329.909912109375, |
|
"logps/rejected": -296.969970703125, |
|
"loss": 0.5581, |
|
"rewards/accuracies": 0.7124999761581421, |
|
"rewards/chosen": -0.3557141125202179, |
|
"rewards/margins": 0.5819746255874634, |
|
"rewards/rejected": -0.9376887083053589, |
|
"step": 640 |
|
}, |
|
{ |
|
"epoch": 0.3402250719706883, |
|
"grad_norm": 150.0, |
|
"learning_rate": 7.869852205423738e-06, |
|
"logits/chosen": -0.9533544778823853, |
|
"logits/rejected": -0.9056428670883179, |
|
"logps/chosen": -317.16961669921875, |
|
"logps/rejected": -289.48175048828125, |
|
"loss": 0.5714, |
|
"rewards/accuracies": 0.6937500238418579, |
|
"rewards/chosen": -0.40887799859046936, |
|
"rewards/margins": 0.5318127870559692, |
|
"rewards/rejected": -0.940690815448761, |
|
"step": 650 |
|
}, |
|
{ |
|
"epoch": 0.34545930384716045, |
|
"grad_norm": 169.0, |
|
"learning_rate": 7.798516478448514e-06, |
|
"logits/chosen": -0.9307635426521301, |
|
"logits/rejected": -0.9199414253234863, |
|
"logps/chosen": -333.51983642578125, |
|
"logps/rejected": -294.5894470214844, |
|
"loss": 0.5356, |
|
"rewards/accuracies": 0.7250000238418579, |
|
"rewards/chosen": -0.31998270750045776, |
|
"rewards/margins": 0.5815465450286865, |
|
"rewards/rejected": -0.9015293121337891, |
|
"step": 660 |
|
}, |
|
{ |
|
"epoch": 0.35069353572363254, |
|
"grad_norm": 155.0, |
|
"learning_rate": 7.726341403215237e-06, |
|
"logits/chosen": -0.8919773101806641, |
|
"logits/rejected": -0.8845760226249695, |
|
"logps/chosen": -311.11920166015625, |
|
"logps/rejected": -272.05419921875, |
|
"loss": 0.5732, |
|
"rewards/accuracies": 0.684374988079071, |
|
"rewards/chosen": -0.3387020230293274, |
|
"rewards/margins": 0.5716890096664429, |
|
"rewards/rejected": -0.9103911519050598, |
|
"step": 670 |
|
}, |
|
{ |
|
"epoch": 0.3559277676001047, |
|
"grad_norm": 147.0, |
|
"learning_rate": 7.653348626915957e-06, |
|
"logits/chosen": -0.9620792269706726, |
|
"logits/rejected": -0.8952232599258423, |
|
"logps/chosen": -308.39019775390625, |
|
"logps/rejected": -297.09027099609375, |
|
"loss": 0.5217, |
|
"rewards/accuracies": 0.737500011920929, |
|
"rewards/chosen": -0.3744765818119049, |
|
"rewards/margins": 0.6553180813789368, |
|
"rewards/rejected": -1.029794692993164, |
|
"step": 680 |
|
}, |
|
{ |
|
"epoch": 0.3611619994765768, |
|
"grad_norm": 143.0, |
|
"learning_rate": 7.5795600419926595e-06, |
|
"logits/chosen": -0.8404722213745117, |
|
"logits/rejected": -0.8333503603935242, |
|
"logps/chosen": -322.2928161621094, |
|
"logps/rejected": -288.204833984375, |
|
"loss": 0.5424, |
|
"rewards/accuracies": 0.703125, |
|
"rewards/chosen": -0.31403088569641113, |
|
"rewards/margins": 0.5735478401184082, |
|
"rewards/rejected": -0.8875787854194641, |
|
"step": 690 |
|
}, |
|
{ |
|
"epoch": 0.36639623135304894, |
|
"grad_norm": 129.0, |
|
"learning_rate": 7.504997779571134e-06, |
|
"logits/chosen": -0.8435978889465332, |
|
"logits/rejected": -0.8222877383232117, |
|
"logps/chosen": -313.178955078125, |
|
"logps/rejected": -293.8034362792969, |
|
"loss": 0.557, |
|
"rewards/accuracies": 0.731249988079071, |
|
"rewards/chosen": -0.3511705994606018, |
|
"rewards/margins": 0.5561435222625732, |
|
"rewards/rejected": -0.907314121723175, |
|
"step": 700 |
|
}, |
|
{ |
|
"epoch": 0.3716304632295211, |
|
"grad_norm": 171.0, |
|
"learning_rate": 7.429684202823284e-06, |
|
"logits/chosen": -0.8398513793945312, |
|
"logits/rejected": -0.9239638447761536, |
|
"logps/chosen": -337.5401916503906, |
|
"logps/rejected": -296.4908752441406, |
|
"loss": 0.5498, |
|
"rewards/accuracies": 0.690625011920929, |
|
"rewards/chosen": -0.3720497786998749, |
|
"rewards/margins": 0.5916403532028198, |
|
"rewards/rejected": -0.9636901617050171, |
|
"step": 710 |
|
}, |
|
{ |
|
"epoch": 0.3768646951059932, |
|
"grad_norm": 167.0, |
|
"learning_rate": 7.353641900259823e-06, |
|
"logits/chosen": -0.811373233795166, |
|
"logits/rejected": -0.852791965007782, |
|
"logps/chosen": -316.5249328613281, |
|
"logps/rejected": -292.735107421875, |
|
"loss": 0.5612, |
|
"rewards/accuracies": 0.7281249761581421, |
|
"rewards/chosen": -0.4169657826423645, |
|
"rewards/margins": 0.5585461854934692, |
|
"rewards/rejected": -0.975511908531189, |
|
"step": 720 |
|
}, |
|
{ |
|
"epoch": 0.38209892698246534, |
|
"grad_norm": 199.0, |
|
"learning_rate": 7.276893678955387e-06, |
|
"logits/chosen": -0.8853121995925903, |
|
"logits/rejected": -0.7663258910179138, |
|
"logps/chosen": -337.51934814453125, |
|
"logps/rejected": -314.3846740722656, |
|
"loss": 0.5553, |
|
"rewards/accuracies": 0.7281249761581421, |
|
"rewards/chosen": -0.45522037148475647, |
|
"rewards/margins": 0.648741602897644, |
|
"rewards/rejected": -1.1039619445800781, |
|
"step": 730 |
|
}, |
|
{ |
|
"epoch": 0.38733315885893743, |
|
"grad_norm": 144.0, |
|
"learning_rate": 7.199462557708098e-06, |
|
"logits/chosen": -0.9284416437149048, |
|
"logits/rejected": -0.8984587788581848, |
|
"logps/chosen": -281.9061279296875, |
|
"logps/rejected": -279.2416076660156, |
|
"loss": 0.5644, |
|
"rewards/accuracies": 0.684374988079071, |
|
"rewards/chosen": -0.44900256395339966, |
|
"rewards/margins": 0.5480120778083801, |
|
"rewards/rejected": -0.9970146417617798, |
|
"step": 740 |
|
}, |
|
{ |
|
"epoch": 0.3925673907354096, |
|
"grad_norm": 182.0, |
|
"learning_rate": 7.1213717601356245e-06, |
|
"logits/chosen": -0.8627074956893921, |
|
"logits/rejected": -0.886041522026062, |
|
"logps/chosen": -329.6059875488281, |
|
"logps/rejected": -300.946044921875, |
|
"loss": 0.5395, |
|
"rewards/accuracies": 0.731249988079071, |
|
"rewards/chosen": -0.3599586486816406, |
|
"rewards/margins": 0.5973905324935913, |
|
"rewards/rejected": -0.9573491215705872, |
|
"step": 750 |
|
}, |
|
{ |
|
"epoch": 0.39780162261188173, |
|
"grad_norm": 170.0, |
|
"learning_rate": 7.042644707709816e-06, |
|
"logits/chosen": -0.8174997568130493, |
|
"logits/rejected": -0.8082202672958374, |
|
"logps/chosen": -323.0093078613281, |
|
"logps/rejected": -311.2084045410156, |
|
"loss": 0.6079, |
|
"rewards/accuracies": 0.7093750238418579, |
|
"rewards/chosen": -0.4464780390262604, |
|
"rewards/margins": 0.4539749026298523, |
|
"rewards/rejected": -0.9004529714584351, |
|
"step": 760 |
|
}, |
|
{ |
|
"epoch": 0.40303585448835383, |
|
"grad_norm": 188.0, |
|
"learning_rate": 6.963305012731984e-06, |
|
"logits/chosen": -0.92878657579422, |
|
"logits/rejected": -0.8662070035934448, |
|
"logps/chosen": -278.6803894042969, |
|
"logps/rejected": -282.0479736328125, |
|
"loss": 0.5859, |
|
"rewards/accuracies": 0.706250011920929, |
|
"rewards/chosen": -0.4509417414665222, |
|
"rewards/margins": 0.5153474807739258, |
|
"rewards/rejected": -0.966289222240448, |
|
"step": 770 |
|
}, |
|
{ |
|
"epoch": 0.408270086364826, |
|
"grad_norm": 150.0, |
|
"learning_rate": 6.8833764712509554e-06, |
|
"logits/chosen": -0.8979433178901672, |
|
"logits/rejected": -0.8869358897209167, |
|
"logps/chosen": -290.04791259765625, |
|
"logps/rejected": -285.4765319824219, |
|
"loss": 0.5497, |
|
"rewards/accuracies": 0.737500011920929, |
|
"rewards/chosen": -0.34496086835861206, |
|
"rewards/margins": 0.5599159598350525, |
|
"rewards/rejected": -0.9048768877983093, |
|
"step": 780 |
|
}, |
|
{ |
|
"epoch": 0.4135043182412981, |
|
"grad_norm": 169.0, |
|
"learning_rate": 6.802883055926026e-06, |
|
"logits/chosen": -0.9075584411621094, |
|
"logits/rejected": -0.8922864198684692, |
|
"logps/chosen": -308.772216796875, |
|
"logps/rejected": -282.4748229980469, |
|
"loss": 0.5384, |
|
"rewards/accuracies": 0.731249988079071, |
|
"rewards/chosen": -0.36133241653442383, |
|
"rewards/margins": 0.7162569165229797, |
|
"rewards/rejected": -1.0775892734527588, |
|
"step": 790 |
|
}, |
|
{ |
|
"epoch": 0.4187385501177702, |
|
"grad_norm": 205.0, |
|
"learning_rate": 6.721848908836921e-06, |
|
"logits/chosen": -0.9281147718429565, |
|
"logits/rejected": -0.9094434976577759, |
|
"logps/chosen": -352.7889099121094, |
|
"logps/rejected": -301.88079833984375, |
|
"loss": 0.5193, |
|
"rewards/accuracies": 0.7562500238418579, |
|
"rewards/chosen": -0.32397741079330444, |
|
"rewards/margins": 0.6619472503662109, |
|
"rewards/rejected": -0.9859245419502258, |
|
"step": 800 |
|
}, |
|
{ |
|
"epoch": 0.4239727819942423, |
|
"grad_norm": 148.0, |
|
"learning_rate": 6.640298334242959e-06, |
|
"logits/chosen": -0.9870101809501648, |
|
"logits/rejected": -0.9753667712211609, |
|
"logps/chosen": -294.2922058105469, |
|
"logps/rejected": -296.5133361816406, |
|
"loss": 0.5176, |
|
"rewards/accuracies": 0.7562500238418579, |
|
"rewards/chosen": -0.38680583238601685, |
|
"rewards/margins": 0.6525617837905884, |
|
"rewards/rejected": -1.0393675565719604, |
|
"step": 810 |
|
}, |
|
{ |
|
"epoch": 0.42920701387071447, |
|
"grad_norm": 169.0, |
|
"learning_rate": 6.558255791293572e-06, |
|
"logits/chosen": -0.955837607383728, |
|
"logits/rejected": -0.9246317744255066, |
|
"logps/chosen": -326.4897766113281, |
|
"logps/rejected": -303.1717529296875, |
|
"loss": 0.5489, |
|
"rewards/accuracies": 0.7093750238418579, |
|
"rewards/chosen": -0.40366417169570923, |
|
"rewards/margins": 0.6096054315567017, |
|
"rewards/rejected": -1.0132696628570557, |
|
"step": 820 |
|
}, |
|
{ |
|
"epoch": 0.4344412457471866, |
|
"grad_norm": 153.0, |
|
"learning_rate": 6.475745886692361e-06, |
|
"logits/chosen": -0.9159433245658875, |
|
"logits/rejected": -0.9025678634643555, |
|
"logps/chosen": -321.8793029785156, |
|
"logps/rejected": -315.39080810546875, |
|
"loss": 0.5333, |
|
"rewards/accuracies": 0.731249988079071, |
|
"rewards/chosen": -0.34094110131263733, |
|
"rewards/margins": 0.6946324706077576, |
|
"rewards/rejected": -1.0355734825134277, |
|
"step": 830 |
|
}, |
|
{ |
|
"epoch": 0.4396754776236587, |
|
"grad_norm": 204.0, |
|
"learning_rate": 6.392793367316905e-06, |
|
"logits/chosen": -0.9800731539726257, |
|
"logits/rejected": -0.9624758958816528, |
|
"logps/chosen": -308.216796875, |
|
"logps/rejected": -302.4878234863281, |
|
"loss": 0.5318, |
|
"rewards/accuracies": 0.737500011920929, |
|
"rewards/chosen": -0.3719130754470825, |
|
"rewards/margins": 0.6525188088417053, |
|
"rewards/rejected": -1.0244319438934326, |
|
"step": 840 |
|
}, |
|
{ |
|
"epoch": 0.44490970950013087, |
|
"grad_norm": 144.0, |
|
"learning_rate": 6.309423112796529e-06, |
|
"logits/chosen": -1.0125744342803955, |
|
"logits/rejected": -0.8463605642318726, |
|
"logps/chosen": -292.89691162109375, |
|
"logps/rejected": -305.55731201171875, |
|
"loss": 0.5577, |
|
"rewards/accuracies": 0.6875, |
|
"rewards/chosen": -0.3515411913394928, |
|
"rewards/margins": 0.6196914315223694, |
|
"rewards/rejected": -0.9712325930595398, |
|
"step": 850 |
|
}, |
|
{ |
|
"epoch": 0.45014394137660296, |
|
"grad_norm": 536.0, |
|
"learning_rate": 6.225660128050248e-06, |
|
"logits/chosen": -0.9816185832023621, |
|
"logits/rejected": -0.985775351524353, |
|
"logps/chosen": -304.9781494140625, |
|
"logps/rejected": -297.0005187988281, |
|
"loss": 0.5614, |
|
"rewards/accuracies": 0.703125, |
|
"rewards/chosen": -0.43965521454811096, |
|
"rewards/margins": 0.6236945390701294, |
|
"rewards/rejected": -1.063349723815918, |
|
"step": 860 |
|
}, |
|
{ |
|
"epoch": 0.4553781732530751, |
|
"grad_norm": 183.0, |
|
"learning_rate": 6.141529535787139e-06, |
|
"logits/chosen": -0.8935590982437134, |
|
"logits/rejected": -0.9019227027893066, |
|
"logps/chosen": -343.50634765625, |
|
"logps/rejected": -315.0626525878906, |
|
"loss": 0.5335, |
|
"rewards/accuracies": 0.7093750238418579, |
|
"rewards/chosen": -0.34105509519577026, |
|
"rewards/margins": 0.6711279153823853, |
|
"rewards/rejected": -1.0121829509735107, |
|
"step": 870 |
|
}, |
|
{ |
|
"epoch": 0.46061240512954726, |
|
"grad_norm": 192.0, |
|
"learning_rate": 6.057056568971383e-06, |
|
"logits/chosen": -0.9291516542434692, |
|
"logits/rejected": -0.9267807006835938, |
|
"logps/chosen": -323.21820068359375, |
|
"logps/rejected": -304.6665954589844, |
|
"loss": 0.5171, |
|
"rewards/accuracies": 0.737500011920929, |
|
"rewards/chosen": -0.3203659653663635, |
|
"rewards/margins": 0.7515390515327454, |
|
"rewards/rejected": -1.0719050168991089, |
|
"step": 880 |
|
}, |
|
{ |
|
"epoch": 0.46584663700601936, |
|
"grad_norm": 166.0, |
|
"learning_rate": 5.972266563254246e-06, |
|
"logits/chosen": -0.8043352365493774, |
|
"logits/rejected": -0.8548442721366882, |
|
"logps/chosen": -354.04400634765625, |
|
"logps/rejected": -319.2464904785156, |
|
"loss": 0.5413, |
|
"rewards/accuracies": 0.7437499761581421, |
|
"rewards/chosen": -0.4417983889579773, |
|
"rewards/margins": 0.6336969137191772, |
|
"rewards/rejected": -1.0754953622817993, |
|
"step": 890 |
|
}, |
|
{ |
|
"epoch": 0.4710808688824915, |
|
"grad_norm": 159.0, |
|
"learning_rate": 5.887184949375242e-06, |
|
"logits/chosen": -0.9408230781555176, |
|
"logits/rejected": -0.836850643157959, |
|
"logps/chosen": -312.74365234375, |
|
"logps/rejected": -279.9490661621094, |
|
"loss": 0.5246, |
|
"rewards/accuracies": 0.75, |
|
"rewards/chosen": -0.3486204743385315, |
|
"rewards/margins": 0.6904363036155701, |
|
"rewards/rejected": -1.0390568971633911, |
|
"step": 900 |
|
}, |
|
{ |
|
"epoch": 0.4763151007589636, |
|
"grad_norm": 153.0, |
|
"learning_rate": 5.8018372455348e-06, |
|
"logits/chosen": -0.8202241063117981, |
|
"logits/rejected": -0.804111659526825, |
|
"logps/chosen": -328.8868408203125, |
|
"logps/rejected": -298.08758544921875, |
|
"loss": 0.535, |
|
"rewards/accuracies": 0.734375, |
|
"rewards/chosen": -0.3657616972923279, |
|
"rewards/margins": 0.7001040577888489, |
|
"rewards/rejected": -1.0658657550811768, |
|
"step": 910 |
|
}, |
|
{ |
|
"epoch": 0.48154933263543576, |
|
"grad_norm": 169.0, |
|
"learning_rate": 5.71624904974069e-06, |
|
"logits/chosen": -0.8969520330429077, |
|
"logits/rejected": -0.8271117210388184, |
|
"logps/chosen": -333.4234313964844, |
|
"logps/rejected": -317.73291015625, |
|
"loss": 0.5409, |
|
"rewards/accuracies": 0.734375, |
|
"rewards/chosen": -0.4135104715824127, |
|
"rewards/margins": 0.6914039850234985, |
|
"rewards/rejected": -1.1049144268035889, |
|
"step": 920 |
|
}, |
|
{ |
|
"epoch": 0.48678356451190785, |
|
"grad_norm": 131.0, |
|
"learning_rate": 5.630446032130498e-06, |
|
"logits/chosen": -0.9563789367675781, |
|
"logits/rejected": -0.8811948895454407, |
|
"logps/chosen": -319.13946533203125, |
|
"logps/rejected": -310.50225830078125, |
|
"loss": 0.5358, |
|
"rewards/accuracies": 0.71875, |
|
"rewards/chosen": -0.45481568574905396, |
|
"rewards/margins": 0.6629388928413391, |
|
"rewards/rejected": -1.117754578590393, |
|
"step": 930 |
|
}, |
|
{ |
|
"epoch": 0.49201779638838, |
|
"grad_norm": 192.0, |
|
"learning_rate": 5.5444539272724925e-06, |
|
"logits/chosen": -0.9288196563720703, |
|
"logits/rejected": -0.8266056776046753, |
|
"logps/chosen": -318.22003173828125, |
|
"logps/rejected": -311.0535583496094, |
|
"loss": 0.546, |
|
"rewards/accuracies": 0.721875011920929, |
|
"rewards/chosen": -0.5185470581054688, |
|
"rewards/margins": 0.662552535533905, |
|
"rewards/rejected": -1.1810996532440186, |
|
"step": 940 |
|
}, |
|
{ |
|
"epoch": 0.49725202826485215, |
|
"grad_norm": 174.0, |
|
"learning_rate": 5.458298526447155e-06, |
|
"logits/chosen": -0.8332160711288452, |
|
"logits/rejected": -0.804090678691864, |
|
"logps/chosen": -317.2091369628906, |
|
"logps/rejected": -291.1934509277344, |
|
"loss": 0.5697, |
|
"rewards/accuracies": 0.6937500238418579, |
|
"rewards/chosen": -0.4483262598514557, |
|
"rewards/margins": 0.6129515171051025, |
|
"rewards/rejected": -1.0612777471542358, |
|
"step": 950 |
|
}, |
|
{ |
|
"epoch": 0.5024862601413242, |
|
"grad_norm": 186.0, |
|
"learning_rate": 5.372005669911694e-06, |
|
"logits/chosen": -1.0038440227508545, |
|
"logits/rejected": -0.8753064274787903, |
|
"logps/chosen": -294.107177734375, |
|
"logps/rejected": -283.8023986816406, |
|
"loss": 0.564, |
|
"rewards/accuracies": 0.699999988079071, |
|
"rewards/chosen": -0.44227105379104614, |
|
"rewards/margins": 0.606177568435669, |
|
"rewards/rejected": -1.0484486818313599, |
|
"step": 960 |
|
}, |
|
{ |
|
"epoch": 0.5077204920177963, |
|
"grad_norm": 174.0, |
|
"learning_rate": 5.285601239149875e-06, |
|
"logits/chosen": -0.8685712814331055, |
|
"logits/rejected": -0.8939132690429688, |
|
"logps/chosen": -333.6595458984375, |
|
"logps/rejected": -310.3068542480469, |
|
"loss": 0.5889, |
|
"rewards/accuracies": 0.671875, |
|
"rewards/chosen": -0.44459280371665955, |
|
"rewards/margins": 0.5591896772384644, |
|
"rewards/rejected": -1.0037825107574463, |
|
"step": 970 |
|
}, |
|
{ |
|
"epoch": 0.5129547238942685, |
|
"grad_norm": 193.0, |
|
"learning_rate": 5.199111149109498e-06, |
|
"logits/chosen": -1.0634597539901733, |
|
"logits/rejected": -0.8613122701644897, |
|
"logps/chosen": -283.17938232421875, |
|
"logps/rejected": -279.54193115234375, |
|
"loss": 0.5775, |
|
"rewards/accuracies": 0.690625011920929, |
|
"rewards/chosen": -0.5285903811454773, |
|
"rewards/margins": 0.5923768877983093, |
|
"rewards/rejected": -1.1209673881530762, |
|
"step": 980 |
|
}, |
|
{ |
|
"epoch": 0.5181889557707406, |
|
"grad_norm": 168.0, |
|
"learning_rate": 5.112561340429817e-06, |
|
"logits/chosen": -0.8614373207092285, |
|
"logits/rejected": -0.9538102149963379, |
|
"logps/chosen": -309.36962890625, |
|
"logps/rejected": -279.4532165527344, |
|
"loss": 0.543, |
|
"rewards/accuracies": 0.715624988079071, |
|
"rewards/chosen": -0.4422277510166168, |
|
"rewards/margins": 0.6267827153205872, |
|
"rewards/rejected": -1.0690104961395264, |
|
"step": 990 |
|
}, |
|
{ |
|
"epoch": 0.5234231876472127, |
|
"grad_norm": 182.0, |
|
"learning_rate": 5.0259777716612665e-06, |
|
"logits/chosen": -0.9109832644462585, |
|
"logits/rejected": -0.8882852792739868, |
|
"logps/chosen": -338.4478759765625, |
|
"logps/rejected": -318.2439880371094, |
|
"loss": 0.599, |
|
"rewards/accuracies": 0.706250011920929, |
|
"rewards/chosen": -0.4068952202796936, |
|
"rewards/margins": 0.5494095683097839, |
|
"rewards/rejected": -0.9563047289848328, |
|
"step": 1000 |
|
}, |
|
{ |
|
"epoch": 0.528657419523685, |
|
"grad_norm": 199.0, |
|
"learning_rate": 4.939386411479814e-06, |
|
"logits/chosen": -0.8180404901504517, |
|
"logits/rejected": -0.8569936752319336, |
|
"logps/chosen": -333.63800048828125, |
|
"logps/rejected": -328.7806396484375, |
|
"loss": 0.6057, |
|
"rewards/accuracies": 0.684374988079071, |
|
"rewards/chosen": -0.379621684551239, |
|
"rewards/margins": 0.5147837996482849, |
|
"rewards/rejected": -0.8944054841995239, |
|
"step": 1010 |
|
}, |
|
{ |
|
"epoch": 0.533891651400157, |
|
"grad_norm": 188.0, |
|
"learning_rate": 4.85281323089828e-06, |
|
"logits/chosen": -0.8358771204948425, |
|
"logits/rejected": -0.8983470797538757, |
|
"logps/chosen": -348.06158447265625, |
|
"logps/rejected": -319.80999755859375, |
|
"loss": 0.6015, |
|
"rewards/accuracies": 0.6812499761581421, |
|
"rewards/chosen": -0.4141436517238617, |
|
"rewards/margins": 0.5129367709159851, |
|
"rewards/rejected": -0.9270804524421692, |
|
"step": 1020 |
|
}, |
|
{ |
|
"epoch": 0.5391258832766291, |
|
"grad_norm": 157.0, |
|
"learning_rate": 4.766284195476943e-06, |
|
"logits/chosen": -0.8500849604606628, |
|
"logits/rejected": -0.9064255952835083, |
|
"logps/chosen": -324.3903503417969, |
|
"logps/rejected": -303.69232177734375, |
|
"loss": 0.502, |
|
"rewards/accuracies": 0.746874988079071, |
|
"rewards/chosen": -0.2495235949754715, |
|
"rewards/margins": 0.7585932016372681, |
|
"rewards/rejected": -1.0081168413162231, |
|
"step": 1030 |
|
}, |
|
{ |
|
"epoch": 0.5443601151531012, |
|
"grad_norm": 152.0, |
|
"learning_rate": 4.679825257535795e-06, |
|
"logits/chosen": -0.8723493814468384, |
|
"logits/rejected": -0.8765783309936523, |
|
"logps/chosen": -320.4819641113281, |
|
"logps/rejected": -290.561279296875, |
|
"loss": 0.51, |
|
"rewards/accuracies": 0.753125011920929, |
|
"rewards/chosen": -0.26510706543922424, |
|
"rewards/margins": 0.7340017557144165, |
|
"rewards/rejected": -0.9991087913513184, |
|
"step": 1040 |
|
}, |
|
{ |
|
"epoch": 0.5495943470295734, |
|
"grad_norm": 202.0, |
|
"learning_rate": 4.593462348370759e-06, |
|
"logits/chosen": -0.9302495718002319, |
|
"logits/rejected": -0.9009528160095215, |
|
"logps/chosen": -312.91815185546875, |
|
"logps/rejected": -297.0504150390625, |
|
"loss": 0.546, |
|
"rewards/accuracies": 0.734375, |
|
"rewards/chosen": -0.2718696594238281, |
|
"rewards/margins": 0.640564501285553, |
|
"rewards/rejected": -0.9124342203140259, |
|
"step": 1050 |
|
}, |
|
{ |
|
"epoch": 0.5548285789060455, |
|
"grad_norm": 161.0, |
|
"learning_rate": 4.507221370476223e-06, |
|
"logits/chosen": -0.8530869483947754, |
|
"logits/rejected": -0.8366998434066772, |
|
"logps/chosen": -320.64697265625, |
|
"logps/rejected": -315.04351806640625, |
|
"loss": 0.5187, |
|
"rewards/accuracies": 0.715624988079071, |
|
"rewards/chosen": -0.3033106327056885, |
|
"rewards/margins": 0.733523964881897, |
|
"rewards/rejected": -1.0368345975875854, |
|
"step": 1060 |
|
}, |
|
{ |
|
"epoch": 0.5600628107825176, |
|
"grad_norm": 121.0, |
|
"learning_rate": 4.421128189776195e-06, |
|
"logits/chosen": -0.9436659812927246, |
|
"logits/rejected": -0.9004136919975281, |
|
"logps/chosen": -289.570556640625, |
|
"logps/rejected": -256.1936950683594, |
|
"loss": 0.5189, |
|
"rewards/accuracies": 0.721875011920929, |
|
"rewards/chosen": -0.35822635889053345, |
|
"rewards/margins": 0.7020615339279175, |
|
"rewards/rejected": -1.0602879524230957, |
|
"step": 1070 |
|
}, |
|
{ |
|
"epoch": 0.5652970426589898, |
|
"grad_norm": 144.0, |
|
"learning_rate": 4.335208627866438e-06, |
|
"logits/chosen": -0.8877571821212769, |
|
"logits/rejected": -0.8537706136703491, |
|
"logps/chosen": -314.6681213378906, |
|
"logps/rejected": -278.1452941894531, |
|
"loss": 0.5373, |
|
"rewards/accuracies": 0.699999988079071, |
|
"rewards/chosen": -0.3358023762702942, |
|
"rewards/margins": 0.6447587609291077, |
|
"rewards/rejected": -0.9805610775947571, |
|
"step": 1080 |
|
}, |
|
{ |
|
"epoch": 0.5705312745354619, |
|
"grad_norm": 172.0, |
|
"learning_rate": 4.249488454269908e-06, |
|
"logits/chosen": -0.8639429211616516, |
|
"logits/rejected": -0.7370141744613647, |
|
"logps/chosen": -325.21002197265625, |
|
"logps/rejected": -312.51263427734375, |
|
"loss": 0.5401, |
|
"rewards/accuracies": 0.7250000238418579, |
|
"rewards/chosen": -0.3743375241756439, |
|
"rewards/margins": 0.6280248761177063, |
|
"rewards/rejected": -1.0023624897003174, |
|
"step": 1090 |
|
}, |
|
{ |
|
"epoch": 0.575765506411934, |
|
"grad_norm": 156.0, |
|
"learning_rate": 4.163993378707786e-06, |
|
"logits/chosen": -0.9418320655822754, |
|
"logits/rejected": -0.8720990419387817, |
|
"logps/chosen": -289.9245910644531, |
|
"logps/rejected": -276.4418640136719, |
|
"loss": 0.538, |
|
"rewards/accuracies": 0.734375, |
|
"rewards/chosen": -0.29466715455055237, |
|
"rewards/margins": 0.6992303133010864, |
|
"rewards/rejected": -0.9938974380493164, |
|
"step": 1100 |
|
}, |
|
{ |
|
"epoch": 0.5809997382884062, |
|
"grad_norm": 177.0, |
|
"learning_rate": 4.0787490433884685e-06, |
|
"logits/chosen": -0.8965393304824829, |
|
"logits/rejected": -0.8370422124862671, |
|
"logps/chosen": -297.08758544921875, |
|
"logps/rejected": -280.4624328613281, |
|
"loss": 0.5657, |
|
"rewards/accuracies": 0.703125, |
|
"rewards/chosen": -0.40518927574157715, |
|
"rewards/margins": 0.5503846406936646, |
|
"rewards/rejected": -0.9555739164352417, |
|
"step": 1110 |
|
}, |
|
{ |
|
"epoch": 0.5862339701648783, |
|
"grad_norm": 167.0, |
|
"learning_rate": 3.993781015316802e-06, |
|
"logits/chosen": -0.8705169558525085, |
|
"logits/rejected": -0.8391069173812866, |
|
"logps/chosen": -342.8613586425781, |
|
"logps/rejected": -305.7508544921875, |
|
"loss": 0.571, |
|
"rewards/accuracies": 0.684374988079071, |
|
"rewards/chosen": -0.4089958071708679, |
|
"rewards/margins": 0.5938762426376343, |
|
"rewards/rejected": -1.002872109413147, |
|
"step": 1120 |
|
}, |
|
{ |
|
"epoch": 0.5914682020413504, |
|
"grad_norm": 226.0, |
|
"learning_rate": 3.909114778625861e-06, |
|
"logits/chosen": -0.9167736172676086, |
|
"logits/rejected": -0.9743949770927429, |
|
"logps/chosen": -350.214599609375, |
|
"logps/rejected": -290.0761413574219, |
|
"loss": 0.5093, |
|
"rewards/accuracies": 0.75, |
|
"rewards/chosen": -0.34655433893203735, |
|
"rewards/margins": 0.7391494512557983, |
|
"rewards/rejected": -1.0857036113739014, |
|
"step": 1130 |
|
}, |
|
{ |
|
"epoch": 0.5967024339178225, |
|
"grad_norm": 169.0, |
|
"learning_rate": 3.824775726933596e-06, |
|
"logits/chosen": -0.8415164947509766, |
|
"logits/rejected": -0.8204275369644165, |
|
"logps/chosen": -316.42401123046875, |
|
"logps/rejected": -277.1077575683594, |
|
"loss": 0.5346, |
|
"rewards/accuracies": 0.753125011920929, |
|
"rewards/chosen": -0.3973461985588074, |
|
"rewards/margins": 0.6509358286857605, |
|
"rewards/rejected": -1.0482819080352783, |
|
"step": 1140 |
|
}, |
|
{ |
|
"epoch": 0.6019366657942947, |
|
"grad_norm": 213.0, |
|
"learning_rate": 3.7407891557266242e-06, |
|
"logits/chosen": -0.8892973065376282, |
|
"logits/rejected": -0.863516628742218, |
|
"logps/chosen": -306.97357177734375, |
|
"logps/rejected": -302.066650390625, |
|
"loss": 0.559, |
|
"rewards/accuracies": 0.703125, |
|
"rewards/chosen": -0.4817911982536316, |
|
"rewards/margins": 0.6823488473892212, |
|
"rewards/rejected": -1.164139986038208, |
|
"step": 1150 |
|
}, |
|
{ |
|
"epoch": 0.6071708976707668, |
|
"grad_norm": 201.0, |
|
"learning_rate": 3.6571802547734457e-06, |
|
"logits/chosen": -0.8766089677810669, |
|
"logits/rejected": -0.8270937204360962, |
|
"logps/chosen": -313.2008361816406, |
|
"logps/rejected": -300.48834228515625, |
|
"loss": 0.5238, |
|
"rewards/accuracies": 0.753125011920929, |
|
"rewards/chosen": -0.40795597434043884, |
|
"rewards/margins": 0.7067397236824036, |
|
"rewards/rejected": -1.11469566822052, |
|
"step": 1160 |
|
}, |
|
{ |
|
"epoch": 0.6124051295472389, |
|
"grad_norm": 160.0, |
|
"learning_rate": 3.5739741005693807e-06, |
|
"logits/chosen": -0.8759579658508301, |
|
"logits/rejected": -0.7904466390609741, |
|
"logps/chosen": -339.73529052734375, |
|
"logps/rejected": -317.6925354003906, |
|
"loss": 0.5403, |
|
"rewards/accuracies": 0.721875011920929, |
|
"rewards/chosen": -0.4018010199069977, |
|
"rewards/margins": 0.6779440641403198, |
|
"rewards/rejected": -1.0797450542449951, |
|
"step": 1170 |
|
}, |
|
{ |
|
"epoch": 0.6176393614237111, |
|
"grad_norm": 160.0, |
|
"learning_rate": 3.4911956488154696e-06, |
|
"logits/chosen": -0.9535796046257019, |
|
"logits/rejected": -0.9712865948677063, |
|
"logps/chosen": -307.1703796386719, |
|
"logps/rejected": -287.9497985839844, |
|
"loss": 0.5892, |
|
"rewards/accuracies": 0.721875011920929, |
|
"rewards/chosen": -0.48305121064186096, |
|
"rewards/margins": 0.558946967124939, |
|
"rewards/rejected": -1.0419981479644775, |
|
"step": 1180 |
|
}, |
|
{ |
|
"epoch": 0.6228735933001832, |
|
"grad_norm": 155.0, |
|
"learning_rate": 3.4088697269336045e-06, |
|
"logits/chosen": -0.8738953471183777, |
|
"logits/rejected": -0.8270707130432129, |
|
"logps/chosen": -328.30377197265625, |
|
"logps/rejected": -285.1355895996094, |
|
"loss": 0.5035, |
|
"rewards/accuracies": 0.762499988079071, |
|
"rewards/chosen": -0.33157122135162354, |
|
"rewards/margins": 0.7550631761550903, |
|
"rewards/rejected": -1.0866343975067139, |
|
"step": 1190 |
|
}, |
|
{ |
|
"epoch": 0.6281078251766553, |
|
"grad_norm": 206.0, |
|
"learning_rate": 3.3270210266201373e-06, |
|
"logits/chosen": -0.8643859624862671, |
|
"logits/rejected": -0.7990683913230896, |
|
"logps/chosen": -318.6554260253906, |
|
"logps/rejected": -300.69342041015625, |
|
"loss": 0.5806, |
|
"rewards/accuracies": 0.71875, |
|
"rewards/chosen": -0.49601760506629944, |
|
"rewards/margins": 0.5823889374732971, |
|
"rewards/rejected": -1.078406572341919, |
|
"step": 1200 |
|
}, |
|
{ |
|
"epoch": 0.6333420570531274, |
|
"grad_norm": 137.0, |
|
"learning_rate": 3.2456740964401977e-06, |
|
"logits/chosen": -0.812393844127655, |
|
"logits/rejected": -0.7732648849487305, |
|
"logps/chosen": -319.3125, |
|
"logps/rejected": -310.7963562011719, |
|
"loss": 0.5273, |
|
"rewards/accuracies": 0.7281249761581421, |
|
"rewards/chosen": -0.3820917010307312, |
|
"rewards/margins": 0.6966260075569153, |
|
"rewards/rejected": -1.078717589378357, |
|
"step": 1210 |
|
}, |
|
{ |
|
"epoch": 0.6385762889295996, |
|
"grad_norm": 169.0, |
|
"learning_rate": 3.1648533344649303e-06, |
|
"logits/chosen": -0.9099327325820923, |
|
"logits/rejected": -0.7881625890731812, |
|
"logps/chosen": -304.8463134765625, |
|
"logps/rejected": -320.203857421875, |
|
"loss": 0.5052, |
|
"rewards/accuracies": 0.7562500238418579, |
|
"rewards/chosen": -0.39941444993019104, |
|
"rewards/margins": 0.6926519870758057, |
|
"rewards/rejected": -1.0920665264129639, |
|
"step": 1220 |
|
}, |
|
{ |
|
"epoch": 0.6438105208060717, |
|
"grad_norm": 195.0, |
|
"learning_rate": 3.084582980953881e-06, |
|
"logits/chosen": -0.8426934480667114, |
|
"logits/rejected": -0.8099411725997925, |
|
"logps/chosen": -350.9564208984375, |
|
"logps/rejected": -283.6780090332031, |
|
"loss": 0.5306, |
|
"rewards/accuracies": 0.7124999761581421, |
|
"rewards/chosen": -0.38103926181793213, |
|
"rewards/margins": 0.6722493171691895, |
|
"rewards/rejected": -1.0532886981964111, |
|
"step": 1230 |
|
}, |
|
{ |
|
"epoch": 0.6490447526825438, |
|
"grad_norm": 180.0, |
|
"learning_rate": 3.0048871110847043e-06, |
|
"logits/chosen": -0.8573587536811829, |
|
"logits/rejected": -0.9277232885360718, |
|
"logps/chosen": -328.5947570800781, |
|
"logps/rejected": -296.3069763183594, |
|
"loss": 0.5324, |
|
"rewards/accuracies": 0.7250000238418579, |
|
"rewards/chosen": -0.3769867420196533, |
|
"rewards/margins": 0.7075929641723633, |
|
"rewards/rejected": -1.084579586982727, |
|
"step": 1240 |
|
}, |
|
{ |
|
"epoch": 0.654278984559016, |
|
"grad_norm": 181.0, |
|
"learning_rate": 2.925789627732395e-06, |
|
"logits/chosen": -0.933085560798645, |
|
"logits/rejected": -0.8861294984817505, |
|
"logps/chosen": -324.4852294921875, |
|
"logps/rejected": -301.6151428222656, |
|
"loss": 0.5455, |
|
"rewards/accuracies": 0.746874988079071, |
|
"rewards/chosen": -0.4185728132724762, |
|
"rewards/margins": 0.7384505271911621, |
|
"rewards/rejected": -1.1570234298706055, |
|
"step": 1250 |
|
}, |
|
{ |
|
"epoch": 0.6595132164354881, |
|
"grad_norm": 155.0, |
|
"learning_rate": 2.8473142543001818e-06, |
|
"logits/chosen": -0.9332107305526733, |
|
"logits/rejected": -0.8888304829597473, |
|
"logps/chosen": -287.0394592285156, |
|
"logps/rejected": -274.85369873046875, |
|
"loss": 0.5511, |
|
"rewards/accuracies": 0.6968749761581421, |
|
"rewards/chosen": -0.48146143555641174, |
|
"rewards/margins": 0.663101077079773, |
|
"rewards/rejected": -1.1445624828338623, |
|
"step": 1260 |
|
}, |
|
{ |
|
"epoch": 0.6647474483119602, |
|
"grad_norm": 181.0, |
|
"learning_rate": 2.7694845276042714e-06, |
|
"logits/chosen": -0.9158511161804199, |
|
"logits/rejected": -0.9241366386413574, |
|
"logps/chosen": -326.44476318359375, |
|
"logps/rejected": -304.80364990234375, |
|
"loss": 0.5375, |
|
"rewards/accuracies": 0.734375, |
|
"rewards/chosen": -0.45183902978897095, |
|
"rewards/margins": 0.6956654787063599, |
|
"rewards/rejected": -1.1475045680999756, |
|
"step": 1270 |
|
}, |
|
{ |
|
"epoch": 0.6699816801884323, |
|
"grad_norm": 182.0, |
|
"learning_rate": 2.6923237908145227e-06, |
|
"logits/chosen": -0.8813109397888184, |
|
"logits/rejected": -0.8199933171272278, |
|
"logps/chosen": -290.83551025390625, |
|
"logps/rejected": -308.5135498046875, |
|
"loss": 0.5145, |
|
"rewards/accuracies": 0.7250000238418579, |
|
"rewards/chosen": -0.4068705141544342, |
|
"rewards/margins": 0.7652591466903687, |
|
"rewards/rejected": -1.1721296310424805, |
|
"step": 1280 |
|
}, |
|
{ |
|
"epoch": 0.6752159120649045, |
|
"grad_norm": 183.0, |
|
"learning_rate": 2.615855186453241e-06, |
|
"logits/chosen": -0.88966304063797, |
|
"logits/rejected": -0.817095160484314, |
|
"logps/chosen": -320.98590087890625, |
|
"logps/rejected": -317.4378662109375, |
|
"loss": 0.5224, |
|
"rewards/accuracies": 0.734375, |
|
"rewards/chosen": -0.37296873331069946, |
|
"rewards/margins": 0.7525426149368286, |
|
"rewards/rejected": -1.1255114078521729, |
|
"step": 1290 |
|
}, |
|
{ |
|
"epoch": 0.6804501439413766, |
|
"grad_norm": 206.0, |
|
"learning_rate": 2.5401016494541193e-06, |
|
"logits/chosen": -0.9210039973258972, |
|
"logits/rejected": -0.8467925190925598, |
|
"logps/chosen": -308.42462158203125, |
|
"logps/rejected": -304.9708557128906, |
|
"loss": 0.5587, |
|
"rewards/accuracies": 0.6875, |
|
"rewards/chosen": -0.4827396273612976, |
|
"rewards/margins": 0.6354286074638367, |
|
"rewards/rejected": -1.1181684732437134, |
|
"step": 1300 |
|
}, |
|
{ |
|
"epoch": 0.6856843758178487, |
|
"grad_norm": 194.0, |
|
"learning_rate": 2.4650859002834465e-06, |
|
"logits/chosen": -0.8904461860656738, |
|
"logits/rejected": -0.7507106065750122, |
|
"logps/chosen": -313.8423767089844, |
|
"logps/rejected": -304.7760925292969, |
|
"loss": 0.5096, |
|
"rewards/accuracies": 0.734375, |
|
"rewards/chosen": -0.38607197999954224, |
|
"rewards/margins": 0.7296286225318909, |
|
"rewards/rejected": -1.1157004833221436, |
|
"step": 1310 |
|
}, |
|
{ |
|
"epoch": 0.6909186076943209, |
|
"grad_norm": 182.0, |
|
"learning_rate": 2.390830438125661e-06, |
|
"logits/chosen": -0.9327608346939087, |
|
"logits/rejected": -0.9067710638046265, |
|
"logps/chosen": -321.62408447265625, |
|
"logps/rejected": -300.6474914550781, |
|
"loss": 0.5848, |
|
"rewards/accuracies": 0.706250011920929, |
|
"rewards/chosen": -0.46891599893569946, |
|
"rewards/margins": 0.5822906494140625, |
|
"rewards/rejected": -1.0512068271636963, |
|
"step": 1320 |
|
}, |
|
{ |
|
"epoch": 0.696152839570793, |
|
"grad_norm": 174.0, |
|
"learning_rate": 2.3173575341352457e-06, |
|
"logits/chosen": -0.8878030776977539, |
|
"logits/rejected": -0.838133692741394, |
|
"logps/chosen": -314.01605224609375, |
|
"logps/rejected": -297.20574951171875, |
|
"loss": 0.5795, |
|
"rewards/accuracies": 0.690625011920929, |
|
"rewards/chosen": -0.38449767231941223, |
|
"rewards/margins": 0.6112931966781616, |
|
"rewards/rejected": -0.9957907795906067, |
|
"step": 1330 |
|
}, |
|
{ |
|
"epoch": 0.7013870714472651, |
|
"grad_norm": 163.0, |
|
"learning_rate": 2.2446892247570257e-06, |
|
"logits/chosen": -0.8643558621406555, |
|
"logits/rejected": -0.7711443901062012, |
|
"logps/chosen": -315.4075012207031, |
|
"logps/rejected": -305.1708679199219, |
|
"loss": 0.5643, |
|
"rewards/accuracies": 0.715624988079071, |
|
"rewards/chosen": -0.3955213129520416, |
|
"rewards/margins": 0.6182621717453003, |
|
"rewards/rejected": -1.0137834548950195, |
|
"step": 1340 |
|
}, |
|
{ |
|
"epoch": 0.7066213033237373, |
|
"grad_norm": 181.0, |
|
"learning_rate": 2.172847305116872e-06, |
|
"logits/chosen": -0.9164339900016785, |
|
"logits/rejected": -0.8563045263290405, |
|
"logps/chosen": -315.6224670410156, |
|
"logps/rejected": -294.5616149902344, |
|
"loss": 0.5792, |
|
"rewards/accuracies": 0.675000011920929, |
|
"rewards/chosen": -0.30818018317222595, |
|
"rewards/margins": 0.5649989247322083, |
|
"rewards/rejected": -0.8731790781021118, |
|
"step": 1350 |
|
}, |
|
{ |
|
"epoch": 0.7118555352002094, |
|
"grad_norm": 178.0, |
|
"learning_rate": 2.1018533224847638e-06, |
|
"logits/chosen": -0.8276359438896179, |
|
"logits/rejected": -0.8118055462837219, |
|
"logps/chosen": -341.2742614746094, |
|
"logps/rejected": -308.45458984375, |
|
"loss": 0.55, |
|
"rewards/accuracies": 0.7406250238418579, |
|
"rewards/chosen": -0.36877232789993286, |
|
"rewards/margins": 0.7281702160835266, |
|
"rewards/rejected": -1.0969425439834595, |
|
"step": 1360 |
|
}, |
|
{ |
|
"epoch": 0.7170897670766815, |
|
"grad_norm": 171.0, |
|
"learning_rate": 2.0317285698122035e-06, |
|
"logits/chosen": -0.9565666317939758, |
|
"logits/rejected": -0.853268027305603, |
|
"logps/chosen": -309.7164611816406, |
|
"logps/rejected": -302.1435546875, |
|
"loss": 0.5566, |
|
"rewards/accuracies": 0.715624988079071, |
|
"rewards/chosen": -0.36152976751327515, |
|
"rewards/margins": 0.7023038268089294, |
|
"rewards/rejected": -1.063833475112915, |
|
"step": 1370 |
|
}, |
|
{ |
|
"epoch": 0.7223239989531536, |
|
"grad_norm": 178.0, |
|
"learning_rate": 1.962494079345906e-06, |
|
"logits/chosen": -0.9673864245414734, |
|
"logits/rejected": -0.9198382496833801, |
|
"logps/chosen": -346.85162353515625, |
|
"logps/rejected": -308.24371337890625, |
|
"loss": 0.5453, |
|
"rewards/accuracies": 0.7124999761581421, |
|
"rewards/chosen": -0.33431005477905273, |
|
"rewards/margins": 0.7109225988388062, |
|
"rewards/rejected": -1.0452326536178589, |
|
"step": 1380 |
|
}, |
|
{ |
|
"epoch": 0.7275582308296258, |
|
"grad_norm": 155.0, |
|
"learning_rate": 1.8941706163196676e-06, |
|
"logits/chosen": -0.8943386077880859, |
|
"logits/rejected": -0.7837470769882202, |
|
"logps/chosen": -283.0511779785156, |
|
"logps/rejected": -283.2401428222656, |
|
"loss": 0.5394, |
|
"rewards/accuracies": 0.7250000238418579, |
|
"rewards/chosen": -0.3476119637489319, |
|
"rewards/margins": 0.6372144818305969, |
|
"rewards/rejected": -0.9848264455795288, |
|
"step": 1390 |
|
}, |
|
{ |
|
"epoch": 0.7327924627060979, |
|
"grad_norm": 162.0, |
|
"learning_rate": 1.8267786727263426e-06, |
|
"logits/chosen": -0.8945984840393066, |
|
"logits/rejected": -0.8007863163948059, |
|
"logps/chosen": -309.67120361328125, |
|
"logps/rejected": -291.51446533203125, |
|
"loss": 0.5431, |
|
"rewards/accuracies": 0.71875, |
|
"rewards/chosen": -0.37134069204330444, |
|
"rewards/margins": 0.6036137342453003, |
|
"rewards/rejected": -0.9749544858932495, |
|
"step": 1400 |
|
}, |
|
{ |
|
"epoch": 0.73802669458257, |
|
"grad_norm": 141.0, |
|
"learning_rate": 1.760338461171755e-06, |
|
"logits/chosen": -0.875231921672821, |
|
"logits/rejected": -0.8146468997001648, |
|
"logps/chosen": -297.78192138671875, |
|
"logps/rejected": -298.21551513671875, |
|
"loss": 0.59, |
|
"rewards/accuracies": 0.715624988079071, |
|
"rewards/chosen": -0.44891977310180664, |
|
"rewards/margins": 0.5382051467895508, |
|
"rewards/rejected": -0.9871249198913574, |
|
"step": 1410 |
|
}, |
|
{ |
|
"epoch": 0.7432609264590422, |
|
"grad_norm": 159.0, |
|
"learning_rate": 1.6948699088123992e-06, |
|
"logits/chosen": -0.9376803636550903, |
|
"logits/rejected": -0.9141248464584351, |
|
"logps/chosen": -302.29644775390625, |
|
"logps/rejected": -283.9207763671875, |
|
"loss": 0.5259, |
|
"rewards/accuracies": 0.75, |
|
"rewards/chosen": -0.3630935549736023, |
|
"rewards/margins": 0.6829047203063965, |
|
"rewards/rejected": -1.0459983348846436, |
|
"step": 1420 |
|
}, |
|
{ |
|
"epoch": 0.7484951583355143, |
|
"grad_norm": 196.0, |
|
"learning_rate": 1.6303926513787821e-06, |
|
"logits/chosen": -0.9451939463615417, |
|
"logits/rejected": -0.9748193025588989, |
|
"logps/chosen": -307.4441833496094, |
|
"logps/rejected": -287.5481262207031, |
|
"loss": 0.5307, |
|
"rewards/accuracies": 0.715624988079071, |
|
"rewards/chosen": -0.3940843641757965, |
|
"rewards/margins": 0.6989505887031555, |
|
"rewards/rejected": -1.0930348634719849, |
|
"step": 1430 |
|
}, |
|
{ |
|
"epoch": 0.7537293902119864, |
|
"grad_norm": 184.0, |
|
"learning_rate": 1.5669260272861426e-06, |
|
"logits/chosen": -0.8915554881095886, |
|
"logits/rejected": -0.8385494351387024, |
|
"logps/chosen": -306.67230224609375, |
|
"logps/rejected": -312.32977294921875, |
|
"loss": 0.5312, |
|
"rewards/accuracies": 0.721875011920929, |
|
"rewards/chosen": -0.38271576166152954, |
|
"rewards/margins": 0.735810399055481, |
|
"rewards/rejected": -1.1185262203216553, |
|
"step": 1440 |
|
}, |
|
{ |
|
"epoch": 0.7589636220884585, |
|
"grad_norm": 164.0, |
|
"learning_rate": 1.5044890718343535e-06, |
|
"logits/chosen": -0.947296142578125, |
|
"logits/rejected": -0.9365224838256836, |
|
"logps/chosen": -295.16595458984375, |
|
"logps/rejected": -293.3561096191406, |
|
"loss": 0.5753, |
|
"rewards/accuracies": 0.703125, |
|
"rewards/chosen": -0.4716934263706207, |
|
"rewards/margins": 0.5845819711685181, |
|
"rewards/rejected": -1.056275486946106, |
|
"step": 1450 |
|
}, |
|
{ |
|
"epoch": 0.7641978539649307, |
|
"grad_norm": 186.0, |
|
"learning_rate": 1.4431005114987485e-06, |
|
"logits/chosen": -0.8192672729492188, |
|
"logits/rejected": -0.7873446345329285, |
|
"logps/chosen": -359.31671142578125, |
|
"logps/rejected": -324.53302001953125, |
|
"loss": 0.5399, |
|
"rewards/accuracies": 0.731249988079071, |
|
"rewards/chosen": -0.3305516242980957, |
|
"rewards/margins": 0.666082501411438, |
|
"rewards/rejected": -0.9966341853141785, |
|
"step": 1460 |
|
}, |
|
{ |
|
"epoch": 0.7694320858414028, |
|
"grad_norm": 153.0, |
|
"learning_rate": 1.3827787583135533e-06, |
|
"logits/chosen": -0.9230923652648926, |
|
"logits/rejected": -0.8657926321029663, |
|
"logps/chosen": -316.85125732421875, |
|
"logps/rejected": -306.98846435546875, |
|
"loss": 0.5881, |
|
"rewards/accuracies": 0.7124999761581421, |
|
"rewards/chosen": -0.3909241259098053, |
|
"rewards/margins": 0.5649646520614624, |
|
"rewards/rejected": -0.9558887481689453, |
|
"step": 1470 |
|
}, |
|
{ |
|
"epoch": 0.7746663177178749, |
|
"grad_norm": 215.0, |
|
"learning_rate": 1.3235419043496362e-06, |
|
"logits/chosen": -0.8529502749443054, |
|
"logits/rejected": -0.7703090310096741, |
|
"logps/chosen": -311.15960693359375, |
|
"logps/rejected": -296.0032653808594, |
|
"loss": 0.5712, |
|
"rewards/accuracies": 0.659375011920929, |
|
"rewards/chosen": -0.39619728922843933, |
|
"rewards/margins": 0.6459029912948608, |
|
"rewards/rejected": -1.042100429534912, |
|
"step": 1480 |
|
}, |
|
{ |
|
"epoch": 0.7799005495943471, |
|
"grad_norm": 169.0, |
|
"learning_rate": 1.2654077162882271e-06, |
|
"logits/chosen": -0.92132967710495, |
|
"logits/rejected": -0.8226190805435181, |
|
"logps/chosen": -312.61846923828125, |
|
"logps/rejected": -293.96270751953125, |
|
"loss": 0.5713, |
|
"rewards/accuracies": 0.7124999761581421, |
|
"rewards/chosen": -0.36359816789627075, |
|
"rewards/margins": 0.6363967657089233, |
|
"rewards/rejected": -0.9999948740005493, |
|
"step": 1490 |
|
}, |
|
{ |
|
"epoch": 0.7851347814708192, |
|
"grad_norm": 176.0, |
|
"learning_rate": 1.2083936300922238e-06, |
|
"logits/chosen": -0.7473341822624207, |
|
"logits/rejected": -0.7614143490791321, |
|
"logps/chosen": -329.56402587890625, |
|
"logps/rejected": -309.5768127441406, |
|
"loss": 0.5701, |
|
"rewards/accuracies": 0.7093750238418579, |
|
"rewards/chosen": -0.3515954315662384, |
|
"rewards/margins": 0.636785626411438, |
|
"rewards/rejected": -0.988381028175354, |
|
"step": 1500 |
|
}, |
|
{ |
|
"epoch": 0.7903690133472913, |
|
"grad_norm": 168.0, |
|
"learning_rate": 1.1525167457766856e-06, |
|
"logits/chosen": -0.9422544240951538, |
|
"logits/rejected": -0.8939880132675171, |
|
"logps/chosen": -305.32415771484375, |
|
"logps/rejected": -290.6331787109375, |
|
"loss": 0.5706, |
|
"rewards/accuracies": 0.6781250238418579, |
|
"rewards/chosen": -0.35911720991134644, |
|
"rewards/margins": 0.6290760636329651, |
|
"rewards/rejected": -0.9881932139396667, |
|
"step": 1510 |
|
}, |
|
{ |
|
"epoch": 0.7956032452237635, |
|
"grad_norm": 191.0, |
|
"learning_rate": 1.0977938222801004e-06, |
|
"logits/chosen": -0.8874338865280151, |
|
"logits/rejected": -0.8152481317520142, |
|
"logps/chosen": -308.3956604003906, |
|
"logps/rejected": -288.2894592285156, |
|
"loss": 0.5853, |
|
"rewards/accuracies": 0.684374988079071, |
|
"rewards/chosen": -0.41778063774108887, |
|
"rewards/margins": 0.5453465580940247, |
|
"rewards/rejected": -0.9631271362304688, |
|
"step": 1520 |
|
}, |
|
{ |
|
"epoch": 0.8008374771002356, |
|
"grad_norm": 153.0, |
|
"learning_rate": 1.0442412724379365e-06, |
|
"logits/chosen": -0.8753225207328796, |
|
"logits/rejected": -0.8794354200363159, |
|
"logps/chosen": -316.00201416015625, |
|
"logps/rejected": -271.9585876464844, |
|
"loss": 0.5739, |
|
"rewards/accuracies": 0.6937500238418579, |
|
"rewards/chosen": -0.4596496522426605, |
|
"rewards/margins": 0.5950717926025391, |
|
"rewards/rejected": -1.054721474647522, |
|
"step": 1530 |
|
}, |
|
{ |
|
"epoch": 0.8060717089767077, |
|
"grad_norm": 173.0, |
|
"learning_rate": 9.9187515806e-07, |
|
"logits/chosen": -0.7824957370758057, |
|
"logits/rejected": -0.8002129793167114, |
|
"logps/chosen": -334.59002685546875, |
|
"logps/rejected": -296.0530700683594, |
|
"loss": 0.5536, |
|
"rewards/accuracies": 0.6968749761581421, |
|
"rewards/chosen": -0.3669127821922302, |
|
"rewards/margins": 0.6854226589202881, |
|
"rewards/rejected": -1.0523353815078735, |
|
"step": 1540 |
|
}, |
|
{ |
|
"epoch": 0.8113059408531798, |
|
"grad_norm": 140.0, |
|
"learning_rate": 9.407111851130879e-07, |
|
"logits/chosen": -0.7982383966445923, |
|
"logits/rejected": -0.821733295917511, |
|
"logps/chosen": -310.4596252441406, |
|
"logps/rejected": -299.2958984375, |
|
"loss": 0.5253, |
|
"rewards/accuracies": 0.7250000238418579, |
|
"rewards/chosen": -0.3669951558113098, |
|
"rewards/margins": 0.7187011241912842, |
|
"rewards/rejected": -1.0856962203979492, |
|
"step": 1550 |
|
}, |
|
{ |
|
"epoch": 0.816540172729652, |
|
"grad_norm": 140.0, |
|
"learning_rate": 8.907646990103496e-07, |
|
"logits/chosen": -0.8856679797172546, |
|
"logits/rejected": -0.7996614575386047, |
|
"logps/chosen": -299.1997985839844, |
|
"logps/rejected": -281.97979736328125, |
|
"loss": 0.5394, |
|
"rewards/accuracies": 0.715624988079071, |
|
"rewards/chosen": -0.37950748205184937, |
|
"rewards/margins": 0.6418988108634949, |
|
"rewards/rejected": -1.0214061737060547, |
|
"step": 1560 |
|
}, |
|
{ |
|
"epoch": 0.821774404606124, |
|
"grad_norm": 144.0, |
|
"learning_rate": 8.42050680008798e-07, |
|
"logits/chosen": -0.9331056475639343, |
|
"logits/rejected": -0.9064151644706726, |
|
"logps/chosen": -315.20281982421875, |
|
"logps/rejected": -306.29901123046875, |
|
"loss": 0.5645, |
|
"rewards/accuracies": 0.6968749761581421, |
|
"rewards/chosen": -0.3731600344181061, |
|
"rewards/margins": 0.6315820813179016, |
|
"rewards/rejected": -1.0047420263290405, |
|
"step": 1570 |
|
}, |
|
{ |
|
"epoch": 0.8270086364825961, |
|
"grad_norm": 158.0, |
|
"learning_rate": 7.945837387163424e-07, |
|
"logits/chosen": -0.8035078048706055, |
|
"logits/rejected": -0.8399894833564758, |
|
"logps/chosen": -324.2265625, |
|
"logps/rejected": -299.2606506347656, |
|
"loss": 0.5678, |
|
"rewards/accuracies": 0.7124999761581421, |
|
"rewards/chosen": -0.3819822371006012, |
|
"rewards/margins": 0.6021940112113953, |
|
"rewards/rejected": -0.9841761589050293, |
|
"step": 1580 |
|
}, |
|
{ |
|
"epoch": 0.8322428683590684, |
|
"grad_norm": 191.0, |
|
"learning_rate": 7.483781117096828e-07, |
|
"logits/chosen": -0.799006998538971, |
|
"logits/rejected": -0.8053957223892212, |
|
"logps/chosen": -340.7993469238281, |
|
"logps/rejected": -319.54443359375, |
|
"loss": 0.5389, |
|
"rewards/accuracies": 0.706250011920929, |
|
"rewards/chosen": -0.3911372423171997, |
|
"rewards/margins": 0.6570714116096497, |
|
"rewards/rejected": -1.0482085943222046, |
|
"step": 1590 |
|
}, |
|
{ |
|
"epoch": 0.8374771002355405, |
|
"grad_norm": 203.0, |
|
"learning_rate": 7.034476572643855e-07, |
|
"logits/chosen": -0.8488192558288574, |
|
"logits/rejected": -0.8340631723403931, |
|
"logps/chosen": -316.52569580078125, |
|
"logps/rejected": -300.2511901855469, |
|
"loss": 0.5573, |
|
"rewards/accuracies": 0.703125, |
|
"rewards/chosen": -0.40435901284217834, |
|
"rewards/margins": 0.6302875280380249, |
|
"rewards/rejected": -1.0346465110778809, |
|
"step": 1600 |
|
}, |
|
{ |
|
"epoch": 0.8427113321120125, |
|
"grad_norm": 159.0, |
|
"learning_rate": 6.598058511984307e-07, |
|
"logits/chosen": -0.8294881582260132, |
|
"logits/rejected": -0.8028753995895386, |
|
"logps/chosen": -304.53082275390625, |
|
"logps/rejected": -277.5071716308594, |
|
"loss": 0.561, |
|
"rewards/accuracies": 0.699999988079071, |
|
"rewards/chosen": -0.47283536195755005, |
|
"rewards/margins": 0.6403847932815552, |
|
"rewards/rejected": -1.1132203340530396, |
|
"step": 1610 |
|
}, |
|
{ |
|
"epoch": 0.8479455639884846, |
|
"grad_norm": 149.0, |
|
"learning_rate": 6.174657828304543e-07, |
|
"logits/chosen": -0.8649441003799438, |
|
"logits/rejected": -0.8102690577507019, |
|
"logps/chosen": -300.7369689941406, |
|
"logps/rejected": -292.52545166015625, |
|
"loss": 0.5711, |
|
"rewards/accuracies": 0.6812499761581421, |
|
"rewards/chosen": -0.4109218120574951, |
|
"rewards/margins": 0.5619704127311707, |
|
"rewards/rejected": -0.9728924036026001, |
|
"step": 1620 |
|
}, |
|
{ |
|
"epoch": 0.8531797958649568, |
|
"grad_norm": 167.0, |
|
"learning_rate": 5.764401510539253e-07, |
|
"logits/chosen": -0.8214017748832703, |
|
"logits/rejected": -0.863512396812439, |
|
"logps/chosen": -329.2702331542969, |
|
"logps/rejected": -280.3655700683594, |
|
"loss": 0.5508, |
|
"rewards/accuracies": 0.715624988079071, |
|
"rewards/chosen": -0.37017884850502014, |
|
"rewards/margins": 0.6239677667617798, |
|
"rewards/rejected": -0.9941467046737671, |
|
"step": 1630 |
|
}, |
|
{ |
|
"epoch": 0.8584140277414289, |
|
"grad_norm": 144.0, |
|
"learning_rate": 5.36741260528415e-07, |
|
"logits/chosen": -0.8357788324356079, |
|
"logits/rejected": -0.7963896989822388, |
|
"logps/chosen": -341.4158935546875, |
|
"logps/rejected": -333.5056457519531, |
|
"loss": 0.4859, |
|
"rewards/accuracies": 0.778124988079071, |
|
"rewards/chosen": -0.2966167628765106, |
|
"rewards/margins": 0.8242661356925964, |
|
"rewards/rejected": -1.1208828687667847, |
|
"step": 1640 |
|
}, |
|
{ |
|
"epoch": 0.863648259617901, |
|
"grad_norm": 139.0, |
|
"learning_rate": 4.98381017989103e-07, |
|
"logits/chosen": -0.8318225741386414, |
|
"logits/rejected": -0.8358097076416016, |
|
"logps/chosen": -316.9400939941406, |
|
"logps/rejected": -284.248291015625, |
|
"loss": 0.5292, |
|
"rewards/accuracies": 0.7281249761581421, |
|
"rewards/chosen": -0.34240150451660156, |
|
"rewards/margins": 0.662412703037262, |
|
"rewards/rejected": -1.0048140287399292, |
|
"step": 1650 |
|
}, |
|
{ |
|
"epoch": 0.8688824914943732, |
|
"grad_norm": 174.0, |
|
"learning_rate": 4.6137092867564127e-07, |
|
"logits/chosen": -0.8979905843734741, |
|
"logits/rejected": -0.8630064129829407, |
|
"logps/chosen": -286.62908935546875, |
|
"logps/rejected": -276.69781494140625, |
|
"loss": 0.533, |
|
"rewards/accuracies": 0.71875, |
|
"rewards/chosen": -0.3343624472618103, |
|
"rewards/margins": 0.6592388153076172, |
|
"rewards/rejected": -0.9936012029647827, |
|
"step": 1660 |
|
}, |
|
{ |
|
"epoch": 0.8741167233708453, |
|
"grad_norm": 139.0, |
|
"learning_rate": 4.2572209288143095e-07, |
|
"logits/chosen": -0.8482540845870972, |
|
"logits/rejected": -0.8265933990478516, |
|
"logps/chosen": -318.8555603027344, |
|
"logps/rejected": -298.1378173828125, |
|
"loss": 0.567, |
|
"rewards/accuracies": 0.684374988079071, |
|
"rewards/chosen": -0.3969655930995941, |
|
"rewards/margins": 0.628760576248169, |
|
"rewards/rejected": -1.025726079940796, |
|
"step": 1670 |
|
}, |
|
{ |
|
"epoch": 0.8793509552473174, |
|
"grad_norm": 120.0, |
|
"learning_rate": 3.9144520262435094e-07, |
|
"logits/chosen": -0.8585769534111023, |
|
"logits/rejected": -0.8196622133255005, |
|
"logps/chosen": -343.7219543457031, |
|
"logps/rejected": -293.54669189453125, |
|
"loss": 0.5227, |
|
"rewards/accuracies": 0.715624988079071, |
|
"rewards/chosen": -0.2649371922016144, |
|
"rewards/margins": 0.7391484379768372, |
|
"rewards/rejected": -1.0040857791900635, |
|
"step": 1680 |
|
}, |
|
{ |
|
"epoch": 0.8845851871237895, |
|
"grad_norm": 212.0, |
|
"learning_rate": 3.5855053843994625e-07, |
|
"logits/chosen": -0.8562005162239075, |
|
"logits/rejected": -0.9038397073745728, |
|
"logps/chosen": -299.7960510253906, |
|
"logps/rejected": -319.8832092285156, |
|
"loss": 0.5708, |
|
"rewards/accuracies": 0.7124999761581421, |
|
"rewards/chosen": -0.36345425248146057, |
|
"rewards/margins": 0.6046686172485352, |
|
"rewards/rejected": -0.9681228399276733, |
|
"step": 1690 |
|
}, |
|
{ |
|
"epoch": 0.8898194190002617, |
|
"grad_norm": 166.0, |
|
"learning_rate": 3.270479662980247e-07, |
|
"logits/chosen": -0.788432776927948, |
|
"logits/rejected": -0.752896785736084, |
|
"logps/chosen": -313.11187744140625, |
|
"logps/rejected": -303.4278869628906, |
|
"loss": 0.5763, |
|
"rewards/accuracies": 0.675000011920929, |
|
"rewards/chosen": -0.39813023805618286, |
|
"rewards/margins": 0.640109658241272, |
|
"rewards/rejected": -1.03823983669281, |
|
"step": 1700 |
|
}, |
|
{ |
|
"epoch": 0.8950536508767338, |
|
"grad_norm": 175.0, |
|
"learning_rate": 2.9694693464359434e-07, |
|
"logits/chosen": -0.8410743474960327, |
|
"logits/rejected": -0.848852276802063, |
|
"logps/chosen": -328.5920104980469, |
|
"logps/rejected": -327.78192138671875, |
|
"loss": 0.5463, |
|
"rewards/accuracies": 0.715624988079071, |
|
"rewards/chosen": -0.39946219325065613, |
|
"rewards/margins": 0.657035768032074, |
|
"rewards/rejected": -1.0564980506896973, |
|
"step": 1710 |
|
}, |
|
{ |
|
"epoch": 0.9002878827532059, |
|
"grad_norm": 175.0, |
|
"learning_rate": 2.682564715630287e-07, |
|
"logits/chosen": -0.8599251508712769, |
|
"logits/rejected": -0.8022180795669556, |
|
"logps/chosen": -318.9126892089844, |
|
"logps/rejected": -302.290771484375, |
|
"loss": 0.4909, |
|
"rewards/accuracies": 0.7593749761581421, |
|
"rewards/chosen": -0.3343242406845093, |
|
"rewards/margins": 0.815506100654602, |
|
"rewards/rejected": -1.1498302221298218, |
|
"step": 1720 |
|
}, |
|
{ |
|
"epoch": 0.9055221146296781, |
|
"grad_norm": 183.0, |
|
"learning_rate": 2.4098518207630706e-07, |
|
"logits/chosen": -0.8574069142341614, |
|
"logits/rejected": -0.9127479791641235, |
|
"logps/chosen": -317.14141845703125, |
|
"logps/rejected": -276.6313781738281, |
|
"loss": 0.609, |
|
"rewards/accuracies": 0.668749988079071, |
|
"rewards/chosen": -0.42879825830459595, |
|
"rewards/margins": 0.5380840301513672, |
|
"rewards/rejected": -0.9668823480606079, |
|
"step": 1730 |
|
}, |
|
{ |
|
"epoch": 0.9107563465061502, |
|
"grad_norm": 225.0, |
|
"learning_rate": 2.1514124555614412e-07, |
|
"logits/chosen": -0.9109845161437988, |
|
"logits/rejected": -0.8660769462585449, |
|
"logps/chosen": -343.34600830078125, |
|
"logps/rejected": -315.65106201171875, |
|
"loss": 0.5489, |
|
"rewards/accuracies": 0.71875, |
|
"rewards/chosen": -0.39635831117630005, |
|
"rewards/margins": 0.6720642447471619, |
|
"rewards/rejected": -1.0684224367141724, |
|
"step": 1740 |
|
}, |
|
{ |
|
"epoch": 0.9159905783826223, |
|
"grad_norm": 171.0, |
|
"learning_rate": 1.9073241327478287e-07, |
|
"logits/chosen": -0.938220202922821, |
|
"logits/rejected": -0.9592165946960449, |
|
"logps/chosen": -304.1217346191406, |
|
"logps/rejected": -272.75482177734375, |
|
"loss": 0.5789, |
|
"rewards/accuracies": 0.703125, |
|
"rewards/chosen": -0.3527128994464874, |
|
"rewards/margins": 0.5340372323989868, |
|
"rewards/rejected": -0.8867500424385071, |
|
"step": 1750 |
|
}, |
|
{ |
|
"epoch": 0.9212248102590945, |
|
"grad_norm": 158.0, |
|
"learning_rate": 1.677660060791836e-07, |
|
"logits/chosen": -0.869836688041687, |
|
"logits/rejected": -0.8696880340576172, |
|
"logps/chosen": -318.1603698730469, |
|
"logps/rejected": -289.1014404296875, |
|
"loss": 0.5217, |
|
"rewards/accuracies": 0.7250000238418579, |
|
"rewards/chosen": -0.34467941522598267, |
|
"rewards/margins": 0.7117096185684204, |
|
"rewards/rejected": -1.0563890933990479, |
|
"step": 1760 |
|
}, |
|
{ |
|
"epoch": 0.9264590421355666, |
|
"grad_norm": 177.0, |
|
"learning_rate": 1.4624891219531256e-07, |
|
"logits/chosen": -0.8298120498657227, |
|
"logits/rejected": -0.8370296359062195, |
|
"logps/chosen": -320.3850402832031, |
|
"logps/rejected": -297.91668701171875, |
|
"loss": 0.5415, |
|
"rewards/accuracies": 0.731249988079071, |
|
"rewards/chosen": -0.35271918773651123, |
|
"rewards/margins": 0.6465274095535278, |
|
"rewards/rejected": -0.9992465972900391, |
|
"step": 1770 |
|
}, |
|
{ |
|
"epoch": 0.9316932740120387, |
|
"grad_norm": 169.0, |
|
"learning_rate": 1.2618758516218187e-07, |
|
"logits/chosen": -0.8395959138870239, |
|
"logits/rejected": -0.8199805021286011, |
|
"logps/chosen": -280.90325927734375, |
|
"logps/rejected": -267.428955078125, |
|
"loss": 0.5625, |
|
"rewards/accuracies": 0.71875, |
|
"rewards/chosen": -0.38890036940574646, |
|
"rewards/margins": 0.5769734978675842, |
|
"rewards/rejected": -0.9658738374710083, |
|
"step": 1780 |
|
}, |
|
{ |
|
"epoch": 0.9369275058885108, |
|
"grad_norm": 185.0, |
|
"learning_rate": 1.0758804189626492e-07, |
|
"logits/chosen": -0.9068744778633118, |
|
"logits/rejected": -0.8361620903015137, |
|
"logps/chosen": -305.85516357421875, |
|
"logps/rejected": -287.4258728027344, |
|
"loss": 0.5545, |
|
"rewards/accuracies": 0.706250011920929, |
|
"rewards/chosen": -0.38260790705680847, |
|
"rewards/margins": 0.6252450346946716, |
|
"rewards/rejected": -1.0078529119491577, |
|
"step": 1790 |
|
}, |
|
{ |
|
"epoch": 0.942161737764983, |
|
"grad_norm": 164.0, |
|
"learning_rate": 9.045586088686497e-08, |
|
"logits/chosen": -0.8612390756607056, |
|
"logits/rejected": -0.8741781115531921, |
|
"logps/chosen": -329.1307067871094, |
|
"logps/rejected": -295.0871887207031, |
|
"loss": 0.5526, |
|
"rewards/accuracies": 0.721875011920929, |
|
"rewards/chosen": -0.30828380584716797, |
|
"rewards/margins": 0.6826162338256836, |
|
"rewards/rejected": -0.990899920463562, |
|
"step": 1800 |
|
}, |
|
{ |
|
"epoch": 0.9473959696414551, |
|
"grad_norm": 170.0, |
|
"learning_rate": 7.479618052298132e-08, |
|
"logits/chosen": -0.7576864957809448, |
|
"logits/rejected": -0.8044819831848145, |
|
"logps/chosen": -335.6754455566406, |
|
"logps/rejected": -321.34930419921875, |
|
"loss": 0.5417, |
|
"rewards/accuracies": 0.746874988079071, |
|
"rewards/chosen": -0.3703181743621826, |
|
"rewards/margins": 0.6590753793716431, |
|
"rewards/rejected": -1.0293935537338257, |
|
"step": 1810 |
|
}, |
|
{ |
|
"epoch": 0.9526302015179272, |
|
"grad_norm": 174.0, |
|
"learning_rate": 6.06136975521715e-08, |
|
"logits/chosen": -0.9168386459350586, |
|
"logits/rejected": -0.8157444000244141, |
|
"logps/chosen": -328.9001159667969, |
|
"logps/rejected": -306.4437255859375, |
|
"loss": 0.5355, |
|
"rewards/accuracies": 0.737500011920929, |
|
"rewards/chosen": -0.38018369674682617, |
|
"rewards/margins": 0.7300933599472046, |
|
"rewards/rejected": -1.1102771759033203, |
|
"step": 1820 |
|
}, |
|
{ |
|
"epoch": 0.9578644333943994, |
|
"grad_norm": 198.0, |
|
"learning_rate": 4.7912665671874246e-08, |
|
"logits/chosen": -0.89152991771698, |
|
"logits/rejected": -0.8414145708084106, |
|
"logps/chosen": -317.2197265625, |
|
"logps/rejected": -297.5346984863281, |
|
"loss": 0.5741, |
|
"rewards/accuracies": 0.715624988079071, |
|
"rewards/chosen": -0.4002895951271057, |
|
"rewards/margins": 0.6522494554519653, |
|
"rewards/rejected": -1.0525391101837158, |
|
"step": 1830 |
|
}, |
|
{ |
|
"epoch": 0.9630986652708715, |
|
"grad_norm": 177.0, |
|
"learning_rate": 3.669689425361444e-08, |
|
"logits/chosen": -0.8823251724243164, |
|
"logits/rejected": -0.8615130186080933, |
|
"logps/chosen": -287.7296142578125, |
|
"logps/rejected": -288.65704345703125, |
|
"loss": 0.5352, |
|
"rewards/accuracies": 0.706250011920929, |
|
"rewards/chosen": -0.34463953971862793, |
|
"rewards/margins": 0.6676210165023804, |
|
"rewards/rejected": -1.0122604370117188, |
|
"step": 1840 |
|
}, |
|
{ |
|
"epoch": 0.9683328971473436, |
|
"grad_norm": 187.0, |
|
"learning_rate": 2.6969747200472073e-08, |
|
"logits/chosen": -0.8920859098434448, |
|
"logits/rejected": -0.7850469350814819, |
|
"logps/chosen": -300.5646057128906, |
|
"logps/rejected": -296.7857360839844, |
|
"loss": 0.5626, |
|
"rewards/accuracies": 0.715624988079071, |
|
"rewards/chosen": -0.4353879988193512, |
|
"rewards/margins": 0.6619107127189636, |
|
"rewards/rejected": -1.0972988605499268, |
|
"step": 1850 |
|
}, |
|
{ |
|
"epoch": 0.9735671290238157, |
|
"grad_norm": 183.0, |
|
"learning_rate": 1.873414193816092e-08, |
|
"logits/chosen": -0.8299514651298523, |
|
"logits/rejected": -0.8318959474563599, |
|
"logps/chosen": -340.02862548828125, |
|
"logps/rejected": -322.59930419921875, |
|
"loss": 0.5537, |
|
"rewards/accuracies": 0.6812499761581421, |
|
"rewards/chosen": -0.3306241035461426, |
|
"rewards/margins": 0.6462942361831665, |
|
"rewards/rejected": -0.9769183397293091, |
|
"step": 1860 |
|
}, |
|
{ |
|
"epoch": 0.9788013609002879, |
|
"grad_norm": 226.0, |
|
"learning_rate": 1.1992548540016858e-08, |
|
"logits/chosen": -0.9069007635116577, |
|
"logits/rejected": -0.8814927935600281, |
|
"logps/chosen": -341.44195556640625, |
|
"logps/rejected": -317.4871520996094, |
|
"loss": 0.5838, |
|
"rewards/accuracies": 0.6812499761581421, |
|
"rewards/chosen": -0.4393966794013977, |
|
"rewards/margins": 0.5904891490936279, |
|
"rewards/rejected": -1.0298858880996704, |
|
"step": 1870 |
|
}, |
|
{ |
|
"epoch": 0.98403559277676, |
|
"grad_norm": 140.0, |
|
"learning_rate": 6.746988986156e-09, |
|
"logits/chosen": -0.8645192384719849, |
|
"logits/rejected": -0.7837439775466919, |
|
"logps/chosen": -300.27618408203125, |
|
"logps/rejected": -283.22723388671875, |
|
"loss": 0.5309, |
|
"rewards/accuracies": 0.734375, |
|
"rewards/chosen": -0.3711549639701843, |
|
"rewards/margins": 0.6681857109069824, |
|
"rewards/rejected": -1.0393407344818115, |
|
"step": 1880 |
|
}, |
|
{ |
|
"epoch": 0.9892698246532321, |
|
"grad_norm": 177.0, |
|
"learning_rate": 2.9990365570314874e-09, |
|
"logits/chosen": -0.8303415179252625, |
|
"logits/rejected": -0.8534189462661743, |
|
"logps/chosen": -333.2608337402344, |
|
"logps/rejected": -315.56658935546875, |
|
"loss": 0.4992, |
|
"rewards/accuracies": 0.7406250238418579, |
|
"rewards/chosen": -0.30531638860702515, |
|
"rewards/margins": 0.7561148405075073, |
|
"rewards/rejected": -1.0614311695098877, |
|
"step": 1890 |
|
}, |
|
{ |
|
"epoch": 0.9945040565297043, |
|
"grad_norm": 209.0, |
|
"learning_rate": 7.498153615653758e-10, |
|
"logits/chosen": -0.8541164398193359, |
|
"logits/rejected": -0.8766775131225586, |
|
"logps/chosen": -283.51202392578125, |
|
"logps/rejected": -285.37823486328125, |
|
"loss": 0.5763, |
|
"rewards/accuracies": 0.6625000238418579, |
|
"rewards/chosen": -0.41173163056373596, |
|
"rewards/margins": 0.6089791059494019, |
|
"rewards/rejected": -1.0207107067108154, |
|
"step": 1900 |
|
}, |
|
{ |
|
"epoch": 0.9997382884061764, |
|
"grad_norm": 167.0, |
|
"learning_rate": 0.0, |
|
"logits/chosen": -0.8995628356933594, |
|
"logits/rejected": -0.8839740753173828, |
|
"logps/chosen": -324.6961975097656, |
|
"logps/rejected": -297.1225280761719, |
|
"loss": 0.5615, |
|
"rewards/accuracies": 0.715624988079071, |
|
"rewards/chosen": -0.39212650060653687, |
|
"rewards/margins": 0.582420289516449, |
|
"rewards/rejected": -0.9745469093322754, |
|
"step": 1910 |
|
} |
|
], |
|
"logging_steps": 10, |
|
"max_steps": 1910, |
|
"num_input_tokens_seen": 0, |
|
"num_train_epochs": 1, |
|
"save_steps": 500, |
|
"stateful_callbacks": { |
|
"TrainerControl": { |
|
"args": { |
|
"should_epoch_stop": false, |
|
"should_evaluate": false, |
|
"should_log": false, |
|
"should_save": true, |
|
"should_training_stop": true |
|
}, |
|
"attributes": {} |
|
} |
|
}, |
|
"total_flos": 0.0, |
|
"train_batch_size": 4, |
|
"trial_name": null, |
|
"trial_params": null |
|
} |
|