{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.9968652037617555,
"eval_steps": 500,
"global_step": 159,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.006269592476489028,
"grad_norm": 48.727538993129954,
"learning_rate": 2.5e-08,
"logits/chosen": -1.7884750366210938,
"logits/rejected": -1.7773182392120361,
"logps/chosen": -178.7047882080078,
"logps/pi_response": -149.36985778808594,
"logps/ref_response": -149.36985778808594,
"logps/rejected": -327.8221435546875,
"loss": 0.6931,
"rewards/accuracies": 0.0,
"rewards/chosen": 0.0,
"rewards/margins": 0.0,
"rewards/rejected": 0.0,
"step": 1
},
{
"epoch": 0.06269592476489028,
"grad_norm": 44.27733622419657,
"learning_rate": 2.5e-07,
"logits/chosen": -1.925902009010315,
"logits/rejected": -1.8669484853744507,
"logps/chosen": -259.4127197265625,
"logps/pi_response": -168.5211639404297,
"logps/ref_response": -168.7109375,
"logps/rejected": -363.8843994140625,
"loss": 0.6877,
"rewards/accuracies": 0.5208333134651184,
"rewards/chosen": -0.007133293431252241,
"rewards/margins": 0.008144436404109001,
"rewards/rejected": -0.01527773030102253,
"step": 10
},
{
"epoch": 0.12539184952978055,
"grad_norm": 77.06299399731483,
"learning_rate": 3.9922826507434426e-07,
"logits/chosen": -1.7760452032089233,
"logits/rejected": -1.7109419107437134,
"logps/chosen": -287.03887939453125,
"logps/pi_response": -184.39227294921875,
"logps/ref_response": -179.93296813964844,
"logps/rejected": -452.4369201660156,
"loss": 0.6483,
"rewards/accuracies": 0.5874999761581421,
"rewards/chosen": -0.2718208432197571,
"rewards/margins": 0.36123865842819214,
"rewards/rejected": -0.6330594420433044,
"step": 20
},
{
"epoch": 0.18808777429467086,
"grad_norm": 37.12655893399871,
"learning_rate": 3.9061450010948387e-07,
"logits/chosen": -1.6445729732513428,
"logits/rejected": -1.5510730743408203,
"logps/chosen": -272.7137756347656,
"logps/pi_response": -159.47608947753906,
"logps/ref_response": -154.75852966308594,
"logps/rejected": -406.7279968261719,
"loss": 0.6095,
"rewards/accuracies": 0.59375,
"rewards/chosen": -0.29393836855888367,
"rewards/margins": 0.42385944724082947,
"rewards/rejected": -0.7177978157997131,
"step": 30
},
{
"epoch": 0.2507836990595611,
"grad_norm": 43.17045113037534,
"learning_rate": 3.7283776752336964e-07,
"logits/chosen": -1.585517168045044,
"logits/rejected": -1.5017468929290771,
"logps/chosen": -241.1661376953125,
"logps/pi_response": -170.72608947753906,
"logps/ref_response": -165.39320373535156,
"logps/rejected": -458.1199645996094,
"loss": 0.5888,
"rewards/accuracies": 0.6937500238418579,
"rewards/chosen": -0.12789960205554962,
"rewards/margins": 0.4231465756893158,
"rewards/rejected": -0.5510461330413818,
"step": 40
},
{
"epoch": 0.31347962382445144,
"grad_norm": 56.15552525880845,
"learning_rate": 3.4675260684476076e-07,
"logits/chosen": -1.31333327293396,
"logits/rejected": -1.1443239450454712,
"logps/chosen": -280.60247802734375,
"logps/pi_response": -187.87725830078125,
"logps/ref_response": -162.92739868164062,
"logps/rejected": -448.25238037109375,
"loss": 0.56,
"rewards/accuracies": 0.6312500238418579,
"rewards/chosen": -0.38959115743637085,
"rewards/margins": 0.5290592908859253,
"rewards/rejected": -0.9186504483222961,
"step": 50
},
{
"epoch": 0.3761755485893417,
"grad_norm": 59.31824188989313,
"learning_rate": 3.1361294934623114e-07,
"logits/chosen": -0.8963761329650879,
"logits/rejected": -0.7264960408210754,
"logps/chosen": -326.63250732421875,
"logps/pi_response": -210.930419921875,
"logps/ref_response": -159.56680297851562,
"logps/rejected": -436.368408203125,
"loss": 0.5461,
"rewards/accuracies": 0.6625000238418579,
"rewards/chosen": -0.676125168800354,
"rewards/margins": 0.5084071755409241,
"rewards/rejected": -1.1845324039459229,
"step": 60
},
{
"epoch": 0.438871473354232,
"grad_norm": 48.22413613389395,
"learning_rate": 2.750118407219124e-07,
"logits/chosen": -0.902199923992157,
"logits/rejected": -0.6483638882637024,
"logps/chosen": -299.6310729980469,
"logps/pi_response": -208.26123046875,
"logps/ref_response": -157.38909912109375,
"logps/rejected": -434.2081604003906,
"loss": 0.5688,
"rewards/accuracies": 0.65625,
"rewards/chosen": -0.5099861025810242,
"rewards/margins": 0.5872552394866943,
"rewards/rejected": -1.0972412824630737,
"step": 70
},
{
"epoch": 0.5015673981191222,
"grad_norm": 45.59894096049591,
"learning_rate": 2.3280486230620434e-07,
"logits/chosen": -0.756037175655365,
"logits/rejected": -0.477339506149292,
"logps/chosen": -285.1630554199219,
"logps/pi_response": -204.59786987304688,
"logps/ref_response": -150.49502563476562,
"logps/rejected": -451.710693359375,
"loss": 0.531,
"rewards/accuracies": 0.71875,
"rewards/chosen": -0.5814803242683411,
"rewards/margins": 0.663509726524353,
"rewards/rejected": -1.2449901103973389,
"step": 80
},
{
"epoch": 0.5642633228840125,
"grad_norm": 51.60912561342815,
"learning_rate": 1.890209320271345e-07,
"logits/chosen": -0.6508065462112427,
"logits/rejected": -0.365335613489151,
"logps/chosen": -306.8770751953125,
"logps/pi_response": -212.5884246826172,
"logps/ref_response": -147.56716918945312,
"logps/rejected": -485.7850036621094,
"loss": 0.4977,
"rewards/accuracies": 0.65625,
"rewards/chosen": -0.6989418864250183,
"rewards/margins": 0.8249226808547974,
"rewards/rejected": -1.523864507675171,
"step": 90
},
{
"epoch": 0.6269592476489029,
"grad_norm": 47.512990245630185,
"learning_rate": 1.457647729527166e-07,
"logits/chosen": -0.5415297746658325,
"logits/rejected": -0.25003066658973694,
"logps/chosen": -314.3260192871094,
"logps/pi_response": -253.16586303710938,
"logps/ref_response": -160.88829040527344,
"logps/rejected": -611.5823974609375,
"loss": 0.5027,
"rewards/accuracies": 0.8062499761581421,
"rewards/chosen": -0.8261867761611938,
"rewards/margins": 1.2384675741195679,
"rewards/rejected": -2.06465482711792,
"step": 100
},
{
"epoch": 0.6896551724137931,
"grad_norm": 40.671499365551824,
"learning_rate": 1.0511573783308798e-07,
"logits/chosen": -0.45151621103286743,
"logits/rejected": -0.2101474106311798,
"logps/chosen": -330.63751220703125,
"logps/pi_response": -239.79678344726562,
"logps/ref_response": -149.11666870117188,
"logps/rejected": -537.8972778320312,
"loss": 0.4933,
"rewards/accuracies": 0.75,
"rewards/chosen": -0.9507951736450195,
"rewards/margins": 0.9308554530143738,
"rewards/rejected": -1.881650686264038,
"step": 110
},
{
"epoch": 0.7523510971786834,
"grad_norm": 49.168767905757875,
"learning_rate": 6.9027853210943e-08,
"logits/chosen": -0.5997277498245239,
"logits/rejected": -0.22453825175762177,
"logps/chosen": -349.99212646484375,
"logps/pi_response": -280.00726318359375,
"logps/ref_response": -178.89942932128906,
"logps/rejected": -565.8890380859375,
"loss": 0.5114,
"rewards/accuracies": 0.7124999761581421,
"rewards/chosen": -0.9609189033508301,
"rewards/margins": 0.9585698843002319,
"rewards/rejected": -1.9194889068603516,
"step": 120
},
{
"epoch": 0.8150470219435737,
"grad_norm": 49.00943764848692,
"learning_rate": 3.9235888047312215e-08,
"logits/chosen": -0.6853595972061157,
"logits/rejected": -0.2808631658554077,
"logps/chosen": -327.33953857421875,
"logps/pi_response": -256.1371154785156,
"logps/ref_response": -166.0455780029297,
"logps/rejected": -572.0056762695312,
"loss": 0.5109,
"rewards/accuracies": 0.7437499761581421,
"rewards/chosen": -0.8519734144210815,
"rewards/margins": 1.0642752647399902,
"rewards/rejected": -1.9162486791610718,
"step": 130
},
{
"epoch": 0.877742946708464,
"grad_norm": 65.70628626767385,
"learning_rate": 1.7171962207216682e-08,
"logits/chosen": -0.5600570440292358,
"logits/rejected": -0.11410878598690033,
"logps/chosen": -324.69439697265625,
"logps/pi_response": -264.94036865234375,
"logps/ref_response": -165.58712768554688,
"logps/rejected": -562.1802978515625,
"loss": 0.5045,
"rewards/accuracies": 0.78125,
"rewards/chosen": -0.8904104232788086,
"rewards/margins": 0.9812215566635132,
"rewards/rejected": -1.8716322183609009,
"step": 140
},
{
"epoch": 0.9404388714733543,
"grad_norm": 44.83563629682317,
"learning_rate": 3.8967034915552865e-09,
"logits/chosen": -0.47401466965675354,
"logits/rejected": -0.1014849916100502,
"logps/chosen": -298.55377197265625,
"logps/pi_response": -241.1999969482422,
"logps/ref_response": -146.22021484375,
"logps/rejected": -519.8775634765625,
"loss": 0.5097,
"rewards/accuracies": 0.762499988079071,
"rewards/chosen": -0.8489562273025513,
"rewards/margins": 1.053095817565918,
"rewards/rejected": -1.9020519256591797,
"step": 150
},
{
"epoch": 0.9968652037617555,
"step": 159,
"total_flos": 0.0,
"train_loss": 0.5458380771133134,
"train_runtime": 3385.2715,
"train_samples_per_second": 6.02,
"train_steps_per_second": 0.047
}
],
"logging_steps": 10,
"max_steps": 159,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 100,
"total_flos": 0.0,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}