{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.010022222222222222,
  "eval_steps": 500,
  "global_step": 451,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "completion_length": 342.55,
      "epoch": 0.00022222222222222223,
      "grad_norm": 0.21378593146800995,
      "kl": 0.0005054072127677501,
      "learning_rate": 4.99391012564956e-07,
      "loss": 0.0,
      "reward": 0.25,
      "reward_std": 0.3535533845424652,
      "rewards/equation_reward_func": 0.0,
      "rewards/format_reward_func": 0.25,
      "step": 10
    },
    {
      "completion_length": 337.3,
      "epoch": 0.00044444444444444447,
      "grad_norm": 1.5290135706891306e-05,
      "kl": 0.0005718494998291135,
      "learning_rate": 4.975670171853925e-07,
      "loss": 0.0,
      "reward": 0.65,
      "reward_std": 0.3535533845424652,
      "rewards/equation_reward_func": 0.0,
      "rewards/format_reward_func": 0.65,
      "step": 20
    },
    {
      "completion_length": 388.8,
      "epoch": 0.0006666666666666666,
      "grad_norm": 0.31574714183807373,
      "kl": 0.0005932338128332049,
      "learning_rate": 4.945369001834514e-07,
      "loss": 0.0,
      "reward": 0.35,
      "reward_std": 0.49497473835945127,
      "rewards/equation_reward_func": 0.0,
      "rewards/format_reward_func": 0.35,
      "step": 30
    },
    {
      "completion_length": 390.3,
      "epoch": 0.0008888888888888889,
      "grad_norm": 1.8352753613726236e-05,
      "kl": 0.0006036393286194652,
      "learning_rate": 4.903154239845797e-07,
      "loss": 0.0,
      "reward": 0.4,
      "reward_std": 0.1414213538169861,
      "rewards/equation_reward_func": 0.0,
      "rewards/format_reward_func": 0.4,
      "step": 40
    },
    {
      "completion_length": 466.7,
      "epoch": 0.0011111111111111111,
      "grad_norm": 1.8186246961704455e-05,
      "kl": 0.0005532185896299779,
      "learning_rate": 4.849231551964771e-07,
      "loss": 0.0,
      "reward": 0.5,
      "reward_std": 0.42426406145095824,
      "rewards/equation_reward_func": 0.0,
      "rewards/format_reward_func": 0.5,
      "step": 50
    },
    {
      "completion_length": 446.8,
      "epoch": 0.0013333333333333333,
      "grad_norm": 3.2005907996790484e-05,
      "kl": 0.0006301910732872784,
      "learning_rate": 4.783863644106502e-07,
      "loss": 0.0,
      "reward": 0.35,
      "reward_std": 0.49497473835945127,
      "rewards/equation_reward_func": 0.0,
      "rewards/format_reward_func": 0.35,
      "step": 60
    },
    {
      "completion_length": 371.15,
      "epoch": 0.0015555555555555555,
      "grad_norm": 1.834248541854322e-05,
      "kl": 0.0005721298075513914,
      "learning_rate": 4.707368982147317e-07,
      "loss": 0.0,
      "reward": 0.3,
      "reward_std": 0.2828427076339722,
      "rewards/equation_reward_func": 0.0,
      "rewards/format_reward_func": 0.3,
      "step": 70
    },
    {
      "completion_length": 380.9,
      "epoch": 0.0017777777777777779,
      "grad_norm": 0.24635720252990723,
      "kl": 0.0005963630188489333,
      "learning_rate": 4.6201202403910643e-07,
      "loss": 0.0,
      "reward": 0.45,
      "reward_std": 0.6363960921764373,
      "rewards/equation_reward_func": 0.0,
      "rewards/format_reward_func": 0.45,
      "step": 80
    },
    {
      "completion_length": 371.1,
      "epoch": 0.002,
      "grad_norm": 0.20243793725967407,
      "kl": 0.0006782339944038541,
      "learning_rate": 4.5225424859373684e-07,
      "loss": 0.0,
      "reward": 0.4,
      "reward_std": 0.2828427076339722,
      "rewards/equation_reward_func": 0.0,
      "rewards/format_reward_func": 0.4,
      "step": 90
    },
    {
      "completion_length": 319.75,
      "epoch": 0.0022222222222222222,
      "grad_norm": 0.2666068971157074,
      "kl": 0.0006597590865567327,
      "learning_rate": 4.415111107797445e-07,
      "loss": 0.0,
      "reward": 0.5,
      "reward_std": 0.2828427076339722,
      "rewards/equation_reward_func": 0.0,
      "rewards/format_reward_func": 0.5,
      "step": 100
    },
    {
      "completion_length": 464.1,
      "epoch": 0.0024444444444444444,
      "grad_norm": 0.19521790742874146,
      "kl": 0.0005753735254984349,
      "learning_rate": 4.2983495008466273e-07,
      "loss": 0.0,
      "reward": 0.4,
      "reward_std": 0.2828427076339722,
      "rewards/equation_reward_func": 0.0,
      "rewards/format_reward_func": 0.4,
      "step": 110
    },
    {
      "completion_length": 413.25,
      "epoch": 0.0026666666666666666,
      "grad_norm": 0.18933118879795074,
      "kl": 0.0005773572018370032,
      "learning_rate": 4.172826515897145e-07,
      "loss": 0.0,
      "reward": 0.4,
      "reward_std": 0.42426406145095824,
      "rewards/equation_reward_func": 0.0,
      "rewards/format_reward_func": 0.4,
      "step": 120
    },
    {
      "completion_length": 422.75,
      "epoch": 0.0028888888888888888,
      "grad_norm": 0.2125905156135559,
      "kl": 0.0006250702834222465,
      "learning_rate": 4.039153688314145e-07,
      "loss": 0.0,
      "reward": 0.45,
      "reward_std": 0.3535533845424652,
      "rewards/equation_reward_func": 0.0,
      "rewards/format_reward_func": 0.45,
      "step": 130
    },
    {
      "completion_length": 403.05,
      "epoch": 0.003111111111111111,
      "grad_norm": 1.8821670892066322e-05,
      "kl": 0.0005768304574303329,
      "learning_rate": 3.8979822586768666e-07,
      "loss": 0.0,
      "reward": 0.3,
      "reward_std": 0.1414213538169861,
      "rewards/equation_reward_func": 0.0,
      "rewards/format_reward_func": 0.3,
      "step": 140
    },
    {
      "completion_length": 445.75,
      "epoch": 0.0033333333333333335,
      "grad_norm": 4.957288547302596e-05,
      "kl": 0.000591657921904698,
      "learning_rate": 3.75e-07,
      "loss": 0.0,
      "reward": 0.35,
      "reward_std": 0.3535533845424652,
      "rewards/equation_reward_func": 0.0,
      "rewards/format_reward_func": 0.35,
      "step": 150
    },
    {
      "completion_length": 409.8,
      "epoch": 0.0035555555555555557,
      "grad_norm": 0.20501621067523956,
      "kl": 0.0005684028874384239,
      "learning_rate": 3.595927866972693e-07,
      "loss": 0.0,
      "reward": 0.4,
      "reward_std": 0.42426406145095824,
      "rewards/equation_reward_func": 0.0,
      "rewards/format_reward_func": 0.4,
      "step": 160
    },
    {
      "completion_length": 364.95,
      "epoch": 0.003777777777777778,
      "grad_norm": 0.3308781385421753,
      "kl": 0.0005750875250669196,
      "learning_rate": 3.43651648353978e-07,
      "loss": 0.0,
      "reward": 0.6,
      "reward_std": 0.42426406145095824,
      "rewards/equation_reward_func": 0.0,
      "rewards/format_reward_func": 0.6,
      "step": 170
    },
    {
      "completion_length": 310.55,
      "epoch": 0.004,
      "grad_norm": 9.957168913388159e-06,
      "kl": 0.0005386626056861132,
      "learning_rate": 3.272542485937368e-07,
      "loss": 0.0,
      "reward": 0.5,
      "reward_std": 0.42426406145095824,
      "rewards/equation_reward_func": 0.0,
      "rewards/format_reward_func": 0.5,
      "step": 180
    },
    {
      "completion_length": 378.6,
      "epoch": 0.004222222222222222,
      "grad_norm": 0.2291877716779709,
      "kl": 0.0006608614959986881,
      "learning_rate": 3.104804738999169e-07,
      "loss": 0.0,
      "reward": 0.55,
      "reward_std": 0.3535533845424652,
      "rewards/equation_reward_func": 0.0,
      "rewards/format_reward_func": 0.55,
      "step": 190
    },
    {
      "completion_length": 340.6,
      "epoch": 0.0044444444444444444,
      "grad_norm": 0.18360783159732819,
      "kl": 0.0005953012092504651,
      "learning_rate": 2.934120444167326e-07,
      "loss": 0.0,
      "reward": 0.4,
      "reward_std": 0.5656854152679444,
      "rewards/equation_reward_func": 0.0,
      "rewards/format_reward_func": 0.4,
      "step": 200
    },
    {
      "completion_length": 396.55,
      "epoch": 0.004666666666666667,
      "grad_norm": 2.1740559532190673e-05,
      "kl": 0.0005484546738443897,
      "learning_rate": 2.761321158169134e-07,
      "loss": 0.0,
      "reward": 0.55,
      "reward_std": 0.3535533845424652,
      "rewards/equation_reward_func": 0.0,
      "rewards/format_reward_func": 0.55,
      "step": 210
    },
    {
      "completion_length": 330.25,
      "epoch": 0.004888888888888889,
      "grad_norm": 6.363703869283199e-05,
      "kl": 0.0006559955596458167,
      "learning_rate": 2.5872487417562527e-07,
      "loss": 0.0,
      "reward": 0.35,
      "reward_std": 0.49497473835945127,
      "rewards/equation_reward_func": 0.0,
      "rewards/format_reward_func": 0.35,
      "step": 220
    },
    {
      "completion_length": 419.25,
      "epoch": 0.005111111111111111,
      "grad_norm": 0.1977580189704895,
      "kl": 0.0006136057461844757,
      "learning_rate": 2.412751258243748e-07,
      "loss": 0.0,
      "reward": 0.4,
      "reward_std": 0.42426406145095824,
      "rewards/equation_reward_func": 0.0,
      "rewards/format_reward_func": 0.4,
      "step": 230
    },
    {
      "completion_length": 399.65,
      "epoch": 0.005333333333333333,
      "grad_norm": 2.009019226534292e-05,
      "kl": 0.000605167931644246,
      "learning_rate": 2.2386788418308665e-07,
      "loss": 0.0,
      "reward": 0.5,
      "reward_std": 0.2828427076339722,
      "rewards/equation_reward_func": 0.0,
      "rewards/format_reward_func": 0.5,
      "step": 240
    },
    {
      "completion_length": 365.25,
      "epoch": 0.005555555555555556,
      "grad_norm": 9.316376963397488e-05,
      "kl": 0.0005963615287328139,
      "learning_rate": 2.065879555832674e-07,
      "loss": 0.0,
      "reward": 0.6,
      "reward_std": 0.42426406145095824,
      "rewards/equation_reward_func": 0.0,
      "rewards/format_reward_func": 0.6,
      "step": 250
    },
    {
      "completion_length": 422.1,
      "epoch": 0.0057777777777777775,
      "grad_norm": 0.16928204894065857,
      "kl": 0.0006461634649895132,
      "learning_rate": 1.895195261000831e-07,
      "loss": 0.0,
      "reward": 0.7,
      "reward_std": 0.2828427076339722,
      "rewards/equation_reward_func": 0.0,
      "rewards/format_reward_func": 0.7,
      "step": 260
    },
    {
      "completion_length": 445.45,
      "epoch": 0.006,
      "grad_norm": 0.17555122077465057,
      "kl": 0.0005888437968678773,
      "learning_rate": 1.7274575140626315e-07,
      "loss": 0.0,
      "reward": 0.4,
      "reward_std": 0.2828427076339722,
      "rewards/equation_reward_func": 0.0,
      "rewards/format_reward_func": 0.4,
      "step": 270
    },
    {
      "completion_length": 409.55,
      "epoch": 0.006222222222222222,
      "grad_norm": 2.910105285991449e-05,
      "kl": 0.0005833886214531958,
      "learning_rate": 1.5634835164602196e-07,
      "loss": 0.0,
      "reward": 0.4,
      "reward_std": 0.2828427076339722,
      "rewards/equation_reward_func": 0.0,
      "rewards/format_reward_func": 0.4,
      "step": 280
    },
    {
      "completion_length": 355.3,
      "epoch": 0.0064444444444444445,
      "grad_norm": 0.2147456258535385,
      "kl": 0.000609121096204035,
      "learning_rate": 1.404072133027306e-07,
      "loss": 0.0,
      "reward": 0.45,
      "reward_std": 0.3535533845424652,
      "rewards/equation_reward_func": 0.0,
      "rewards/format_reward_func": 0.45,
      "step": 290
    },
    {
      "completion_length": 380.9,
      "epoch": 0.006666666666666667,
      "grad_norm": 2.5209470550180413e-05,
      "kl": 0.0006293962011113763,
      "learning_rate": 1.2500000000000005e-07,
      "loss": 0.0,
      "reward": 0.6,
      "reward_std": 0.2828427076339722,
      "rewards/equation_reward_func": 0.0,
      "rewards/format_reward_func": 0.6,
      "step": 300
    },
    {
      "completion_length": 385.7,
      "epoch": 0.006888888888888889,
      "grad_norm": 7.603858830407262e-05,
      "kl": 0.0006043465342372656,
      "learning_rate": 1.1020177413231332e-07,
      "loss": 0.0,
      "reward": 0.35,
      "reward_std": 0.3535533845424652,
      "rewards/equation_reward_func": 0.0,
      "rewards/format_reward_func": 0.35,
      "step": 310
    },
    {
      "completion_length": 369.15,
      "epoch": 0.0071111111111111115,
      "grad_norm": 1.2851673091063276e-05,
      "kl": 0.0005439497355837375,
      "learning_rate": 9.608463116858542e-08,
      "loss": 0.0,
      "reward": 0.3,
      "reward_std": 0.2828427076339722,
      "rewards/equation_reward_func": 0.0,
      "rewards/format_reward_func": 0.3,
      "step": 320
    },
    {
      "completion_length": 316.85,
      "epoch": 0.007333333333333333,
      "grad_norm": 0.20440705120563507,
      "kl": 0.0005756207800004631,
      "learning_rate": 8.271734841028552e-08,
      "loss": 0.0,
      "reward": 0.75,
      "reward_std": 0.3535533845424652,
      "rewards/equation_reward_func": 0.0,
      "rewards/format_reward_func": 0.75,
      "step": 330
    },
    {
      "completion_length": 429.55,
      "epoch": 0.007555555555555556,
      "grad_norm": 0.2662304639816284,
      "kl": 0.0005803581763757393,
      "learning_rate": 7.016504991533726e-08,
      "loss": 0.0,
      "reward": 0.4,
      "reward_std": 0.5656854152679444,
      "rewards/equation_reward_func": 0.0,
      "rewards/format_reward_func": 0.4,
      "step": 340
    },
    {
      "completion_length": 467.8,
      "epoch": 0.0077777777777777776,
      "grad_norm": 1.7572709111846052e-05,
      "kl": 0.0005397438129875809,
      "learning_rate": 5.848888922025552e-08,
      "loss": 0.0,
      "reward": 0.35,
      "reward_std": 0.3535533845424652,
      "rewards/equation_reward_func": 0.0,
      "rewards/format_reward_func": 0.35,
      "step": 350
    },
    {
      "completion_length": 430.1,
      "epoch": 0.008,
      "grad_norm": 1.761410203471314e-05,
      "kl": 0.0005661552131641656,
      "learning_rate": 4.774575140626316e-08,
      "loss": 0.0,
      "reward": 0.5,
      "reward_std": 0.2828427076339722,
      "rewards/equation_reward_func": 0.0,
      "rewards/format_reward_func": 0.5,
      "step": 360
    },
    {
      "completion_length": 386.6,
      "epoch": 0.008222222222222223,
      "grad_norm": 0.18963927030563354,
      "kl": 0.0005982606788165867,
      "learning_rate": 3.798797596089351e-08,
      "loss": 0.0,
      "reward": 0.45,
      "reward_std": 0.49497473835945127,
      "rewards/equation_reward_func": 0.0,
      "rewards/format_reward_func": 0.45,
      "step": 370
    },
    {
      "completion_length": 378.0,
      "epoch": 0.008444444444444444,
      "grad_norm": 2.9212462322902866e-05,
      "kl": 0.0006032192788552493,
      "learning_rate": 2.9263101785268252e-08,
      "loss": 0.0,
      "reward": 0.2,
      "reward_std": 0.1414213538169861,
      "rewards/equation_reward_func": 0.0,
      "rewards/format_reward_func": 0.2,
      "step": 380
    },
    {
      "completion_length": 375.95,
      "epoch": 0.008666666666666666,
      "grad_norm": 1.570535641803872e-05,
      "kl": 0.0006192138127516956,
      "learning_rate": 2.1613635589349756e-08,
      "loss": 0.0,
      "reward": 0.55,
      "reward_std": 0.3535533845424652,
      "rewards/equation_reward_func": 0.0,
      "rewards/format_reward_func": 0.55,
      "step": 390
    },
    {
      "completion_length": 399.75,
      "epoch": 0.008888888888888889,
      "grad_norm": 0.1566227674484253,
      "kl": 0.0005766067682998255,
      "learning_rate": 1.507684480352292e-08,
      "loss": 0.0,
      "reward": 0.4,
      "reward_std": 0.42426406145095824,
      "rewards/equation_reward_func": 0.0,
      "rewards/format_reward_func": 0.4,
      "step": 400
    },
    {
      "completion_length": 317.9,
      "epoch": 0.009111111111111111,
      "grad_norm": 0.42514702677726746,
      "kl": 0.000586391575052403,
      "learning_rate": 9.684576015420275e-09,
      "loss": 0.0,
      "reward": 0.5,
      "reward_std": 0.42426406145095824,
      "rewards/equation_reward_func": 0.0,
      "rewards/format_reward_func": 0.5,
      "step": 410
    },
    {
      "completion_length": 440.05,
      "epoch": 0.009333333333333334,
      "grad_norm": 1.8029251805273816e-05,
      "kl": 0.0005952615960268304,
      "learning_rate": 5.463099816548577e-09,
      "loss": 0.0,
      "reward": 0.35,
      "reward_std": 0.49497473835945127,
      "rewards/equation_reward_func": 0.0,
      "rewards/format_reward_func": 0.35,
      "step": 420
    },
    {
      "completion_length": 391.65,
      "epoch": 0.009555555555555555,
      "grad_norm": 0.23369018733501434,
      "kl": 0.0005777814978500829,
      "learning_rate": 2.4329828146074096e-09,
      "loss": 0.0,
      "reward": 0.45,
      "reward_std": 0.3535533845424652,
      "rewards/equation_reward_func": 0.0,
      "rewards/format_reward_func": 0.45,
      "step": 430
    },
    {
      "completion_length": 368.75,
      "epoch": 0.009777777777777778,
      "grad_norm": 0.27213847637176514,
      "kl": 0.0006278897577431053,
      "learning_rate": 6.089874350439505e-10,
      "loss": 0.0,
      "reward": 0.45,
      "reward_std": 0.49497473835945127,
      "rewards/equation_reward_func": 0.0,
      "rewards/format_reward_func": 0.45,
      "step": 440
    },
    {
      "completion_length": 445.5,
      "epoch": 0.01,
      "grad_norm": 2.1452080545714125e-05,
      "kl": 0.000612707485561259,
      "learning_rate": 0.0,
      "loss": 0.0,
      "reward": 0.2,
      "reward_std": 0.2828427076339722,
      "rewards/equation_reward_func": 0.0,
      "rewards/format_reward_func": 0.2,
      "step": 450
    },
    {
      "completion_length": 379.875,
      "epoch": 0.010022222222222222,
      "kl": 0.0005991907673887908,
      "reward": 0.375,
      "reward_std": 0.5175491571426392,
      "rewards/equation_reward_func": 0.0,
      "rewards/format_reward_func": 0.375,
      "step": 451,
      "total_flos": 0.0,
      "train_loss": 1.3050906145387637e-09,
      "train_runtime": 4363.1554,
      "train_samples_per_second": 0.103,
      "train_steps_per_second": 0.103
    }
  ],
  "logging_steps": 10,
  "max_steps": 450,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 0.0,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}