{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.005333333333333333,
  "eval_steps": 500,
  "global_step": 30,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "completion_length": 495.080379486084,
      "epoch": 0.00035555555555555557,
      "grad_norm": 0.2174892404258369,
      "kl": 0.0,
      "learning_rate": 4.985344892885899e-07,
      "loss": 0.0,
      "reward": 0.2857143022119999,
      "reward_std": 0.43269163370132446,
      "rewards/equation_reward_func": 0.0357142873108387,
      "rewards/format_reward_func": 0.2500000149011612,
      "step": 2
    },
    {
      "completion_length": 497.4018096923828,
      "epoch": 0.0007111111111111111,
      "grad_norm": 0.22154184238668786,
      "kl": 0.0003987550735473633,
      "learning_rate": 4.869132927957006e-07,
      "loss": 0.0,
      "reward": 0.35714287776499987,
      "reward_std": 0.4787949416786432,
      "rewards/equation_reward_func": 0.05357143096625805,
      "rewards/format_reward_func": 0.30357144866138697,
      "step": 4
    },
    {
      "completion_length": 502.78573989868164,
      "epoch": 0.0010666666666666667,
      "grad_norm": 0.18277559355029982,
      "kl": 0.0004475116729736328,
      "learning_rate": 4.642142940418973e-07,
      "loss": 0.0,
      "reward": 0.28571430314332247,
      "reward_std": 0.4405473303049803,
      "rewards/equation_reward_func": 0.0357142873108387,
      "rewards/format_reward_func": 0.2500000139698386,
      "step": 6
    },
    {
      "completion_length": 475.142879486084,
      "epoch": 0.0014222222222222223,
      "grad_norm": 0.19482331072759956,
      "kl": 0.0005053281784057617,
      "learning_rate": 4.314988729807827e-07,
      "loss": 0.0,
      "reward": 0.3660714477300644,
      "reward_std": 0.48892509937286377,
      "rewards/equation_reward_func": 0.04464286006987095,
      "rewards/format_reward_func": 0.32142858672887087,
      "step": 8
    },
    {
      "completion_length": 484.68752098083496,
      "epoch": 0.0017777777777777779,
      "grad_norm": 0.18758626438214032,
      "kl": 0.0006405115127563477,
      "learning_rate": 3.902967663405956e-07,
      "loss": 0.0,
      "reward": 0.4107143012806773,
      "reward_std": 0.5162536893039942,
      "rewards/equation_reward_func": 0.05357143096625805,
      "rewards/format_reward_func": 0.3571428721770644,
      "step": 10
    },
    {
      "completion_length": 476.7410888671875,
      "epoch": 0.0021333333333333334,
      "grad_norm": 0.19195993642334963,
      "kl": 0.0009016990661621094,
      "learning_rate": 3.4253453883497864e-07,
      "loss": 0.0,
      "reward": 0.4375000251457095,
      "reward_std": 0.5308322850614786,
      "rewards/equation_reward_func": 0.05357143096625805,
      "rewards/format_reward_func": 0.38392859138548374,
      "step": 12
    },
    {
      "completion_length": 513.0893096923828,
      "epoch": 0.002488888888888889,
      "grad_norm": 0.1780296803915782,
      "kl": 0.0014204978942871094,
      "learning_rate": 2.9044549913819124e-07,
      "loss": 0.0,
      "reward": 0.5803571734577417,
      "reward_std": 0.5420807637274265,
      "rewards/equation_reward_func": 0.08035714738070965,
      "rewards/format_reward_func": 0.5000000242143869,
      "step": 14
    },
    {
      "completion_length": 475.6964473724365,
      "epoch": 0.0028444444444444446,
      "grad_norm": 0.18578955531425126,
      "kl": 0.0019216537475585938,
      "learning_rate": 2.3646527285364563e-07,
      "loss": 0.0,
      "reward": 0.6339286062866449,
      "reward_std": 0.5776853580027819,
      "rewards/equation_reward_func": 0.08928571827709675,
      "rewards/format_reward_func": 0.5446428842842579,
      "step": 16
    },
    {
      "completion_length": 472.723237991333,
      "epoch": 0.0032,
      "grad_norm": 0.17761745784153318,
      "kl": 0.002444744110107422,
      "learning_rate": 1.8311791536769483e-07,
      "loss": 0.0,
      "reward": 0.5625000214204192,
      "reward_std": 0.5268590040504932,
      "rewards/equation_reward_func": 0.044642859138548374,
      "rewards/format_reward_func": 0.517857170663774,
      "step": 18
    },
    {
      "completion_length": 443.6964530944824,
      "epoch": 0.0035555555555555557,
      "grad_norm": 0.21019280132551116,
      "kl": 0.0029592514038085938,
      "learning_rate": 1.328978898250525e-07,
      "loss": 0.0,
      "reward": 0.5982143143191934,
      "reward_std": 0.5197608303278685,
      "rewards/equation_reward_func": 0.05357143096625805,
      "rewards/format_reward_func": 0.5446428870782256,
      "step": 20
    },
    {
      "completion_length": 448.9821662902832,
      "epoch": 0.003911111111111111,
      "grad_norm": 0.21013630596816346,
      "kl": 0.0033364295959472656,
      "learning_rate": 8.81534288045431e-08,
      "loss": 0.0,
      "reward": 0.6517857424914837,
      "reward_std": 0.5414303578436375,
      "rewards/equation_reward_func": 0.044642859138548374,
      "rewards/format_reward_func": 0.6071428805589676,
      "step": 22
    },
    {
      "completion_length": 495.6785888671875,
      "epoch": 0.004266666666666667,
      "grad_norm": 0.18425036339737497,
      "kl": 0.0029821395874023438,
      "learning_rate": 5.097673357358906e-08,
      "loss": 0.0,
      "reward": 0.6607143096625805,
      "reward_std": 0.5480529572814703,
      "rewards/equation_reward_func": 0.06250000279396772,
      "rewards/format_reward_func": 0.5982143189758062,
      "step": 24
    },
    {
      "completion_length": 429.2589511871338,
      "epoch": 0.004622222222222222,
      "grad_norm": 0.21101924326333663,
      "kl": 0.004176139831542969,
      "learning_rate": 2.3106145082260774e-08,
      "loss": 0.0,
      "reward": 0.6339285988360643,
      "reward_std": 0.6071197390556335,
      "rewards/equation_reward_func": 0.08035714738070965,
      "rewards/format_reward_func": 0.5535714533179998,
      "step": 26
    },
    {
      "completion_length": 461.3214530944824,
      "epoch": 0.004977777777777778,
      "grad_norm": 0.18232336491131187,
      "kl": 0.0030603408813476562,
      "learning_rate": 5.844861072478335e-09,
      "loss": 0.0,
      "reward": 0.7500000353902578,
      "reward_std": 0.5386870224028826,
      "rewards/equation_reward_func": 0.08928571827709675,
      "rewards/format_reward_func": 0.6607143077999353,
      "step": 28
    },
    {
      "completion_length": 432.61609077453613,
      "epoch": 0.005333333333333333,
      "grad_norm": 0.1956194297501798,
      "kl": 0.0031890869140625,
      "learning_rate": 0.0,
      "loss": 0.0,
      "reward": 0.7321428880095482,
      "reward_std": 0.505688888952136,
      "rewards/equation_reward_func": 0.026785715483129025,
      "rewards/format_reward_func": 0.7053571790456772,
      "step": 30
    },
    {
      "epoch": 0.005333333333333333,
      "step": 30,
      "total_flos": 0.0,
      "train_loss": 1.9422600227395983e-06,
      "train_runtime": 1359.2142,
      "train_samples_per_second": 1.236,
      "train_steps_per_second": 0.022
    }
  ],
  "logging_steps": 2,
  "max_steps": 30,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 25,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 0.0,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}