{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.2528977871443625,
  "eval_steps": 10,
  "global_step": 240,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.01053740779768177,
      "grad_norm": 3.3616116046905518,
      "learning_rate": 4.982437653670531e-05,
      "loss": 1.7881,
      "step": 10
    },
    {
      "epoch": 0.01053740779768177,
      "eval_loss": 0.6833231449127197,
      "eval_runtime": 13.3064,
      "eval_samples_per_second": 36.073,
      "eval_steps_per_second": 2.255,
      "step": 10
    },
    {
      "epoch": 0.02107481559536354,
      "grad_norm": 1.3658663034439087,
      "learning_rate": 4.964875307341061e-05,
      "loss": 0.5571,
      "step": 20
    },
    {
      "epoch": 0.02107481559536354,
      "eval_loss": 0.5293903350830078,
      "eval_runtime": 13.4286,
      "eval_samples_per_second": 35.745,
      "eval_steps_per_second": 2.234,
      "step": 20
    },
    {
      "epoch": 0.03161222339304531,
      "grad_norm": 1.0748717784881592,
      "learning_rate": 4.947312961011591e-05,
      "loss": 0.4482,
      "step": 30
    },
    {
      "epoch": 0.03161222339304531,
      "eval_loss": 0.47912800312042236,
      "eval_runtime": 13.4918,
      "eval_samples_per_second": 35.577,
      "eval_steps_per_second": 2.224,
      "step": 30
    },
    {
      "epoch": 0.04214963119072708,
      "grad_norm": 0.890529215335846,
      "learning_rate": 4.929750614682122e-05,
      "loss": 0.3722,
      "step": 40
    },
    {
      "epoch": 0.04214963119072708,
      "eval_loss": 0.44374603033065796,
      "eval_runtime": 13.5807,
      "eval_samples_per_second": 35.344,
      "eval_steps_per_second": 2.209,
      "step": 40
    },
    {
      "epoch": 0.05268703898840885,
      "grad_norm": 0.8233364224433899,
      "learning_rate": 4.9121882683526524e-05,
      "loss": 0.3534,
      "step": 50
    },
    {
      "epoch": 0.05268703898840885,
      "eval_loss": 0.4318523108959198,
      "eval_runtime": 13.6096,
      "eval_samples_per_second": 35.269,
      "eval_steps_per_second": 2.204,
      "step": 50
    },
    {
      "epoch": 0.06322444678609063,
      "grad_norm": 0.7144497036933899,
      "learning_rate": 4.894625922023183e-05,
      "loss": 0.3512,
      "step": 60
    },
    {
      "epoch": 0.06322444678609063,
      "eval_loss": 0.41558387875556946,
      "eval_runtime": 13.6488,
      "eval_samples_per_second": 35.168,
      "eval_steps_per_second": 2.198,
      "step": 60
    },
    {
      "epoch": 0.0737618545837724,
      "grad_norm": 0.6796255707740784,
      "learning_rate": 4.877063575693713e-05,
      "loss": 0.3588,
      "step": 70
    },
    {
      "epoch": 0.0737618545837724,
      "eval_loss": 0.4085235297679901,
      "eval_runtime": 13.6512,
      "eval_samples_per_second": 35.162,
      "eval_steps_per_second": 2.198,
      "step": 70
    },
    {
      "epoch": 0.08429926238145416,
      "grad_norm": 0.6846384406089783,
      "learning_rate": 4.8595012293642434e-05,
      "loss": 0.3504,
      "step": 80
    },
    {
      "epoch": 0.08429926238145416,
      "eval_loss": 0.3993188142776489,
      "eval_runtime": 13.6377,
      "eval_samples_per_second": 35.197,
      "eval_steps_per_second": 2.2,
      "step": 80
    },
    {
      "epoch": 0.09483667017913593,
      "grad_norm": 0.7391332983970642,
      "learning_rate": 4.841938883034774e-05,
      "loss": 0.3299,
      "step": 90
    },
    {
      "epoch": 0.09483667017913593,
      "eval_loss": 0.3922707140445709,
      "eval_runtime": 13.6059,
      "eval_samples_per_second": 35.279,
      "eval_steps_per_second": 2.205,
      "step": 90
    },
    {
      "epoch": 0.1053740779768177,
      "grad_norm": 0.6717132925987244,
      "learning_rate": 4.824376536705304e-05,
      "loss": 0.3358,
      "step": 100
    },
    {
      "epoch": 0.1053740779768177,
      "eval_loss": 0.390600323677063,
      "eval_runtime": 13.6022,
      "eval_samples_per_second": 35.288,
      "eval_steps_per_second": 2.206,
      "step": 100
    },
    {
      "epoch": 0.11591148577449947,
      "grad_norm": 0.6522348523139954,
      "learning_rate": 4.8068141903758344e-05,
      "loss": 0.311,
      "step": 110
    },
    {
      "epoch": 0.11591148577449947,
      "eval_loss": 0.3844388425350189,
      "eval_runtime": 13.5921,
      "eval_samples_per_second": 35.315,
      "eval_steps_per_second": 2.207,
      "step": 110
    },
    {
      "epoch": 0.12644889357218125,
      "grad_norm": 0.5558998584747314,
      "learning_rate": 4.789251844046364e-05,
      "loss": 0.3161,
      "step": 120
    },
    {
      "epoch": 0.12644889357218125,
      "eval_loss": 0.3852270245552063,
      "eval_runtime": 13.5961,
      "eval_samples_per_second": 35.304,
      "eval_steps_per_second": 2.207,
      "step": 120
    },
    {
      "epoch": 0.136986301369863,
      "grad_norm": 0.639998197555542,
      "learning_rate": 4.7716894977168955e-05,
      "loss": 0.3037,
      "step": 130
    },
    {
      "epoch": 0.136986301369863,
      "eval_loss": 0.3841981887817383,
      "eval_runtime": 13.6057,
      "eval_samples_per_second": 35.279,
      "eval_steps_per_second": 2.205,
      "step": 130
    },
    {
      "epoch": 0.1475237091675448,
      "grad_norm": 0.583459198474884,
      "learning_rate": 4.754127151387426e-05,
      "loss": 0.3232,
      "step": 140
    },
    {
      "epoch": 0.1475237091675448,
      "eval_loss": 0.3742731213569641,
      "eval_runtime": 13.5955,
      "eval_samples_per_second": 35.306,
      "eval_steps_per_second": 2.207,
      "step": 140
    },
    {
      "epoch": 0.15806111696522657,
      "grad_norm": 0.5834677219390869,
      "learning_rate": 4.736564805057956e-05,
      "loss": 0.317,
      "step": 150
    },
    {
      "epoch": 0.15806111696522657,
      "eval_loss": 0.3765258193016052,
      "eval_runtime": 13.5995,
      "eval_samples_per_second": 35.295,
      "eval_steps_per_second": 2.206,
      "step": 150
    },
    {
      "epoch": 0.16859852476290832,
      "grad_norm": 0.5975239872932434,
      "learning_rate": 4.7190024587284866e-05,
      "loss": 0.2987,
      "step": 160
    },
    {
      "epoch": 0.16859852476290832,
      "eval_loss": 0.37060925364494324,
      "eval_runtime": 13.5971,
      "eval_samples_per_second": 35.302,
      "eval_steps_per_second": 2.206,
      "step": 160
    },
    {
      "epoch": 0.1791359325605901,
      "grad_norm": 0.6860706806182861,
      "learning_rate": 4.7014401123990165e-05,
      "loss": 0.3166,
      "step": 170
    },
    {
      "epoch": 0.1791359325605901,
      "eval_loss": 0.37034350633621216,
      "eval_runtime": 13.5923,
      "eval_samples_per_second": 35.314,
      "eval_steps_per_second": 2.207,
      "step": 170
    },
    {
      "epoch": 0.18967334035827185,
      "grad_norm": 0.7161134481430054,
      "learning_rate": 4.683877766069547e-05,
      "loss": 0.3036,
      "step": 180
    },
    {
      "epoch": 0.18967334035827185,
      "eval_loss": 0.3633898198604584,
      "eval_runtime": 13.6104,
      "eval_samples_per_second": 35.267,
      "eval_steps_per_second": 2.204,
      "step": 180
    },
    {
      "epoch": 0.20021074815595363,
      "grad_norm": 0.8212491273880005,
      "learning_rate": 4.6663154197400776e-05,
      "loss": 0.3023,
      "step": 190
    },
    {
      "epoch": 0.20021074815595363,
      "eval_loss": 0.36356136202812195,
      "eval_runtime": 13.5967,
      "eval_samples_per_second": 35.303,
      "eval_steps_per_second": 2.206,
      "step": 190
    },
    {
      "epoch": 0.2107481559536354,
      "grad_norm": 0.5965659022331238,
      "learning_rate": 4.6487530734106075e-05,
      "loss": 0.2999,
      "step": 200
    },
    {
      "epoch": 0.2107481559536354,
      "eval_loss": 0.3622604310512543,
      "eval_runtime": 13.5997,
      "eval_samples_per_second": 35.295,
      "eval_steps_per_second": 2.206,
      "step": 200
    },
    {
      "epoch": 0.22128556375131717,
      "grad_norm": 0.6638055443763733,
      "learning_rate": 4.631190727081138e-05,
      "loss": 0.2924,
      "step": 210
    },
    {
      "epoch": 0.22128556375131717,
      "eval_loss": 0.36028483510017395,
      "eval_runtime": 13.6089,
      "eval_samples_per_second": 35.271,
      "eval_steps_per_second": 2.204,
      "step": 210
    },
    {
      "epoch": 0.23182297154899895,
      "grad_norm": 0.4681139886379242,
      "learning_rate": 4.6136283807516686e-05,
      "loss": 0.3012,
      "step": 220
    },
    {
      "epoch": 0.23182297154899895,
      "eval_loss": 0.3585042357444763,
      "eval_runtime": 13.6099,
      "eval_samples_per_second": 35.269,
      "eval_steps_per_second": 2.204,
      "step": 220
    },
    {
      "epoch": 0.24236037934668073,
      "grad_norm": 0.6495727300643921,
      "learning_rate": 4.596066034422199e-05,
      "loss": 0.2884,
      "step": 230
    },
    {
      "epoch": 0.24236037934668073,
      "eval_loss": 0.36549657583236694,
      "eval_runtime": 13.6085,
      "eval_samples_per_second": 35.272,
      "eval_steps_per_second": 2.205,
      "step": 230
    },
    {
      "epoch": 0.2528977871443625,
      "grad_norm": 0.4928750991821289,
      "learning_rate": 4.57850368809273e-05,
      "loss": 0.2838,
      "step": 240
    },
    {
      "epoch": 0.2528977871443625,
      "eval_loss": 0.35502439737319946,
      "eval_runtime": 13.6002,
      "eval_samples_per_second": 35.293,
      "eval_steps_per_second": 2.206,
      "step": 240
    }
  ],
  "logging_steps": 10,
  "max_steps": 2847,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 10,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 2.8558313421275136e+16,
  "train_batch_size": 16,
  "trial_name": null,
  "trial_params": null
}