|
{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.1475237091675448,
  "eval_steps": 10,
  "global_step": 140,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.01053740779768177,
      "grad_norm": 3.3616116046905518,
      "learning_rate": 4.982437653670531e-05,
      "loss": 1.7881,
      "step": 10
    },
    {
      "epoch": 0.01053740779768177,
      "eval_loss": 0.6833231449127197,
      "eval_runtime": 13.3064,
      "eval_samples_per_second": 36.073,
      "eval_steps_per_second": 2.255,
      "step": 10
    },
    {
      "epoch": 0.02107481559536354,
      "grad_norm": 1.3658663034439087,
      "learning_rate": 4.964875307341061e-05,
      "loss": 0.5571,
      "step": 20
    },
    {
      "epoch": 0.02107481559536354,
      "eval_loss": 0.5293903350830078,
      "eval_runtime": 13.4286,
      "eval_samples_per_second": 35.745,
      "eval_steps_per_second": 2.234,
      "step": 20
    },
    {
      "epoch": 0.03161222339304531,
      "grad_norm": 1.0748717784881592,
      "learning_rate": 4.947312961011591e-05,
      "loss": 0.4482,
      "step": 30
    },
    {
      "epoch": 0.03161222339304531,
      "eval_loss": 0.47912800312042236,
      "eval_runtime": 13.4918,
      "eval_samples_per_second": 35.577,
      "eval_steps_per_second": 2.224,
      "step": 30
    },
    {
      "epoch": 0.04214963119072708,
      "grad_norm": 0.890529215335846,
      "learning_rate": 4.929750614682122e-05,
      "loss": 0.3722,
      "step": 40
    },
    {
      "epoch": 0.04214963119072708,
      "eval_loss": 0.44374603033065796,
      "eval_runtime": 13.5807,
      "eval_samples_per_second": 35.344,
      "eval_steps_per_second": 2.209,
      "step": 40
    },
    {
      "epoch": 0.05268703898840885,
      "grad_norm": 0.8233364224433899,
      "learning_rate": 4.9121882683526524e-05,
      "loss": 0.3534,
      "step": 50
    },
    {
      "epoch": 0.05268703898840885,
      "eval_loss": 0.4318523108959198,
      "eval_runtime": 13.6096,
      "eval_samples_per_second": 35.269,
      "eval_steps_per_second": 2.204,
      "step": 50
    },
    {
      "epoch": 0.06322444678609063,
      "grad_norm": 0.7144497036933899,
      "learning_rate": 4.894625922023183e-05,
      "loss": 0.3512,
      "step": 60
    },
    {
      "epoch": 0.06322444678609063,
      "eval_loss": 0.41558387875556946,
      "eval_runtime": 13.6488,
      "eval_samples_per_second": 35.168,
      "eval_steps_per_second": 2.198,
      "step": 60
    },
    {
      "epoch": 0.0737618545837724,
      "grad_norm": 0.6796255707740784,
      "learning_rate": 4.877063575693713e-05,
      "loss": 0.3588,
      "step": 70
    },
    {
      "epoch": 0.0737618545837724,
      "eval_loss": 0.4085235297679901,
      "eval_runtime": 13.6512,
      "eval_samples_per_second": 35.162,
      "eval_steps_per_second": 2.198,
      "step": 70
    },
    {
      "epoch": 0.08429926238145416,
      "grad_norm": 0.6846384406089783,
      "learning_rate": 4.8595012293642434e-05,
      "loss": 0.3504,
      "step": 80
    },
    {
      "epoch": 0.08429926238145416,
      "eval_loss": 0.3993188142776489,
      "eval_runtime": 13.6377,
      "eval_samples_per_second": 35.197,
      "eval_steps_per_second": 2.2,
      "step": 80
    },
    {
      "epoch": 0.09483667017913593,
      "grad_norm": 0.7391332983970642,
      "learning_rate": 4.841938883034774e-05,
      "loss": 0.3299,
      "step": 90
    },
    {
      "epoch": 0.09483667017913593,
      "eval_loss": 0.3922707140445709,
      "eval_runtime": 13.6059,
      "eval_samples_per_second": 35.279,
      "eval_steps_per_second": 2.205,
      "step": 90
    },
    {
      "epoch": 0.1053740779768177,
      "grad_norm": 0.6717132925987244,
      "learning_rate": 4.824376536705304e-05,
      "loss": 0.3358,
      "step": 100
    },
    {
      "epoch": 0.1053740779768177,
      "eval_loss": 0.390600323677063,
      "eval_runtime": 13.6022,
      "eval_samples_per_second": 35.288,
      "eval_steps_per_second": 2.206,
      "step": 100
    },
    {
      "epoch": 0.11591148577449947,
      "grad_norm": 0.6522348523139954,
      "learning_rate": 4.8068141903758344e-05,
      "loss": 0.311,
      "step": 110
    },
    {
      "epoch": 0.11591148577449947,
      "eval_loss": 0.3844388425350189,
      "eval_runtime": 13.5921,
      "eval_samples_per_second": 35.315,
      "eval_steps_per_second": 2.207,
      "step": 110
    },
    {
      "epoch": 0.12644889357218125,
      "grad_norm": 0.5558998584747314,
      "learning_rate": 4.789251844046364e-05,
      "loss": 0.3161,
      "step": 120
    },
    {
      "epoch": 0.12644889357218125,
      "eval_loss": 0.3852270245552063,
      "eval_runtime": 13.5961,
      "eval_samples_per_second": 35.304,
      "eval_steps_per_second": 2.207,
      "step": 120
    },
    {
      "epoch": 0.136986301369863,
      "grad_norm": 0.639998197555542,
      "learning_rate": 4.7716894977168955e-05,
      "loss": 0.3037,
      "step": 130
    },
    {
      "epoch": 0.136986301369863,
      "eval_loss": 0.3841981887817383,
      "eval_runtime": 13.6057,
      "eval_samples_per_second": 35.279,
      "eval_steps_per_second": 2.205,
      "step": 130
    },
    {
      "epoch": 0.1475237091675448,
      "grad_norm": 0.583459198474884,
      "learning_rate": 4.754127151387426e-05,
      "loss": 0.3232,
      "step": 140
    },
    {
      "epoch": 0.1475237091675448,
      "eval_loss": 0.3742731213569641,
      "eval_runtime": 13.5955,
      "eval_samples_per_second": 35.306,
      "eval_steps_per_second": 2.207,
      "step": 140
    }
  ],
  "logging_steps": 10,
  "max_steps": 2847,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 10,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 1.6464971099799552e+16,
  "train_batch_size": 16,
  "trial_name": null,
  "trial_params": null
}