|
{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.31612223393045313,
  "eval_steps": 50,
  "global_step": 300,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.01053740779768177,
      "grad_norm": 3.070249080657959,
      "learning_rate": 4.982437653670531e-05,
      "loss": 1.7879,
      "step": 10
    },
    {
      "epoch": 0.02107481559536354,
      "grad_norm": 1.702326774597168,
      "learning_rate": 4.964875307341061e-05,
      "loss": 0.5567,
      "step": 20
    },
    {
      "epoch": 0.03161222339304531,
      "grad_norm": 1.1947294473648071,
      "learning_rate": 4.947312961011591e-05,
      "loss": 0.4493,
      "step": 30
    },
    {
      "epoch": 0.04214963119072708,
      "grad_norm": 0.9556658267974854,
      "learning_rate": 4.929750614682122e-05,
      "loss": 0.3728,
      "step": 40
    },
    {
      "epoch": 0.05268703898840885,
      "grad_norm": 0.7952510714530945,
      "learning_rate": 4.9121882683526524e-05,
      "loss": 0.3535,
      "step": 50
    },
    {
      "epoch": 0.05268703898840885,
      "eval_loss": 0.4311191439628601,
      "eval_runtime": 13.6539,
      "eval_samples_per_second": 35.155,
      "eval_steps_per_second": 2.197,
      "step": 50
    },
    {
      "epoch": 0.06322444678609063,
      "grad_norm": 0.6962826251983643,
      "learning_rate": 4.894625922023183e-05,
      "loss": 0.3507,
      "step": 60
    },
    {
      "epoch": 0.0737618545837724,
      "grad_norm": 0.6941961646080017,
      "learning_rate": 4.877063575693713e-05,
      "loss": 0.3585,
      "step": 70
    },
    {
      "epoch": 0.08429926238145416,
      "grad_norm": 0.6864392757415771,
      "learning_rate": 4.8595012293642434e-05,
      "loss": 0.3496,
      "step": 80
    },
    {
      "epoch": 0.09483667017913593,
      "grad_norm": 0.7322937846183777,
      "learning_rate": 4.841938883034774e-05,
      "loss": 0.3295,
      "step": 90
    },
    {
      "epoch": 0.1053740779768177,
      "grad_norm": 0.6921488046646118,
      "learning_rate": 4.824376536705304e-05,
      "loss": 0.3357,
      "step": 100
    },
    {
      "epoch": 0.1053740779768177,
      "eval_loss": 0.39120009541511536,
      "eval_runtime": 13.7031,
      "eval_samples_per_second": 35.029,
      "eval_steps_per_second": 2.189,
      "step": 100
    },
    {
      "epoch": 0.11591148577449947,
      "grad_norm": 0.6553240418434143,
      "learning_rate": 4.8068141903758344e-05,
      "loss": 0.3105,
      "step": 110
    },
    {
      "epoch": 0.12644889357218125,
      "grad_norm": 0.5637819170951843,
      "learning_rate": 4.789251844046364e-05,
      "loss": 0.3164,
      "step": 120
    },
    {
      "epoch": 0.136986301369863,
      "grad_norm": 0.6341928839683533,
      "learning_rate": 4.7716894977168955e-05,
      "loss": 0.304,
      "step": 130
    },
    {
      "epoch": 0.1475237091675448,
      "grad_norm": 0.5917785167694092,
      "learning_rate": 4.754127151387426e-05,
      "loss": 0.3234,
      "step": 140
    },
    {
      "epoch": 0.15806111696522657,
      "grad_norm": 0.5884453654289246,
      "learning_rate": 4.736564805057956e-05,
      "loss": 0.317,
      "step": 150
    },
    {
      "epoch": 0.15806111696522657,
      "eval_loss": 0.37688738107681274,
      "eval_runtime": 13.6535,
      "eval_samples_per_second": 35.156,
      "eval_steps_per_second": 2.197,
      "step": 150
    },
    {
      "epoch": 0.16859852476290832,
      "grad_norm": 0.5819964408874512,
      "learning_rate": 4.7190024587284866e-05,
      "loss": 0.2992,
      "step": 160
    },
    {
      "epoch": 0.1791359325605901,
      "grad_norm": 0.689468264579773,
      "learning_rate": 4.7014401123990165e-05,
      "loss": 0.3168,
      "step": 170
    },
    {
      "epoch": 0.18967334035827185,
      "grad_norm": 0.6950872540473938,
      "learning_rate": 4.683877766069547e-05,
      "loss": 0.3041,
      "step": 180
    },
    {
      "epoch": 0.20021074815595363,
      "grad_norm": 0.8322122097015381,
      "learning_rate": 4.6663154197400776e-05,
      "loss": 0.3028,
      "step": 190
    },
    {
      "epoch": 0.2107481559536354,
      "grad_norm": 0.5850774645805359,
      "learning_rate": 4.6487530734106075e-05,
      "loss": 0.2992,
      "step": 200
    },
    {
      "epoch": 0.2107481559536354,
      "eval_loss": 0.36230018734931946,
      "eval_runtime": 13.6165,
      "eval_samples_per_second": 35.251,
      "eval_steps_per_second": 2.203,
      "step": 200
    },
    {
      "epoch": 0.22128556375131717,
      "grad_norm": 0.6668715476989746,
      "learning_rate": 4.631190727081138e-05,
      "loss": 0.2924,
      "step": 210
    },
    {
      "epoch": 0.23182297154899895,
      "grad_norm": 0.4749641418457031,
      "learning_rate": 4.6136283807516686e-05,
      "loss": 0.3017,
      "step": 220
    },
    {
      "epoch": 0.24236037934668073,
      "grad_norm": 0.6381515860557556,
      "learning_rate": 4.596066034422199e-05,
      "loss": 0.2887,
      "step": 230
    },
    {
      "epoch": 0.2528977871443625,
      "grad_norm": 0.49952977895736694,
      "learning_rate": 4.57850368809273e-05,
      "loss": 0.2833,
      "step": 240
    },
    {
      "epoch": 0.26343519494204426,
      "grad_norm": 0.699518084526062,
      "learning_rate": 4.5609413417632596e-05,
      "loss": 0.2859,
      "step": 250
    },
    {
      "epoch": 0.26343519494204426,
      "eval_loss": 0.3563433885574341,
      "eval_runtime": 13.6275,
      "eval_samples_per_second": 35.223,
      "eval_steps_per_second": 2.201,
      "step": 250
    },
    {
      "epoch": 0.273972602739726,
      "grad_norm": 0.5912085175514221,
      "learning_rate": 4.54337899543379e-05,
      "loss": 0.302,
      "step": 260
    },
    {
      "epoch": 0.2845100105374078,
      "grad_norm": 0.6353363990783691,
      "learning_rate": 4.525816649104321e-05,
      "loss": 0.3112,
      "step": 270
    },
    {
      "epoch": 0.2950474183350896,
      "grad_norm": 0.5483567118644714,
      "learning_rate": 4.5082543027748506e-05,
      "loss": 0.2808,
      "step": 280
    },
    {
      "epoch": 0.3055848261327713,
      "grad_norm": 0.5003193616867065,
      "learning_rate": 4.490691956445381e-05,
      "loss": 0.3185,
      "step": 290
    },
    {
      "epoch": 0.31612223393045313,
      "grad_norm": 0.4919240176677704,
      "learning_rate": 4.473129610115912e-05,
      "loss": 0.2883,
      "step": 300
    },
    {
      "epoch": 0.31612223393045313,
      "eval_loss": 0.35315924882888794,
      "eval_runtime": 13.6133,
      "eval_samples_per_second": 35.26,
      "eval_steps_per_second": 2.204,
      "step": 300
    }
  ],
  "logging_steps": 10,
  "max_steps": 2847,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 100,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 3.584170488771379e+16,
  "train_batch_size": 16,
  "trial_name": null,
  "trial_params": null
}
|
|