{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 2.899601304820587,
  "eval_steps": 500,
  "global_step": 4000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.07249003262051468,
      "grad_norm": 0.09461668878793716,
      "learning_rate": 0.00019992615059139993,
      "loss": 0.7791,
      "step": 100
    },
    {
      "epoch": 0.14498006524102935,
      "grad_norm": 0.06766393035650253,
      "learning_rate": 0.0001993360096097155,
      "loss": 0.5521,
      "step": 200
    },
    {
      "epoch": 0.21747009786154403,
      "grad_norm": 0.0734836608171463,
      "learning_rate": 0.00019815921288395655,
      "loss": 0.5053,
      "step": 300
    },
    {
      "epoch": 0.2899601304820587,
      "grad_norm": 0.060219183564186096,
      "learning_rate": 0.00019640271030632727,
      "loss": 0.4836,
      "step": 400
    },
    {
      "epoch": 0.3624501631025734,
      "grad_norm": 0.0633341521024704,
      "learning_rate": 0.00019407687537915269,
      "loss": 0.4692,
      "step": 500
    },
    {
      "epoch": 0.43494019572308806,
      "grad_norm": 0.06684491038322449,
      "learning_rate": 0.0001911954439513476,
      "loss": 0.4508,
      "step": 600
    },
    {
      "epoch": 0.5074302283436027,
      "grad_norm": 0.10767890512943268,
      "learning_rate": 0.00018777543309763566,
      "loss": 0.4432,
      "step": 700
    },
    {
      "epoch": 0.5799202609641174,
      "grad_norm": 0.06926553696393967,
      "learning_rate": 0.00018383704061959965,
      "loss": 0.4313,
      "step": 800
    },
    {
      "epoch": 0.6524102935846321,
      "grad_norm": 0.0965363085269928,
      "learning_rate": 0.00017940352576208688,
      "loss": 0.4179,
      "step": 900
    },
    {
      "epoch": 0.7249003262051468,
      "grad_norm": 0.07616274058818817,
      "learning_rate": 0.00017450107184943073,
      "loss": 0.4116,
      "step": 1000
    },
    {
      "epoch": 0.7973903588256614,
      "grad_norm": 0.08681479096412659,
      "learning_rate": 0.00016915863165272818,
      "loss": 0.4044,
      "step": 1100
    },
    {
      "epoch": 0.8698803914461761,
      "grad_norm": 0.07483861595392227,
      "learning_rate": 0.00016340775640139835,
      "loss": 0.4006,
      "step": 1200
    },
    {
      "epoch": 0.9423704240666908,
      "grad_norm": 0.0695137232542038,
      "learning_rate": 0.00015728240944884046,
      "loss": 0.3945,
      "step": 1300
    },
    {
      "epoch": 1.0148604566872055,
      "grad_norm": 0.09992991387844086,
      "learning_rate": 0.00015081876569263987,
      "loss": 0.3896,
      "step": 1400
    },
    {
      "epoch": 1.0873504893077202,
      "grad_norm": 0.058682847768068314,
      "learning_rate": 0.00014405499793389992,
      "loss": 0.3771,
      "step": 1500
    },
    {
      "epoch": 1.1598405219282348,
      "grad_norm": 0.054995909333229065,
      "learning_rate": 0.0001370310514374126,
      "loss": 0.3682,
      "step": 1600
    },
    {
      "epoch": 1.2323305545487495,
      "grad_norm": 0.08551865816116333,
      "learning_rate": 0.00012978840802406474,
      "loss": 0.3683,
      "step": 1700
    },
    {
      "epoch": 1.3048205871692642,
      "grad_norm": 0.06695013493299484,
      "learning_rate": 0.0001223698410886949,
      "loss": 0.3689,
      "step": 1800
    },
    {
      "epoch": 1.3773106197897789,
      "grad_norm": 0.0619310662150383,
      "learning_rate": 0.00011481916299021028,
      "loss": 0.3639,
      "step": 1900
    },
    {
      "epoch": 1.4498006524102935,
      "grad_norm": 0.05403854325413704,
      "learning_rate": 0.00010718096630581976,
      "loss": 0.3628,
      "step": 2000
    },
    {
      "epoch": 1.5222906850308082,
      "grad_norm": 0.08368243277072906,
      "learning_rate": 9.950036047747562e-05,
      "loss": 0.3612,
      "step": 2100
    },
    {
      "epoch": 1.594780717651323,
      "grad_norm": 0.05811639130115509,
      "learning_rate": 9.182270540583096e-05,
      "loss": 0.3624,
      "step": 2200
    },
    {
      "epoch": 1.6672707502718376,
      "grad_norm": 0.05284130945801735,
      "learning_rate": 8.419334356504496e-05,
      "loss": 0.3588,
      "step": 2300
    },
    {
      "epoch": 1.7397607828923523,
      "grad_norm": 0.05519060045480728,
      "learning_rate": 7.665733222050506e-05,
      "loss": 0.3594,
      "step": 2400
    },
    {
      "epoch": 1.812250815512867,
      "grad_norm": 0.0576307587325573,
      "learning_rate": 6.925917733092715e-05,
      "loss": 0.3584,
      "step": 2500
    },
    {
      "epoch": 1.8847408481333816,
      "grad_norm": 0.06832610815763474,
      "learning_rate": 6.204257070634643e-05,
      "loss": 0.3556,
      "step": 2600
    },
    {
      "epoch": 1.9572308807538963,
      "grad_norm": 0.05261433124542236,
      "learning_rate": 5.5050131974284314e-05,
      "loss": 0.3577,
      "step": 2700
    },
    {
      "epoch": 2.029720913374411,
      "grad_norm": 0.05833235755562782,
      "learning_rate": 4.832315687798013e-05,
      "loss": 0.3453,
      "step": 2800
    },
    {
      "epoch": 2.102210945994926,
      "grad_norm": 0.05576351657509804,
      "learning_rate": 4.190137339317991e-05,
      "loss": 0.3488,
      "step": 2900
    },
    {
      "epoch": 2.1747009786154403,
      "grad_norm": 0.05975164473056793,
      "learning_rate": 3.5822707103802e-05,
      "loss": 0.3448,
      "step": 3000
    },
    {
      "epoch": 2.247191011235955,
      "grad_norm": 0.05862308666110039,
      "learning_rate": 3.0123057222115836e-05,
      "loss": 0.3441,
      "step": 3100
    },
    {
      "epoch": 2.3196810438564697,
      "grad_norm": 0.06690463423728943,
      "learning_rate": 2.483608457620823e-05,
      "loss": 0.3513,
      "step": 3200
    },
    {
      "epoch": 2.3921710764769846,
      "grad_norm": 0.0602315217256546,
      "learning_rate": 1.99930128168345e-05,
      "loss": 0.3441,
      "step": 3300
    },
    {
      "epoch": 2.464661109097499,
      "grad_norm": 0.05553460866212845,
      "learning_rate": 1.562244401768144e-05,
      "loss": 0.3444,
      "step": 3400
    },
    {
      "epoch": 2.537151141718014,
      "grad_norm": 0.05820445343852043,
      "learning_rate": 1.1750189758064211e-05,
      "loss": 0.3465,
      "step": 3500
    },
    {
      "epoch": 2.6096411743385284,
      "grad_norm": 0.06841924786567688,
      "learning_rate": 8.399118685644858e-06,
      "loss": 0.3486,
      "step": 3600
    },
    {
      "epoch": 2.6821312069590433,
      "grad_norm": 0.06084201857447624,
      "learning_rate": 5.589021459431787e-06,
      "loss": 0.3426,
      "step": 3700
    },
    {
      "epoch": 2.7546212395795577,
      "grad_norm": 0.06527499854564667,
      "learning_rate": 3.3364938706757275e-06,
      "loss": 0.3428,
      "step": 3800
    },
    {
      "epoch": 2.8271112722000726,
      "grad_norm": 0.06700863689184189,
      "learning_rate": 1.6548388319239105e-06,
      "loss": 0.3414,
      "step": 3900
    },
    {
      "epoch": 2.899601304820587,
      "grad_norm": 0.08079268038272858,
      "learning_rate": 5.539878130623732e-07,
      "loss": 0.3453,
      "step": 4000
    }
  ],
  "logging_steps": 100,
  "max_steps": 4137,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 1000,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 1.493402810056704e+18,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}