{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 2.899601304820587,
  "eval_steps": 500,
  "global_step": 4000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.07249003262051468,
      "grad_norm": 0.06301224231719971,
      "learning_rate": 0.00019992615059139993,
      "loss": 0.7675,
      "step": 100
    },
    {
      "epoch": 0.14498006524102935,
      "grad_norm": 0.10580525547266006,
      "learning_rate": 0.0001993360096097155,
      "loss": 0.5641,
      "step": 200
    },
    {
      "epoch": 0.21747009786154403,
      "grad_norm": 0.07278159260749817,
      "learning_rate": 0.00019815921288395655,
      "loss": 0.5124,
      "step": 300
    },
    {
      "epoch": 0.2899601304820587,
      "grad_norm": 0.07537836581468582,
      "learning_rate": 0.00019640271030632727,
      "loss": 0.4841,
      "step": 400
    },
    {
      "epoch": 0.3624501631025734,
      "grad_norm": 0.09485650062561035,
      "learning_rate": 0.00019407687537915269,
      "loss": 0.4602,
      "step": 500
    },
    {
      "epoch": 0.43494019572308806,
      "grad_norm": 0.07589354366064072,
      "learning_rate": 0.0001911954439513476,
      "loss": 0.4577,
      "step": 600
    },
    {
      "epoch": 0.5074302283436027,
      "grad_norm": 0.0784345343708992,
      "learning_rate": 0.00018777543309763566,
      "loss": 0.4435,
      "step": 700
    },
    {
      "epoch": 0.5799202609641174,
      "grad_norm": 0.13163290917873383,
      "learning_rate": 0.00018383704061959965,
      "loss": 0.4362,
      "step": 800
    },
    {
      "epoch": 0.6524102935846321,
      "grad_norm": 0.10098472982645035,
      "learning_rate": 0.00017940352576208688,
      "loss": 0.4244,
      "step": 900
    },
    {
      "epoch": 0.7249003262051468,
      "grad_norm": 0.08392923325300217,
      "learning_rate": 0.00017450107184943073,
      "loss": 0.4194,
      "step": 1000
    },
    {
      "epoch": 0.7973903588256614,
      "grad_norm": 0.08855585008859634,
      "learning_rate": 0.00016915863165272818,
      "loss": 0.401,
      "step": 1100
    },
    {
      "epoch": 0.8698803914461761,
      "grad_norm": 0.07977194339036942,
      "learning_rate": 0.00016340775640139835,
      "loss": 0.401,
      "step": 1200
    },
    {
      "epoch": 0.9423704240666908,
      "grad_norm": 0.049693815410137177,
      "learning_rate": 0.00015728240944884046,
      "loss": 0.3995,
      "step": 1300
    },
    {
      "epoch": 1.0148604566872055,
      "grad_norm": 0.05138128623366356,
      "learning_rate": 0.00015081876569263987,
      "loss": 0.3858,
      "step": 1400
    },
    {
      "epoch": 1.0873504893077202,
      "grad_norm": 0.09676031768321991,
      "learning_rate": 0.00014405499793389992,
      "loss": 0.3771,
      "step": 1500
    },
    {
      "epoch": 1.1598405219282348,
      "grad_norm": 0.08200570940971375,
      "learning_rate": 0.0001370310514374126,
      "loss": 0.3714,
      "step": 1600
    },
    {
      "epoch": 1.2323305545487495,
      "grad_norm": 0.0593099370598793,
      "learning_rate": 0.00012978840802406474,
      "loss": 0.3689,
      "step": 1700
    },
    {
      "epoch": 1.3048205871692642,
      "grad_norm": 0.07377779483795166,
      "learning_rate": 0.0001223698410886949,
      "loss": 0.3683,
      "step": 1800
    },
    {
      "epoch": 1.3773106197897789,
      "grad_norm": 0.07046964764595032,
      "learning_rate": 0.00011481916299021028,
      "loss": 0.3655,
      "step": 1900
    },
    {
      "epoch": 1.4498006524102935,
      "grad_norm": 0.054052457213401794,
      "learning_rate": 0.00010718096630581976,
      "loss": 0.3662,
      "step": 2000
    },
    {
      "epoch": 1.5222906850308082,
      "grad_norm": 0.05863339453935623,
      "learning_rate": 9.950036047747562e-05,
      "loss": 0.3631,
      "step": 2100
    },
    {
      "epoch": 1.594780717651323,
      "grad_norm": 0.07018906623125076,
      "learning_rate": 9.182270540583096e-05,
      "loss": 0.3625,
      "step": 2200
    },
    {
      "epoch": 1.6672707502718376,
      "grad_norm": 0.07417762279510498,
      "learning_rate": 8.419334356504496e-05,
      "loss": 0.362,
      "step": 2300
    },
    {
      "epoch": 1.7397607828923523,
      "grad_norm": 0.06391692906618118,
      "learning_rate": 7.665733222050506e-05,
      "loss": 0.3621,
      "step": 2400
    },
    {
      "epoch": 1.812250815512867,
      "grad_norm": 0.05379035323858261,
      "learning_rate": 6.925917733092715e-05,
      "loss": 0.3549,
      "step": 2500
    },
    {
      "epoch": 1.8847408481333816,
      "grad_norm": 0.06350132077932358,
      "learning_rate": 6.204257070634643e-05,
      "loss": 0.3564,
      "step": 2600
    },
    {
      "epoch": 1.9572308807538963,
      "grad_norm": 0.07076149433851242,
      "learning_rate": 5.5050131974284314e-05,
      "loss": 0.3543,
      "step": 2700
    },
    {
      "epoch": 2.029720913374411,
      "grad_norm": 0.07655489444732666,
      "learning_rate": 4.832315687798013e-05,
      "loss": 0.3538,
      "step": 2800
    },
    {
      "epoch": 2.102210945994926,
      "grad_norm": 0.0633474811911583,
      "learning_rate": 4.190137339317991e-05,
      "loss": 0.345,
      "step": 2900
    },
    {
      "epoch": 2.1747009786154403,
      "grad_norm": 0.05732415243983269,
      "learning_rate": 3.5822707103802e-05,
      "loss": 0.3409,
      "step": 3000
    },
    {
      "epoch": 2.247191011235955,
      "grad_norm": 0.059786632657051086,
      "learning_rate": 3.0123057222115836e-05,
      "loss": 0.3481,
      "step": 3100
    },
    {
      "epoch": 2.3196810438564697,
      "grad_norm": 0.06117291748523712,
      "learning_rate": 2.483608457620823e-05,
      "loss": 0.3483,
      "step": 3200
    },
    {
      "epoch": 2.3921710764769846,
      "grad_norm": 0.06071442365646362,
      "learning_rate": 1.99930128168345e-05,
      "loss": 0.342,
      "step": 3300
    },
    {
      "epoch": 2.464661109097499,
      "grad_norm": 0.05658793821930885,
      "learning_rate": 1.562244401768144e-05,
      "loss": 0.3386,
      "step": 3400
    },
    {
      "epoch": 2.537151141718014,
      "grad_norm": 0.0743192508816719,
      "learning_rate": 1.1750189758064211e-05,
      "loss": 0.3465,
      "step": 3500
    },
    {
      "epoch": 2.6096411743385284,
      "grad_norm": 0.05902181193232536,
      "learning_rate": 8.399118685644858e-06,
      "loss": 0.3501,
      "step": 3600
    },
    {
      "epoch": 2.6821312069590433,
      "grad_norm": 0.07102780789136887,
      "learning_rate": 5.589021459431787e-06,
      "loss": 0.3427,
      "step": 3700
    },
    {
      "epoch": 2.7546212395795577,
      "grad_norm": 0.05868164449930191,
      "learning_rate": 3.3364938706757275e-06,
      "loss": 0.3486,
      "step": 3800
    },
    {
      "epoch": 2.8271112722000726,
      "grad_norm": 0.06294699013233185,
      "learning_rate": 1.6548388319239105e-06,
      "loss": 0.3454,
      "step": 3900
    },
    {
      "epoch": 2.899601304820587,
      "grad_norm": 0.06135398522019386,
      "learning_rate": 5.539878130623732e-07,
      "loss": 0.3456,
      "step": 4000
    }
  ],
  "logging_steps": 100,
  "max_steps": 4137,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 1000,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 1.493402810056704e+18,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}