{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.12974375608173858,
  "eval_steps": 25,
  "global_step": 50,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0025948751216347712,
      "grad_norm": 108.15426635742188,
      "learning_rate": 3.3333333333333335e-05,
      "loss": 86.6731,
      "step": 1
    },
    {
      "epoch": 0.0025948751216347712,
      "eval_loss": 11.188285827636719,
      "eval_runtime": 77.1484,
      "eval_samples_per_second": 4.213,
      "eval_steps_per_second": 2.113,
      "step": 1
    },
    {
      "epoch": 0.0051897502432695424,
      "grad_norm": 107.09851837158203,
      "learning_rate": 6.666666666666667e-05,
      "loss": 86.5966,
      "step": 2
    },
    {
      "epoch": 0.007784625364904314,
      "grad_norm": 139.3520965576172,
      "learning_rate": 0.0001,
      "loss": 86.4316,
      "step": 3
    },
    {
      "epoch": 0.010379500486539085,
      "grad_norm": 96.97356414794922,
      "learning_rate": 9.99524110790929e-05,
      "loss": 76.9083,
      "step": 4
    },
    {
      "epoch": 0.012974375608173857,
      "grad_norm": 59.65872573852539,
      "learning_rate": 9.980973490458728e-05,
      "loss": 69.4136,
      "step": 5
    },
    {
      "epoch": 0.015569250729808628,
      "grad_norm": 71.96248626708984,
      "learning_rate": 9.957224306869053e-05,
      "loss": 53.4392,
      "step": 6
    },
    {
      "epoch": 0.018164125851443398,
      "grad_norm": 63.12929153442383,
      "learning_rate": 9.924038765061042e-05,
      "loss": 43.0964,
      "step": 7
    },
    {
      "epoch": 0.02075900097307817,
      "grad_norm": 66.42699432373047,
      "learning_rate": 9.881480035599667e-05,
      "loss": 30.6773,
      "step": 8
    },
    {
      "epoch": 0.02335387609471294,
      "grad_norm": 76.70232391357422,
      "learning_rate": 9.829629131445342e-05,
      "loss": 18.9185,
      "step": 9
    },
    {
      "epoch": 0.025948751216347713,
      "grad_norm": 56.63690185546875,
      "learning_rate": 9.768584753741134e-05,
      "loss": 7.7619,
      "step": 10
    },
    {
      "epoch": 0.028543626337982485,
      "grad_norm": 59.534576416015625,
      "learning_rate": 9.698463103929542e-05,
      "loss": 5.8875,
      "step": 11
    },
    {
      "epoch": 0.031138501459617256,
      "grad_norm": 53.30662155151367,
      "learning_rate": 9.619397662556435e-05,
      "loss": 3.8854,
      "step": 12
    },
    {
      "epoch": 0.03373337658125203,
      "grad_norm": 40.798580169677734,
      "learning_rate": 9.53153893518325e-05,
      "loss": 3.7457,
      "step": 13
    },
    {
      "epoch": 0.036328251702886796,
      "grad_norm": 33.860923767089844,
      "learning_rate": 9.435054165891109e-05,
      "loss": 2.6321,
      "step": 14
    },
    {
      "epoch": 0.03892312682452157,
      "grad_norm": 34.119388580322266,
      "learning_rate": 9.330127018922194e-05,
      "loss": 1.8475,
      "step": 15
    },
    {
      "epoch": 0.04151800194615634,
      "grad_norm": 58.588966369628906,
      "learning_rate": 9.21695722906443e-05,
      "loss": 2.3496,
      "step": 16
    },
    {
      "epoch": 0.044112877067791115,
      "grad_norm": 62.44844055175781,
      "learning_rate": 9.09576022144496e-05,
      "loss": 3.7984,
      "step": 17
    },
    {
      "epoch": 0.04670775218942588,
      "grad_norm": 31.974451065063477,
      "learning_rate": 8.966766701456177e-05,
      "loss": 1.5873,
      "step": 18
    },
    {
      "epoch": 0.04930262731106066,
      "grad_norm": 75.9893569946289,
      "learning_rate": 8.83022221559489e-05,
      "loss": 3.1131,
      "step": 19
    },
    {
      "epoch": 0.051897502432695426,
      "grad_norm": 117.58428192138672,
      "learning_rate": 8.68638668405062e-05,
      "loss": 4.7777,
      "step": 20
    },
    {
      "epoch": 0.054492377554330194,
      "grad_norm": 62.850887298583984,
      "learning_rate": 8.535533905932738e-05,
      "loss": 2.7957,
      "step": 21
    },
    {
      "epoch": 0.05708725267596497,
      "grad_norm": 26.412378311157227,
      "learning_rate": 8.377951038078302e-05,
      "loss": 0.9778,
      "step": 22
    },
    {
      "epoch": 0.05968212779759974,
      "grad_norm": 15.020607948303223,
      "learning_rate": 8.213938048432697e-05,
      "loss": 0.5826,
      "step": 23
    },
    {
      "epoch": 0.06227700291923451,
      "grad_norm": 36.43248748779297,
      "learning_rate": 8.043807145043604e-05,
      "loss": 2.9239,
      "step": 24
    },
    {
      "epoch": 0.06487187804086929,
      "grad_norm": 25.667245864868164,
      "learning_rate": 7.86788218175523e-05,
      "loss": 0.7288,
      "step": 25
    },
    {
      "epoch": 0.06487187804086929,
      "eval_loss": 0.16820967197418213,
      "eval_runtime": 78.3083,
      "eval_samples_per_second": 4.15,
      "eval_steps_per_second": 2.082,
      "step": 25
    },
    {
      "epoch": 0.06746675316250406,
      "grad_norm": 15.00766658782959,
      "learning_rate": 7.68649804173412e-05,
      "loss": 0.4404,
      "step": 26
    },
    {
      "epoch": 0.07006162828413882,
      "grad_norm": 13.661465644836426,
      "learning_rate": 7.500000000000001e-05,
      "loss": 0.4163,
      "step": 27
    },
    {
      "epoch": 0.07265650340577359,
      "grad_norm": 19.321388244628906,
      "learning_rate": 7.308743066175172e-05,
      "loss": 0.6755,
      "step": 28
    },
    {
      "epoch": 0.07525137852740837,
      "grad_norm": 22.267826080322266,
      "learning_rate": 7.113091308703498e-05,
      "loss": 0.9669,
      "step": 29
    },
    {
      "epoch": 0.07784625364904314,
      "grad_norm": 7.646611213684082,
      "learning_rate": 6.91341716182545e-05,
      "loss": 0.3168,
      "step": 30
    },
    {
      "epoch": 0.08044112877067791,
      "grad_norm": 36.85639190673828,
      "learning_rate": 6.710100716628344e-05,
      "loss": 1.2373,
      "step": 31
    },
    {
      "epoch": 0.08303600389231268,
      "grad_norm": 10.440677642822266,
      "learning_rate": 6.503528997521366e-05,
      "loss": 1.3768,
      "step": 32
    },
    {
      "epoch": 0.08563087901394745,
      "grad_norm": 38.025997161865234,
      "learning_rate": 6.294095225512603e-05,
      "loss": 0.7102,
      "step": 33
    },
    {
      "epoch": 0.08822575413558223,
      "grad_norm": 10.360336303710938,
      "learning_rate": 6.0821980696905146e-05,
      "loss": 1.5797,
      "step": 34
    },
    {
      "epoch": 0.090820629257217,
      "grad_norm": 19.017549514770508,
      "learning_rate": 5.868240888334653e-05,
      "loss": 1.0114,
      "step": 35
    },
    {
      "epoch": 0.09341550437885177,
      "grad_norm": 44.58971405029297,
      "learning_rate": 5.6526309611002594e-05,
      "loss": 4.6878,
      "step": 36
    },
    {
      "epoch": 0.09601037950048653,
      "grad_norm": 30.749067306518555,
      "learning_rate": 5.435778713738292e-05,
      "loss": 1.1462,
      "step": 37
    },
    {
      "epoch": 0.09860525462212132,
      "grad_norm": 14.544421195983887,
      "learning_rate": 5.218096936826681e-05,
      "loss": 0.3816,
      "step": 38
    },
    {
      "epoch": 0.10120012974375608,
      "grad_norm": 20.164777755737305,
      "learning_rate": 5e-05,
      "loss": 1.1987,
      "step": 39
    },
    {
      "epoch": 0.10379500486539085,
      "grad_norm": 9.724178314208984,
      "learning_rate": 4.781903063173321e-05,
      "loss": 0.3555,
      "step": 40
    },
    {
      "epoch": 0.10638987998702562,
      "grad_norm": 24.466894149780273,
      "learning_rate": 4.564221286261709e-05,
      "loss": 0.6404,
      "step": 41
    },
    {
      "epoch": 0.10898475510866039,
      "grad_norm": 7.151639938354492,
      "learning_rate": 4.347369038899744e-05,
      "loss": 0.3062,
      "step": 42
    },
    {
      "epoch": 0.11157963023029517,
      "grad_norm": 3.3515334129333496,
      "learning_rate": 4.131759111665349e-05,
      "loss": 0.0569,
      "step": 43
    },
    {
      "epoch": 0.11417450535192994,
      "grad_norm": 24.75587272644043,
      "learning_rate": 3.917801930309486e-05,
      "loss": 0.9388,
      "step": 44
    },
    {
      "epoch": 0.11676938047356471,
      "grad_norm": 7.871204376220703,
      "learning_rate": 3.705904774487396e-05,
      "loss": 0.1559,
      "step": 45
    },
    {
      "epoch": 0.11936425559519948,
      "grad_norm": 24.41685676574707,
      "learning_rate": 3.4964710024786354e-05,
      "loss": 0.894,
      "step": 46
    },
    {
      "epoch": 0.12195913071683426,
      "grad_norm": 28.60738182067871,
      "learning_rate": 3.289899283371657e-05,
      "loss": 1.8821,
      "step": 47
    },
    {
      "epoch": 0.12455400583846903,
      "grad_norm": 29.086261749267578,
      "learning_rate": 3.086582838174551e-05,
      "loss": 1.6304,
      "step": 48
    },
    {
      "epoch": 0.1271488809601038,
      "grad_norm": 23.895931243896484,
      "learning_rate": 2.886908691296504e-05,
      "loss": 0.7597,
      "step": 49
    },
    {
      "epoch": 0.12974375608173858,
      "grad_norm": 53.810604095458984,
      "learning_rate": 2.6912569338248315e-05,
      "loss": 2.9703,
      "step": 50
    },
    {
      "epoch": 0.12974375608173858,
      "eval_loss": 0.1682109385728836,
      "eval_runtime": 78.312,
      "eval_samples_per_second": 4.15,
      "eval_steps_per_second": 2.081,
      "step": 50
    }
  ],
  "logging_steps": 1,
  "max_steps": 75,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 25,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 6.57394539429888e+16,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}