{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.06194507536650836,
  "eval_steps": 25,
  "global_step": 75,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0008259343382201115,
      "grad_norm": 1677.547119140625,
      "learning_rate": 3.3333333333333335e-05,
      "loss": 103.096,
      "step": 1
    },
    {
      "epoch": 0.0008259343382201115,
      "eval_loss": 13.492105484008789,
      "eval_runtime": 244.2801,
      "eval_samples_per_second": 4.176,
      "eval_steps_per_second": 2.088,
      "step": 1
    },
    {
      "epoch": 0.001651868676440223,
      "grad_norm": 1696.8392333984375,
      "learning_rate": 6.666666666666667e-05,
      "loss": 105.2538,
      "step": 2
    },
    {
      "epoch": 0.0024778030146603344,
      "grad_norm": 941.0369873046875,
      "learning_rate": 0.0001,
      "loss": 66.4773,
      "step": 3
    },
    {
      "epoch": 0.003303737352880446,
      "grad_norm": 699.8087768554688,
      "learning_rate": 9.99524110790929e-05,
      "loss": 29.6233,
      "step": 4
    },
    {
      "epoch": 0.0041296716911005575,
      "grad_norm": 504.3501892089844,
      "learning_rate": 9.980973490458728e-05,
      "loss": 9.9401,
      "step": 5
    },
    {
      "epoch": 0.004955606029320669,
      "grad_norm": 1.2597764730453491,
      "learning_rate": 9.957224306869053e-05,
      "loss": 0.0057,
      "step": 6
    },
    {
      "epoch": 0.005781540367540781,
      "grad_norm": 0.005221178755164146,
      "learning_rate": 9.924038765061042e-05,
      "loss": 0.0,
      "step": 7
    },
    {
      "epoch": 0.006607474705760892,
      "grad_norm": 0.026343591511249542,
      "learning_rate": 9.881480035599667e-05,
      "loss": 0.0001,
      "step": 8
    },
    {
      "epoch": 0.007433409043981004,
      "grad_norm": 0.6239492297172546,
      "learning_rate": 9.829629131445342e-05,
      "loss": 0.0002,
      "step": 9
    },
    {
      "epoch": 0.008259343382201115,
      "grad_norm": 0.001214036368764937,
      "learning_rate": 9.768584753741134e-05,
      "loss": 0.0,
      "step": 10
    },
    {
      "epoch": 0.009085277720421227,
      "grad_norm": 0.0005943904398009181,
      "learning_rate": 9.698463103929542e-05,
      "loss": 0.0,
      "step": 11
    },
    {
      "epoch": 0.009911212058641337,
      "grad_norm": 0.00028382017626427114,
      "learning_rate": 9.619397662556435e-05,
      "loss": 0.0,
      "step": 12
    },
    {
      "epoch": 0.01073714639686145,
      "grad_norm": 0.00030877735116519034,
      "learning_rate": 9.53153893518325e-05,
      "loss": 0.0,
      "step": 13
    },
    {
      "epoch": 0.011563080735081561,
      "grad_norm": 0.00028087597456760705,
      "learning_rate": 9.435054165891109e-05,
      "loss": 0.0,
      "step": 14
    },
    {
      "epoch": 0.012389015073301672,
      "grad_norm": 0.00024960265727713704,
      "learning_rate": 9.330127018922194e-05,
      "loss": 0.0,
      "step": 15
    },
    {
      "epoch": 0.013214949411521784,
      "grad_norm": 0.0001821557671064511,
      "learning_rate": 9.21695722906443e-05,
      "loss": 0.0,
      "step": 16
    },
    {
      "epoch": 0.014040883749741896,
      "grad_norm": 0.00019730778876692057,
      "learning_rate": 9.09576022144496e-05,
      "loss": 0.0,
      "step": 17
    },
    {
      "epoch": 0.014866818087962008,
      "grad_norm": 0.0001254954986507073,
      "learning_rate": 8.966766701456177e-05,
      "loss": 0.0,
      "step": 18
    },
    {
      "epoch": 0.015692752426182118,
      "grad_norm": 0.00014692585682496428,
      "learning_rate": 8.83022221559489e-05,
      "loss": 0.0,
      "step": 19
    },
    {
      "epoch": 0.01651868676440223,
      "grad_norm": 0.00016185363347176462,
      "learning_rate": 8.68638668405062e-05,
      "loss": 0.0,
      "step": 20
    },
    {
      "epoch": 0.017344621102622342,
      "grad_norm": 0.00012402540596667677,
      "learning_rate": 8.535533905932738e-05,
      "loss": 0.0,
      "step": 21
    },
    {
      "epoch": 0.018170555440842454,
      "grad_norm": 8.475293725496158e-05,
      "learning_rate": 8.377951038078302e-05,
      "loss": 0.0,
      "step": 22
    },
    {
      "epoch": 0.018996489779062563,
      "grad_norm": 7.959387585287914e-05,
      "learning_rate": 8.213938048432697e-05,
      "loss": 0.0,
      "step": 23
    },
    {
      "epoch": 0.019822424117282675,
      "grad_norm": 7.412952982122079e-05,
      "learning_rate": 8.043807145043604e-05,
      "loss": 0.0,
      "step": 24
    },
    {
      "epoch": 0.020648358455502787,
      "grad_norm": 5.231930845184252e-05,
      "learning_rate": 7.86788218175523e-05,
      "loss": 0.0,
      "step": 25
    },
    {
      "epoch": 0.020648358455502787,
      "eval_loss": 4.6826649224840367e-08,
      "eval_runtime": 246.3015,
      "eval_samples_per_second": 4.141,
      "eval_steps_per_second": 2.071,
      "step": 25
    },
    {
      "epoch": 0.0214742927937229,
      "grad_norm": 6.69602959533222e-05,
      "learning_rate": 7.68649804173412e-05,
      "loss": 0.0,
      "step": 26
    },
    {
      "epoch": 0.02230022713194301,
      "grad_norm": 5.6196640798589215e-05,
      "learning_rate": 7.500000000000001e-05,
      "loss": 0.0,
      "step": 27
    },
    {
      "epoch": 0.023126161470163123,
      "grad_norm": 4.406289372127503e-05,
      "learning_rate": 7.308743066175172e-05,
      "loss": 0.0,
      "step": 28
    },
    {
      "epoch": 0.023952095808383235,
      "grad_norm": 6.054918412701227e-05,
      "learning_rate": 7.113091308703498e-05,
      "loss": 0.0,
      "step": 29
    },
    {
      "epoch": 0.024778030146603344,
      "grad_norm": 3.9682545320829377e-05,
      "learning_rate": 6.91341716182545e-05,
      "loss": 0.0,
      "step": 30
    },
    {
      "epoch": 0.025603964484823456,
      "grad_norm": 4.490738865570165e-05,
      "learning_rate": 6.710100716628344e-05,
      "loss": 0.0,
      "step": 31
    },
    {
      "epoch": 0.026429898823043568,
      "grad_norm": 3.491883762762882e-05,
      "learning_rate": 6.503528997521366e-05,
      "loss": 0.0,
      "step": 32
    },
    {
      "epoch": 0.02725583316126368,
      "grad_norm": 3.7224541301839054e-05,
      "learning_rate": 6.294095225512603e-05,
      "loss": 0.0,
      "step": 33
    },
    {
      "epoch": 0.02808176749948379,
      "grad_norm": 3.5606222809292376e-05,
      "learning_rate": 6.0821980696905146e-05,
      "loss": 0.0,
      "step": 34
    },
    {
      "epoch": 0.028907701837703904,
      "grad_norm": 3.647877383627929e-05,
      "learning_rate": 5.868240888334653e-05,
      "loss": 0.0,
      "step": 35
    },
    {
      "epoch": 0.029733636175924016,
      "grad_norm": 4.453373549040407e-05,
      "learning_rate": 5.6526309611002594e-05,
      "loss": 0.0,
      "step": 36
    },
    {
      "epoch": 0.030559570514144124,
      "grad_norm": 3.978102540713735e-05,
      "learning_rate": 5.435778713738292e-05,
      "loss": 0.0,
      "step": 37
    },
    {
      "epoch": 0.031385504852364236,
      "grad_norm": 3.958147135563195e-05,
      "learning_rate": 5.218096936826681e-05,
      "loss": 0.0,
      "step": 38
    },
    {
      "epoch": 0.03221143919058435,
      "grad_norm": 4.5364879042608663e-05,
      "learning_rate": 5e-05,
      "loss": 0.0,
      "step": 39
    },
    {
      "epoch": 0.03303737352880446,
      "grad_norm": 4.673544754041359e-05,
      "learning_rate": 4.781903063173321e-05,
      "loss": 0.0,
      "step": 40
    },
    {
      "epoch": 0.03386330786702457,
      "grad_norm": 4.447162427823059e-05,
      "learning_rate": 4.564221286261709e-05,
      "loss": 0.0,
      "step": 41
    },
    {
      "epoch": 0.034689242205244684,
      "grad_norm": 4.427820385899395e-05,
      "learning_rate": 4.347369038899744e-05,
      "loss": 0.0,
      "step": 42
    },
    {
      "epoch": 0.035515176543464796,
      "grad_norm": 4.0053564589470625e-05,
      "learning_rate": 4.131759111665349e-05,
      "loss": 0.0,
      "step": 43
    },
    {
      "epoch": 0.03634111088168491,
      "grad_norm": 4.3088726670248434e-05,
      "learning_rate": 3.917801930309486e-05,
      "loss": 0.0,
      "step": 44
    },
    {
      "epoch": 0.03716704521990502,
      "grad_norm": 4.5698117901338264e-05,
      "learning_rate": 3.705904774487396e-05,
      "loss": 0.0,
      "step": 45
    },
    {
      "epoch": 0.037992979558125126,
      "grad_norm": 5.060286639491096e-05,
      "learning_rate": 3.4964710024786354e-05,
      "loss": 0.0,
      "step": 46
    },
    {
      "epoch": 0.03881891389634524,
      "grad_norm": 5.1584192988229915e-05,
      "learning_rate": 3.289899283371657e-05,
      "loss": 0.0,
      "step": 47
    },
    {
      "epoch": 0.03964484823456535,
      "grad_norm": 5.470534597407095e-05,
      "learning_rate": 3.086582838174551e-05,
      "loss": 0.0,
      "step": 48
    },
    {
      "epoch": 0.04047078257278546,
      "grad_norm": 5.4673917475156486e-05,
      "learning_rate": 2.886908691296504e-05,
      "loss": 0.0,
      "step": 49
    },
    {
      "epoch": 0.041296716911005574,
      "grad_norm": 5.439592860057019e-05,
      "learning_rate": 2.6912569338248315e-05,
      "loss": 0.0,
      "step": 50
    },
    {
      "epoch": 0.041296716911005574,
      "eval_loss": 2.423142397844913e-08,
      "eval_runtime": 245.9876,
      "eval_samples_per_second": 4.147,
      "eval_steps_per_second": 2.073,
      "step": 50
    },
    {
      "epoch": 0.042122651249225686,
      "grad_norm": 0.00018684551469050348,
      "learning_rate": 2.500000000000001e-05,
      "loss": 0.0,
      "step": 51
    },
    {
      "epoch": 0.0429485855874458,
      "grad_norm": 0.000440300878835842,
      "learning_rate": 2.3135019582658802e-05,
      "loss": 0.0,
      "step": 52
    },
    {
      "epoch": 0.04377451992566591,
      "grad_norm": 0.0004225766460876912,
      "learning_rate": 2.132117818244771e-05,
      "loss": 0.0,
      "step": 53
    },
    {
      "epoch": 0.04460045426388602,
      "grad_norm": 0.0001671666104812175,
      "learning_rate": 1.9561928549563968e-05,
      "loss": 0.0,
      "step": 54
    },
    {
      "epoch": 0.045426388602106134,
      "grad_norm": 0.0016776153352111578,
      "learning_rate": 1.7860619515673033e-05,
      "loss": 0.0,
      "step": 55
    },
    {
      "epoch": 0.046252322940326246,
      "grad_norm": 5.6094853789545596e-05,
      "learning_rate": 1.622048961921699e-05,
      "loss": 0.0,
      "step": 56
    },
    {
      "epoch": 0.04707825727854636,
      "grad_norm": 6.723932892782614e-05,
      "learning_rate": 1.4644660940672627e-05,
      "loss": 0.0,
      "step": 57
    },
    {
      "epoch": 0.04790419161676647,
      "grad_norm": 0.00029996901866979897,
      "learning_rate": 1.3136133159493802e-05,
      "loss": 0.0,
      "step": 58
    },
    {
      "epoch": 0.04873012595498658,
      "grad_norm": 5.901329132029787e-05,
      "learning_rate": 1.1697777844051105e-05,
      "loss": 0.0,
      "step": 59
    },
    {
      "epoch": 0.04955606029320669,
      "grad_norm": 0.0002947987522929907,
      "learning_rate": 1.0332332985438248e-05,
      "loss": 0.0,
      "step": 60
    },
    {
      "epoch": 0.0503819946314268,
      "grad_norm": 4.6376018872251734e-05,
      "learning_rate": 9.042397785550405e-06,
      "loss": 0.0,
      "step": 61
    },
    {
      "epoch": 0.05120792896964691,
      "grad_norm": 4.3167357944184914e-05,
      "learning_rate": 7.830427709355725e-06,
      "loss": 0.0,
      "step": 62
    },
    {
      "epoch": 0.05203386330786702,
      "grad_norm": 0.000724351208191365,
      "learning_rate": 6.698729810778065e-06,
      "loss": 0.0,
      "step": 63
    },
    {
      "epoch": 0.052859797646087135,
      "grad_norm": 3.387459219084121e-05,
      "learning_rate": 5.649458341088915e-06,
      "loss": 0.0,
      "step": 64
    },
    {
      "epoch": 0.05368573198430725,
      "grad_norm": 4.283706221031025e-05,
      "learning_rate": 4.684610648167503e-06,
      "loss": 0.0,
      "step": 65
    },
    {
      "epoch": 0.05451166632252736,
      "grad_norm": 3.590101550798863e-05,
      "learning_rate": 3.8060233744356633e-06,
      "loss": 0.0,
      "step": 66
    },
    {
      "epoch": 0.05533760066074747,
      "grad_norm": 3.176596146658994e-05,
      "learning_rate": 3.0153689607045845e-06,
      "loss": 0.0,
      "step": 67
    },
    {
      "epoch": 0.05616353499896758,
      "grad_norm": 4.798637019121088e-05,
      "learning_rate": 2.314152462588659e-06,
      "loss": 0.0,
      "step": 68
    },
    {
      "epoch": 0.056989469337187695,
      "grad_norm": 4.3243224354228005e-05,
      "learning_rate": 1.70370868554659e-06,
      "loss": 0.0,
      "step": 69
    },
    {
      "epoch": 0.05781540367540781,
      "grad_norm": 3.807907341979444e-05,
      "learning_rate": 1.1851996440033319e-06,
      "loss": 0.0,
      "step": 70
    },
    {
      "epoch": 0.05864133801362792,
      "grad_norm": 3.945823846152052e-05,
      "learning_rate": 7.596123493895991e-07,
      "loss": 0.0,
      "step": 71
    },
    {
      "epoch": 0.05946727235184803,
      "grad_norm": 3.7366255128290504e-05,
      "learning_rate": 4.277569313094809e-07,
      "loss": 0.0,
      "step": 72
    },
    {
      "epoch": 0.06029320669006814,
      "grad_norm": 3.241099329898134e-05,
      "learning_rate": 1.9026509541272275e-07,
      "loss": 0.0,
      "step": 73
    },
    {
      "epoch": 0.06111914102828825,
      "grad_norm": 3.987809031968936e-05,
      "learning_rate": 4.7588920907110094e-08,
      "loss": 0.0,
      "step": 74
    },
    {
      "epoch": 0.06194507536650836,
      "grad_norm": 3.887756975018419e-05,
      "learning_rate": 0.0,
      "loss": 0.0,
      "step": 75
    },
    {
      "epoch": 0.06194507536650836,
      "eval_loss": 1.59335264982019e-08,
      "eval_runtime": 245.9135,
      "eval_samples_per_second": 4.148,
      "eval_steps_per_second": 2.074,
      "step": 75
    }
  ],
  "logging_steps": 1,
  "max_steps": 75,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 25,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 1.0626483425692877e+17,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}