{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.12514377731769036,
  "eval_steps": 50,
  "global_step": 85,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0014722797331492984,
      "grad_norm": 0.26644960045814514,
      "learning_rate": 7.499999999999999e-06,
      "loss": 0.296,
      "step": 1
    },
    {
      "epoch": 0.0014722797331492984,
      "eval_loss": 0.4772031009197235,
      "eval_runtime": 847.2862,
      "eval_samples_per_second": 2.7,
      "eval_steps_per_second": 1.35,
      "step": 1
    },
    {
      "epoch": 0.0029445594662985968,
      "grad_norm": 0.4351513385772705,
      "learning_rate": 1.4999999999999999e-05,
      "loss": 0.408,
      "step": 2
    },
    {
      "epoch": 0.004416839199447895,
      "grad_norm": 0.3042258024215698,
      "learning_rate": 2.2499999999999998e-05,
      "loss": 0.393,
      "step": 3
    },
    {
      "epoch": 0.0058891189325971935,
      "grad_norm": 0.3164433538913727,
      "learning_rate": 2.9999999999999997e-05,
      "loss": 0.409,
      "step": 4
    },
    {
      "epoch": 0.007361398665746492,
      "grad_norm": 0.37835752964019775,
      "learning_rate": 3.75e-05,
      "loss": 0.2635,
      "step": 5
    },
    {
      "epoch": 0.00883367839889579,
      "grad_norm": 0.22859500348567963,
      "learning_rate": 4.4999999999999996e-05,
      "loss": 0.2615,
      "step": 6
    },
    {
      "epoch": 0.010305958132045089,
      "grad_norm": 0.37722575664520264,
      "learning_rate": 5.2499999999999995e-05,
      "loss": 0.296,
      "step": 7
    },
    {
      "epoch": 0.011778237865194387,
      "grad_norm": 0.8506933450698853,
      "learning_rate": 5.9999999999999995e-05,
      "loss": 0.2655,
      "step": 8
    },
    {
      "epoch": 0.013250517598343685,
      "grad_norm": 0.45279720425605774,
      "learning_rate": 6.75e-05,
      "loss": 0.259,
      "step": 9
    },
    {
      "epoch": 0.014722797331492983,
      "grad_norm": 0.18526464700698853,
      "learning_rate": 7.5e-05,
      "loss": 0.2053,
      "step": 10
    },
    {
      "epoch": 0.016195077064642283,
      "grad_norm": 0.20497167110443115,
      "learning_rate": 8.25e-05,
      "loss": 0.1554,
      "step": 11
    },
    {
      "epoch": 0.01766735679779158,
      "grad_norm": 0.14627239108085632,
      "learning_rate": 8.999999999999999e-05,
      "loss": 0.1306,
      "step": 12
    },
    {
      "epoch": 0.01913963653094088,
      "grad_norm": 0.240932434797287,
      "learning_rate": 9.75e-05,
      "loss": 0.1564,
      "step": 13
    },
    {
      "epoch": 0.020611916264090178,
      "grad_norm": 0.3040555715560913,
      "learning_rate": 0.00010499999999999999,
      "loss": 0.1677,
      "step": 14
    },
    {
      "epoch": 0.022084195997239476,
      "grad_norm": 0.4959651231765747,
      "learning_rate": 0.0001125,
      "loss": 0.3867,
      "step": 15
    },
    {
      "epoch": 0.023556475730388774,
      "grad_norm": 0.20291835069656372,
      "learning_rate": 0.00011999999999999999,
      "loss": 0.2207,
      "step": 16
    },
    {
      "epoch": 0.025028755463538072,
      "grad_norm": 0.16518566012382507,
      "learning_rate": 0.00012749999999999998,
      "loss": 0.1309,
      "step": 17
    },
    {
      "epoch": 0.02650103519668737,
      "grad_norm": 0.18677189946174622,
      "learning_rate": 0.000135,
      "loss": 0.181,
      "step": 18
    },
    {
      "epoch": 0.02797331492983667,
      "grad_norm": 0.1576640009880066,
      "learning_rate": 0.0001425,
      "loss": 0.1562,
      "step": 19
    },
    {
      "epoch": 0.029445594662985967,
      "grad_norm": 0.19666102528572083,
      "learning_rate": 0.00015,
      "loss": 0.2439,
      "step": 20
    },
    {
      "epoch": 0.030917874396135265,
      "grad_norm": 0.12512515485286713,
      "learning_rate": 0.00014998857713672935,
      "loss": 0.144,
      "step": 21
    },
    {
      "epoch": 0.03239015412928457,
      "grad_norm": 0.1219751164317131,
      "learning_rate": 0.00014995431202643217,
      "loss": 0.1047,
      "step": 22
    },
    {
      "epoch": 0.033862433862433865,
      "grad_norm": 0.1390693038702011,
      "learning_rate": 0.000149897215106593,
      "loss": 0.1022,
      "step": 23
    },
    {
      "epoch": 0.03533471359558316,
      "grad_norm": 0.19809921085834503,
      "learning_rate": 0.0001498173037694868,
      "loss": 0.24,
      "step": 24
    },
    {
      "epoch": 0.03680699332873246,
      "grad_norm": 0.17452572286128998,
      "learning_rate": 0.0001497146023568809,
      "loss": 0.2497,
      "step": 25
    },
    {
      "epoch": 0.03827927306188176,
      "grad_norm": 0.10941721498966217,
      "learning_rate": 0.00014958914215262048,
      "loss": 0.0914,
      "step": 26
    },
    {
      "epoch": 0.03975155279503106,
      "grad_norm": 0.12793776392936707,
      "learning_rate": 0.00014944096137309914,
      "loss": 0.114,
      "step": 27
    },
    {
      "epoch": 0.041223832528180356,
      "grad_norm": 0.14925755560398102,
      "learning_rate": 0.00014927010515561776,
      "loss": 0.1931,
      "step": 28
    },
    {
      "epoch": 0.042696112261329654,
      "grad_norm": 0.12749770283699036,
      "learning_rate": 0.00014907662554463532,
      "loss": 0.1342,
      "step": 29
    },
    {
      "epoch": 0.04416839199447895,
      "grad_norm": 0.12666098773479462,
      "learning_rate": 0.0001488605814759156,
      "loss": 0.1404,
      "step": 30
    },
    {
      "epoch": 0.04564067172762825,
      "grad_norm": 0.12451935559511185,
      "learning_rate": 0.00014862203875857477,
      "loss": 0.1297,
      "step": 31
    },
    {
      "epoch": 0.04711295146077755,
      "grad_norm": 0.12354013323783875,
      "learning_rate": 0.0001483610700550354,
      "loss": 0.0667,
      "step": 32
    },
    {
      "epoch": 0.048585231193926846,
      "grad_norm": 0.11861127614974976,
      "learning_rate": 0.00014807775485889264,
      "loss": 0.1057,
      "step": 33
    },
    {
      "epoch": 0.050057510927076145,
      "grad_norm": 0.11489235609769821,
      "learning_rate": 0.0001477721794706997,
      "loss": 0.0717,
      "step": 34
    },
    {
      "epoch": 0.05152979066022544,
      "grad_norm": 0.15129899978637695,
      "learning_rate": 0.0001474444369716801,
      "loss": 0.1103,
      "step": 35
    },
    {
      "epoch": 0.05300207039337474,
      "grad_norm": 0.14446967840194702,
      "learning_rate": 0.0001470946271953739,
      "loss": 0.1674,
      "step": 36
    },
    {
      "epoch": 0.05447435012652404,
      "grad_norm": 0.10094312578439713,
      "learning_rate": 0.00014672285669722765,
      "loss": 0.0696,
      "step": 37
    },
    {
      "epoch": 0.05594662985967334,
      "grad_norm": 0.17120350897312164,
      "learning_rate": 0.00014632923872213652,
      "loss": 0.2139,
      "step": 38
    },
    {
      "epoch": 0.057418909592822635,
      "grad_norm": 0.14146435260772705,
      "learning_rate": 0.00014591389316994876,
      "loss": 0.0925,
      "step": 39
    },
    {
      "epoch": 0.058891189325971934,
      "grad_norm": 0.14251448214054108,
      "learning_rate": 0.0001454769465589431,
      "loss": 0.1002,
      "step": 40
    },
    {
      "epoch": 0.06036346905912123,
      "grad_norm": 0.07004090398550034,
      "learning_rate": 0.00014501853198729012,
      "loss": 0.0538,
      "step": 41
    },
    {
      "epoch": 0.06183574879227053,
      "grad_norm": 0.14318852126598358,
      "learning_rate": 0.00014453878909250904,
      "loss": 0.1316,
      "step": 42
    },
    {
      "epoch": 0.06330802852541983,
      "grad_norm": 0.10623105615377426,
      "learning_rate": 0.00014403786400893302,
      "loss": 0.0866,
      "step": 43
    },
    {
      "epoch": 0.06478030825856913,
      "grad_norm": 0.10893028974533081,
      "learning_rate": 0.00014351590932319504,
      "loss": 0.0518,
      "step": 44
    },
    {
      "epoch": 0.06625258799171843,
      "grad_norm": 0.1411529928445816,
      "learning_rate": 0.00014297308402774875,
      "loss": 0.1357,
      "step": 45
    },
    {
      "epoch": 0.06772486772486773,
      "grad_norm": 0.10105417668819427,
      "learning_rate": 0.0001424095534724375,
      "loss": 0.0654,
      "step": 46
    },
    {
      "epoch": 0.06919714745801703,
      "grad_norm": 0.14420634508132935,
      "learning_rate": 0.00014182548931412757,
      "loss": 0.0935,
      "step": 47
    },
    {
      "epoch": 0.07066942719116633,
      "grad_norm": 0.12569449841976166,
      "learning_rate": 0.0001412210694644195,
      "loss": 0.0848,
      "step": 48
    },
    {
      "epoch": 0.07214170692431562,
      "grad_norm": 0.09209802001714706,
      "learning_rate": 0.00014059647803545467,
      "loss": 0.0473,
      "step": 49
    },
    {
      "epoch": 0.07361398665746492,
      "grad_norm": 0.12560804188251495,
      "learning_rate": 0.0001399519052838329,
      "loss": 0.0785,
      "step": 50
    },
    {
      "epoch": 0.07361398665746492,
      "eval_loss": 0.08997488021850586,
      "eval_runtime": 784.5444,
      "eval_samples_per_second": 2.916,
      "eval_steps_per_second": 1.458,
      "step": 50
    },
    {
      "epoch": 0.07508626639061422,
      "grad_norm": 0.15386323630809784,
      "learning_rate": 0.00013928754755265842,
      "loss": 0.1427,
      "step": 51
    },
    {
      "epoch": 0.07655854612376352,
      "grad_norm": 0.12584620714187622,
      "learning_rate": 0.00013860360721173193,
      "loss": 0.0863,
      "step": 52
    },
    {
      "epoch": 0.07803082585691282,
      "grad_norm": 0.11394777148962021,
      "learning_rate": 0.0001379002925959068,
      "loss": 0.0691,
      "step": 53
    },
    {
      "epoch": 0.07950310559006211,
      "grad_norm": 0.10938312113285065,
      "learning_rate": 0.0001371778179416281,
      "loss": 0.0826,
      "step": 54
    },
    {
      "epoch": 0.08097538532321141,
      "grad_norm": 0.12558647990226746,
      "learning_rate": 0.00013643640332167438,
      "loss": 0.0766,
      "step": 55
    },
    {
      "epoch": 0.08244766505636071,
      "grad_norm": 0.10897130519151688,
      "learning_rate": 0.00013567627457812106,
      "loss": 0.062,
      "step": 56
    },
    {
      "epoch": 0.08391994478951001,
      "grad_norm": 0.09030541032552719,
      "learning_rate": 0.00013489766325354695,
      "loss": 0.0393,
      "step": 57
    },
    {
      "epoch": 0.08539222452265931,
      "grad_norm": 0.1143849790096283,
      "learning_rate": 0.00013410080652050412,
      "loss": 0.1219,
      "step": 58
    },
    {
      "epoch": 0.0868645042558086,
      "grad_norm": 0.1283547431230545,
      "learning_rate": 0.0001332859471092728,
      "loss": 0.1195,
      "step": 59
    },
    {
      "epoch": 0.0883367839889579,
      "grad_norm": 0.11828132718801498,
      "learning_rate": 0.00013245333323392333,
      "loss": 0.0925,
      "step": 60
    },
    {
      "epoch": 0.0898090637221072,
      "grad_norm": 0.10878538340330124,
      "learning_rate": 0.0001316032185167079,
      "loss": 0.0542,
      "step": 61
    },
    {
      "epoch": 0.0912813434552565,
      "grad_norm": 0.1320784091949463,
      "learning_rate": 0.00013073586191080457,
      "loss": 0.1005,
      "step": 62
    },
    {
      "epoch": 0.0927536231884058,
      "grad_norm": 0.10936389863491058,
      "learning_rate": 0.00012985152762143778,
      "loss": 0.0526,
      "step": 63
    },
    {
      "epoch": 0.0942259029215551,
      "grad_norm": 0.13538584113121033,
      "learning_rate": 0.00012895048502539882,
      "loss": 0.0956,
      "step": 64
    },
    {
      "epoch": 0.0956981826547044,
      "grad_norm": 0.13087764382362366,
      "learning_rate": 0.00012803300858899104,
      "loss": 0.0795,
      "step": 65
    },
    {
      "epoch": 0.09717046238785369,
      "grad_norm": 0.1554757058620453,
      "learning_rate": 0.0001270993777844248,
      "loss": 0.1455,
      "step": 66
    },
    {
      "epoch": 0.09864274212100299,
      "grad_norm": 0.12131679803133011,
      "learning_rate": 0.0001261498770046874,
      "loss": 0.0625,
      "step": 67
    },
    {
      "epoch": 0.10011502185415229,
      "grad_norm": 0.11470583826303482,
      "learning_rate": 0.00012518479547691435,
      "loss": 0.0903,
      "step": 68
    },
    {
      "epoch": 0.10158730158730159,
      "grad_norm": 0.1023801937699318,
      "learning_rate": 0.00012420442717428804,
      "loss": 0.0845,
      "step": 69
    },
    {
      "epoch": 0.10305958132045089,
      "grad_norm": 0.10249310731887817,
      "learning_rate": 0.00012320907072649044,
      "loss": 0.0539,
      "step": 70
    },
    {
      "epoch": 0.10453186105360018,
      "grad_norm": 0.10111914575099945,
      "learning_rate": 0.0001221990293287378,
      "loss": 0.0424,
      "step": 71
    },
    {
      "epoch": 0.10600414078674948,
      "grad_norm": 0.16136892139911652,
      "learning_rate": 0.00012117461064942435,
      "loss": 0.1277,
      "step": 72
    },
    {
      "epoch": 0.10747642051989878,
      "grad_norm": 0.13022761046886444,
      "learning_rate": 0.00012013612673640363,
      "loss": 0.118,
      "step": 73
    },
    {
      "epoch": 0.10894870025304808,
      "grad_norm": 0.10115568339824677,
      "learning_rate": 0.00011908389392193547,
      "loss": 0.0554,
      "step": 74
    },
    {
      "epoch": 0.11042097998619738,
      "grad_norm": 0.1352306455373764,
      "learning_rate": 0.00011801823272632844,
      "loss": 0.0683,
      "step": 75
    },
    {
      "epoch": 0.11189325971934667,
      "grad_norm": 0.11654029786586761,
      "learning_rate": 0.00011693946776030599,
      "loss": 0.0656,
      "step": 76
    },
    {
      "epoch": 0.11336553945249597,
      "grad_norm": 0.1405310332775116,
      "learning_rate": 0.00011584792762612703,
      "loss": 0.0681,
      "step": 77
    },
    {
      "epoch": 0.11483781918564527,
      "grad_norm": 0.19620081782341003,
      "learning_rate": 0.00011474394481749035,
      "loss": 0.1183,
      "step": 78
    },
    {
      "epoch": 0.11631009891879457,
      "grad_norm": 0.09413562715053558,
      "learning_rate": 0.00011362785561825406,
      "loss": 0.0377,
      "step": 79
    },
    {
      "epoch": 0.11778237865194387,
      "grad_norm": 0.10567747801542282,
      "learning_rate": 0.0001125,
      "loss": 0.0776,
      "step": 80
    },
    {
      "epoch": 0.11925465838509317,
      "grad_norm": 0.15690375864505768,
      "learning_rate": 0.00011136072151847529,
      "loss": 0.0366,
      "step": 81
    },
    {
      "epoch": 0.12072693811824246,
      "grad_norm": 0.09006724506616592,
      "learning_rate": 0.00011021036720894179,
      "loss": 0.0319,
      "step": 82
    },
    {
      "epoch": 0.12219921785139176,
      "grad_norm": 0.1135464459657669,
      "learning_rate": 0.00010904928748046599,
      "loss": 0.0482,
      "step": 83
    },
    {
      "epoch": 0.12367149758454106,
      "grad_norm": 0.09596288949251175,
      "learning_rate": 0.0001078778360091808,
      "loss": 0.0454,
      "step": 84
    },
    {
      "epoch": 0.12514377731769036,
      "grad_norm": 0.17637494206428528,
      "learning_rate": 0.00010669636963055245,
      "loss": 0.1116,
      "step": 85
    }
  ],
  "logging_steps": 1,
  "max_steps": 200,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 17,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 8.995021026974761e+17,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}