{
"best_metric": 11.5,
"best_model_checkpoint": "miner_id_24/checkpoint-50",
"epoch": 0.03374957813027337,
"eval_steps": 50,
"global_step": 200,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.00016874789065136686,
"grad_norm": 1.204833279189188e-05,
"learning_rate": 1.007e-05,
"loss": 23.0,
"step": 1
},
{
"epoch": 0.00016874789065136686,
"eval_loss": 11.5,
"eval_runtime": 31.0185,
"eval_samples_per_second": 80.436,
"eval_steps_per_second": 20.117,
"step": 1
},
{
"epoch": 0.0003374957813027337,
"grad_norm": 1.1431009625084698e-05,
"learning_rate": 2.014e-05,
"loss": 23.0,
"step": 2
},
{
"epoch": 0.0005062436719541006,
"grad_norm": 1.5091093700903002e-05,
"learning_rate": 3.0209999999999997e-05,
"loss": 23.0,
"step": 3
},
{
"epoch": 0.0006749915626054674,
"grad_norm": 1.993294426938519e-05,
"learning_rate": 4.028e-05,
"loss": 23.0,
"step": 4
},
{
"epoch": 0.0008437394532568343,
"grad_norm": 1.4757441022084095e-05,
"learning_rate": 5.035e-05,
"loss": 23.0,
"step": 5
},
{
"epoch": 0.0010124873439082012,
"grad_norm": 1.3547244634537492e-05,
"learning_rate": 6.0419999999999994e-05,
"loss": 23.0,
"step": 6
},
{
"epoch": 0.001181235234559568,
"grad_norm": 1.6674213838996366e-05,
"learning_rate": 7.049e-05,
"loss": 23.0,
"step": 7
},
{
"epoch": 0.0013499831252109348,
"grad_norm": 1.7742673662723973e-05,
"learning_rate": 8.056e-05,
"loss": 23.0,
"step": 8
},
{
"epoch": 0.0015187310158623017,
"grad_norm": 1.2505891390901525e-05,
"learning_rate": 9.062999999999999e-05,
"loss": 23.0,
"step": 9
},
{
"epoch": 0.0016874789065136687,
"grad_norm": 1.3835131539963186e-05,
"learning_rate": 0.0001007,
"loss": 23.0,
"step": 10
},
{
"epoch": 0.0018562267971650355,
"grad_norm": 2.4900897187762894e-05,
"learning_rate": 0.00010017,
"loss": 23.0,
"step": 11
},
{
"epoch": 0.0020249746878164025,
"grad_norm": 1.7070025933207944e-05,
"learning_rate": 9.963999999999999e-05,
"loss": 23.0,
"step": 12
},
{
"epoch": 0.0021937225784677693,
"grad_norm": 2.7361658794688992e-05,
"learning_rate": 9.910999999999999e-05,
"loss": 23.0,
"step": 13
},
{
"epoch": 0.002362470469119136,
"grad_norm": 1.2697471902356483e-05,
"learning_rate": 9.858e-05,
"loss": 23.0,
"step": 14
},
{
"epoch": 0.002531218359770503,
"grad_norm": 1.9702922145370394e-05,
"learning_rate": 9.805e-05,
"loss": 23.0,
"step": 15
},
{
"epoch": 0.0026999662504218697,
"grad_norm": 2.4902374207158573e-05,
"learning_rate": 9.752e-05,
"loss": 23.0,
"step": 16
},
{
"epoch": 0.0028687141410732365,
"grad_norm": 2.721859527810011e-05,
"learning_rate": 9.698999999999999e-05,
"loss": 23.0,
"step": 17
},
{
"epoch": 0.0030374620317246033,
"grad_norm": 1.8586168152978644e-05,
"learning_rate": 9.646e-05,
"loss": 23.0,
"step": 18
},
{
"epoch": 0.00320620992237597,
"grad_norm": 1.872859866125509e-05,
"learning_rate": 9.593e-05,
"loss": 23.0,
"step": 19
},
{
"epoch": 0.0033749578130273373,
"grad_norm": 3.305126665509306e-05,
"learning_rate": 9.539999999999999e-05,
"loss": 23.0,
"step": 20
},
{
"epoch": 0.003543705703678704,
"grad_norm": 2.0460800442378968e-05,
"learning_rate": 9.487e-05,
"loss": 23.0,
"step": 21
},
{
"epoch": 0.003712453594330071,
"grad_norm": 2.6154682927881368e-05,
"learning_rate": 9.434e-05,
"loss": 23.0,
"step": 22
},
{
"epoch": 0.0038812014849814377,
"grad_norm": 2.1470910724019632e-05,
"learning_rate": 9.381e-05,
"loss": 23.0,
"step": 23
},
{
"epoch": 0.004049949375632805,
"grad_norm": 2.203008989454247e-05,
"learning_rate": 9.327999999999999e-05,
"loss": 23.0,
"step": 24
},
{
"epoch": 0.004218697266284171,
"grad_norm": 2.6265855922247283e-05,
"learning_rate": 9.274999999999999e-05,
"loss": 23.0,
"step": 25
},
{
"epoch": 0.004387445156935539,
"grad_norm": 3.531806214596145e-05,
"learning_rate": 9.222e-05,
"loss": 23.0,
"step": 26
},
{
"epoch": 0.004556193047586905,
"grad_norm": 3.6674504372058436e-05,
"learning_rate": 9.169e-05,
"loss": 23.0,
"step": 27
},
{
"epoch": 0.004724940938238272,
"grad_norm": 3.1309191399486735e-05,
"learning_rate": 9.116e-05,
"loss": 23.0,
"step": 28
},
{
"epoch": 0.0048936888288896386,
"grad_norm": 2.5531946448609233e-05,
"learning_rate": 9.062999999999999e-05,
"loss": 23.0,
"step": 29
},
{
"epoch": 0.005062436719541006,
"grad_norm": 3.693735197884962e-05,
"learning_rate": 9.01e-05,
"loss": 23.0,
"step": 30
},
{
"epoch": 0.005231184610192373,
"grad_norm": 3.671792001114227e-05,
"learning_rate": 8.957e-05,
"loss": 23.0,
"step": 31
},
{
"epoch": 0.005399932500843739,
"grad_norm": 2.8840735467383638e-05,
"learning_rate": 8.903999999999999e-05,
"loss": 23.0,
"step": 32
},
{
"epoch": 0.005568680391495107,
"grad_norm": 4.73731524834875e-05,
"learning_rate": 8.850999999999999e-05,
"loss": 23.0,
"step": 33
},
{
"epoch": 0.005737428282146473,
"grad_norm": 4.9730784667190164e-05,
"learning_rate": 8.798e-05,
"loss": 23.0,
"step": 34
},
{
"epoch": 0.00590617617279784,
"grad_norm": 4.867700408794917e-05,
"learning_rate": 8.745e-05,
"loss": 23.0,
"step": 35
},
{
"epoch": 0.006074924063449207,
"grad_norm": 4.8206897190539166e-05,
"learning_rate": 8.692e-05,
"loss": 23.0,
"step": 36
},
{
"epoch": 0.006243671954100574,
"grad_norm": 5.011281609768048e-05,
"learning_rate": 8.638999999999999e-05,
"loss": 23.0,
"step": 37
},
{
"epoch": 0.00641241984475194,
"grad_norm": 8.074403740465641e-05,
"learning_rate": 8.586e-05,
"loss": 23.0,
"step": 38
},
{
"epoch": 0.0065811677354033074,
"grad_norm": 6.544839561684057e-05,
"learning_rate": 8.533e-05,
"loss": 23.0,
"step": 39
},
{
"epoch": 0.006749915626054675,
"grad_norm": 7.167661533458158e-05,
"learning_rate": 8.479999999999999e-05,
"loss": 23.0,
"step": 40
},
{
"epoch": 0.006918663516706041,
"grad_norm": 6.072892210795544e-05,
"learning_rate": 8.427e-05,
"loss": 23.0,
"step": 41
},
{
"epoch": 0.007087411407357408,
"grad_norm": 4.846288720727898e-05,
"learning_rate": 8.374e-05,
"loss": 23.0,
"step": 42
},
{
"epoch": 0.007256159298008775,
"grad_norm": 7.074383756844327e-05,
"learning_rate": 8.321e-05,
"loss": 23.0,
"step": 43
},
{
"epoch": 0.007424907188660142,
"grad_norm": 4.74240405310411e-05,
"learning_rate": 8.268e-05,
"loss": 23.0,
"step": 44
},
{
"epoch": 0.007593655079311508,
"grad_norm": 0.00011488777818158269,
"learning_rate": 8.214999999999999e-05,
"loss": 23.0,
"step": 45
},
{
"epoch": 0.0077624029699628755,
"grad_norm": 9.618495823815465e-05,
"learning_rate": 8.162e-05,
"loss": 23.0,
"step": 46
},
{
"epoch": 0.007931150860614243,
"grad_norm": 7.153845945140347e-05,
"learning_rate": 8.108999999999998e-05,
"loss": 23.0,
"step": 47
},
{
"epoch": 0.00809989875126561,
"grad_norm": 0.00012329178571235389,
"learning_rate": 8.056e-05,
"loss": 23.0,
"step": 48
},
{
"epoch": 0.008268646641916975,
"grad_norm": 9.25509084481746e-05,
"learning_rate": 8.003e-05,
"loss": 23.0,
"step": 49
},
{
"epoch": 0.008437394532568343,
"grad_norm": 0.00014846926205791533,
"learning_rate": 7.95e-05,
"loss": 23.0,
"step": 50
},
{
"epoch": 0.008437394532568343,
"eval_loss": 11.5,
"eval_runtime": 31.0281,
"eval_samples_per_second": 80.411,
"eval_steps_per_second": 20.111,
"step": 50
},
{
"epoch": 0.00860614242321971,
"grad_norm": 3.109794488409534e-05,
"learning_rate": 7.897e-05,
"loss": 23.0,
"step": 51
},
{
"epoch": 0.008774890313871077,
"grad_norm": 4.0770904888631776e-05,
"learning_rate": 7.843999999999999e-05,
"loss": 23.0,
"step": 52
},
{
"epoch": 0.008943638204522443,
"grad_norm": 4.711969813797623e-05,
"learning_rate": 7.790999999999999e-05,
"loss": 23.0,
"step": 53
},
{
"epoch": 0.00911238609517381,
"grad_norm": 5.1224196795374155e-05,
"learning_rate": 7.738e-05,
"loss": 23.0,
"step": 54
},
{
"epoch": 0.009281133985825177,
"grad_norm": 3.525058491504751e-05,
"learning_rate": 7.685e-05,
"loss": 23.0,
"step": 55
},
{
"epoch": 0.009449881876476544,
"grad_norm": 4.9862577725434676e-05,
"learning_rate": 7.632e-05,
"loss": 23.0,
"step": 56
},
{
"epoch": 0.009618629767127912,
"grad_norm": 9.993968706112355e-05,
"learning_rate": 7.578999999999999e-05,
"loss": 23.0,
"step": 57
},
{
"epoch": 0.009787377657779277,
"grad_norm": 4.630363400792703e-05,
"learning_rate": 7.526e-05,
"loss": 23.0,
"step": 58
},
{
"epoch": 0.009956125548430644,
"grad_norm": 3.37816818500869e-05,
"learning_rate": 7.473e-05,
"loss": 23.0,
"step": 59
},
{
"epoch": 0.010124873439082012,
"grad_norm": 7.735610415693372e-05,
"learning_rate": 7.419999999999999e-05,
"loss": 23.0,
"step": 60
},
{
"epoch": 0.010293621329733379,
"grad_norm": 3.4660544770304114e-05,
"learning_rate": 7.367e-05,
"loss": 23.0,
"step": 61
},
{
"epoch": 0.010462369220384746,
"grad_norm": 7.357754657277837e-05,
"learning_rate": 7.314e-05,
"loss": 23.0,
"step": 62
},
{
"epoch": 0.010631117111036112,
"grad_norm": 6.574280996574089e-05,
"learning_rate": 7.261e-05,
"loss": 23.0,
"step": 63
},
{
"epoch": 0.010799865001687479,
"grad_norm": 7.882023783167824e-05,
"learning_rate": 7.208e-05,
"loss": 23.0,
"step": 64
},
{
"epoch": 0.010968612892338846,
"grad_norm": 7.584910781588405e-05,
"learning_rate": 7.154999999999999e-05,
"loss": 23.0,
"step": 65
},
{
"epoch": 0.011137360782990213,
"grad_norm": 8.913804776966572e-05,
"learning_rate": 7.102e-05,
"loss": 23.0,
"step": 66
},
{
"epoch": 0.011306108673641579,
"grad_norm": 6.0102716815890744e-05,
"learning_rate": 7.049e-05,
"loss": 23.0,
"step": 67
},
{
"epoch": 0.011474856564292946,
"grad_norm": 6.130318797659129e-05,
"learning_rate": 6.996e-05,
"loss": 23.0,
"step": 68
},
{
"epoch": 0.011643604454944313,
"grad_norm": 3.667854252853431e-05,
"learning_rate": 6.943e-05,
"loss": 23.0,
"step": 69
},
{
"epoch": 0.01181235234559568,
"grad_norm": 7.211296178866178e-05,
"learning_rate": 6.89e-05,
"loss": 23.0,
"step": 70
},
{
"epoch": 0.011981100236247048,
"grad_norm": 0.0001030169878504239,
"learning_rate": 6.837e-05,
"loss": 23.0,
"step": 71
},
{
"epoch": 0.012149848126898413,
"grad_norm": 0.0001360457536065951,
"learning_rate": 6.784e-05,
"loss": 23.0,
"step": 72
},
{
"epoch": 0.01231859601754978,
"grad_norm": 6.413022492779419e-05,
"learning_rate": 6.730999999999999e-05,
"loss": 23.0,
"step": 73
},
{
"epoch": 0.012487343908201148,
"grad_norm": 0.00011897562217200175,
"learning_rate": 6.678e-05,
"loss": 23.0,
"step": 74
},
{
"epoch": 0.012656091798852515,
"grad_norm": 6.470419612014666e-05,
"learning_rate": 6.625e-05,
"loss": 23.0,
"step": 75
},
{
"epoch": 0.01282483968950388,
"grad_norm": 7.391779217869043e-05,
"learning_rate": 6.572e-05,
"loss": 23.0,
"step": 76
},
{
"epoch": 0.012993587580155248,
"grad_norm": 0.00013688707258552313,
"learning_rate": 6.519e-05,
"loss": 23.0,
"step": 77
},
{
"epoch": 0.013162335470806615,
"grad_norm": 0.00010867404489545152,
"learning_rate": 6.466e-05,
"loss": 23.0,
"step": 78
},
{
"epoch": 0.013331083361457982,
"grad_norm": 7.905341772129759e-05,
"learning_rate": 6.413e-05,
"loss": 23.0,
"step": 79
},
{
"epoch": 0.01349983125210935,
"grad_norm": 7.693654333706945e-05,
"learning_rate": 6.359999999999999e-05,
"loss": 23.0,
"step": 80
},
{
"epoch": 0.013668579142760715,
"grad_norm": 0.00014570896746590734,
"learning_rate": 6.306999999999999e-05,
"loss": 23.0,
"step": 81
},
{
"epoch": 0.013837327033412082,
"grad_norm": 9.294889605371282e-05,
"learning_rate": 6.254000000000001e-05,
"loss": 23.0,
"step": 82
},
{
"epoch": 0.01400607492406345,
"grad_norm": 6.171363202156499e-05,
"learning_rate": 6.201e-05,
"loss": 23.0,
"step": 83
},
{
"epoch": 0.014174822814714817,
"grad_norm": 0.00016772393428254873,
"learning_rate": 6.148e-05,
"loss": 23.0,
"step": 84
},
{
"epoch": 0.014343570705366182,
"grad_norm": 9.405903983861208e-05,
"learning_rate": 6.095e-05,
"loss": 23.0,
"step": 85
},
{
"epoch": 0.01451231859601755,
"grad_norm": 0.00011767326213885099,
"learning_rate": 6.0419999999999994e-05,
"loss": 23.0,
"step": 86
},
{
"epoch": 0.014681066486668917,
"grad_norm": 6.386899622157216e-05,
"learning_rate": 5.988999999999999e-05,
"loss": 23.0,
"step": 87
},
{
"epoch": 0.014849814377320284,
"grad_norm": 0.00018532607646193355,
"learning_rate": 5.9359999999999994e-05,
"loss": 23.0,
"step": 88
},
{
"epoch": 0.015018562267971651,
"grad_norm": 7.472938159480691e-05,
"learning_rate": 5.8830000000000004e-05,
"loss": 23.0,
"step": 89
},
{
"epoch": 0.015187310158623017,
"grad_norm": 0.00012555929424706846,
"learning_rate": 5.83e-05,
"loss": 23.0,
"step": 90
},
{
"epoch": 0.015356058049274384,
"grad_norm": 9.398732800036669e-05,
"learning_rate": 5.777e-05,
"loss": 23.0,
"step": 91
},
{
"epoch": 0.015524805939925751,
"grad_norm": 0.00015068292850628495,
"learning_rate": 5.7239999999999994e-05,
"loss": 23.0,
"step": 92
},
{
"epoch": 0.015693553830577116,
"grad_norm": 0.0002185801713494584,
"learning_rate": 5.671e-05,
"loss": 23.0,
"step": 93
},
{
"epoch": 0.015862301721228485,
"grad_norm": 7.917396578704938e-05,
"learning_rate": 5.6179999999999994e-05,
"loss": 23.0,
"step": 94
},
{
"epoch": 0.01603104961187985,
"grad_norm": 0.00011329845438012853,
"learning_rate": 5.5650000000000004e-05,
"loss": 23.0,
"step": 95
},
{
"epoch": 0.01619979750253122,
"grad_norm": 0.00015697274648118764,
"learning_rate": 5.512e-05,
"loss": 23.0,
"step": 96
},
{
"epoch": 0.016368545393182585,
"grad_norm": 0.00018705315596889704,
"learning_rate": 5.459e-05,
"loss": 23.0,
"step": 97
},
{
"epoch": 0.01653729328383395,
"grad_norm": 0.00033546588383615017,
"learning_rate": 5.406e-05,
"loss": 23.0,
"step": 98
},
{
"epoch": 0.01670604117448532,
"grad_norm": 0.00029555062064900994,
"learning_rate": 5.353e-05,
"loss": 23.0,
"step": 99
},
{
"epoch": 0.016874789065136685,
"grad_norm": 0.00036910088965669274,
"learning_rate": 5.2999999999999994e-05,
"loss": 23.0,
"step": 100
},
{
"epoch": 0.016874789065136685,
"eval_loss": 11.5,
"eval_runtime": 31.1248,
"eval_samples_per_second": 80.161,
"eval_steps_per_second": 20.048,
"step": 100
},
{
"epoch": 0.017043536955788054,
"grad_norm": 7.682123396079987e-05,
"learning_rate": 5.246999999999999e-05,
"loss": 23.0,
"step": 101
},
{
"epoch": 0.01721228484643942,
"grad_norm": 7.189896132331342e-05,
"learning_rate": 5.194e-05,
"loss": 23.0,
"step": 102
},
{
"epoch": 0.017381032737090785,
"grad_norm": 6.011120058246888e-05,
"learning_rate": 5.141e-05,
"loss": 23.0,
"step": 103
},
{
"epoch": 0.017549780627742154,
"grad_norm": 6.253538595046848e-05,
"learning_rate": 5.088e-05,
"loss": 23.0,
"step": 104
},
{
"epoch": 0.01771852851839352,
"grad_norm": 5.117239925311878e-05,
"learning_rate": 5.035e-05,
"loss": 23.0,
"step": 105
},
{
"epoch": 0.017887276409044885,
"grad_norm": 8.745577360969037e-05,
"learning_rate": 4.9819999999999994e-05,
"loss": 23.0,
"step": 106
},
{
"epoch": 0.018056024299696254,
"grad_norm": 0.00013368716463446617,
"learning_rate": 4.929e-05,
"loss": 23.0,
"step": 107
},
{
"epoch": 0.01822477219034762,
"grad_norm": 9.314952330896631e-05,
"learning_rate": 4.876e-05,
"loss": 23.0,
"step": 108
},
{
"epoch": 0.01839352008099899,
"grad_norm": 0.0001433063152944669,
"learning_rate": 4.823e-05,
"loss": 23.0,
"step": 109
},
{
"epoch": 0.018562267971650354,
"grad_norm": 5.847116335644387e-05,
"learning_rate": 4.7699999999999994e-05,
"loss": 23.0,
"step": 110
},
{
"epoch": 0.01873101586230172,
"grad_norm": 8.442316902801394e-05,
"learning_rate": 4.717e-05,
"loss": 23.0,
"step": 111
},
{
"epoch": 0.01889976375295309,
"grad_norm": 0.00013739695714320987,
"learning_rate": 4.6639999999999994e-05,
"loss": 23.0,
"step": 112
},
{
"epoch": 0.019068511643604454,
"grad_norm": 0.00012455254909582436,
"learning_rate": 4.611e-05,
"loss": 23.0,
"step": 113
},
{
"epoch": 0.019237259534255823,
"grad_norm": 5.286196028464474e-05,
"learning_rate": 4.558e-05,
"loss": 23.0,
"step": 114
},
{
"epoch": 0.01940600742490719,
"grad_norm": 0.00013484164082910866,
"learning_rate": 4.505e-05,
"loss": 23.0,
"step": 115
},
{
"epoch": 0.019574755315558554,
"grad_norm": 0.00012526288628578186,
"learning_rate": 4.4519999999999994e-05,
"loss": 23.0,
"step": 116
},
{
"epoch": 0.019743503206209923,
"grad_norm": 0.00020170622156001627,
"learning_rate": 4.399e-05,
"loss": 23.0,
"step": 117
},
{
"epoch": 0.01991225109686129,
"grad_norm": 8.426374552072957e-05,
"learning_rate": 4.346e-05,
"loss": 23.0,
"step": 118
},
{
"epoch": 0.020080998987512658,
"grad_norm": 0.00027226656675338745,
"learning_rate": 4.293e-05,
"loss": 23.0,
"step": 119
},
{
"epoch": 0.020249746878164023,
"grad_norm": 0.00015929453365970403,
"learning_rate": 4.2399999999999994e-05,
"loss": 23.0,
"step": 120
},
{
"epoch": 0.02041849476881539,
"grad_norm": 0.00010509557614568621,
"learning_rate": 4.187e-05,
"loss": 23.0,
"step": 121
},
{
"epoch": 0.020587242659466758,
"grad_norm": 0.0001663622388150543,
"learning_rate": 4.134e-05,
"loss": 23.0,
"step": 122
},
{
"epoch": 0.020755990550118123,
"grad_norm": 0.0001297971175517887,
"learning_rate": 4.081e-05,
"loss": 23.0,
"step": 123
},
{
"epoch": 0.020924738440769492,
"grad_norm": 0.0001566116261528805,
"learning_rate": 4.028e-05,
"loss": 23.0,
"step": 124
},
{
"epoch": 0.021093486331420858,
"grad_norm": 0.00023184812744148076,
"learning_rate": 3.975e-05,
"loss": 23.0,
"step": 125
},
{
"epoch": 0.021262234222072223,
"grad_norm": 0.0001358414883725345,
"learning_rate": 3.9219999999999994e-05,
"loss": 23.0,
"step": 126
},
{
"epoch": 0.021430982112723592,
"grad_norm": 0.00010918582847807556,
"learning_rate": 3.869e-05,
"loss": 23.0,
"step": 127
},
{
"epoch": 0.021599730003374958,
"grad_norm": 0.0001907457917695865,
"learning_rate": 3.816e-05,
"loss": 23.0,
"step": 128
},
{
"epoch": 0.021768477894026323,
"grad_norm": 0.00012014003732474521,
"learning_rate": 3.763e-05,
"loss": 23.0,
"step": 129
},
{
"epoch": 0.021937225784677692,
"grad_norm": 0.00033094800892286,
"learning_rate": 3.7099999999999994e-05,
"loss": 23.0,
"step": 130
},
{
"epoch": 0.022105973675329058,
"grad_norm": 0.0001388384698657319,
"learning_rate": 3.657e-05,
"loss": 23.0,
"step": 131
},
{
"epoch": 0.022274721565980427,
"grad_norm": 0.00015554000856354833,
"learning_rate": 3.604e-05,
"loss": 23.0,
"step": 132
},
{
"epoch": 0.022443469456631792,
"grad_norm": 0.0001033740772982128,
"learning_rate": 3.551e-05,
"loss": 23.0,
"step": 133
},
{
"epoch": 0.022612217347283158,
"grad_norm": 0.00017761386698111892,
"learning_rate": 3.498e-05,
"loss": 23.0,
"step": 134
},
{
"epoch": 0.022780965237934526,
"grad_norm": 0.0001914931199280545,
"learning_rate": 3.445e-05,
"loss": 23.0,
"step": 135
},
{
"epoch": 0.022949713128585892,
"grad_norm": 0.0001077301349141635,
"learning_rate": 3.392e-05,
"loss": 23.0,
"step": 136
},
{
"epoch": 0.02311846101923726,
"grad_norm": 0.00014837745402473956,
"learning_rate": 3.339e-05,
"loss": 23.0,
"step": 137
},
{
"epoch": 0.023287208909888626,
"grad_norm": 0.00013769303041044623,
"learning_rate": 3.286e-05,
"loss": 23.0,
"step": 138
},
{
"epoch": 0.023455956800539992,
"grad_norm": 0.00011395317415008321,
"learning_rate": 3.233e-05,
"loss": 23.0,
"step": 139
},
{
"epoch": 0.02362470469119136,
"grad_norm": 0.0002430876047583297,
"learning_rate": 3.1799999999999994e-05,
"loss": 23.0,
"step": 140
},
{
"epoch": 0.023793452581842726,
"grad_norm": 0.00016095052706077695,
"learning_rate": 3.1270000000000004e-05,
"loss": 23.0,
"step": 141
},
{
"epoch": 0.023962200472494095,
"grad_norm": 0.00023896507627796382,
"learning_rate": 3.074e-05,
"loss": 23.0,
"step": 142
},
{
"epoch": 0.02413094836314546,
"grad_norm": 0.0002305787493241951,
"learning_rate": 3.0209999999999997e-05,
"loss": 23.0,
"step": 143
},
{
"epoch": 0.024299696253796826,
"grad_norm": 0.00044004127266816795,
"learning_rate": 2.9679999999999997e-05,
"loss": 23.0,
"step": 144
},
{
"epoch": 0.024468444144448195,
"grad_norm": 0.0004754870606120676,
"learning_rate": 2.915e-05,
"loss": 23.0,
"step": 145
},
{
"epoch": 0.02463719203509956,
"grad_norm": 0.00039452221244573593,
"learning_rate": 2.8619999999999997e-05,
"loss": 23.0,
"step": 146
},
{
"epoch": 0.02480593992575093,
"grad_norm": 0.0002141989243682474,
"learning_rate": 2.8089999999999997e-05,
"loss": 23.0,
"step": 147
},
{
"epoch": 0.024974687816402295,
"grad_norm": 0.00038773109554313123,
"learning_rate": 2.756e-05,
"loss": 23.0,
"step": 148
},
{
"epoch": 0.02514343570705366,
"grad_norm": 0.00020906349527649581,
"learning_rate": 2.703e-05,
"loss": 23.0,
"step": 149
},
{
"epoch": 0.02531218359770503,
"grad_norm": 0.00029203391750343144,
"learning_rate": 2.6499999999999997e-05,
"loss": 23.0,
"step": 150
},
{
"epoch": 0.02531218359770503,
"eval_loss": 11.5,
"eval_runtime": 31.0725,
"eval_samples_per_second": 80.296,
"eval_steps_per_second": 20.082,
"step": 150
},
{
"epoch": 0.025480931488356395,
"grad_norm": 8.79631916177459e-05,
"learning_rate": 2.597e-05,
"loss": 23.0,
"step": 151
},
{
"epoch": 0.02564967937900776,
"grad_norm": 0.00010261518764309585,
"learning_rate": 2.544e-05,
"loss": 23.0,
"step": 152
},
{
"epoch": 0.02581842726965913,
"grad_norm": 0.00018040614668279886,
"learning_rate": 2.4909999999999997e-05,
"loss": 23.0,
"step": 153
},
{
"epoch": 0.025987175160310495,
"grad_norm": 0.00017360891797579825,
"learning_rate": 2.438e-05,
"loss": 23.0,
"step": 154
},
{
"epoch": 0.026155923050961864,
"grad_norm": 7.864111830713227e-05,
"learning_rate": 2.3849999999999997e-05,
"loss": 23.0,
"step": 155
},
{
"epoch": 0.02632467094161323,
"grad_norm": 0.0001662558934185654,
"learning_rate": 2.3319999999999997e-05,
"loss": 23.0,
"step": 156
},
{
"epoch": 0.026493418832264595,
"grad_norm": 0.00011379940406186506,
"learning_rate": 2.279e-05,
"loss": 23.0,
"step": 157
},
{
"epoch": 0.026662166722915964,
"grad_norm": 0.00011387839913368225,
"learning_rate": 2.2259999999999997e-05,
"loss": 23.0,
"step": 158
},
{
"epoch": 0.02683091461356733,
"grad_norm": 0.0001579077506903559,
"learning_rate": 2.173e-05,
"loss": 23.0,
"step": 159
},
{
"epoch": 0.0269996625042187,
"grad_norm": 0.00015800283290445805,
"learning_rate": 2.1199999999999997e-05,
"loss": 23.0,
"step": 160
},
{
"epoch": 0.027168410394870064,
"grad_norm": 7.721775182290003e-05,
"learning_rate": 2.067e-05,
"loss": 23.0,
"step": 161
},
{
"epoch": 0.02733715828552143,
"grad_norm": 6.047181159374304e-05,
"learning_rate": 2.014e-05,
"loss": 23.0,
"step": 162
},
{
"epoch": 0.0275059061761728,
"grad_norm": 7.38459566491656e-05,
"learning_rate": 1.9609999999999997e-05,
"loss": 23.0,
"step": 163
},
{
"epoch": 0.027674654066824164,
"grad_norm": 0.00011628502397798002,
"learning_rate": 1.908e-05,
"loss": 23.0,
"step": 164
},
{
"epoch": 0.027843401957475533,
"grad_norm": 0.00011288729729130864,
"learning_rate": 1.8549999999999997e-05,
"loss": 23.0,
"step": 165
},
{
"epoch": 0.0280121498481269,
"grad_norm": 9.810877236304805e-05,
"learning_rate": 1.802e-05,
"loss": 23.0,
"step": 166
},
{
"epoch": 0.028180897738778264,
"grad_norm": 0.00023592002980876714,
"learning_rate": 1.749e-05,
"loss": 23.0,
"step": 167
},
{
"epoch": 0.028349645629429633,
"grad_norm": 0.00020759565813932568,
"learning_rate": 1.696e-05,
"loss": 23.0,
"step": 168
},
{
"epoch": 0.028518393520081,
"grad_norm": 0.0002557075640652329,
"learning_rate": 1.643e-05,
"loss": 23.0,
"step": 169
},
{
"epoch": 0.028687141410732364,
"grad_norm": 0.0001290784712182358,
"learning_rate": 1.5899999999999997e-05,
"loss": 23.0,
"step": 170
},
{
"epoch": 0.028855889301383733,
"grad_norm": 8.08468321338296e-05,
"learning_rate": 1.537e-05,
"loss": 23.0,
"step": 171
},
{
"epoch": 0.0290246371920351,
"grad_norm": 0.00019896485900972039,
"learning_rate": 1.4839999999999999e-05,
"loss": 23.0,
"step": 172
},
{
"epoch": 0.029193385082686468,
"grad_norm": 0.0002528635086491704,
"learning_rate": 1.4309999999999999e-05,
"loss": 23.0,
"step": 173
},
{
"epoch": 0.029362132973337833,
"grad_norm": 0.00024162382760550827,
"learning_rate": 1.378e-05,
"loss": 23.0,
"step": 174
},
{
"epoch": 0.0295308808639892,
"grad_norm": 0.00018234840536024421,
"learning_rate": 1.3249999999999999e-05,
"loss": 23.0,
"step": 175
},
{
"epoch": 0.029699628754640568,
"grad_norm": 0.00025969985290430486,
"learning_rate": 1.272e-05,
"loss": 23.0,
"step": 176
},
{
"epoch": 0.029868376645291933,
"grad_norm": 0.0002434734342386946,
"learning_rate": 1.219e-05,
"loss": 23.0,
"step": 177
},
{
"epoch": 0.030037124535943302,
"grad_norm": 0.00023295178834814578,
"learning_rate": 1.1659999999999998e-05,
"loss": 23.0,
"step": 178
},
{
"epoch": 0.030205872426594668,
"grad_norm": 9.934210538631305e-05,
"learning_rate": 1.1129999999999998e-05,
"loss": 23.0,
"step": 179
},
{
"epoch": 0.030374620317246033,
"grad_norm": 0.0003217124321963638,
"learning_rate": 1.0599999999999998e-05,
"loss": 23.0,
"step": 180
},
{
"epoch": 0.030543368207897402,
"grad_norm": 0.00017936142103280872,
"learning_rate": 1.007e-05,
"loss": 23.0,
"step": 181
},
{
"epoch": 0.030712116098548767,
"grad_norm": 0.00024236104218289256,
"learning_rate": 9.54e-06,
"loss": 23.0,
"step": 182
},
{
"epoch": 0.030880863989200136,
"grad_norm": 0.00013197006774134934,
"learning_rate": 9.01e-06,
"loss": 23.0,
"step": 183
},
{
"epoch": 0.031049611879851502,
"grad_norm": 0.00025421561440452933,
"learning_rate": 8.48e-06,
"loss": 23.0,
"step": 184
},
{
"epoch": 0.031218359770502867,
"grad_norm": 0.00021938957797829062,
"learning_rate": 7.949999999999998e-06,
"loss": 23.0,
"step": 185
},
{
"epoch": 0.03138710766115423,
"grad_norm": 0.00020342513744253665,
"learning_rate": 7.419999999999999e-06,
"loss": 23.0,
"step": 186
},
{
"epoch": 0.0315558555518056,
"grad_norm": 0.00019843995687551796,
"learning_rate": 6.89e-06,
"loss": 23.0,
"step": 187
},
{
"epoch": 0.03172460344245697,
"grad_norm": 0.0004309473733883351,
"learning_rate": 6.36e-06,
"loss": 23.0,
"step": 188
},
{
"epoch": 0.03189335133310833,
"grad_norm": 0.00014857637870591134,
"learning_rate": 5.829999999999999e-06,
"loss": 23.0,
"step": 189
},
{
"epoch": 0.0320620992237597,
"grad_norm": 0.00024400238180533051,
"learning_rate": 5.299999999999999e-06,
"loss": 23.0,
"step": 190
},
{
"epoch": 0.03223084711441107,
"grad_norm": 0.0002994404057972133,
"learning_rate": 4.77e-06,
"loss": 23.0,
"step": 191
},
{
"epoch": 0.03239959500506244,
"grad_norm": 0.00024177887826226652,
"learning_rate": 4.24e-06,
"loss": 23.0,
"step": 192
},
{
"epoch": 0.0325683428957138,
"grad_norm": 0.00022951627033762634,
"learning_rate": 3.7099999999999996e-06,
"loss": 23.0,
"step": 193
},
{
"epoch": 0.03273709078636517,
"grad_norm": 0.00020361649512778968,
"learning_rate": 3.18e-06,
"loss": 23.0,
"step": 194
},
{
"epoch": 0.03290583867701654,
"grad_norm": 0.00028880671015940607,
"learning_rate": 2.6499999999999996e-06,
"loss": 23.0,
"step": 195
},
{
"epoch": 0.0330745865676679,
"grad_norm": 0.0002678855962585658,
"learning_rate": 2.12e-06,
"loss": 23.0,
"step": 196
},
{
"epoch": 0.03324333445831927,
"grad_norm": 0.00022941327188163996,
"learning_rate": 1.59e-06,
"loss": 23.0,
"step": 197
},
{
"epoch": 0.03341208234897064,
"grad_norm": 0.00020409403077792376,
"learning_rate": 1.06e-06,
"loss": 23.0,
"step": 198
},
{
"epoch": 0.033580830239622,
"grad_norm": 0.0004483718948904425,
"learning_rate": 5.3e-07,
"loss": 23.0,
"step": 199
},
{
"epoch": 0.03374957813027337,
"grad_norm": 0.00058153917780146,
"learning_rate": 0.0,
"loss": 23.0,
"step": 200
},
{
"epoch": 0.03374957813027337,
"eval_loss": 11.5,
"eval_runtime": 31.0274,
"eval_samples_per_second": 80.413,
"eval_steps_per_second": 20.111,
"step": 200
}
],
"logging_steps": 1,
"max_steps": 200,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 50,
"stateful_callbacks": {
"EarlyStoppingCallback": {
"args": {
"early_stopping_patience": 5,
"early_stopping_threshold": 0.0
},
"attributes": {
"early_stopping_patience_counter": 3
}
},
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 8039615692800.0,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}