{
  "best_metric": 0.35135135135135137,
  "best_model_checkpoint": "vivit-b-16x2-kinetics400-finetuned-ucf101-subset-without-pretrained/checkpoint-2100",
  "epoch": 9.1,
  "eval_steps": 500,
  "global_step": 3000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {"epoch": 0.0, "learning_rate": 1.6666666666666667e-06, "loss": 6.0146, "step": 10},
    {"epoch": 0.01, "learning_rate": 3.3333333333333333e-06, "loss": 5.6079, "step": 20},
    {"epoch": 0.01, "learning_rate": 5e-06, "loss": 5.4694, "step": 30},
    {"epoch": 0.01, "learning_rate": 6.666666666666667e-06, "loss": 4.7757, "step": 40},
    {"epoch": 0.02, "learning_rate": 8.333333333333334e-06, "loss": 4.2847, "step": 50},
    {"epoch": 0.02, "learning_rate": 1e-05, "loss": 3.6053, "step": 60},
    {"epoch": 0.02, "learning_rate": 1.1666666666666668e-05, "loss": 3.3222, "step": 70},
    {"epoch": 0.03, "learning_rate": 1.3333333333333333e-05, "loss": 4.0703, "step": 80},
    {"epoch": 0.03, "learning_rate": 1.5e-05, "loss": 2.4623, "step": 90},
    {"epoch": 0.03, "learning_rate": 1.6666666666666667e-05, "loss": 4.2615, "step": 100},
    {"epoch": 0.04, "learning_rate": 1.8333333333333333e-05, "loss": 3.0162, "step": 110},
    {"epoch": 0.04, "learning_rate": 2e-05, "loss": 2.8954, "step": 120},
    {"epoch": 0.04, "learning_rate": 2.1666666666666667e-05, "loss": 2.3622, "step": 130},
    {"epoch": 0.05, "learning_rate": 2.3333333333333336e-05, "loss": 2.9892, "step": 140},
    {"epoch": 0.05, "learning_rate": 2.5e-05, "loss": 2.4459, "step": 150},
    {"epoch": 0.05, "learning_rate": 2.6666666666666667e-05, "loss": 2.2578, "step": 160},
    {"epoch": 0.06, "learning_rate": 2.8333333333333335e-05, "loss": 3.7852, "step": 170},
    {"epoch": 0.06, "learning_rate": 3e-05, "loss": 3.472, "step": 180},
    {"epoch": 0.06, "learning_rate": 3.1666666666666666e-05, "loss": 2.0967, "step": 190},
    {"epoch": 0.07, "learning_rate": 3.3333333333333335e-05, "loss": 2.8889, "step": 200},
    {"epoch": 0.07, "learning_rate": 3.5e-05, "loss": 3.5945, "step": 210},
    {"epoch": 0.07, "learning_rate": 3.6666666666666666e-05, "loss": 2.4664, "step": 220},
    {"epoch": 0.08, "learning_rate": 3.8333333333333334e-05, "loss": 3.461, "step": 230},
    {"epoch": 0.08, "learning_rate": 4e-05, "loss": 3.1201, "step": 240},
    {"epoch": 0.08, "learning_rate": 4.166666666666667e-05, "loss": 2.6063, "step": 250},
    {"epoch": 0.09, "learning_rate": 4.3333333333333334e-05, "loss": 1.8298, "step": 260},
    {"epoch": 0.09, "learning_rate": 4.5e-05, "loss": 4.0171, "step": 270},
    {"epoch": 0.09, "learning_rate": 4.666666666666667e-05, "loss": 2.9952, "step": 280},
    {"epoch": 0.1, "learning_rate": 4.8333333333333334e-05, "loss": 2.5919, "step": 290},
    {"epoch": 0.1, "learning_rate": 5e-05, "loss": 3.2964, "step": 300},
    {"epoch": 0.1, "eval_accuracy": 0.10810810810810811, "eval_loss": 2.4748547077178955, "eval_runtime": 7.888, "eval_samples_per_second": 4.691, "eval_steps_per_second": 4.691, "step": 300},
    {"epoch": 1.0, "learning_rate": 4.981481481481482e-05, "loss": 2.1251, "step": 310},
    {"epoch": 1.01, "learning_rate": 4.962962962962963e-05, "loss": 2.7957, "step": 320},
    {"epoch": 1.01, "learning_rate": 4.9444444444444446e-05, "loss": 3.0016, "step": 330},
    {"epoch": 1.01, "learning_rate": 4.925925925925926e-05, "loss": 2.9296, "step": 340},
    {"epoch": 1.02, "learning_rate": 4.9074074074074075e-05, "loss": 2.5054, "step": 350},
    {"epoch": 1.02, "learning_rate": 4.888888888888889e-05, "loss": 2.6099, "step": 360},
    {"epoch": 1.02, "learning_rate": 4.8703703703703704e-05, "loss": 2.3462, "step": 370},
    {"epoch": 1.03, "learning_rate": 4.851851851851852e-05, "loss": 2.5099, "step": 380},
    {"epoch": 1.03, "learning_rate": 4.8333333333333334e-05, "loss": 2.9488, "step": 390},
    {"epoch": 1.03, "learning_rate": 4.814814814814815e-05, "loss": 3.1838, "step": 400},
    {"epoch": 1.04, "learning_rate": 4.796296296296296e-05, "loss": 2.2918, "step": 410},
    {"epoch": 1.04, "learning_rate": 4.7777777777777784e-05, "loss": 2.622, "step": 420},
    {"epoch": 1.04, "learning_rate": 4.759259259259259e-05, "loss": 2.1096, "step": 430},
    {"epoch": 1.05, "learning_rate": 4.740740740740741e-05, "loss": 2.3935, "step": 440},
    {"epoch": 1.05, "learning_rate": 4.722222222222222e-05, "loss": 2.2316, "step": 450},
    {"epoch": 1.05, "learning_rate": 4.703703703703704e-05, "loss": 2.476, "step": 460},
    {"epoch": 1.06, "learning_rate": 4.685185185185185e-05, "loss": 2.5298, "step": 470},
    {"epoch": 1.06, "learning_rate": 4.666666666666667e-05, "loss": 2.5783, "step": 480},
    {"epoch": 1.06, "learning_rate": 4.648148148148148e-05, "loss": 2.4782, "step": 490},
    {"epoch": 1.07, "learning_rate": 4.62962962962963e-05, "loss": 2.3204, "step": 500},
    {"epoch": 1.07, "learning_rate": 4.6111111111111115e-05, "loss": 2.6603, "step": 510},
    {"epoch": 1.07, "learning_rate": 4.592592592592593e-05, "loss": 2.1841, "step": 520},
    {"epoch": 1.08, "learning_rate": 4.5740740740740745e-05, "loss": 2.1368, "step": 530},
    {"epoch": 1.08, "learning_rate": 4.555555555555556e-05, "loss": 2.2092, "step": 540},
    {"epoch": 1.08, "learning_rate": 4.5370370370370374e-05, "loss": 2.6375, "step": 550},
    {"epoch": 1.09, "learning_rate": 4.518518518518519e-05, "loss": 2.0985, "step": 560},
    {"epoch": 1.09, "learning_rate": 4.5e-05, "loss": 2.6354, "step": 570},
    {"epoch": 1.09, "learning_rate": 4.481481481481482e-05, "loss": 2.0626, "step": 580},
    {"epoch": 1.1, "learning_rate": 4.462962962962963e-05, "loss": 1.8691, "step": 590},
    {"epoch": 1.1, "learning_rate": 4.4444444444444447e-05, "loss": 3.1346, "step": 600},
    {"epoch": 1.1, "eval_accuracy": 0.10810810810810811, "eval_loss": 2.4779040813446045, "eval_runtime": 7.7884, "eval_samples_per_second": 4.751, "eval_steps_per_second": 4.751, "step": 600},
    {"epoch": 2.0, "learning_rate": 4.425925925925926e-05, "loss": 2.8789, "step": 610},
    {"epoch": 2.01, "learning_rate": 4.4074074074074076e-05, "loss": 2.1999, "step": 620},
    {"epoch": 2.01, "learning_rate": 4.388888888888889e-05, "loss": 2.4515, "step": 630},
    {"epoch": 2.01, "learning_rate": 4.3703703703703705e-05, "loss": 2.0012, "step": 640},
    {"epoch": 2.02, "learning_rate": 4.351851851851852e-05, "loss": 2.0266, "step": 650},
    {"epoch": 2.02, "learning_rate": 4.3333333333333334e-05, "loss": 1.6561, "step": 660},
    {"epoch": 2.02, "learning_rate": 4.314814814814815e-05, "loss": 2.2148, "step": 670},
    {"epoch": 2.03, "learning_rate": 4.296296296296296e-05, "loss": 2.4418, "step": 680},
    {"epoch": 2.03, "learning_rate": 4.277777777777778e-05, "loss": 2.9789, "step": 690},
    {"epoch": 2.03, "learning_rate": 4.259259259259259e-05, "loss": 1.9881, "step": 700},
    {"epoch": 2.04, "learning_rate": 4.240740740740741e-05, "loss": 2.2087, "step": 710},
    {"epoch": 2.04, "learning_rate": 4.222222222222222e-05, "loss": 2.1964, "step": 720},
    {"epoch": 2.04, "learning_rate": 4.203703703703704e-05, "loss": 2.5474, "step": 730},
    {"epoch": 2.05, "learning_rate": 4.185185185185185e-05, "loss": 2.1304, "step": 740},
    {"epoch": 2.05, "learning_rate": 4.166666666666667e-05, "loss": 1.8592, "step": 750},
    {"epoch": 2.05, "learning_rate": 4.148148148148148e-05, "loss": 2.8711, "step": 760},
    {"epoch": 2.06, "learning_rate": 4.12962962962963e-05, "loss": 2.469, "step": 770},
    {"epoch": 2.06, "learning_rate": 4.111111111111111e-05, "loss": 1.9158, "step": 780},
    {"epoch": 2.06, "learning_rate": 4.092592592592593e-05, "loss": 2.0509, "step": 790},
    {"epoch": 2.07, "learning_rate": 4.074074074074074e-05, "loss": 1.7933, "step": 800},
    {"epoch": 2.07, "learning_rate": 4.055555555555556e-05, "loss": 2.0508, "step": 810},
    {"epoch": 2.07, "learning_rate": 4.0370370370370374e-05, "loss": 2.1783, "step": 820},
    {"epoch": 2.08, "learning_rate": 4.018518518518519e-05, "loss": 2.4269, "step": 830},
    {"epoch": 2.08, "learning_rate": 4e-05, "loss": 2.5345, "step": 840},
    {"epoch": 2.08, "learning_rate": 3.981481481481482e-05, "loss": 2.0431, "step": 850},
    {"epoch": 2.09, "learning_rate": 3.962962962962963e-05, "loss": 2.3893, "step": 860},
    {"epoch": 2.09, "learning_rate": 3.944444444444445e-05, "loss": 2.3061, "step": 870},
    {"epoch": 2.09, "learning_rate": 3.925925925925926e-05, "loss": 1.8541, "step": 880},
    {"epoch": 2.1, "learning_rate": 3.9074074074074076e-05, "loss": 2.2115, "step": 890},
    {"epoch": 2.1, "learning_rate": 3.888888888888889e-05, "loss": 1.9796, "step": 900},
    {"epoch": 2.1, "eval_accuracy": 0.2972972972972973, "eval_loss": 2.4338538646698, "eval_runtime": 7.9053, "eval_samples_per_second": 4.68, "eval_steps_per_second": 4.68, "step": 900},
    {"epoch": 3.0, "learning_rate": 3.8703703703703705e-05, "loss": 1.8704, "step": 910},
    {"epoch": 3.01, "learning_rate": 3.851851851851852e-05, "loss": 1.8479, "step": 920},
    {"epoch": 3.01, "learning_rate": 3.8333333333333334e-05, "loss": 1.6709, "step": 930},
    {"epoch": 3.01, "learning_rate": 3.814814814814815e-05, "loss": 1.9238, "step": 940},
    {"epoch": 3.02, "learning_rate": 3.7962962962962964e-05, "loss": 2.2086, "step": 950},
    {"epoch": 3.02, "learning_rate": 3.777777777777778e-05, "loss": 2.0035, "step": 960},
    {"epoch": 3.02, "learning_rate": 3.759259259259259e-05, "loss": 1.959, "step": 970},
    {"epoch": 3.03, "learning_rate": 3.740740740740741e-05, "loss": 1.736, "step": 980},
    {"epoch": 3.03, "learning_rate": 3.722222222222222e-05, "loss": 2.4448, "step": 990},
    {"epoch": 3.03, "learning_rate": 3.7037037037037037e-05, "loss": 2.1386, "step": 1000},
    {"epoch": 3.04, "learning_rate": 3.685185185185185e-05, "loss": 2.0454, "step": 1010},
    {"epoch": 3.04, "learning_rate": 3.6666666666666666e-05, "loss": 2.6412, "step": 1020},
    {"epoch": 3.04, "learning_rate": 3.648148148148148e-05, "loss": 2.0792, "step": 1030},
    {"epoch": 3.05, "learning_rate": 3.62962962962963e-05, "loss": 2.531, "step": 1040},
    {"epoch": 3.05, "learning_rate": 3.611111111111111e-05, "loss": 1.9005, "step": 1050},
    {"epoch": 3.05, "learning_rate": 3.592592592592593e-05, "loss": 2.1969, "step": 1060},
    {"epoch": 3.06, "learning_rate": 3.574074074074074e-05, "loss": 1.8413, "step": 1070},
    {"epoch": 3.06, "learning_rate": 3.555555555555556e-05, "loss": 2.0399, "step": 1080},
    {"epoch": 3.06, "learning_rate": 3.537037037037037e-05, "loss": 1.9236, "step": 1090},
    {"epoch": 3.07, "learning_rate": 3.518518518518519e-05, "loss": 1.4469, "step": 1100},
    {"epoch": 3.07, "learning_rate": 3.5e-05, "loss": 1.9591, "step": 1110},
    {"epoch": 3.07, "learning_rate": 3.481481481481482e-05, "loss": 2.4234, "step": 1120},
    {"epoch": 3.08, "learning_rate": 3.4629629629629626e-05, "loss": 2.0327, "step": 1130},
    {"epoch": 3.08, "learning_rate": 3.444444444444445e-05, "loss": 2.8836, "step": 1140},
    {"epoch": 3.08, "learning_rate": 3.425925925925926e-05, "loss": 2.1492, "step": 1150},
    {"epoch": 3.09, "learning_rate": 3.4074074074074077e-05, "loss": 1.9895, "step": 1160},
    {"epoch": 3.09, "learning_rate": 3.388888888888889e-05, "loss": 2.1062, "step": 1170},
    {"epoch": 3.09, "learning_rate": 3.3703703703703706e-05, "loss": 2.2514, "step": 1180},
    {"epoch": 3.1, "learning_rate": 3.351851851851852e-05, "loss": 1.5223, "step": 1190},
    {"epoch": 3.1, "learning_rate": 3.3333333333333335e-05, "loss": 1.8692, "step": 1200},
    {"epoch": 3.1, "eval_accuracy": 0.32432432432432434, "eval_loss": 2.177396535873413, "eval_runtime": 7.9365, "eval_samples_per_second": 4.662, "eval_steps_per_second": 4.662, "step": 1200},
    {"epoch": 4.0, "learning_rate": 3.314814814814815e-05, "loss": 1.8137, "step": 1210},
    {"epoch": 4.01, "learning_rate": 3.2962962962962964e-05, "loss": 1.8109, "step": 1220},
    {"epoch": 4.01, "learning_rate": 3.277777777777778e-05, "loss": 1.4493, "step": 1230},
    {"epoch": 4.01, "learning_rate": 3.25925925925926e-05, "loss": 1.6365, "step": 1240},
    {"epoch": 4.02, "learning_rate": 3.240740740740741e-05, "loss": 2.534, "step": 1250},
    {"epoch": 4.02, "learning_rate": 3.222222222222223e-05, "loss": 2.5825, "step": 1260},
    {"epoch": 4.02, "learning_rate": 3.203703703703704e-05, "loss": 2.208, "step": 1270},
    {"epoch": 4.03, "learning_rate": 3.185185185185185e-05, "loss": 1.6943, "step": 1280},
    {"epoch": 4.03, "learning_rate": 3.1666666666666666e-05, "loss": 1.9221, "step": 1290},
    {"epoch": 4.03, "learning_rate": 3.148148148148148e-05, "loss": 1.3294, "step": 1300},
    {"epoch": 4.04, "learning_rate": 3.1296296296296295e-05, "loss": 2.6418, "step": 1310},
    {"epoch": 4.04, "learning_rate": 3.111111111111111e-05, "loss": 2.0143, "step": 1320},
    {"epoch": 4.04, "learning_rate": 3.0925925925925924e-05, "loss": 1.5268, "step": 1330},
    {"epoch": 4.05, "learning_rate": 3.074074074074074e-05, "loss": 2.2358, "step": 1340},
    {"epoch": 4.05, "learning_rate": 3.055555555555556e-05, "loss": 2.1382, "step": 1350},
    {"epoch": 4.05, "learning_rate": 3.037037037037037e-05, "loss": 2.4677, "step": 1360},
    {"epoch": 4.06, "learning_rate": 3.018518518518519e-05, "loss": 1.0908, "step": 1370},
    {"epoch": 4.06, "learning_rate": 3e-05, "loss": 2.109, "step": 1380},
    {"epoch": 4.06, "learning_rate": 2.981481481481482e-05, "loss": 3.2783, "step": 1390},
    {"epoch": 4.07, "learning_rate": 2.962962962962963e-05, "loss": 1.6895, "step": 1400},
    {"epoch": 4.07, "learning_rate": 2.9444444444444448e-05, "loss": 2.4235, "step": 1410},
    {"epoch": 4.07, "learning_rate": 2.925925925925926e-05, "loss": 2.7382, "step": 1420},
    {"epoch": 4.08, "learning_rate": 2.9074074074074077e-05, "loss": 2.0283, "step": 1430},
    {"epoch": 4.08, "learning_rate": 2.8888888888888888e-05, "loss": 2.1473, "step": 1440},
    {"epoch": 4.08, "learning_rate": 2.8703703703703706e-05, "loss": 2.0384, "step": 1450},
    {"epoch": 4.09, "learning_rate": 2.851851851851852e-05, "loss": 1.7009, "step": 1460},
    {"epoch": 4.09, "learning_rate": 2.8333333333333335e-05, "loss": 1.6367, "step": 1470},
    {"epoch": 4.09, "learning_rate": 2.814814814814815e-05, "loss": 1.9183, "step": 1480},
    {"epoch": 4.1, "learning_rate": 2.7962962962962965e-05, "loss": 2.9585, "step": 1490},
    {"epoch": 4.1, "learning_rate": 2.777777777777778e-05, "loss": 1.3617, "step": 1500},
    {"epoch": 4.1, "eval_accuracy": 0.32432432432432434, "eval_loss": 2.161275625228882, "eval_runtime": 7.8706, "eval_samples_per_second": 4.701, "eval_steps_per_second": 4.701, "step": 1500},
    {"epoch": 5.0, "learning_rate": 2.7592592592592594e-05, "loss": 2.0505, "step": 1510},
    {"epoch": 5.01, "learning_rate": 2.7407407407407408e-05, "loss": 1.6757, "step": 1520},
    {"epoch": 5.01, "learning_rate": 2.7222222222222223e-05, "loss": 1.791, "step": 1530},
    {"epoch": 5.01, "learning_rate": 2.7037037037037037e-05, "loss": 1.8723, "step": 1540},
    {"epoch": 5.02, "learning_rate": 2.6851851851851855e-05, "loss": 2.2865, "step": 1550},
    {"epoch": 5.02, "learning_rate": 2.6666666666666667e-05, "loss": 1.5064, "step": 1560},
    {"epoch": 5.02, "learning_rate": 2.6481481481481485e-05, "loss": 1.462, "step": 1570},
    {"epoch": 5.03, "learning_rate": 2.6296296296296296e-05, "loss": 1.7543, "step": 1580},
    {"epoch": 5.03, "learning_rate": 2.6111111111111114e-05, "loss": 1.2975, "step": 1590},
    {"epoch": 5.03, "learning_rate": 2.5925925925925925e-05, "loss": 2.18, "step": 1600},
    {"epoch": 5.04, "learning_rate": 2.5740740740740743e-05, "loss": 2.3015, "step": 1610},
    {"epoch": 5.04, "learning_rate": 2.5555555555555554e-05, "loss": 1.0242, "step": 1620},
    {"epoch": 5.04, "learning_rate": 2.5370370370370372e-05, "loss": 1.6023, "step": 1630},
    {"epoch": 5.05, "learning_rate": 2.5185185185185183e-05, "loss": 1.9401, "step": 1640},
    {"epoch": 5.05, "learning_rate": 2.5e-05, "loss": 1.413, "step": 1650},
    {"epoch": 5.05, "learning_rate": 2.4814814814814816e-05, "loss": 2.5201, "step": 1660},
    {"epoch": 5.06, "learning_rate": 2.462962962962963e-05, "loss": 2.3386, "step": 1670},
    {"epoch": 5.06, "learning_rate": 2.4444444444444445e-05, "loss": 2.4826, "step": 1680},
    {"epoch": 5.06, "learning_rate": 2.425925925925926e-05, "loss": 1.8664, "step": 1690},
    {"epoch": 5.07, "learning_rate": 2.4074074074074074e-05, "loss": 1.6916, "step": 1700},
    {"epoch": 5.07, "learning_rate": 2.3888888888888892e-05, "loss": 1.4313, "step": 1710},
    {"epoch": 5.07, "learning_rate": 2.3703703703703707e-05, "loss": 2.0346, "step": 1720},
    {"epoch": 5.08, "learning_rate": 2.351851851851852e-05, "loss": 1.9452, "step": 1730},
    {"epoch": 5.08, "learning_rate": 2.3333333333333336e-05, "loss": 1.414, "step": 1740},
    {"epoch": 5.08, "learning_rate": 2.314814814814815e-05, "loss": 1.6949, "step": 1750},
    {"epoch": 5.09, "learning_rate": 2.2962962962962965e-05, "loss": 2.2121, "step": 1760},
    {"epoch": 5.09, "learning_rate": 2.277777777777778e-05, "loss": 2.301, "step": 1770},
    {"epoch": 5.09, "learning_rate": 2.2592592592592594e-05, "loss": 2.1127, "step": 1780},
    {"epoch": 5.1, "learning_rate": 2.240740740740741e-05, "loss": 1.931, "step": 1790},
    {"epoch": 5.1, "learning_rate": 2.2222222222222223e-05, "loss": 2.0821, "step": 1800},
    {"epoch": 5.1, "eval_accuracy": 0.21621621621621623, "eval_loss": 1.9206827878952026, "eval_runtime": 7.9165, "eval_samples_per_second": 4.674, "eval_steps_per_second": 4.674, "step": 1800},
    {"epoch": 6.0, "learning_rate": 2.2037037037037038e-05, "loss": 1.7858, "step": 1810},
    {"epoch": 6.01, "learning_rate": 2.1851851851851852e-05, "loss": 1.8683, "step": 1820},
    {"epoch": 6.01, "learning_rate": 2.1666666666666667e-05, "loss": 1.6547, "step": 1830},
    {"epoch": 6.01, "learning_rate": 2.148148148148148e-05, "loss": 0.798, "step": 1840},
    {"epoch": 6.02, "learning_rate": 2.1296296296296296e-05, "loss": 3.1924, "step": 1850},
    {"epoch": 6.02, "learning_rate": 2.111111111111111e-05, "loss": 3.1257, "step": 1860},
    {"epoch": 6.02, "learning_rate": 2.0925925925925925e-05, "loss": 1.8917, "step": 1870},
    {"epoch": 6.03, "learning_rate": 2.074074074074074e-05, "loss": 1.0172, "step": 1880},
    {"epoch": 6.03, "learning_rate": 2.0555555555555555e-05, "loss": 2.2976, "step": 1890},
    {"epoch": 6.03, "learning_rate": 2.037037037037037e-05, "loss": 1.7965, "step": 1900},
    {"epoch": 6.04, "learning_rate": 2.0185185185185187e-05, "loss": 1.7191, "step": 1910},
    {"epoch": 6.04, "learning_rate": 2e-05, "loss": 1.9343, "step": 1920},
    {"epoch": 6.04, "learning_rate": 1.9814814814814816e-05, "loss": 2.0969, "step": 1930},
    {"epoch": 6.05, "learning_rate": 1.962962962962963e-05, "loss": 1.1947, "step": 1940},
    {"epoch": 6.05, "learning_rate": 1.9444444444444445e-05, "loss": 1.1954, "step": 1950},
    {"epoch": 6.05, "learning_rate": 1.925925925925926e-05, "loss": 1.9325, "step": 1960},
    {"epoch": 6.06, "learning_rate": 1.9074074074074075e-05, "loss": 1.4172, "step": 1970},
    {"epoch": 6.06, "learning_rate": 1.888888888888889e-05, "loss": 1.6606, "step": 1980},
    {"epoch": 6.06, "learning_rate": 1.8703703703703704e-05, "loss": 0.719, "step": 1990},
    {"epoch": 6.07, "learning_rate": 1.8518518518518518e-05, "loss": 1.7754, "step": 2000},
    {"epoch": 6.07, "learning_rate": 1.8333333333333333e-05, "loss": 1.5815, "step": 2010},
    {"epoch": 6.07, "learning_rate": 1.814814814814815e-05, "loss": 2.0108, "step": 2020},
    {"epoch": 6.08, "learning_rate": 1.7962962962962965e-05, "loss": 0.4663, "step": 2030},
    {"epoch": 6.08, "learning_rate": 1.777777777777778e-05, "loss": 2.8609, "step": 2040},
    {"epoch": 6.08, "learning_rate": 1.7592592592592595e-05, "loss": 1.6084, "step": 2050},
    {"epoch": 6.09, "learning_rate": 1.740740740740741e-05, "loss": 1.6349, "step": 2060},
    {"epoch": 6.09, "learning_rate": 1.7222222222222224e-05, "loss": 2.4036, "step": 2070},
    {"epoch": 6.09, "learning_rate": 1.7037037037037038e-05, "loss": 2.9006, "step": 2080},
    {"epoch": 6.1, "learning_rate": 1.6851851851851853e-05, "loss": 1.9097, "step": 2090},
    {"epoch": 6.1, "learning_rate": 1.6666666666666667e-05, "loss": 1.5534, "step": 2100},
    {"epoch": 6.1, "eval_accuracy": 0.35135135135135137, "eval_loss": 2.200272560119629, "eval_runtime": 8.1085, "eval_samples_per_second": 4.563, "eval_steps_per_second": 4.563, "step": 2100},
    {"epoch": 7.0, "learning_rate": 1.6481481481481482e-05, "loss": 1.1439, "step": 2110},
    {"epoch": 7.01, "learning_rate": 1.62962962962963e-05, "loss": 1.6308, "step": 2120},
    {"epoch": 7.01, "learning_rate": 1.6111111111111115e-05, "loss": 1.7053, "step": 2130},
    {"epoch": 7.01, "learning_rate": 1.5925925925925926e-05, "loss": 1.1133, "step": 2140},
    {"epoch": 7.02, "learning_rate": 1.574074074074074e-05, "loss": 3.4859, "step": 2150},
    {"epoch": 7.02, "learning_rate": 1.5555555555555555e-05, "loss": 1.781, "step": 2160},
    {"epoch": 7.02, "learning_rate": 1.537037037037037e-05, "loss": 1.0939, "step": 2170},
    {"epoch": 7.03, "learning_rate": 1.5185185185185186e-05, "loss": 1.2528, "step": 2180},
    {"epoch": 7.03, "learning_rate": 1.5e-05, "loss": 1.2228, "step": 2190},
    {"epoch": 7.03, "learning_rate": 1.4814814814814815e-05, "loss": 3.4645, "step": 2200},
    {"epoch": 7.04, "learning_rate": 1.462962962962963e-05, "loss": 1.4841, "step": 2210},
    {"epoch": 7.04, "learning_rate": 1.4444444444444444e-05, "loss": 1.4566, "step": 2220},
    {"epoch": 7.04, "learning_rate": 1.425925925925926e-05, "loss": 1.5839, "step": 2230},
    {"epoch": 7.05, "learning_rate": 1.4074074074074075e-05, "loss": 0.9707, "step": 2240},
    {"epoch": 7.05, "learning_rate": 1.388888888888889e-05, "loss": 3.4078, "step": 2250},
    {"epoch": 7.05, "learning_rate": 1.3703703703703704e-05, "loss": 2.6832, "step": 2260},
    {"epoch": 7.06, "learning_rate": 1.3518518518518519e-05, "loss": 1.4994, "step": 2270},
    {"epoch": 7.06, "learning_rate": 1.3333333333333333e-05, "loss": 1.5557, "step": 2280},
    {"epoch": 7.06, "learning_rate": 1.3148148148148148e-05, "loss": 1.4492, "step": 2290},
    {"epoch": 7.07, "learning_rate": 1.2962962962962962e-05, "loss": 1.345, "step": 2300},
    {"epoch": 7.07, "learning_rate": 1.2777777777777777e-05, "loss": 1.5474, "step": 2310},
    {"epoch": 7.07, "learning_rate": 1.2592592592592592e-05, "loss": 2.1177, "step": 2320},
    {"epoch": 7.08, "learning_rate": 1.2407407407407408e-05, "loss": 1.4745, "step": 2330},
    {"epoch": 7.08, "learning_rate": 1.2222222222222222e-05, "loss": 1.0913, "step": 2340},
    {"epoch": 7.08, "learning_rate": 1.2037037037037037e-05, "loss": 3.3812, "step": 2350},
    {"epoch": 7.09, "learning_rate": 1.1851851851851853e-05, "loss": 1.1772, "step": 2360},
    {"epoch": 7.09, "learning_rate": 1.1666666666666668e-05, "loss": 1.407, "step": 2370},
    {"epoch": 7.09, "learning_rate": 1.1481481481481482e-05, "loss": 1.1857, "step": 2380},
    {"epoch": 7.1, "learning_rate": 1.1296296296296297e-05, "loss": 1.3831, "step": 2390},
    {"epoch": 7.1, "learning_rate": 1.1111111111111112e-05, "loss": 0.5632, "step": 2400},
    {"epoch": 7.1, "eval_accuracy": 0.35135135135135137, "eval_loss": 3.021118402481079, "eval_runtime": 7.9126, "eval_samples_per_second": 4.676, "eval_steps_per_second": 4.676, "step": 2400},
    {"epoch": 8.0, "learning_rate": 1.0925925925925926e-05, "loss": 1.528, "step": 2410},
    {"epoch": 8.01, "learning_rate": 1.074074074074074e-05, "loss": 1.5443, "step": 2420},
    {"epoch": 8.01, "learning_rate": 1.0555555555555555e-05, "loss": 2.7518, "step": 2430},
    {"epoch": 8.01, "learning_rate": 1.037037037037037e-05, "loss": 1.5082, "step": 2440},
    {"epoch": 8.02, "learning_rate": 1.0185185185185185e-05, "loss": 2.108, "step": 2450},
    {"epoch": 8.02, "learning_rate": 1e-05, "loss": 1.9699, "step": 2460},
    {"epoch": 8.02, "learning_rate": 9.814814814814815e-06, "loss": 1.2374, "step": 2470},
    {"epoch": 8.03, "learning_rate": 9.62962962962963e-06, "loss": 2.103, "step": 2480},
    {"epoch": 8.03, "learning_rate": 9.444444444444445e-06, "loss": 0.9801, "step": 2490},
    {"epoch": 8.03, "learning_rate": 9.259259259259259e-06, "loss": 2.5852, "step": 2500},
    {"epoch": 8.04, "learning_rate": 9.074074074074075e-06, "loss": 1.9504, "step": 2510},
    {"epoch": 8.04, "learning_rate": 8.88888888888889e-06, "loss": 2.4488, "step": 2520},
    {"epoch": 8.04, "learning_rate": 8.703703703703705e-06, "loss": 0.9139, "step": 2530},
    {"epoch": 8.05, "learning_rate": 8.518518518518519e-06, "loss": 0.995, "step": 2540},
    {"epoch": 8.05, "learning_rate": 8.333333333333334e-06, "loss": 1.7461, "step": 2550},
    {"epoch": 8.05, "learning_rate": 8.14814814814815e-06, "loss": 1.1371, "step": 2560},
    {"epoch": 8.06, "learning_rate": 7.962962962962963e-06, "loss": 0.9142, "step": 2570},
    {"epoch": 8.06, "learning_rate": 7.777777777777777e-06, "loss": 1.9478, "step": 2580},
    {"epoch": 8.06, "learning_rate": 7.592592592592593e-06, "loss": 2.1475, "step": 2590},
    {"epoch": 8.07, "learning_rate": 7.4074074074074075e-06, "loss": 1.991, "step": 2600},
    {"epoch": 8.07, "learning_rate": 7.222222222222222e-06, "loss": 1.4008, "step": 2610},
    {"epoch": 8.07, "learning_rate": 7.0370370370370375e-06, "loss": 1.2762, "step": 2620},
    {"epoch": 8.08, "learning_rate": 6.851851851851852e-06, "loss": 1.3275, "step": 2630},
    {"epoch": 8.08, "learning_rate": 6.666666666666667e-06, "loss": 1.6279, "step": 2640},
    {"epoch": 8.08, "learning_rate": 6.481481481481481e-06, "loss": 1.293, "step": 2650},
    {"epoch": 8.09, "learning_rate": 6.296296296296296e-06, "loss": 0.754, "step": 2660},
    {"epoch": 8.09, "learning_rate": 6.111111111111111e-06, "loss": 1.514, "step": 2670},
    {"epoch": 8.09, "learning_rate": 5.925925925925927e-06, "loss": 1.885, "step": 2680},
    {"epoch": 8.1, "learning_rate": 5.740740740740741e-06, "loss": 1.7632, "step": 2690},
    {"epoch": 8.1, "learning_rate": 5.555555555555556e-06, "loss": 0.2806, "step": 2700},
    {"epoch": 8.1, "eval_accuracy": 0.35135135135135137, "eval_loss": 2.7735354900360107, "eval_runtime": 8.0014, "eval_samples_per_second": 4.624, "eval_steps_per_second": 4.624, "step": 2700},
    {"epoch": 9.0, "learning_rate": 5.37037037037037e-06, "loss": 1.1323, "step": 2710},
    {"epoch": 9.01, "learning_rate": 5.185185185185185e-06, "loss": 0.3361, "step": 2720},
    {"epoch": 9.01, "learning_rate": 5e-06, "loss": 1.5187, "step": 2730},
    {"epoch": 9.01, "learning_rate": 4.814814814814815e-06, "loss": 1.0224, "step": 2740},
    {"epoch": 9.02, "learning_rate": 4.6296296296296296e-06, "loss": 0.9034, "step": 2750},
    {"epoch": 9.02, "learning_rate": 4.444444444444445e-06, "loss": 1.0158, "step": 2760},
    {"epoch": 9.02, "learning_rate": 4.2592592592592596e-06, "loss": 0.6375, "step": 2770},
    {"epoch": 9.03, "learning_rate": 4.074074074074075e-06, "loss": 1.8772, "step": 2780},
    {"epoch": 9.03, "learning_rate": 3.888888888888889e-06, "loss": 1.0748, "step": 2790},
    {"epoch": 9.03, "learning_rate": 3.7037037037037037e-06, "loss": 0.9857, "step": 2800},
    {"epoch": 9.04, "learning_rate": 3.5185185185185187e-06, "loss": 2.9974, "step": 2810},
    {"epoch": 9.04, "learning_rate": 3.3333333333333333e-06, "loss": 0.9478, "step": 2820},
    {"epoch": 9.04, "learning_rate": 3.148148148148148e-06, "loss": 1.3206, "step": 2830},
    {"epoch": 9.05, "learning_rate": 2.9629629629629633e-06, "loss": 2.1553, "step": 2840},
    {"epoch": 9.05, "learning_rate": 2.777777777777778e-06, "loss": 2.3575, "step": 2850},
    {"epoch": 9.05, "learning_rate": 2.5925925925925925e-06, "loss": 1.433, "step": 2860},
    {"epoch": 9.06, "learning_rate": 2.4074074074074075e-06, "loss": 0.9617, "step": 2870},
    {"epoch": 9.06, "learning_rate": 2.2222222222222225e-06, "loss": 1.1871, "step": 2880},
    {"epoch": 9.06, "learning_rate": 2.0370370370370375e-06, "loss": 1.5125, "step": 2890},
    {"epoch": 9.07, "learning_rate": 1.8518518518518519e-06, "loss": 0.8774, "step": 2900},
    {"epoch": 9.07, "learning_rate": 1.6666666666666667e-06, "loss": 2.0533, "step": 2910},
    {"epoch": 9.07, "learning_rate": 1.4814814814814817e-06, "loss": 1.3807, "step": 2920},
    {"epoch": 9.08, "learning_rate": 1.2962962962962962e-06, "loss": 1.394, "step": 2930},
    {"epoch": 9.08, "learning_rate": 1.1111111111111112e-06, "loss": 1.5122, "step": 2940},
    {"epoch": 9.08, "learning_rate": 9.259259259259259e-07, "loss": 0.8405, "step": 2950},
    {"epoch": 9.09, "learning_rate": 7.407407407407408e-07, "loss": 0.7064, "step": 2960},
    {"epoch": 9.09, "learning_rate": 5.555555555555556e-07, "loss": 0.9997, "step": 2970},
    {"epoch": 9.09, "learning_rate": 3.703703703703704e-07, "loss": 0.7745, "step": 2980},
    {"epoch": 9.1, "learning_rate": 1.851851851851852e-07, "loss": 1.0899, "step": 2990},
    {"epoch": 9.1, "learning_rate": 0.0, "loss": 1.3832, "step": 3000},
    {"epoch": 9.1, "eval_accuracy": 0.35135135135135137, "eval_loss": 2.931495189666748, "eval_runtime": 8.0466, "eval_samples_per_second": 4.598, "eval_steps_per_second": 4.598, "step": 3000},
    {"epoch": 9.1, "step": 3000, "total_flos": 7.712679002112e+18, "train_loss": 2.043955718199412, "train_runtime": 1387.0202, "train_samples_per_second": 2.163, "train_steps_per_second": 2.163},
    {"epoch": 9.1, "eval_accuracy": 0.41379310344827586, "eval_loss": 2.1643764972686768, "eval_runtime": 18.3497, "eval_samples_per_second": 4.741, "eval_steps_per_second": 4.741, "step": 3000},
    {"epoch": 9.1, "eval_accuracy": 0.41379310344827586, "eval_loss": 2.1643762588500977, "eval_runtime": 18.2718, "eval_samples_per_second": 4.761, "eval_steps_per_second": 4.761, "step": 3000}
  ],
  "logging_steps": 10,
  "max_steps": 3000,
  "num_train_epochs": 9223372036854775807,
  "save_steps": 500,
  "total_flos": 7.712679002112e+18,
  "trial_name": null,
  "trial_params": null
}