vit-lr-0.0001 / trainer_state.json
{
"best_metric": 0.5187094211578369,
"best_model_checkpoint": "./vit-lr-0.0001/checkpoint-800",
"epoch": 3.426791277258567,
"eval_steps": 100,
"global_step": 1100,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.03,
"grad_norm": 3.3859810829162598,
"learning_rate": 9.997196261682243e-05,
"loss": 1.1978,
"step": 10
},
{
"epoch": 0.06,
"grad_norm": 4.35668420791626,
"learning_rate": 9.994080996884736e-05,
"loss": 0.717,
"step": 20
},
{
"epoch": 0.09,
"grad_norm": 5.1708455085754395,
"learning_rate": 9.990965732087227e-05,
"loss": 0.8267,
"step": 30
},
{
"epoch": 0.12,
"grad_norm": 9.000005722045898,
"learning_rate": 9.98785046728972e-05,
"loss": 1.0157,
"step": 40
},
{
"epoch": 0.16,
"grad_norm": 4.164905071258545,
"learning_rate": 9.984735202492212e-05,
"loss": 0.7406,
"step": 50
},
{
"epoch": 0.19,
"grad_norm": 3.7239935398101807,
"learning_rate": 9.981619937694705e-05,
"loss": 0.7899,
"step": 60
},
{
"epoch": 0.22,
"grad_norm": 3.9820024967193604,
"learning_rate": 9.978504672897196e-05,
"loss": 0.816,
"step": 70
},
{
"epoch": 0.25,
"grad_norm": 3.7813899517059326,
"learning_rate": 9.975389408099689e-05,
"loss": 0.6441,
"step": 80
},
{
"epoch": 0.28,
"grad_norm": 6.419355869293213,
"learning_rate": 9.972274143302182e-05,
"loss": 0.6568,
"step": 90
},
{
"epoch": 0.31,
"grad_norm": 5.270507335662842,
"learning_rate": 9.969158878504672e-05,
"loss": 0.6008,
"step": 100
},
{
"epoch": 0.31,
"eval_accuracy": 0.7836338418862691,
"eval_loss": 0.5998427271842957,
"eval_runtime": 39.7401,
"eval_samples_per_second": 72.571,
"eval_steps_per_second": 9.084,
"step": 100
},
{
"epoch": 0.34,
"grad_norm": 4.352388858795166,
"learning_rate": 9.966043613707165e-05,
"loss": 0.6567,
"step": 110
},
{
"epoch": 0.37,
"grad_norm": 6.960423946380615,
"learning_rate": 9.962928348909658e-05,
"loss": 0.643,
"step": 120
},
{
"epoch": 0.4,
"grad_norm": 8.54637336730957,
"learning_rate": 9.95981308411215e-05,
"loss": 0.6606,
"step": 130
},
{
"epoch": 0.44,
"grad_norm": 1.6916728019714355,
"learning_rate": 9.956697819314643e-05,
"loss": 0.5345,
"step": 140
},
{
"epoch": 0.47,
"grad_norm": 5.490638732910156,
"learning_rate": 9.953582554517134e-05,
"loss": 0.6008,
"step": 150
},
{
"epoch": 0.5,
"grad_norm": 6.159054279327393,
"learning_rate": 9.950467289719627e-05,
"loss": 0.5466,
"step": 160
},
{
"epoch": 0.53,
"grad_norm": 2.6157217025756836,
"learning_rate": 9.947352024922119e-05,
"loss": 0.5968,
"step": 170
},
{
"epoch": 0.56,
"grad_norm": 7.270638465881348,
"learning_rate": 9.944236760124612e-05,
"loss": 0.6522,
"step": 180
},
{
"epoch": 0.59,
"grad_norm": 6.296653747558594,
"learning_rate": 9.941121495327103e-05,
"loss": 0.5591,
"step": 190
},
{
"epoch": 0.62,
"grad_norm": 6.226278781890869,
"learning_rate": 9.938006230529595e-05,
"loss": 0.5779,
"step": 200
},
{
"epoch": 0.62,
"eval_accuracy": 0.7832871012482663,
"eval_loss": 0.611842930316925,
"eval_runtime": 39.418,
"eval_samples_per_second": 73.165,
"eval_steps_per_second": 9.158,
"step": 200
},
{
"epoch": 0.65,
"grad_norm": 4.321172714233398,
"learning_rate": 9.934890965732088e-05,
"loss": 0.606,
"step": 210
},
{
"epoch": 0.69,
"grad_norm": 6.226727485656738,
"learning_rate": 9.93177570093458e-05,
"loss": 0.7798,
"step": 220
},
{
"epoch": 0.72,
"grad_norm": 3.879312515258789,
"learning_rate": 9.928660436137072e-05,
"loss": 0.4047,
"step": 230
},
{
"epoch": 0.75,
"grad_norm": 2.546088457107544,
"learning_rate": 9.925545171339564e-05,
"loss": 0.559,
"step": 240
},
{
"epoch": 0.78,
"grad_norm": 6.213643550872803,
"learning_rate": 9.922429906542056e-05,
"loss": 0.7639,
"step": 250
},
{
"epoch": 0.81,
"grad_norm": 3.789020299911499,
"learning_rate": 9.91931464174455e-05,
"loss": 0.5372,
"step": 260
},
{
"epoch": 0.84,
"grad_norm": 5.764331340789795,
"learning_rate": 9.916199376947041e-05,
"loss": 0.5031,
"step": 270
},
{
"epoch": 0.87,
"grad_norm": 2.2293648719787598,
"learning_rate": 9.913084112149534e-05,
"loss": 0.4379,
"step": 280
},
{
"epoch": 0.9,
"grad_norm": 6.797378063201904,
"learning_rate": 9.909968847352025e-05,
"loss": 0.6147,
"step": 290
},
{
"epoch": 0.93,
"grad_norm": 6.964852809906006,
"learning_rate": 9.906853582554517e-05,
"loss": 0.6891,
"step": 300
},
{
"epoch": 0.93,
"eval_accuracy": 0.7777392510402219,
"eval_loss": 0.5814183354377747,
"eval_runtime": 39.6347,
"eval_samples_per_second": 72.765,
"eval_steps_per_second": 9.108,
"step": 300
},
{
"epoch": 0.97,
"grad_norm": 3.367398262023926,
"learning_rate": 9.90373831775701e-05,
"loss": 0.5153,
"step": 310
},
{
"epoch": 1.0,
"grad_norm": 5.517517566680908,
"learning_rate": 9.900623052959503e-05,
"loss": 0.5022,
"step": 320
},
{
"epoch": 1.03,
"grad_norm": 6.2498393058776855,
"learning_rate": 9.897507788161994e-05,
"loss": 0.4581,
"step": 330
},
{
"epoch": 1.06,
"grad_norm": 5.515881538391113,
"learning_rate": 9.894392523364486e-05,
"loss": 0.3453,
"step": 340
},
{
"epoch": 1.09,
"grad_norm": 3.255171060562134,
"learning_rate": 9.891277258566979e-05,
"loss": 0.4332,
"step": 350
},
{
"epoch": 1.12,
"grad_norm": 2.262460231781006,
"learning_rate": 9.888161993769472e-05,
"loss": 0.3463,
"step": 360
},
{
"epoch": 1.15,
"grad_norm": 5.0724711418151855,
"learning_rate": 9.885046728971963e-05,
"loss": 0.5136,
"step": 370
},
{
"epoch": 1.18,
"grad_norm": 3.8894565105438232,
"learning_rate": 9.881931464174455e-05,
"loss": 0.3792,
"step": 380
},
{
"epoch": 1.21,
"grad_norm": 4.299795627593994,
"learning_rate": 9.878816199376948e-05,
"loss": 0.4804,
"step": 390
},
{
"epoch": 1.25,
"grad_norm": 2.381559371948242,
"learning_rate": 9.875700934579439e-05,
"loss": 0.3535,
"step": 400
},
{
"epoch": 1.25,
"eval_accuracy": 0.7933425797503467,
"eval_loss": 0.5594320297241211,
"eval_runtime": 39.1416,
"eval_samples_per_second": 73.681,
"eval_steps_per_second": 9.223,
"step": 400
},
{
"epoch": 1.28,
"grad_norm": 4.152671813964844,
"learning_rate": 9.872585669781932e-05,
"loss": 0.4629,
"step": 410
},
{
"epoch": 1.31,
"grad_norm": 3.4808621406555176,
"learning_rate": 9.869470404984425e-05,
"loss": 0.3583,
"step": 420
},
{
"epoch": 1.34,
"grad_norm": 1.483371615409851,
"learning_rate": 9.866355140186917e-05,
"loss": 0.5018,
"step": 430
},
{
"epoch": 1.37,
"grad_norm": 4.952483654022217,
"learning_rate": 9.863239875389408e-05,
"loss": 0.4338,
"step": 440
},
{
"epoch": 1.4,
"grad_norm": 4.527029037475586,
"learning_rate": 9.860124610591901e-05,
"loss": 0.437,
"step": 450
},
{
"epoch": 1.43,
"grad_norm": 7.609699726104736,
"learning_rate": 9.857009345794394e-05,
"loss": 0.3796,
"step": 460
},
{
"epoch": 1.46,
"grad_norm": 3.572444200515747,
"learning_rate": 9.853894080996885e-05,
"loss": 0.3551,
"step": 470
},
{
"epoch": 1.5,
"grad_norm": 8.147821426391602,
"learning_rate": 9.850778816199377e-05,
"loss": 0.4182,
"step": 480
},
{
"epoch": 1.53,
"grad_norm": 6.7267279624938965,
"learning_rate": 9.84766355140187e-05,
"loss": 0.3584,
"step": 490
},
{
"epoch": 1.56,
"grad_norm": 3.457125425338745,
"learning_rate": 9.844548286604361e-05,
"loss": 0.4055,
"step": 500
},
{
"epoch": 1.56,
"eval_accuracy": 0.8134535367545076,
"eval_loss": 0.571877658367157,
"eval_runtime": 39.3357,
"eval_samples_per_second": 73.318,
"eval_steps_per_second": 9.177,
"step": 500
},
{
"epoch": 1.59,
"grad_norm": 4.186713695526123,
"learning_rate": 9.841433021806854e-05,
"loss": 0.4802,
"step": 510
},
{
"epoch": 1.62,
"grad_norm": 4.469476699829102,
"learning_rate": 9.838317757009346e-05,
"loss": 0.4118,
"step": 520
},
{
"epoch": 1.65,
"grad_norm": 7.559543609619141,
"learning_rate": 9.835202492211837e-05,
"loss": 0.4605,
"step": 530
},
{
"epoch": 1.68,
"grad_norm": 2.5254476070404053,
"learning_rate": 9.83208722741433e-05,
"loss": 0.2878,
"step": 540
},
{
"epoch": 1.71,
"grad_norm": 4.755306720733643,
"learning_rate": 9.828971962616823e-05,
"loss": 0.3881,
"step": 550
},
{
"epoch": 1.74,
"grad_norm": 3.8046302795410156,
"learning_rate": 9.825856697819316e-05,
"loss": 0.3511,
"step": 560
},
{
"epoch": 1.78,
"grad_norm": 2.603055238723755,
"learning_rate": 9.822741433021808e-05,
"loss": 0.3354,
"step": 570
},
{
"epoch": 1.81,
"grad_norm": 3.5978047847747803,
"learning_rate": 9.819626168224299e-05,
"loss": 0.2885,
"step": 580
},
{
"epoch": 1.84,
"grad_norm": 5.625058174133301,
"learning_rate": 9.816510903426792e-05,
"loss": 0.392,
"step": 590
},
{
"epoch": 1.87,
"grad_norm": 4.636626720428467,
"learning_rate": 9.813395638629284e-05,
"loss": 0.2471,
"step": 600
},
{
"epoch": 1.87,
"eval_accuracy": 0.794382801664355,
"eval_loss": 0.5536235570907593,
"eval_runtime": 39.2047,
"eval_samples_per_second": 73.563,
"eval_steps_per_second": 9.208,
"step": 600
},
{
"epoch": 1.9,
"grad_norm": 6.1720123291015625,
"learning_rate": 9.810280373831777e-05,
"loss": 0.5169,
"step": 610
},
{
"epoch": 1.93,
"grad_norm": 3.5033581256866455,
"learning_rate": 9.807165109034268e-05,
"loss": 0.4374,
"step": 620
},
{
"epoch": 1.96,
"grad_norm": 4.63729190826416,
"learning_rate": 9.80404984423676e-05,
"loss": 0.2866,
"step": 630
},
{
"epoch": 1.99,
"grad_norm": 3.4656624794006348,
"learning_rate": 9.800934579439253e-05,
"loss": 0.3323,
"step": 640
},
{
"epoch": 2.02,
"grad_norm": 3.2712185382843018,
"learning_rate": 9.797819314641746e-05,
"loss": 0.2275,
"step": 650
},
{
"epoch": 2.06,
"grad_norm": 3.9931576251983643,
"learning_rate": 9.794704049844237e-05,
"loss": 0.21,
"step": 660
},
{
"epoch": 2.09,
"grad_norm": 1.7855778932571411,
"learning_rate": 9.791588785046729e-05,
"loss": 0.2996,
"step": 670
},
{
"epoch": 2.12,
"grad_norm": 5.909547328948975,
"learning_rate": 9.788473520249222e-05,
"loss": 0.2081,
"step": 680
},
{
"epoch": 2.15,
"grad_norm": 1.9149627685546875,
"learning_rate": 9.785358255451714e-05,
"loss": 0.216,
"step": 690
},
{
"epoch": 2.18,
"grad_norm": 4.954345226287842,
"learning_rate": 9.782242990654206e-05,
"loss": 0.171,
"step": 700
},
{
"epoch": 2.18,
"eval_accuracy": 0.8391123439667129,
"eval_loss": 0.5375762581825256,
"eval_runtime": 39.0099,
"eval_samples_per_second": 73.93,
"eval_steps_per_second": 9.254,
"step": 700
},
{
"epoch": 2.21,
"grad_norm": 1.2202866077423096,
"learning_rate": 9.779127725856699e-05,
"loss": 0.1488,
"step": 710
},
{
"epoch": 2.24,
"grad_norm": 6.487290382385254,
"learning_rate": 9.77601246105919e-05,
"loss": 0.3093,
"step": 720
},
{
"epoch": 2.27,
"grad_norm": 6.3305230140686035,
"learning_rate": 9.772897196261682e-05,
"loss": 0.3298,
"step": 730
},
{
"epoch": 2.31,
"grad_norm": 1.8739694356918335,
"learning_rate": 9.769781931464175e-05,
"loss": 0.2836,
"step": 740
},
{
"epoch": 2.34,
"grad_norm": 6.215338230133057,
"learning_rate": 9.766666666666668e-05,
"loss": 0.213,
"step": 750
},
{
"epoch": 2.37,
"grad_norm": 0.6346792578697205,
"learning_rate": 9.76355140186916e-05,
"loss": 0.1537,
"step": 760
},
{
"epoch": 2.4,
"grad_norm": 6.488032341003418,
"learning_rate": 9.760436137071651e-05,
"loss": 0.2713,
"step": 770
},
{
"epoch": 2.43,
"grad_norm": 3.9325110912323,
"learning_rate": 9.757320872274144e-05,
"loss": 0.2861,
"step": 780
},
{
"epoch": 2.46,
"grad_norm": 4.505757808685303,
"learning_rate": 9.754205607476637e-05,
"loss": 0.2625,
"step": 790
},
{
"epoch": 2.49,
"grad_norm": 1.1799720525741577,
"learning_rate": 9.751090342679128e-05,
"loss": 0.2159,
"step": 800
},
{
"epoch": 2.49,
"eval_accuracy": 0.8359916782246879,
"eval_loss": 0.5187094211578369,
"eval_runtime": 38.9237,
"eval_samples_per_second": 74.094,
"eval_steps_per_second": 9.275,
"step": 800
},
{
"epoch": 2.52,
"grad_norm": 0.662136971950531,
"learning_rate": 9.74797507788162e-05,
"loss": 0.2325,
"step": 810
},
{
"epoch": 2.55,
"grad_norm": 9.680724143981934,
"learning_rate": 9.744859813084113e-05,
"loss": 0.2579,
"step": 820
},
{
"epoch": 2.59,
"grad_norm": 0.5741639137268066,
"learning_rate": 9.741744548286604e-05,
"loss": 0.1271,
"step": 830
},
{
"epoch": 2.62,
"grad_norm": 9.787598609924316,
"learning_rate": 9.738629283489097e-05,
"loss": 0.2705,
"step": 840
},
{
"epoch": 2.65,
"grad_norm": 3.5786948204040527,
"learning_rate": 9.73551401869159e-05,
"loss": 0.1892,
"step": 850
},
{
"epoch": 2.68,
"grad_norm": 3.534965753555298,
"learning_rate": 9.732398753894082e-05,
"loss": 0.1629,
"step": 860
},
{
"epoch": 2.71,
"grad_norm": 3.6333532333374023,
"learning_rate": 9.729283489096573e-05,
"loss": 0.3307,
"step": 870
},
{
"epoch": 2.74,
"grad_norm": 7.089163780212402,
"learning_rate": 9.726168224299066e-05,
"loss": 0.3159,
"step": 880
},
{
"epoch": 2.77,
"grad_norm": 0.845410168170929,
"learning_rate": 9.723052959501559e-05,
"loss": 0.1572,
"step": 890
},
{
"epoch": 2.8,
"grad_norm": 6.727290630340576,
"learning_rate": 9.71993769470405e-05,
"loss": 0.1659,
"step": 900
},
{
"epoch": 2.8,
"eval_accuracy": 0.8318307905686546,
"eval_loss": 0.5848782062530518,
"eval_runtime": 38.908,
"eval_samples_per_second": 74.124,
"eval_steps_per_second": 9.278,
"step": 900
},
{
"epoch": 2.83,
"grad_norm": 4.609592914581299,
"learning_rate": 9.716822429906542e-05,
"loss": 0.3052,
"step": 910
},
{
"epoch": 2.87,
"grad_norm": 4.7995285987854,
"learning_rate": 9.713707165109035e-05,
"loss": 0.3944,
"step": 920
},
{
"epoch": 2.9,
"grad_norm": 4.78571891784668,
"learning_rate": 9.710591900311527e-05,
"loss": 0.3118,
"step": 930
},
{
"epoch": 2.93,
"grad_norm": 7.084194183349609,
"learning_rate": 9.70747663551402e-05,
"loss": 0.2874,
"step": 940
},
{
"epoch": 2.96,
"grad_norm": 2.6003012657165527,
"learning_rate": 9.704361370716511e-05,
"loss": 0.2024,
"step": 950
},
{
"epoch": 2.99,
"grad_norm": 3.7201614379882812,
"learning_rate": 9.701246105919004e-05,
"loss": 0.224,
"step": 960
},
{
"epoch": 3.02,
"grad_norm": 3.0651230812072754,
"learning_rate": 9.698130841121495e-05,
"loss": 0.1076,
"step": 970
},
{
"epoch": 3.05,
"grad_norm": 4.620060920715332,
"learning_rate": 9.695015576323988e-05,
"loss": 0.1312,
"step": 980
},
{
"epoch": 3.08,
"grad_norm": 0.5105383992195129,
"learning_rate": 9.691900311526481e-05,
"loss": 0.1479,
"step": 990
},
{
"epoch": 3.12,
"grad_norm": 1.0817281007766724,
"learning_rate": 9.688785046728971e-05,
"loss": 0.0554,
"step": 1000
},
{
"epoch": 3.12,
"eval_accuracy": 0.8543689320388349,
"eval_loss": 0.5457024574279785,
"eval_runtime": 39.7583,
"eval_samples_per_second": 72.538,
"eval_steps_per_second": 9.08,
"step": 1000
},
{
"epoch": 3.15,
"grad_norm": 1.351655125617981,
"learning_rate": 9.685669781931464e-05,
"loss": 0.0568,
"step": 1010
},
{
"epoch": 3.18,
"grad_norm": 3.9608170986175537,
"learning_rate": 9.682554517133957e-05,
"loss": 0.1162,
"step": 1020
},
{
"epoch": 3.21,
"grad_norm": 6.9148783683776855,
"learning_rate": 9.679439252336449e-05,
"loss": 0.0587,
"step": 1030
},
{
"epoch": 3.24,
"grad_norm": 1.0946722030639648,
"learning_rate": 9.676323987538942e-05,
"loss": 0.0572,
"step": 1040
},
{
"epoch": 3.27,
"grad_norm": 9.527204513549805,
"learning_rate": 9.673208722741433e-05,
"loss": 0.1847,
"step": 1050
},
{
"epoch": 3.3,
"grad_norm": 7.0584716796875,
"learning_rate": 9.670093457943926e-05,
"loss": 0.1296,
"step": 1060
},
{
"epoch": 3.33,
"grad_norm": 7.145915985107422,
"learning_rate": 9.666978193146418e-05,
"loss": 0.1188,
"step": 1070
},
{
"epoch": 3.36,
"grad_norm": 7.105827808380127,
"learning_rate": 9.66386292834891e-05,
"loss": 0.099,
"step": 1080
},
{
"epoch": 3.4,
"grad_norm": 8.103167533874512,
"learning_rate": 9.660747663551402e-05,
"loss": 0.0874,
"step": 1090
},
{
"epoch": 3.43,
"grad_norm": 1.4485965967178345,
"learning_rate": 9.657632398753894e-05,
"loss": 0.1125,
"step": 1100
},
{
"epoch": 3.43,
"eval_accuracy": 0.8318307905686546,
"eval_loss": 0.7323819398880005,
"eval_runtime": 39.6414,
"eval_samples_per_second": 72.752,
"eval_steps_per_second": 9.107,
"step": 1100
},
{
"epoch": 3.43,
"step": 1100,
"total_flos": 1.362060245306622e+18,
"train_loss": 0.38896639000285754,
"train_runtime": 855.3091,
"train_samples_per_second": 599.549,
"train_steps_per_second": 37.53
}
],
"logging_steps": 10,
"max_steps": 32100,
"num_input_tokens_seen": 0,
"num_train_epochs": 100,
"save_steps": 100,
"total_flos": 1.362060245306622e+18,
"train_batch_size": 16,
"trial_name": null,
"trial_params": null
}
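The JSON above is the Hugging Face Trainer state for this run: training metrics are logged every 10 steps (logging_steps) and an evaluation is run every 100 steps (eval_steps), with best_metric matching the eval_loss recorded at checkpoint-800. As a minimal sketch, assuming the file is saved locally as trainer_state.json (the path is an assumption, adjust as needed), the evaluation curve can be read back with only the standard library:

```python
# Minimal sketch (not part of the original file): load trainer_state.json with
# Python's standard library and summarize the evaluation curve.
import json

# Assumed local path; point this at wherever the file actually lives.
with open("trainer_state.json") as f:
    state = json.load(f)

print("best_metric (eval_loss):", state["best_metric"])
print("best checkpoint:", state["best_model_checkpoint"])

# Entries in log_history that contain "eval_accuracy" are evaluation logs;
# the rest are per-10-step training logs (logging_steps = 10).
for entry in state["log_history"]:
    if "eval_accuracy" in entry:
        print(f'step {entry["step"]:>5}: '
              f'eval_loss={entry["eval_loss"]:.4f}, '
              f'eval_accuracy={entry["eval_accuracy"]:.4f}')
```

Running this prints one line per evaluation step (100, 200, ..., 1100), which makes it easy to see that eval_loss bottoms out at step 800 while eval_accuracy peaks at step 1000 before the final evaluation degrades.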