{
  "best_metric": 0.6984126984126984,
  "best_model_checkpoint": "vit-base-patch16-224/checkpoint-245",
  "epoch": 10.0,
  "eval_steps": 500,
  "global_step": 350,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.2857142857142857,
      "grad_norm": 6.601813793182373,
      "learning_rate": 1.4285714285714286e-06,
      "loss": 0.785,
      "step": 10
    },
    {
      "epoch": 0.5714285714285714,
      "grad_norm": 5.436784267425537,
      "learning_rate": 2.8571428571428573e-06,
      "loss": 0.7224,
      "step": 20
    },
    {
      "epoch": 0.8571428571428571,
      "grad_norm": 5.551754474639893,
      "learning_rate": 4.2857142857142855e-06,
      "loss": 0.6893,
      "step": 30
    },
    {
      "epoch": 1.0,
      "eval_accuracy": 0.4126984126984127,
      "eval_loss": 0.7319461703300476,
      "eval_runtime": 12.0068,
      "eval_samples_per_second": 5.247,
      "eval_steps_per_second": 1.333,
      "step": 35
    },
    {
      "epoch": 1.1428571428571428,
      "grad_norm": 7.985645294189453,
      "learning_rate": 4.920634920634921e-06,
      "loss": 0.7348,
      "step": 40
    },
    {
      "epoch": 1.4285714285714286,
      "grad_norm": 4.910062313079834,
      "learning_rate": 4.761904761904762e-06,
      "loss": 0.7049,
      "step": 50
    },
    {
      "epoch": 1.7142857142857144,
      "grad_norm": 4.96162223815918,
      "learning_rate": 4.603174603174604e-06,
      "loss": 0.7185,
      "step": 60
    },
    {
      "epoch": 2.0,
      "grad_norm": 7.957433223724365,
      "learning_rate": 4.444444444444444e-06,
      "loss": 0.702,
      "step": 70
    },
    {
      "epoch": 2.0,
      "eval_accuracy": 0.5238095238095238,
      "eval_loss": 0.686264157295227,
      "eval_runtime": 10.8952,
      "eval_samples_per_second": 5.782,
      "eval_steps_per_second": 1.469,
      "step": 70
    },
    {
      "epoch": 2.2857142857142856,
      "grad_norm": 5.780089378356934,
      "learning_rate": 4.2857142857142855e-06,
      "loss": 0.6473,
      "step": 80
    },
    {
      "epoch": 2.571428571428571,
      "grad_norm": 3.8612518310546875,
      "learning_rate": 4.126984126984127e-06,
      "loss": 0.6843,
      "step": 90
    },
    {
      "epoch": 2.857142857142857,
      "grad_norm": 5.266788482666016,
      "learning_rate": 3.968253968253968e-06,
      "loss": 0.6644,
      "step": 100
    },
    {
      "epoch": 3.0,
      "eval_accuracy": 0.5873015873015873,
      "eval_loss": 0.6796107888221741,
      "eval_runtime": 10.7178,
      "eval_samples_per_second": 5.878,
      "eval_steps_per_second": 1.493,
      "step": 105
    },
    {
      "epoch": 3.142857142857143,
      "grad_norm": 4.334442615509033,
      "learning_rate": 3.80952380952381e-06,
      "loss": 0.6628,
      "step": 110
    },
    {
      "epoch": 3.4285714285714284,
      "grad_norm": 4.810235500335693,
      "learning_rate": 3.6507936507936507e-06,
      "loss": 0.6903,
      "step": 120
    },
    {
      "epoch": 3.7142857142857144,
      "grad_norm": 7.596153736114502,
      "learning_rate": 3.492063492063492e-06,
      "loss": 0.6859,
      "step": 130
    },
    {
      "epoch": 4.0,
      "grad_norm": 8.868383407592773,
      "learning_rate": 3.3333333333333333e-06,
      "loss": 0.645,
      "step": 140
    },
    {
      "epoch": 4.0,
      "eval_accuracy": 0.5714285714285714,
      "eval_loss": 0.6722006797790527,
      "eval_runtime": 10.7432,
      "eval_samples_per_second": 5.864,
      "eval_steps_per_second": 1.489,
      "step": 140
    },
    {
      "epoch": 4.285714285714286,
      "grad_norm": 4.674743175506592,
      "learning_rate": 3.1746031746031746e-06,
      "loss": 0.6336,
      "step": 150
    },
    {
      "epoch": 4.571428571428571,
      "grad_norm": 5.1306023597717285,
      "learning_rate": 3.015873015873016e-06,
      "loss": 0.6607,
      "step": 160
    },
    {
      "epoch": 4.857142857142857,
      "grad_norm": 5.9358015060424805,
      "learning_rate": 2.8571428571428573e-06,
      "loss": 0.6455,
      "step": 170
    },
    {
      "epoch": 5.0,
      "eval_accuracy": 0.6507936507936508,
      "eval_loss": 0.6544514298439026,
      "eval_runtime": 10.8969,
      "eval_samples_per_second": 5.781,
      "eval_steps_per_second": 1.468,
      "step": 175
    },
    {
      "epoch": 5.142857142857143,
      "grad_norm": 4.706002712249756,
      "learning_rate": 2.6984126984126986e-06,
      "loss": 0.6039,
      "step": 180
    },
    {
      "epoch": 5.428571428571429,
      "grad_norm": 5.049504280090332,
      "learning_rate": 2.53968253968254e-06,
      "loss": 0.6881,
      "step": 190
    },
    {
      "epoch": 5.714285714285714,
      "grad_norm": 4.565422534942627,
      "learning_rate": 2.380952380952381e-06,
      "loss": 0.5841,
      "step": 200
    },
    {
      "epoch": 6.0,
      "grad_norm": 5.591720104217529,
      "learning_rate": 2.222222222222222e-06,
      "loss": 0.6456,
      "step": 210
    },
    {
      "epoch": 6.0,
      "eval_accuracy": 0.6507936507936508,
      "eval_loss": 0.6535508036613464,
      "eval_runtime": 10.8606,
      "eval_samples_per_second": 5.801,
      "eval_steps_per_second": 1.473,
      "step": 210
    },
    {
      "epoch": 6.285714285714286,
      "grad_norm": 5.09126091003418,
      "learning_rate": 2.0634920634920634e-06,
      "loss": 0.6349,
      "step": 220
    },
    {
      "epoch": 6.571428571428571,
      "grad_norm": 4.8962812423706055,
      "learning_rate": 1.904761904761905e-06,
      "loss": 0.5735,
      "step": 230
    },
    {
      "epoch": 6.857142857142857,
      "grad_norm": 4.864034175872803,
      "learning_rate": 1.746031746031746e-06,
      "loss": 0.6745,
      "step": 240
    },
    {
      "epoch": 7.0,
      "eval_accuracy": 0.6984126984126984,
      "eval_loss": 0.6463457942008972,
      "eval_runtime": 10.8225,
      "eval_samples_per_second": 5.821,
      "eval_steps_per_second": 1.478,
      "step": 245
    },
    {
      "epoch": 7.142857142857143,
      "grad_norm": 7.017258167266846,
      "learning_rate": 1.5873015873015873e-06,
      "loss": 0.6431,
      "step": 250
    },
    {
      "epoch": 7.428571428571429,
      "grad_norm": 4.79843282699585,
      "learning_rate": 1.4285714285714286e-06,
      "loss": 0.6164,
      "step": 260
    },
    {
      "epoch": 7.714285714285714,
      "grad_norm": 6.843749523162842,
      "learning_rate": 1.26984126984127e-06,
      "loss": 0.644,
      "step": 270
    },
    {
      "epoch": 8.0,
      "grad_norm": 5.495963096618652,
      "learning_rate": 1.111111111111111e-06,
      "loss": 0.6369,
      "step": 280
    },
    {
      "epoch": 8.0,
      "eval_accuracy": 0.6666666666666666,
      "eval_loss": 0.6524515748023987,
      "eval_runtime": 11.0752,
      "eval_samples_per_second": 5.688,
      "eval_steps_per_second": 1.445,
      "step": 280
    },
    {
      "epoch": 8.285714285714286,
      "grad_norm": 7.264893531799316,
      "learning_rate": 9.523809523809525e-07,
      "loss": 0.6661,
      "step": 290
    },
    {
      "epoch": 8.571428571428571,
      "grad_norm": 3.773362636566162,
      "learning_rate": 7.936507936507937e-07,
      "loss": 0.607,
      "step": 300
    },
    {
      "epoch": 8.857142857142858,
      "grad_norm": 6.338254451751709,
      "learning_rate": 6.34920634920635e-07,
      "loss": 0.6012,
      "step": 310
    },
    {
      "epoch": 9.0,
      "eval_accuracy": 0.6984126984126984,
      "eval_loss": 0.6485763788223267,
      "eval_runtime": 10.9179,
      "eval_samples_per_second": 5.77,
      "eval_steps_per_second": 1.465,
      "step": 315
    },
    {
      "epoch": 9.142857142857142,
      "grad_norm": 5.557713031768799,
      "learning_rate": 4.7619047619047623e-07,
      "loss": 0.625,
      "step": 320
    },
    {
      "epoch": 9.428571428571429,
      "grad_norm": 5.070873737335205,
      "learning_rate": 3.174603174603175e-07,
      "loss": 0.6012,
      "step": 330
    },
    {
      "epoch": 9.714285714285714,
      "grad_norm": 6.369267463684082,
      "learning_rate": 1.5873015873015874e-07,
      "loss": 0.6237,
      "step": 340
    },
    {
      "epoch": 10.0,
      "grad_norm": 8.102092742919922,
      "learning_rate": 0.0,
      "loss": 0.6219,
      "step": 350
    },
    {
      "epoch": 10.0,
      "eval_accuracy": 0.6984126984126984,
      "eval_loss": 0.646605372428894,
      "eval_runtime": 11.2095,
      "eval_samples_per_second": 5.62,
      "eval_steps_per_second": 1.427,
      "step": 350
    },
    {
      "epoch": 10.0,
      "step": 350,
      "total_flos": 4.3240530204942336e+17,
      "train_loss": 0.6562235273633684,
      "train_runtime": 1176.6645,
      "train_samples_per_second": 4.742,
      "train_steps_per_second": 0.297
    }
  ],
  "logging_steps": 10,
  "max_steps": 350,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 10,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 4.3240530204942336e+17,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}