{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.0,
"global_step": 15625,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.03,
"learning_rate": 4.84e-06,
"loss": 0.6713,
"step": 500
},
{
"epoch": 0.06,
"learning_rate": 4.680000000000001e-06,
"loss": 0.1946,
"step": 1000
},
{
"epoch": 0.1,
"learning_rate": 4.520000000000001e-06,
"loss": 0.1884,
"step": 1500
},
{
"epoch": 0.13,
"learning_rate": 4.360000000000001e-06,
"loss": 0.1858,
"step": 2000
},
{
"epoch": 0.16,
"learning_rate": 4.2000000000000004e-06,
"loss": 0.1833,
"step": 2500
},
{
"epoch": 0.19,
"learning_rate": 4.04e-06,
"loss": 0.1839,
"step": 3000
},
{
"epoch": 0.22,
"learning_rate": 3.88e-06,
"loss": 0.1816,
"step": 3500
},
{
"epoch": 0.26,
"learning_rate": 3.7200000000000004e-06,
"loss": 0.1807,
"step": 4000
},
{
"epoch": 0.29,
"learning_rate": 3.5600000000000002e-06,
"loss": 0.1804,
"step": 4500
},
{
"epoch": 0.32,
"learning_rate": 3.4000000000000005e-06,
"loss": 0.1803,
"step": 5000
},
{
"epoch": 0.35,
"learning_rate": 3.2400000000000003e-06,
"loss": 0.179,
"step": 5500
},
{
"epoch": 0.38,
"learning_rate": 3.08e-06,
"loss": 0.18,
"step": 6000
},
{
"epoch": 0.42,
"learning_rate": 2.92e-06,
"loss": 0.1774,
"step": 6500
},
{
"epoch": 0.45,
"learning_rate": 2.7600000000000003e-06,
"loss": 0.178,
"step": 7000
},
{
"epoch": 0.48,
"learning_rate": 2.6e-06,
"loss": 0.178,
"step": 7500
},
{
"epoch": 0.51,
"learning_rate": 2.4400000000000004e-06,
"loss": 0.1775,
"step": 8000
},
{
"epoch": 0.54,
"learning_rate": 2.28e-06,
"loss": 0.1759,
"step": 8500
},
{
"epoch": 0.58,
"learning_rate": 2.12e-06,
"loss": 0.1773,
"step": 9000
},
{
"epoch": 0.61,
"learning_rate": 1.9600000000000003e-06,
"loss": 0.1747,
"step": 9500
},
{
"epoch": 0.64,
"learning_rate": 1.8000000000000001e-06,
"loss": 0.1756,
"step": 10000
},
{
"epoch": 0.67,
"learning_rate": 1.6400000000000002e-06,
"loss": 0.1768,
"step": 10500
},
{
"epoch": 0.7,
"learning_rate": 1.48e-06,
"loss": 0.1753,
"step": 11000
},
{
"epoch": 0.74,
"learning_rate": 1.32e-06,
"loss": 0.175,
"step": 11500
},
{
"epoch": 0.77,
"learning_rate": 1.1600000000000001e-06,
"loss": 0.1764,
"step": 12000
},
{
"epoch": 0.8,
"learning_rate": 1.0000000000000002e-06,
"loss": 0.1758,
"step": 12500
},
{
"epoch": 0.83,
"learning_rate": 8.400000000000001e-07,
"loss": 0.1752,
"step": 13000
},
{
"epoch": 0.86,
"learning_rate": 6.800000000000001e-07,
"loss": 0.1755,
"step": 13500
},
{
"epoch": 0.9,
"learning_rate": 5.2e-07,
"loss": 0.1751,
"step": 14000
},
{
"epoch": 0.93,
"learning_rate": 3.6e-07,
"loss": 0.1755,
"step": 14500
},
{
"epoch": 0.96,
"learning_rate": 2.0000000000000002e-07,
"loss": 0.1738,
"step": 15000
},
{
"epoch": 0.99,
"learning_rate": 4e-08,
"loss": 0.1747,
"step": 15500
},
{
"epoch": 1.0,
"step": 15625,
"train_runtime": 3654.5011,
"train_samples_per_second": 4.276
}
],
"max_steps": 15625,
"num_train_epochs": 1,
"total_flos": 313292557056000000,
"trial_name": null,
"trial_params": null
}