{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 2.0,
"eval_steps": 500,
"global_step": 221,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.09,
"grad_norm": 14.758803367614746,
"learning_rate": 0.0002,
"loss": 23.3472,
"step": 10
},
{
"epoch": 0.18,
"grad_norm": 59.627952575683594,
"learning_rate": 0.0002,
"loss": 15.2238,
"step": 20
},
{
"epoch": 0.27,
"grad_norm": 4.7137131690979,
"learning_rate": 0.0002,
"loss": 2.488,
"step": 30
},
{
"epoch": 0.36,
"grad_norm": 1.8212027549743652,
"learning_rate": 0.0002,
"loss": 1.0811,
"step": 40
},
{
"epoch": 0.45,
"grad_norm": 2.546755313873291,
"learning_rate": 0.0002,
"loss": 0.8055,
"step": 50
},
{
"epoch": 0.54,
"grad_norm": 1.0168941020965576,
"learning_rate": 0.0002,
"loss": 0.705,
"step": 60
},
{
"epoch": 0.63,
"grad_norm": 0.9273145794868469,
"learning_rate": 0.0002,
"loss": 0.6457,
"step": 70
},
{
"epoch": 0.72,
"grad_norm": 0.7880547642707825,
"learning_rate": 0.0002,
"loss": 0.5931,
"step": 80
},
{
"epoch": 0.81,
"grad_norm": 0.8284961581230164,
"learning_rate": 0.0002,
"loss": 0.5887,
"step": 90
},
{
"epoch": 0.9,
"grad_norm": 1.7326173782348633,
"learning_rate": 0.0002,
"loss": 0.5627,
"step": 100
},
{
"epoch": 1.0,
"grad_norm": 1.3316080570220947,
"learning_rate": 0.0002,
"loss": 0.5699,
"step": 110
},
{
"epoch": 1.09,
"grad_norm": 0.7548896074295044,
"learning_rate": 0.0002,
"loss": 0.5371,
"step": 120
},
{
"epoch": 1.18,
"grad_norm": 0.612474262714386,
"learning_rate": 0.0002,
"loss": 0.479,
"step": 130
},
{
"epoch": 1.27,
"grad_norm": 0.8262041807174683,
"learning_rate": 0.0002,
"loss": 0.5065,
"step": 140
},
{
"epoch": 1.36,
"grad_norm": 0.9287819862365723,
"learning_rate": 0.0002,
"loss": 0.4441,
"step": 150
},
{
"epoch": 1.45,
"grad_norm": 0.7204004526138306,
"learning_rate": 0.0002,
"loss": 0.5144,
"step": 160
},
{
"epoch": 1.54,
"grad_norm": 0.5320543646812439,
"learning_rate": 0.0002,
"loss": 0.4624,
"step": 170
},
{
"epoch": 1.63,
"grad_norm": 0.5815476775169373,
"learning_rate": 0.0002,
"loss": 0.4536,
"step": 180
},
{
"epoch": 1.72,
"grad_norm": 0.4539355933666229,
"learning_rate": 0.0002,
"loss": 0.5108,
"step": 190
},
{
"epoch": 1.81,
"grad_norm": 0.8424472212791443,
"learning_rate": 0.0002,
"loss": 0.4603,
"step": 200
},
{
"epoch": 1.9,
"grad_norm": 0.485895574092865,
"learning_rate": 0.0002,
"loss": 0.4843,
"step": 210
},
{
"epoch": 1.99,
"grad_norm": 0.4039005935192108,
"learning_rate": 0.0002,
"loss": 0.4872,
"step": 220
}
],
"logging_steps": 10,
"max_steps": 330,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 500,
"total_flos": 6.217168915257754e+16,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}