{
"best_metric": 1.031134843826294,
"best_model_checkpoint": "miner_id_24/checkpoint-50",
"epoch": 0.5376344086021505,
"eval_steps": 50,
"global_step": 50,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.010752688172043012,
"grad_norm": 4.536034107208252,
"learning_rate": 7e-06,
"loss": 8.1083,
"step": 1
},
{
"epoch": 0.010752688172043012,
"eval_loss": 2.7238574028015137,
"eval_runtime": 14.4588,
"eval_samples_per_second": 10.858,
"eval_steps_per_second": 2.766,
"step": 1
},
{
"epoch": 0.021505376344086023,
"grad_norm": 5.978432655334473,
"learning_rate": 1.4e-05,
"loss": 10.3139,
"step": 2
},
{
"epoch": 0.03225806451612903,
"grad_norm": 7.468502998352051,
"learning_rate": 2.1e-05,
"loss": 10.007,
"step": 3
},
{
"epoch": 0.043010752688172046,
"grad_norm": 12.983536720275879,
"learning_rate": 2.8e-05,
"loss": 9.7253,
"step": 4
},
{
"epoch": 0.053763440860215055,
"grad_norm": 13.686447143554688,
"learning_rate": 3.5e-05,
"loss": 9.548,
"step": 5
},
{
"epoch": 0.06451612903225806,
"grad_norm": 9.852103233337402,
"learning_rate": 4.2e-05,
"loss": 9.0148,
"step": 6
},
{
"epoch": 0.07526881720430108,
"grad_norm": 7.126467704772949,
"learning_rate": 4.899999999999999e-05,
"loss": 8.349,
"step": 7
},
{
"epoch": 0.08602150537634409,
"grad_norm": 6.550177574157715,
"learning_rate": 5.6e-05,
"loss": 7.4971,
"step": 8
},
{
"epoch": 0.0967741935483871,
"grad_norm": 10.200550079345703,
"learning_rate": 6.3e-05,
"loss": 5.8202,
"step": 9
},
{
"epoch": 0.10752688172043011,
"grad_norm": 7.892305850982666,
"learning_rate": 7e-05,
"loss": 4.6753,
"step": 10
},
{
"epoch": 0.11827956989247312,
"grad_norm": 19.061233520507812,
"learning_rate": 6.999521567473641e-05,
"loss": 5.6292,
"step": 11
},
{
"epoch": 0.12903225806451613,
"grad_norm": 14.547859191894531,
"learning_rate": 6.998086400693241e-05,
"loss": 5.6758,
"step": 12
},
{
"epoch": 0.13978494623655913,
"grad_norm": 9.280782699584961,
"learning_rate": 6.995694892019065e-05,
"loss": 4.7095,
"step": 13
},
{
"epoch": 0.15053763440860216,
"grad_norm": 8.161127090454102,
"learning_rate": 6.99234769526571e-05,
"loss": 3.4122,
"step": 14
},
{
"epoch": 0.16129032258064516,
"grad_norm": 34.015987396240234,
"learning_rate": 6.988045725523343e-05,
"loss": 4.0857,
"step": 15
},
{
"epoch": 0.17204301075268819,
"grad_norm": 24.155447006225586,
"learning_rate": 6.982790158907539e-05,
"loss": 4.2336,
"step": 16
},
{
"epoch": 0.1827956989247312,
"grad_norm": 5.08544397354126,
"learning_rate": 6.976582432237733e-05,
"loss": 4.5834,
"step": 17
},
{
"epoch": 0.1935483870967742,
"grad_norm": 13.041632652282715,
"learning_rate": 6.969424242644413e-05,
"loss": 3.4418,
"step": 18
},
{
"epoch": 0.20430107526881722,
"grad_norm": 5.957951545715332,
"learning_rate": 6.961317547105138e-05,
"loss": 3.8441,
"step": 19
},
{
"epoch": 0.21505376344086022,
"grad_norm": 5.243581771850586,
"learning_rate": 6.952264561909527e-05,
"loss": 3.2856,
"step": 20
},
{
"epoch": 0.22580645161290322,
"grad_norm": 7.811934471130371,
"learning_rate": 6.942267762053337e-05,
"loss": 4.6588,
"step": 21
},
{
"epoch": 0.23655913978494625,
"grad_norm": 11.206618309020996,
"learning_rate": 6.931329880561832e-05,
"loss": 4.762,
"step": 22
},
{
"epoch": 0.24731182795698925,
"grad_norm": 11.149868965148926,
"learning_rate": 6.919453907742597e-05,
"loss": 2.4062,
"step": 23
},
{
"epoch": 0.25806451612903225,
"grad_norm": 11.476567268371582,
"learning_rate": 6.90664309036802e-05,
"loss": 9.3109,
"step": 24
},
{
"epoch": 0.26881720430107525,
"grad_norm": 9.604092597961426,
"learning_rate": 6.892900930787656e-05,
"loss": 7.829,
"step": 25
},
{
"epoch": 0.27956989247311825,
"grad_norm": 6.5685601234436035,
"learning_rate": 6.87823118597072e-05,
"loss": 6.88,
"step": 26
},
{
"epoch": 0.2903225806451613,
"grad_norm": 9.324129104614258,
"learning_rate": 6.862637866478969e-05,
"loss": 7.8981,
"step": 27
},
{
"epoch": 0.3010752688172043,
"grad_norm": 8.132095336914062,
"learning_rate": 6.846125235370252e-05,
"loss": 5.5197,
"step": 28
},
{
"epoch": 0.3118279569892473,
"grad_norm": 5.731638431549072,
"learning_rate": 6.828697807033038e-05,
"loss": 6.1606,
"step": 29
},
{
"epoch": 0.3225806451612903,
"grad_norm": 3.5606529712677,
"learning_rate": 6.81036034595222e-05,
"loss": 5.3749,
"step": 30
},
{
"epoch": 0.3333333333333333,
"grad_norm": 3.248960256576538,
"learning_rate": 6.791117865406564e-05,
"loss": 5.605,
"step": 31
},
{
"epoch": 0.34408602150537637,
"grad_norm": 2.7672736644744873,
"learning_rate": 6.770975626098112e-05,
"loss": 3.8032,
"step": 32
},
{
"epoch": 0.3548387096774194,
"grad_norm": 2.312937021255493,
"learning_rate": 6.749939134713974e-05,
"loss": 2.5618,
"step": 33
},
{
"epoch": 0.3655913978494624,
"grad_norm": 2.682342290878296,
"learning_rate": 6.728014142420846e-05,
"loss": 3.8125,
"step": 34
},
{
"epoch": 0.3763440860215054,
"grad_norm": 3.345135450363159,
"learning_rate": 6.7052066432927e-05,
"loss": 3.4451,
"step": 35
},
{
"epoch": 0.3870967741935484,
"grad_norm": 2.8873608112335205,
"learning_rate": 6.681522872672069e-05,
"loss": 3.7995,
"step": 36
},
{
"epoch": 0.3978494623655914,
"grad_norm": 2.9758994579315186,
"learning_rate": 6.656969305465356e-05,
"loss": 1.916,
"step": 37
},
{
"epoch": 0.40860215053763443,
"grad_norm": 2.9894485473632812,
"learning_rate": 6.631552654372672e-05,
"loss": 2.6401,
"step": 38
},
{
"epoch": 0.41935483870967744,
"grad_norm": 2.8846404552459717,
"learning_rate": 6.60527986805264e-05,
"loss": 3.5161,
"step": 39
},
{
"epoch": 0.43010752688172044,
"grad_norm": 2.933262348175049,
"learning_rate": 6.578158129222711e-05,
"loss": 3.2549,
"step": 40
},
{
"epoch": 0.44086021505376344,
"grad_norm": 3.833869218826294,
"learning_rate": 6.550194852695469e-05,
"loss": 3.841,
"step": 41
},
{
"epoch": 0.45161290322580644,
"grad_norm": 3.596083402633667,
"learning_rate": 6.521397683351509e-05,
"loss": 2.6962,
"step": 42
},
{
"epoch": 0.46236559139784944,
"grad_norm": 6.494784355163574,
"learning_rate": 6.491774494049386e-05,
"loss": 3.7265,
"step": 43
},
{
"epoch": 0.4731182795698925,
"grad_norm": 3.249906063079834,
"learning_rate": 6.461333383473272e-05,
"loss": 1.9618,
"step": 44
},
{
"epoch": 0.4838709677419355,
"grad_norm": 6.374508380889893,
"learning_rate": 6.430082673918849e-05,
"loss": 3.3468,
"step": 45
},
{
"epoch": 0.4946236559139785,
"grad_norm": 7.742399215698242,
"learning_rate": 6.398030909018069e-05,
"loss": 2.4064,
"step": 46
},
{
"epoch": 0.5053763440860215,
"grad_norm": 5.071364879608154,
"learning_rate": 6.365186851403423e-05,
"loss": 6.4741,
"step": 47
},
{
"epoch": 0.5161290322580645,
"grad_norm": 4.5256571769714355,
"learning_rate": 6.331559480312315e-05,
"loss": 6.4723,
"step": 48
},
{
"epoch": 0.5268817204301075,
"grad_norm": 3.286426305770874,
"learning_rate": 6.297157989132236e-05,
"loss": 5.6636,
"step": 49
},
{
"epoch": 0.5376344086021505,
"grad_norm": 2.9677677154541016,
"learning_rate": 6.261991782887377e-05,
"loss": 4.5262,
"step": 50
},
{
"epoch": 0.5376344086021505,
"eval_loss": 1.031134843826294,
"eval_runtime": 14.7506,
"eval_samples_per_second": 10.644,
"eval_steps_per_second": 2.712,
"step": 50
}
],
"logging_steps": 1,
"max_steps": 200,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 50,
"stateful_callbacks": {
"EarlyStoppingCallback": {
"args": {
"early_stopping_patience": 4,
"early_stopping_threshold": 0.0
},
"attributes": {
"early_stopping_patience_counter": 0
}
},
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 6.65254329581568e+16,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}