codellama-without-summary / trainer_state.json
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 3.571428571428571,
"eval_steps": 500,
"global_step": 1000,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.03571428571428571,
"grad_norm": 0.10301133245229721,
"learning_rate": 9.970149253731344e-05,
"loss": 1.0487,
"step": 10
},
{
"epoch": 0.07142857142857142,
"grad_norm": 0.13997109234333038,
"learning_rate": 9.91044776119403e-05,
"loss": 0.7479,
"step": 20
},
{
"epoch": 0.10714285714285714,
"grad_norm": 0.07622478902339935,
"learning_rate": 9.850746268656717e-05,
"loss": 0.4974,
"step": 30
},
{
"epoch": 0.14285714285714285,
"grad_norm": 0.06940005719661713,
"learning_rate": 9.791044776119404e-05,
"loss": 0.4081,
"step": 40
},
{
"epoch": 0.17857142857142858,
"grad_norm": 0.056480951607227325,
"learning_rate": 9.731343283582089e-05,
"loss": 0.4592,
"step": 50
},
{
"epoch": 0.21428571428571427,
"grad_norm": 0.055311448872089386,
"learning_rate": 9.671641791044777e-05,
"loss": 0.5037,
"step": 60
},
{
"epoch": 0.25,
"grad_norm": 0.05507435277104378,
"learning_rate": 9.611940298507464e-05,
"loss": 0.4667,
"step": 70
},
{
"epoch": 0.2857142857142857,
"grad_norm": 0.054898519068956375,
"learning_rate": 9.552238805970149e-05,
"loss": 0.4856,
"step": 80
},
{
"epoch": 0.32142857142857145,
"grad_norm": 0.04788612574338913,
"learning_rate": 9.492537313432837e-05,
"loss": 0.4145,
"step": 90
},
{
"epoch": 0.35714285714285715,
"grad_norm": 0.06482447683811188,
"learning_rate": 9.432835820895522e-05,
"loss": 0.4418,
"step": 100
},
{
"epoch": 0.39285714285714285,
"grad_norm": 0.058699481189250946,
"learning_rate": 9.373134328358209e-05,
"loss": 0.6189,
"step": 110
},
{
"epoch": 0.42857142857142855,
"grad_norm": 0.05194304883480072,
"learning_rate": 9.313432835820896e-05,
"loss": 0.4142,
"step": 120
},
{
"epoch": 0.4642857142857143,
"grad_norm": 0.07520686835050583,
"learning_rate": 9.253731343283582e-05,
"loss": 0.4576,
"step": 130
},
{
"epoch": 0.5,
"grad_norm": 0.0573502779006958,
"learning_rate": 9.194029850746269e-05,
"loss": 0.4354,
"step": 140
},
{
"epoch": 0.5357142857142857,
"grad_norm": 0.061439767479896545,
"learning_rate": 9.134328358208956e-05,
"loss": 0.4084,
"step": 150
},
{
"epoch": 0.5714285714285714,
"grad_norm": 0.058765340596437454,
"learning_rate": 9.074626865671642e-05,
"loss": 0.405,
"step": 160
},
{
"epoch": 0.6071428571428571,
"grad_norm": 0.055010490119457245,
"learning_rate": 9.014925373134329e-05,
"loss": 0.4748,
"step": 170
},
{
"epoch": 0.6428571428571429,
"grad_norm": 0.0870099738240242,
"learning_rate": 8.955223880597016e-05,
"loss": 0.4428,
"step": 180
},
{
"epoch": 0.6785714285714286,
"grad_norm": 0.06115978583693504,
"learning_rate": 8.895522388059702e-05,
"loss": 0.4274,
"step": 190
},
{
"epoch": 0.7142857142857143,
"grad_norm": 0.052583467215299606,
"learning_rate": 8.835820895522389e-05,
"loss": 0.4513,
"step": 200
},
{
"epoch": 0.75,
"grad_norm": 0.054836906492710114,
"learning_rate": 8.776119402985074e-05,
"loss": 0.454,
"step": 210
},
{
"epoch": 0.7857142857142857,
"grad_norm": 0.05478864163160324,
"learning_rate": 8.716417910447762e-05,
"loss": 0.4935,
"step": 220
},
{
"epoch": 0.8214285714285714,
"grad_norm": 0.04880619794130325,
"learning_rate": 8.656716417910447e-05,
"loss": 0.5444,
"step": 230
},
{
"epoch": 0.8571428571428571,
"grad_norm": 0.05223756283521652,
"learning_rate": 8.597014925373134e-05,
"loss": 0.4363,
"step": 240
},
{
"epoch": 0.8928571428571429,
"grad_norm": 0.07902172952890396,
"learning_rate": 8.537313432835822e-05,
"loss": 0.4591,
"step": 250
},
{
"epoch": 0.9285714285714286,
"grad_norm": 0.05140472576022148,
"learning_rate": 8.477611940298507e-05,
"loss": 0.4274,
"step": 260
},
{
"epoch": 0.9642857142857143,
"grad_norm": 0.06524641066789627,
"learning_rate": 8.417910447761194e-05,
"loss": 0.4718,
"step": 270
},
{
"epoch": 1.0,
"grad_norm": 0.06143767759203911,
"learning_rate": 8.358208955223881e-05,
"loss": 0.4073,
"step": 280
},
{
"epoch": 1.0357142857142858,
"grad_norm": 0.061438463628292084,
"learning_rate": 8.298507462686567e-05,
"loss": 0.4232,
"step": 290
},
{
"epoch": 1.0714285714285714,
"grad_norm": 0.06389790773391724,
"learning_rate": 8.238805970149254e-05,
"loss": 0.4539,
"step": 300
},
{
"epoch": 1.1071428571428572,
"grad_norm": 0.06448990106582642,
"learning_rate": 8.179104477611941e-05,
"loss": 0.3898,
"step": 310
},
{
"epoch": 1.1428571428571428,
"grad_norm": 0.07985088974237442,
"learning_rate": 8.119402985074627e-05,
"loss": 0.3905,
"step": 320
},
{
"epoch": 1.1785714285714286,
"grad_norm": 0.06921962648630142,
"learning_rate": 8.059701492537314e-05,
"loss": 0.471,
"step": 330
},
{
"epoch": 1.2142857142857142,
"grad_norm": 0.09291083365678787,
"learning_rate": 8e-05,
"loss": 0.3922,
"step": 340
},
{
"epoch": 1.25,
"grad_norm": 0.0728558897972107,
"learning_rate": 7.940298507462687e-05,
"loss": 0.4263,
"step": 350
},
{
"epoch": 1.2857142857142856,
"grad_norm": 0.0626588836312294,
"learning_rate": 7.880597014925374e-05,
"loss": 0.4269,
"step": 360
},
{
"epoch": 1.3214285714285714,
"grad_norm": 0.0737665668129921,
"learning_rate": 7.820895522388059e-05,
"loss": 0.4146,
"step": 370
},
{
"epoch": 1.3571428571428572,
"grad_norm": 0.07175736874341965,
"learning_rate": 7.761194029850747e-05,
"loss": 0.5322,
"step": 380
},
{
"epoch": 1.3928571428571428,
"grad_norm": 0.0721152052283287,
"learning_rate": 7.701492537313433e-05,
"loss": 0.3717,
"step": 390
},
{
"epoch": 1.4285714285714286,
"grad_norm": 0.06777182966470718,
"learning_rate": 7.641791044776119e-05,
"loss": 0.4049,
"step": 400
},
{
"epoch": 1.4642857142857144,
"grad_norm": 0.08008120208978653,
"learning_rate": 7.582089552238806e-05,
"loss": 0.3878,
"step": 410
},
{
"epoch": 1.5,
"grad_norm": 0.08273333311080933,
"learning_rate": 7.522388059701493e-05,
"loss": 0.3975,
"step": 420
},
{
"epoch": 1.5357142857142856,
"grad_norm": 0.07972041517496109,
"learning_rate": 7.46268656716418e-05,
"loss": 0.3897,
"step": 430
},
{
"epoch": 1.5714285714285714,
"grad_norm": 0.07434827834367752,
"learning_rate": 7.402985074626866e-05,
"loss": 0.4029,
"step": 440
},
{
"epoch": 1.6071428571428572,
"grad_norm": 0.09134312719106674,
"learning_rate": 7.343283582089552e-05,
"loss": 0.4254,
"step": 450
},
{
"epoch": 1.6428571428571428,
"grad_norm": 0.10692407935857773,
"learning_rate": 7.283582089552239e-05,
"loss": 0.4101,
"step": 460
},
{
"epoch": 1.6785714285714286,
"grad_norm": 0.0963137298822403,
"learning_rate": 7.223880597014926e-05,
"loss": 0.417,
"step": 470
},
{
"epoch": 1.7142857142857144,
"grad_norm": 0.08129405975341797,
"learning_rate": 7.164179104477612e-05,
"loss": 0.4739,
"step": 480
},
{
"epoch": 1.75,
"grad_norm": 0.09193170815706253,
"learning_rate": 7.104477611940299e-05,
"loss": 0.3555,
"step": 490
},
{
"epoch": 1.7857142857142856,
"grad_norm": 0.07470010221004486,
"learning_rate": 7.044776119402984e-05,
"loss": 0.4238,
"step": 500
},
{
"epoch": 1.8214285714285714,
"grad_norm": 0.09832178801298141,
"learning_rate": 6.985074626865672e-05,
"loss": 0.4187,
"step": 510
},
{
"epoch": 1.8571428571428572,
"grad_norm": 0.08799290657043457,
"learning_rate": 6.925373134328359e-05,
"loss": 0.4022,
"step": 520
},
{
"epoch": 1.8928571428571428,
"grad_norm": 0.08097486197948456,
"learning_rate": 6.865671641791044e-05,
"loss": 0.428,
"step": 530
},
{
"epoch": 1.9285714285714286,
"grad_norm": 0.08261922746896744,
"learning_rate": 6.805970149253732e-05,
"loss": 0.5208,
"step": 540
},
{
"epoch": 1.9642857142857144,
"grad_norm": 0.0832519382238388,
"learning_rate": 6.746268656716418e-05,
"loss": 0.4323,
"step": 550
},
{
"epoch": 2.0,
"grad_norm": 0.07341128587722778,
"learning_rate": 6.686567164179106e-05,
"loss": 0.3885,
"step": 560
},
{
"epoch": 2.0357142857142856,
"grad_norm": 0.06848422437906265,
"learning_rate": 6.626865671641791e-05,
"loss": 0.3713,
"step": 570
},
{
"epoch": 2.0714285714285716,
"grad_norm": 0.08991721272468567,
"learning_rate": 6.567164179104478e-05,
"loss": 0.3903,
"step": 580
},
{
"epoch": 2.107142857142857,
"grad_norm": 0.12314411252737045,
"learning_rate": 6.507462686567164e-05,
"loss": 0.3742,
"step": 590
},
{
"epoch": 2.142857142857143,
"grad_norm": 0.0857195034623146,
"learning_rate": 6.447761194029851e-05,
"loss": 0.3501,
"step": 600
},
{
"epoch": 2.1785714285714284,
"grad_norm": 0.08988986909389496,
"learning_rate": 6.388059701492538e-05,
"loss": 0.3783,
"step": 610
},
{
"epoch": 2.2142857142857144,
"grad_norm": 0.1092318519949913,
"learning_rate": 6.328358208955224e-05,
"loss": 0.3741,
"step": 620
},
{
"epoch": 2.25,
"grad_norm": 0.11366426199674606,
"learning_rate": 6.268656716417911e-05,
"loss": 0.4403,
"step": 630
},
{
"epoch": 2.2857142857142856,
"grad_norm": 0.11339179426431656,
"learning_rate": 6.208955223880598e-05,
"loss": 0.4135,
"step": 640
},
{
"epoch": 2.3214285714285716,
"grad_norm": 0.11044107377529144,
"learning_rate": 6.149253731343284e-05,
"loss": 0.3279,
"step": 650
},
{
"epoch": 2.357142857142857,
"grad_norm": 0.11268908530473709,
"learning_rate": 6.08955223880597e-05,
"loss": 0.3495,
"step": 660
},
{
"epoch": 2.392857142857143,
"grad_norm": 0.11684803664684296,
"learning_rate": 6.029850746268657e-05,
"loss": 0.3811,
"step": 670
},
{
"epoch": 2.4285714285714284,
"grad_norm": 0.13463515043258667,
"learning_rate": 5.970149253731343e-05,
"loss": 0.3605,
"step": 680
},
{
"epoch": 2.4642857142857144,
"grad_norm": 0.0957857295870781,
"learning_rate": 5.91044776119403e-05,
"loss": 0.3896,
"step": 690
},
{
"epoch": 2.5,
"grad_norm": 0.10150361061096191,
"learning_rate": 5.8507462686567175e-05,
"loss": 0.3401,
"step": 700
},
{
"epoch": 2.5357142857142856,
"grad_norm": 0.10616330057382584,
"learning_rate": 5.7910447761194034e-05,
"loss": 0.3846,
"step": 710
},
{
"epoch": 2.571428571428571,
"grad_norm": 0.09289854764938354,
"learning_rate": 5.73134328358209e-05,
"loss": 0.346,
"step": 720
},
{
"epoch": 2.607142857142857,
"grad_norm": 0.13642659783363342,
"learning_rate": 5.671641791044776e-05,
"loss": 0.3898,
"step": 730
},
{
"epoch": 2.642857142857143,
"grad_norm": 0.10691198706626892,
"learning_rate": 5.6119402985074634e-05,
"loss": 0.3798,
"step": 740
},
{
"epoch": 2.678571428571429,
"grad_norm": 0.12347331643104553,
"learning_rate": 5.5522388059701494e-05,
"loss": 0.3919,
"step": 750
},
{
"epoch": 2.7142857142857144,
"grad_norm": 0.1209939643740654,
"learning_rate": 5.492537313432836e-05,
"loss": 0.3716,
"step": 760
},
{
"epoch": 2.75,
"grad_norm": 0.09330391138792038,
"learning_rate": 5.432835820895522e-05,
"loss": 0.3832,
"step": 770
},
{
"epoch": 2.7857142857142856,
"grad_norm": 0.13387814164161682,
"learning_rate": 5.373134328358209e-05,
"loss": 0.3602,
"step": 780
},
{
"epoch": 2.821428571428571,
"grad_norm": 0.11489560455083847,
"learning_rate": 5.313432835820896e-05,
"loss": 0.4088,
"step": 790
},
{
"epoch": 2.857142857142857,
"grad_norm": 0.13628144562244415,
"learning_rate": 5.253731343283582e-05,
"loss": 0.3813,
"step": 800
},
{
"epoch": 2.892857142857143,
"grad_norm": 0.11659189313650131,
"learning_rate": 5.194029850746269e-05,
"loss": 0.3797,
"step": 810
},
{
"epoch": 2.928571428571429,
"grad_norm": 0.12676863372325897,
"learning_rate": 5.134328358208955e-05,
"loss": 0.329,
"step": 820
},
{
"epoch": 2.9642857142857144,
"grad_norm": 0.10595127195119858,
"learning_rate": 5.074626865671642e-05,
"loss": 0.3765,
"step": 830
},
{
"epoch": 3.0,
"grad_norm": 0.11963572353124619,
"learning_rate": 5.014925373134328e-05,
"loss": 0.3701,
"step": 840
},
{
"epoch": 3.0357142857142856,
"grad_norm": 0.13174974918365479,
"learning_rate": 4.955223880597015e-05,
"loss": 0.2597,
"step": 850
},
{
"epoch": 3.0714285714285716,
"grad_norm": 0.16706831753253937,
"learning_rate": 4.895522388059702e-05,
"loss": 0.3411,
"step": 860
},
{
"epoch": 3.107142857142857,
"grad_norm": 0.12361631542444229,
"learning_rate": 4.8358208955223885e-05,
"loss": 0.3653,
"step": 870
},
{
"epoch": 3.142857142857143,
"grad_norm": 0.12208428233861923,
"learning_rate": 4.7761194029850745e-05,
"loss": 0.3632,
"step": 880
},
{
"epoch": 3.1785714285714284,
"grad_norm": 0.1192869320511818,
"learning_rate": 4.716417910447761e-05,
"loss": 0.3367,
"step": 890
},
{
"epoch": 3.2142857142857144,
"grad_norm": 0.1340082734823227,
"learning_rate": 4.656716417910448e-05,
"loss": 0.337,
"step": 900
},
{
"epoch": 3.25,
"grad_norm": 0.15136446058750153,
"learning_rate": 4.5970149253731345e-05,
"loss": 0.3168,
"step": 910
},
{
"epoch": 3.2857142857142856,
"grad_norm": 0.15890203416347504,
"learning_rate": 4.537313432835821e-05,
"loss": 0.3723,
"step": 920
},
{
"epoch": 3.3214285714285716,
"grad_norm": 0.14035534858703613,
"learning_rate": 4.477611940298508e-05,
"loss": 0.3235,
"step": 930
},
{
"epoch": 3.357142857142857,
"grad_norm": 0.14109626412391663,
"learning_rate": 4.4179104477611944e-05,
"loss": 0.3404,
"step": 940
},
{
"epoch": 3.392857142857143,
"grad_norm": 0.12994477152824402,
"learning_rate": 4.358208955223881e-05,
"loss": 0.3578,
"step": 950
},
{
"epoch": 3.4285714285714284,
"grad_norm": 0.15381306409835815,
"learning_rate": 4.298507462686567e-05,
"loss": 0.3197,
"step": 960
},
{
"epoch": 3.4642857142857144,
"grad_norm": 0.12908071279525757,
"learning_rate": 4.238805970149254e-05,
"loss": 0.334,
"step": 970
},
{
"epoch": 3.5,
"grad_norm": 0.16112634539604187,
"learning_rate": 4.1791044776119404e-05,
"loss": 0.3051,
"step": 980
},
{
"epoch": 3.5357142857142856,
"grad_norm": 0.15946915745735168,
"learning_rate": 4.119402985074627e-05,
"loss": 0.3806,
"step": 990
},
{
"epoch": 3.571428571428571,
"grad_norm": 0.13795320689678192,
"learning_rate": 4.059701492537314e-05,
"loss": 0.2901,
"step": 1000
}
],
"logging_steps": 10,
"max_steps": 1680,
"num_input_tokens_seen": 0,
"num_train_epochs": 6,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 4.0255297151791104e+17,
"train_batch_size": 5,
"trial_name": null,
"trial_params": null
}
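The JSON above follows the standard Hugging Face Transformers trainer_state.json layout: log_history holds one record per logging event (every 10 steps here), plus run-level fields such as max_steps and train_batch_size. As a minimal sketch (not part of the original upload), the snippet below shows one way the file could be inspected once downloaded locally; the filename "trainer_state.json" is an assumption, while the field names (log_history, loss, learning_rate, step) match the structure shown above.

    # sketch: summarize the logged training loss from a local copy of this file
    import json

    with open("trainer_state.json") as f:  # assumed local path
        state = json.load(f)

    # Each log_history entry is one logging event (logging_steps = 10 in this run).
    for record in state["log_history"]:
        print(f'step {record["step"]:>4}  '
              f'epoch {record["epoch"]:.2f}  '
              f'loss {record["loss"]:.4f}  '
              f'lr {record["learning_rate"]:.2e}')

    # Last logged point and overall progress toward max_steps.
    last = state["log_history"][-1]
    print(f'last loss {last["loss"]} at step {last["step"]} of {state["max_steps"]}')

Run on the data above, this would end with step 1000 of 1680, i.e. the checkpoint was saved a little over halfway through the planned 6-epoch run.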