{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.7857142857142856,
"eval_steps": 500,
"global_step": 500,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.03571428571428571,
"grad_norm": 0.2544403374195099,
"learning_rate": 9.970149253731344e-05,
"loss": 1.4916,
"step": 10
},
{
"epoch": 0.07142857142857142,
"grad_norm": 0.17301955819129944,
"learning_rate": 9.91044776119403e-05,
"loss": 0.908,
"step": 20
},
{
"epoch": 0.10714285714285714,
"grad_norm": 0.11398938298225403,
"learning_rate": 9.850746268656717e-05,
"loss": 0.4285,
"step": 30
},
{
"epoch": 0.14285714285714285,
"grad_norm": 0.08889368921518326,
"learning_rate": 9.791044776119404e-05,
"loss": 0.3305,
"step": 40
},
{
"epoch": 0.17857142857142858,
"grad_norm": 0.07145170122385025,
"learning_rate": 9.731343283582089e-05,
"loss": 0.3709,
"step": 50
},
{
"epoch": 0.21428571428571427,
"grad_norm": 0.08652878552675247,
"learning_rate": 9.671641791044777e-05,
"loss": 0.3822,
"step": 60
},
{
"epoch": 0.25,
"grad_norm": 0.0885363519191742,
"learning_rate": 9.611940298507464e-05,
"loss": 0.3799,
"step": 70
},
{
"epoch": 0.2857142857142857,
"grad_norm": 0.07026822865009308,
"learning_rate": 9.552238805970149e-05,
"loss": 0.4085,
"step": 80
},
{
"epoch": 0.32142857142857145,
"grad_norm": 0.07123386859893799,
"learning_rate": 9.492537313432837e-05,
"loss": 0.2925,
"step": 90
},
{
"epoch": 0.35714285714285715,
"grad_norm": 0.07839447259902954,
"learning_rate": 9.432835820895522e-05,
"loss": 0.3455,
"step": 100
},
{
"epoch": 0.39285714285714285,
"grad_norm": 0.0755082443356514,
"learning_rate": 9.373134328358209e-05,
"loss": 0.3406,
"step": 110
},
{
"epoch": 0.42857142857142855,
"grad_norm": 0.0823201835155487,
"learning_rate": 9.313432835820896e-05,
"loss": 0.3259,
"step": 120
},
{
"epoch": 0.4642857142857143,
"grad_norm": 0.08403510600328445,
"learning_rate": 9.253731343283582e-05,
"loss": 0.3017,
"step": 130
},
{
"epoch": 0.5,
"grad_norm": 0.10145727545022964,
"learning_rate": 9.194029850746269e-05,
"loss": 0.3473,
"step": 140
},
{
"epoch": 0.5357142857142857,
"grad_norm": 0.07516448199748993,
"learning_rate": 9.134328358208956e-05,
"loss": 0.2865,
"step": 150
},
{
"epoch": 0.5714285714285714,
"grad_norm": 0.07223515212535858,
"learning_rate": 9.074626865671642e-05,
"loss": 0.3045,
"step": 160
},
{
"epoch": 0.6071428571428571,
"grad_norm": 0.07799769937992096,
"learning_rate": 9.014925373134329e-05,
"loss": 0.3306,
"step": 170
},
{
"epoch": 0.6428571428571429,
"grad_norm": 0.08388201892375946,
"learning_rate": 8.955223880597016e-05,
"loss": 0.309,
"step": 180
},
{
"epoch": 0.6785714285714286,
"grad_norm": 0.0838930755853653,
"learning_rate": 8.895522388059702e-05,
"loss": 0.3356,
"step": 190
},
{
"epoch": 0.7142857142857143,
"grad_norm": 0.07891852408647537,
"learning_rate": 8.835820895522389e-05,
"loss": 0.3048,
"step": 200
},
{
"epoch": 0.75,
"grad_norm": 0.07239601761102676,
"learning_rate": 8.776119402985074e-05,
"loss": 0.3601,
"step": 210
},
{
"epoch": 0.7857142857142857,
"grad_norm": 0.07549150288105011,
"learning_rate": 8.716417910447762e-05,
"loss": 0.3415,
"step": 220
},
{
"epoch": 0.8214285714285714,
"grad_norm": 0.08595260232686996,
"learning_rate": 8.656716417910447e-05,
"loss": 0.3521,
"step": 230
},
{
"epoch": 0.8571428571428571,
"grad_norm": 0.08356776833534241,
"learning_rate": 8.597014925373134e-05,
"loss": 0.2898,
"step": 240
},
{
"epoch": 0.8928571428571429,
"grad_norm": 0.07699938863515854,
"learning_rate": 8.537313432835822e-05,
"loss": 0.3038,
"step": 250
},
{
"epoch": 0.9285714285714286,
"grad_norm": 0.07691926509141922,
"learning_rate": 8.477611940298507e-05,
"loss": 0.3403,
"step": 260
},
{
"epoch": 0.9642857142857143,
"grad_norm": 0.07808340340852737,
"learning_rate": 8.417910447761194e-05,
"loss": 0.3164,
"step": 270
},
{
"epoch": 1.0,
"grad_norm": 0.07639653235673904,
"learning_rate": 8.358208955223881e-05,
"loss": 0.2872,
"step": 280
},
{
"epoch": 1.0357142857142858,
"grad_norm": 0.07973363250494003,
"learning_rate": 8.298507462686567e-05,
"loss": 0.3111,
"step": 290
},
{
"epoch": 1.0714285714285714,
"grad_norm": 0.08878948539495468,
"learning_rate": 8.238805970149254e-05,
"loss": 0.317,
"step": 300
},
{
"epoch": 1.1071428571428572,
"grad_norm": 0.07322458922863007,
"learning_rate": 8.179104477611941e-05,
"loss": 0.2999,
"step": 310
},
{
"epoch": 1.1428571428571428,
"grad_norm": 0.08571141958236694,
"learning_rate": 8.119402985074627e-05,
"loss": 0.2642,
"step": 320
},
{
"epoch": 1.1785714285714286,
"grad_norm": 0.10807636380195618,
"learning_rate": 8.059701492537314e-05,
"loss": 0.3024,
"step": 330
},
{
"epoch": 1.2142857142857142,
"grad_norm": 0.1146746426820755,
"learning_rate": 8e-05,
"loss": 0.2788,
"step": 340
},
{
"epoch": 1.25,
"grad_norm": 0.10096397995948792,
"learning_rate": 7.940298507462687e-05,
"loss": 0.278,
"step": 350
},
{
"epoch": 1.2857142857142856,
"grad_norm": 0.08820641040802002,
"learning_rate": 7.880597014925374e-05,
"loss": 0.3123,
"step": 360
},
{
"epoch": 1.3214285714285714,
"grad_norm": 0.09731289744377136,
"learning_rate": 7.820895522388059e-05,
"loss": 0.2962,
"step": 370
},
{
"epoch": 1.3571428571428572,
"grad_norm": 0.09726134687662125,
"learning_rate": 7.761194029850747e-05,
"loss": 0.2723,
"step": 380
},
{
"epoch": 1.3928571428571428,
"grad_norm": 0.1366291046142578,
"learning_rate": 7.701492537313433e-05,
"loss": 0.2549,
"step": 390
},
{
"epoch": 1.4285714285714286,
"grad_norm": 0.09392824023962021,
"learning_rate": 7.641791044776119e-05,
"loss": 0.2785,
"step": 400
},
{
"epoch": 1.4642857142857144,
"grad_norm": 0.10537943243980408,
"learning_rate": 7.582089552238806e-05,
"loss": 0.2692,
"step": 410
},
{
"epoch": 1.5,
"grad_norm": 0.11430277675390244,
"learning_rate": 7.522388059701493e-05,
"loss": 0.2787,
"step": 420
},
{
"epoch": 1.5357142857142856,
"grad_norm": 0.1093386635184288,
"learning_rate": 7.46268656716418e-05,
"loss": 0.2953,
"step": 430
},
{
"epoch": 1.5714285714285714,
"grad_norm": 0.10960441827774048,
"learning_rate": 7.402985074626866e-05,
"loss": 0.2906,
"step": 440
},
{
"epoch": 1.6071428571428572,
"grad_norm": 0.11768423765897751,
"learning_rate": 7.343283582089552e-05,
"loss": 0.3433,
"step": 450
},
{
"epoch": 1.6428571428571428,
"grad_norm": 0.10420206189155579,
"learning_rate": 7.283582089552239e-05,
"loss": 0.2769,
"step": 460
},
{
"epoch": 1.6785714285714286,
"grad_norm": 0.090525321662426,
"learning_rate": 7.223880597014926e-05,
"loss": 0.2859,
"step": 470
},
{
"epoch": 1.7142857142857144,
"grad_norm": 0.11874176561832428,
"learning_rate": 7.164179104477612e-05,
"loss": 0.3243,
"step": 480
},
{
"epoch": 1.75,
"grad_norm": 0.08634401112794876,
"learning_rate": 7.104477611940299e-05,
"loss": 0.2398,
"step": 490
},
{
"epoch": 1.7857142857142856,
"grad_norm": 0.10205753147602081,
"learning_rate": 7.044776119402984e-05,
"loss": 0.3725,
"step": 500
}
],
"logging_steps": 10,
"max_steps": 1680,
"num_input_tokens_seen": 0,
"num_train_epochs": 6,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 1.0775686821482496e+17,
"train_batch_size": 5,
"trial_name": null,
"trial_params": null
}
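
This appears to be the trainer_state.json that the Hugging Face transformers Trainer writes into each checkpoint folder. The following is a minimal illustrative sketch, not part of the checkpoint itself: assuming the file above is saved locally under its default name trainer_state.json, it parses log_history and plots the logged training loss against the global step (matplotlib is an added dependency used only for the plot).

import json

import matplotlib.pyplot as plt

# Load the checkpoint state shown above (filename is an assumption).
with open("trainer_state.json") as f:
    state = json.load(f)

# Each log_history entry logged during training carries "step" and "loss".
entries = [e for e in state["log_history"] if "loss" in e]
steps = [e["step"] for e in entries]
losses = [e["loss"] for e in entries]

plt.plot(steps, losses)
plt.xlabel("global step")
plt.ylabel("training loss")
plt.title(
    f"Loss over {state['epoch']:.2f} epochs "
    f"(logged every {state['logging_steps']} steps)"
)
plt.show()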