{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 12.421052631578947,
"eval_steps": 500,
"global_step": 56,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.21052631578947367,
"grad_norm": 19.218000411987305,
"learning_rate": 2e-05,
"loss": 3.886,
"step": 1
},
{
"epoch": 0.42105263157894735,
"grad_norm": 19.777868270874023,
"learning_rate": 4e-05,
"loss": 4.4615,
"step": 2
},
{
"epoch": 0.631578947368421,
"grad_norm": 17.180343627929688,
"learning_rate": 6e-05,
"loss": 3.7595,
"step": 3
},
{
"epoch": 0.8421052631578947,
"grad_norm": 17.781330108642578,
"learning_rate": 8e-05,
"loss": 3.3302,
"step": 4
},
{
"epoch": 1.1052631578947367,
"grad_norm": 20.543649673461914,
"learning_rate": 0.0001,
"loss": 4.6154,
"step": 5
},
{
"epoch": 1.3157894736842106,
"grad_norm": 11.29702091217041,
"learning_rate": 0.00012,
"loss": 2.5755,
"step": 6
},
{
"epoch": 1.526315789473684,
"grad_norm": 8.194554328918457,
"learning_rate": 0.00014,
"loss": 1.6726,
"step": 7
},
{
"epoch": 1.736842105263158,
"grad_norm": 6.040785312652588,
"learning_rate": 0.00016,
"loss": 1.2606,
"step": 8
},
{
"epoch": 1.9473684210526314,
"grad_norm": 12.296614646911621,
"learning_rate": 0.00018,
"loss": 1.9246,
"step": 9
},
{
"epoch": 2.2105263157894735,
"grad_norm": 6.707492828369141,
"learning_rate": 0.0002,
"loss": 1.0017,
"step": 10
},
{
"epoch": 2.4210526315789473,
"grad_norm": 4.79848575592041,
"learning_rate": 0.00019976687691905393,
"loss": 0.6978,
"step": 11
},
{
"epoch": 2.6315789473684212,
"grad_norm": 3.3441014289855957,
"learning_rate": 0.00019906859460363307,
"loss": 0.5996,
"step": 12
},
{
"epoch": 2.8421052631578947,
"grad_norm": 3.113868474960327,
"learning_rate": 0.00019790840876823232,
"loss": 0.5782,
"step": 13
},
{
"epoch": 3.1052631578947367,
"grad_norm": 7.5327229499816895,
"learning_rate": 0.00019629172873477995,
"loss": 0.8014,
"step": 14
},
{
"epoch": 3.3157894736842106,
"grad_norm": 2.6903626918792725,
"learning_rate": 0.00019422609221188207,
"loss": 0.3285,
"step": 15
},
{
"epoch": 3.526315789473684,
"grad_norm": 3.672346591949463,
"learning_rate": 0.00019172113015054532,
"loss": 0.3814,
"step": 16
},
{
"epoch": 3.736842105263158,
"grad_norm": 1.4505695104599,
"learning_rate": 0.0001887885218402375,
"loss": 0.2401,
"step": 17
},
{
"epoch": 3.9473684210526314,
"grad_norm": 2.6290504932403564,
"learning_rate": 0.00018544194045464886,
"loss": 0.3272,
"step": 18
},
{
"epoch": 4.2105263157894735,
"grad_norm": 1.3445160388946533,
"learning_rate": 0.0001816969893010442,
"loss": 0.1871,
"step": 19
},
{
"epoch": 4.421052631578947,
"grad_norm": 1.851083755493164,
"learning_rate": 0.000177571129070442,
"loss": 0.1781,
"step": 20
},
{
"epoch": 4.631578947368421,
"grad_norm": 1.0952624082565308,
"learning_rate": 0.00017308359642781242,
"loss": 0.1966,
"step": 21
},
{
"epoch": 4.842105263157895,
"grad_norm": 2.4012107849121094,
"learning_rate": 0.00016825531432186543,
"loss": 0.1738,
"step": 22
},
{
"epoch": 5.105263157894737,
"grad_norm": 1.6361955404281616,
"learning_rate": 0.00016310879443260528,
"loss": 0.2131,
"step": 23
},
{
"epoch": 5.315789473684211,
"grad_norm": 2.245840072631836,
"learning_rate": 0.00015766803221148673,
"loss": 0.1646,
"step": 24
},
{
"epoch": 5.526315789473684,
"grad_norm": 1.684137225151062,
"learning_rate": 0.00015195839500354335,
"loss": 0.184,
"step": 25
},
{
"epoch": 5.7368421052631575,
"grad_norm": 0.9733522534370422,
"learning_rate": 0.00014600650377311522,
"loss": 0.1465,
"step": 26
},
{
"epoch": 5.947368421052632,
"grad_norm": 2.4283876419067383,
"learning_rate": 0.00013984010898462416,
"loss": 0.2177,
"step": 27
},
{
"epoch": 6.2105263157894735,
"grad_norm": 1.4879282712936401,
"learning_rate": 0.00013348796121709862,
"loss": 0.1299,
"step": 28
},
{
"epoch": 6.421052631578947,
"grad_norm": 1.0862969160079956,
"learning_rate": 0.00012697967711570242,
"loss": 0.1115,
"step": 29
},
{
"epoch": 6.631578947368421,
"grad_norm": 0.7216919660568237,
"learning_rate": 0.0001203456013052634,
"loss": 0.0971,
"step": 30
},
{
"epoch": 6.842105263157895,
"grad_norm": 1.4882885217666626,
"learning_rate": 0.00011361666490962468,
"loss": 0.1453,
"step": 31
},
{
"epoch": 7.105263157894737,
"grad_norm": 1.220837950706482,
"learning_rate": 0.0001068242413364671,
"loss": 0.1634,
"step": 32
},
{
"epoch": 7.315789473684211,
"grad_norm": 0.6117944121360779,
"learning_rate": 0.0001,
"loss": 0.1026,
"step": 33
},
{
"epoch": 7.526315789473684,
"grad_norm": 0.9110010266304016,
"learning_rate": 9.317575866353292e-05,
"loss": 0.103,
"step": 34
},
{
"epoch": 7.7368421052631575,
"grad_norm": 0.9700250625610352,
"learning_rate": 8.638333509037536e-05,
"loss": 0.1205,
"step": 35
},
{
"epoch": 7.947368421052632,
"grad_norm": 0.9124184250831604,
"learning_rate": 7.965439869473664e-05,
"loss": 0.1095,
"step": 36
},
{
"epoch": 8.210526315789474,
"grad_norm": 0.49950140714645386,
"learning_rate": 7.302032288429756e-05,
"loss": 0.0796,
"step": 37
},
{
"epoch": 8.421052631578947,
"grad_norm": 0.8700776100158691,
"learning_rate": 6.651203878290139e-05,
"loss": 0.0774,
"step": 38
},
{
"epoch": 8.631578947368421,
"grad_norm": 0.7889358401298523,
"learning_rate": 6.015989101537586e-05,
"loss": 0.0835,
"step": 39
},
{
"epoch": 8.842105263157894,
"grad_norm": 0.5236338973045349,
"learning_rate": 5.399349622688479e-05,
"loss": 0.0581,
"step": 40
},
{
"epoch": 9.105263157894736,
"grad_norm": 0.9376251697540283,
"learning_rate": 4.804160499645667e-05,
"loss": 0.0936,
"step": 41
},
{
"epoch": 9.31578947368421,
"grad_norm": 0.7259882092475891,
"learning_rate": 4.2331967788513295e-05,
"loss": 0.0712,
"step": 42
},
{
"epoch": 9.526315789473685,
"grad_norm": 0.5035570859909058,
"learning_rate": 3.689120556739475e-05,
"loss": 0.0776,
"step": 43
},
{
"epoch": 9.736842105263158,
"grad_norm": 0.4928078055381775,
"learning_rate": 3.174468567813461e-05,
"loss": 0.0483,
"step": 44
},
{
"epoch": 9.947368421052632,
"grad_norm": 1.1208339929580688,
"learning_rate": 2.691640357218759e-05,
"loss": 0.1032,
"step": 45
},
{
"epoch": 10.210526315789474,
"grad_norm": 0.5036032795906067,
"learning_rate": 2.242887092955801e-05,
"loss": 0.0745,
"step": 46
},
{
"epoch": 10.421052631578947,
"grad_norm": 0.4454878866672516,
"learning_rate": 1.8303010698955804e-05,
"loss": 0.0526,
"step": 47
},
{
"epoch": 10.631578947368421,
"grad_norm": 0.48385077714920044,
"learning_rate": 1.4558059545351143e-05,
"loss": 0.0609,
"step": 48
},
{
"epoch": 10.842105263157894,
"grad_norm": 0.8407765030860901,
"learning_rate": 1.1211478159762478e-05,
"loss": 0.0638,
"step": 49
},
{
"epoch": 11.105263157894736,
"grad_norm": 0.4083283841609955,
"learning_rate": 8.278869849454718e-06,
"loss": 0.065,
"step": 50
},
{
"epoch": 11.31578947368421,
"grad_norm": 0.4340791702270508,
"learning_rate": 5.77390778811796e-06,
"loss": 0.0545,
"step": 51
},
{
"epoch": 11.526315789473685,
"grad_norm": 0.5537849068641663,
"learning_rate": 3.7082712652200867e-06,
"loss": 0.0511,
"step": 52
},
{
"epoch": 11.736842105263158,
"grad_norm": 0.36356380581855774,
"learning_rate": 2.091591231767709e-06,
"loss": 0.0553,
"step": 53
},
{
"epoch": 11.947368421052632,
"grad_norm": 1.3945286273956299,
"learning_rate": 9.314053963669245e-07,
"loss": 0.1255,
"step": 54
},
{
"epoch": 12.210526315789474,
"grad_norm": 0.5195576548576355,
"learning_rate": 2.3312308094607382e-07,
"loss": 0.0559,
"step": 55
},
{
"epoch": 12.421052631578947,
"grad_norm": 0.5838670134544373,
"learning_rate": 0.0,
"loss": 0.0631,
"step": 56
}
],
"logging_steps": 1,
"max_steps": 56,
"num_input_tokens_seen": 0,
"num_train_epochs": 14,
"save_steps": 4,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 50350673362944.0,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}