{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.050057510927076145,
"eval_steps": 50,
"global_step": 34,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0014722797331492984,
"grad_norm": 0.26644960045814514,
"learning_rate": 7.499999999999999e-06,
"loss": 0.296,
"step": 1
},
{
"epoch": 0.0014722797331492984,
"eval_loss": 0.4772031009197235,
"eval_runtime": 847.2862,
"eval_samples_per_second": 2.7,
"eval_steps_per_second": 1.35,
"step": 1
},
{
"epoch": 0.0029445594662985968,
"grad_norm": 0.4351513385772705,
"learning_rate": 1.4999999999999999e-05,
"loss": 0.408,
"step": 2
},
{
"epoch": 0.004416839199447895,
"grad_norm": 0.3042258024215698,
"learning_rate": 2.2499999999999998e-05,
"loss": 0.393,
"step": 3
},
{
"epoch": 0.0058891189325971935,
"grad_norm": 0.3164433538913727,
"learning_rate": 2.9999999999999997e-05,
"loss": 0.409,
"step": 4
},
{
"epoch": 0.007361398665746492,
"grad_norm": 0.37835752964019775,
"learning_rate": 3.75e-05,
"loss": 0.2635,
"step": 5
},
{
"epoch": 0.00883367839889579,
"grad_norm": 0.22859500348567963,
"learning_rate": 4.4999999999999996e-05,
"loss": 0.2615,
"step": 6
},
{
"epoch": 0.010305958132045089,
"grad_norm": 0.37722575664520264,
"learning_rate": 5.2499999999999995e-05,
"loss": 0.296,
"step": 7
},
{
"epoch": 0.011778237865194387,
"grad_norm": 0.8506933450698853,
"learning_rate": 5.9999999999999995e-05,
"loss": 0.2655,
"step": 8
},
{
"epoch": 0.013250517598343685,
"grad_norm": 0.45279720425605774,
"learning_rate": 6.75e-05,
"loss": 0.259,
"step": 9
},
{
"epoch": 0.014722797331492983,
"grad_norm": 0.18526464700698853,
"learning_rate": 7.5e-05,
"loss": 0.2053,
"step": 10
},
{
"epoch": 0.016195077064642283,
"grad_norm": 0.20497167110443115,
"learning_rate": 8.25e-05,
"loss": 0.1554,
"step": 11
},
{
"epoch": 0.01766735679779158,
"grad_norm": 0.14627239108085632,
"learning_rate": 8.999999999999999e-05,
"loss": 0.1306,
"step": 12
},
{
"epoch": 0.01913963653094088,
"grad_norm": 0.240932434797287,
"learning_rate": 9.75e-05,
"loss": 0.1564,
"step": 13
},
{
"epoch": 0.020611916264090178,
"grad_norm": 0.3040555715560913,
"learning_rate": 0.00010499999999999999,
"loss": 0.1677,
"step": 14
},
{
"epoch": 0.022084195997239476,
"grad_norm": 0.4959651231765747,
"learning_rate": 0.0001125,
"loss": 0.3867,
"step": 15
},
{
"epoch": 0.023556475730388774,
"grad_norm": 0.20291835069656372,
"learning_rate": 0.00011999999999999999,
"loss": 0.2207,
"step": 16
},
{
"epoch": 0.025028755463538072,
"grad_norm": 0.16518566012382507,
"learning_rate": 0.00012749999999999998,
"loss": 0.1309,
"step": 17
},
{
"epoch": 0.02650103519668737,
"grad_norm": 0.18677189946174622,
"learning_rate": 0.000135,
"loss": 0.181,
"step": 18
},
{
"epoch": 0.02797331492983667,
"grad_norm": 0.1576640009880066,
"learning_rate": 0.0001425,
"loss": 0.1562,
"step": 19
},
{
"epoch": 0.029445594662985967,
"grad_norm": 0.19666102528572083,
"learning_rate": 0.00015,
"loss": 0.2439,
"step": 20
},
{
"epoch": 0.030917874396135265,
"grad_norm": 0.12512515485286713,
"learning_rate": 0.00014998857713672935,
"loss": 0.144,
"step": 21
},
{
"epoch": 0.03239015412928457,
"grad_norm": 0.1219751164317131,
"learning_rate": 0.00014995431202643217,
"loss": 0.1047,
"step": 22
},
{
"epoch": 0.033862433862433865,
"grad_norm": 0.1390693038702011,
"learning_rate": 0.000149897215106593,
"loss": 0.1022,
"step": 23
},
{
"epoch": 0.03533471359558316,
"grad_norm": 0.19809921085834503,
"learning_rate": 0.0001498173037694868,
"loss": 0.24,
"step": 24
},
{
"epoch": 0.03680699332873246,
"grad_norm": 0.17452572286128998,
"learning_rate": 0.0001497146023568809,
"loss": 0.2497,
"step": 25
},
{
"epoch": 0.03827927306188176,
"grad_norm": 0.10941721498966217,
"learning_rate": 0.00014958914215262048,
"loss": 0.0914,
"step": 26
},
{
"epoch": 0.03975155279503106,
"grad_norm": 0.12793776392936707,
"learning_rate": 0.00014944096137309914,
"loss": 0.114,
"step": 27
},
{
"epoch": 0.041223832528180356,
"grad_norm": 0.14925755560398102,
"learning_rate": 0.00014927010515561776,
"loss": 0.1931,
"step": 28
},
{
"epoch": 0.042696112261329654,
"grad_norm": 0.12749770283699036,
"learning_rate": 0.00014907662554463532,
"loss": 0.1342,
"step": 29
},
{
"epoch": 0.04416839199447895,
"grad_norm": 0.12666098773479462,
"learning_rate": 0.0001488605814759156,
"loss": 0.1404,
"step": 30
},
{
"epoch": 0.04564067172762825,
"grad_norm": 0.12451935559511185,
"learning_rate": 0.00014862203875857477,
"loss": 0.1297,
"step": 31
},
{
"epoch": 0.04711295146077755,
"grad_norm": 0.12354013323783875,
"learning_rate": 0.0001483610700550354,
"loss": 0.0667,
"step": 32
},
{
"epoch": 0.048585231193926846,
"grad_norm": 0.11861127614974976,
"learning_rate": 0.00014807775485889264,
"loss": 0.1057,
"step": 33
},
{
"epoch": 0.050057510927076145,
"grad_norm": 0.11489235609769821,
"learning_rate": 0.0001477721794706997,
"loss": 0.0717,
"step": 34
}
],
"logging_steps": 1,
"max_steps": 200,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 17,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 3.597635405563822e+17,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}
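
For reference, a minimal sketch (not part of the checkpoint itself) of how one might inspect this state with the Python standard library; the filename "trainer_state.json" is assumed to be where the JSON above is saved.

# Minimal sketch: load the trainer state above and summarize logged losses.
# Assumes the JSON is saved locally as "trainer_state.json".
import json

with open("trainer_state.json") as f:
    state = json.load(f)

# Entries with "loss" are training logs; entries with "eval_loss" are eval logs.
train_logs = [e for e in state["log_history"] if "loss" in e]
eval_logs = [e for e in state["log_history"] if "eval_loss" in e]

print(f"global_step={state['global_step']}, epoch={state['epoch']:.4f}")
print(f"last train loss: {train_logs[-1]['loss']} at step {train_logs[-1]['step']}")
if eval_logs:
    print(f"last eval loss: {eval_logs[-1]['eval_loss']} at step {eval_logs[-1]['step']}")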