{ "best_metric": null, "best_model_checkpoint": null, "epoch": 0.1953125, "eval_steps": 9, "global_step": 50, "is_hyper_param_search": false, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 0.00390625, "grad_norm": 0.9358924627304077, "learning_rate": 1e-05, "loss": 0.4079, "step": 1 }, { "epoch": 0.00390625, "eval_loss": 0.4379751980304718, "eval_runtime": 37.4931, "eval_samples_per_second": 5.761, "eval_steps_per_second": 0.72, "step": 1 }, { "epoch": 0.0078125, "grad_norm": 0.9645170569419861, "learning_rate": 2e-05, "loss": 0.3906, "step": 2 }, { "epoch": 0.01171875, "grad_norm": 1.000429630279541, "learning_rate": 3e-05, "loss": 0.4281, "step": 3 }, { "epoch": 0.015625, "grad_norm": 0.6437779664993286, "learning_rate": 4e-05, "loss": 0.3714, "step": 4 }, { "epoch": 0.01953125, "grad_norm": 0.8947291374206543, "learning_rate": 5e-05, "loss": 0.3801, "step": 5 }, { "epoch": 0.0234375, "grad_norm": 0.7578244805335999, "learning_rate": 6e-05, "loss": 0.3648, "step": 6 }, { "epoch": 0.02734375, "grad_norm": 0.6235212087631226, "learning_rate": 7e-05, "loss": 0.3909, "step": 7 }, { "epoch": 0.03125, "grad_norm": 0.4007953405380249, "learning_rate": 8e-05, "loss": 0.3141, "step": 8 }, { "epoch": 0.03515625, "grad_norm": 0.3965928554534912, "learning_rate": 9e-05, "loss": 0.2924, "step": 9 }, { "epoch": 0.03515625, "eval_loss": 0.2510845959186554, "eval_runtime": 37.7104, "eval_samples_per_second": 5.728, "eval_steps_per_second": 0.716, "step": 9 }, { "epoch": 0.0390625, "grad_norm": 0.4188036322593689, "learning_rate": 0.0001, "loss": 0.2828, "step": 10 }, { "epoch": 0.04296875, "grad_norm": 0.3323066830635071, "learning_rate": 9.99695413509548e-05, "loss": 0.2415, "step": 11 }, { "epoch": 0.046875, "grad_norm": 0.2657626271247864, "learning_rate": 9.987820251299122e-05, "loss": 0.1928, "step": 12 }, { "epoch": 0.05078125, "grad_norm": 0.2819468080997467, "learning_rate": 9.972609476841367e-05, "loss": 0.1859, "step": 13 }, { "epoch": 0.0546875, "grad_norm": 0.21283726394176483, "learning_rate": 9.951340343707852e-05, "loss": 0.1895, "step": 14 }, { "epoch": 0.05859375, "grad_norm": 0.22380894422531128, "learning_rate": 9.924038765061042e-05, "loss": 0.1606, "step": 15 }, { "epoch": 0.0625, "grad_norm": 0.26086336374282837, "learning_rate": 9.890738003669029e-05, "loss": 0.1618, "step": 16 }, { "epoch": 0.06640625, "grad_norm": 0.17846892774105072, "learning_rate": 9.851478631379982e-05, "loss": 0.1524, "step": 17 }, { "epoch": 0.0703125, "grad_norm": 0.16768787801265717, "learning_rate": 9.806308479691595e-05, "loss": 0.1242, "step": 18 }, { "epoch": 0.0703125, "eval_loss": 0.14291226863861084, "eval_runtime": 37.7214, "eval_samples_per_second": 5.726, "eval_steps_per_second": 0.716, "step": 18 }, { "epoch": 0.07421875, "grad_norm": 0.16563105583190918, "learning_rate": 9.755282581475769e-05, "loss": 0.1247, "step": 19 }, { "epoch": 0.078125, "grad_norm": 0.17445707321166992, "learning_rate": 9.698463103929542e-05, "loss": 0.1217, "step": 20 }, { "epoch": 0.08203125, "grad_norm": 0.16826312243938446, "learning_rate": 9.635919272833938e-05, "loss": 0.118, "step": 21 }, { "epoch": 0.0859375, "grad_norm": 0.14247825741767883, "learning_rate": 9.567727288213005e-05, "loss": 0.1009, "step": 22 }, { "epoch": 0.08984375, "grad_norm": 0.20849590003490448, "learning_rate": 9.493970231495835e-05, "loss": 0.1035, "step": 23 }, { "epoch": 0.09375, "grad_norm": 0.17949005961418152, "learning_rate": 9.414737964294636e-05, "loss": 0.1156, "step": 24 }, { "epoch": 
0.09765625, "grad_norm": 0.19471654295921326, "learning_rate": 9.330127018922194e-05, "loss": 0.1187, "step": 25 }, { "epoch": 0.1015625, "grad_norm": 0.18970464169979095, "learning_rate": 9.24024048078213e-05, "loss": 0.1302, "step": 26 }, { "epoch": 0.10546875, "grad_norm": 0.13743029534816742, "learning_rate": 9.145187862775209e-05, "loss": 0.103, "step": 27 }, { "epoch": 0.10546875, "eval_loss": 0.11523790657520294, "eval_runtime": 37.6775, "eval_samples_per_second": 5.733, "eval_steps_per_second": 0.717, "step": 27 }, { "epoch": 0.109375, "grad_norm": 0.16887696087360382, "learning_rate": 9.045084971874738e-05, "loss": 0.0911, "step": 28 }, { "epoch": 0.11328125, "grad_norm": 0.1817857027053833, "learning_rate": 8.940053768033609e-05, "loss": 0.1184, "step": 29 }, { "epoch": 0.1171875, "grad_norm": 0.39407843351364136, "learning_rate": 8.83022221559489e-05, "loss": 0.1714, "step": 30 }, { "epoch": 0.12109375, "grad_norm": 0.14840836822986603, "learning_rate": 8.715724127386972e-05, "loss": 0.0822, "step": 31 }, { "epoch": 0.125, "grad_norm": 0.12886931002140045, "learning_rate": 8.596699001693255e-05, "loss": 0.0924, "step": 32 }, { "epoch": 0.12890625, "grad_norm": 0.131106898188591, "learning_rate": 8.473291852294987e-05, "loss": 0.1126, "step": 33 }, { "epoch": 0.1328125, "grad_norm": 0.20917432010173798, "learning_rate": 8.345653031794292e-05, "loss": 0.1214, "step": 34 }, { "epoch": 0.13671875, "grad_norm": 0.13371144235134125, "learning_rate": 8.213938048432697e-05, "loss": 0.1085, "step": 35 }, { "epoch": 0.140625, "grad_norm": 0.16854634881019592, "learning_rate": 8.07830737662829e-05, "loss": 0.1154, "step": 36 }, { "epoch": 0.140625, "eval_loss": 0.10245885699987411, "eval_runtime": 37.6755, "eval_samples_per_second": 5.733, "eval_steps_per_second": 0.717, "step": 36 }, { "epoch": 0.14453125, "grad_norm": 0.12487249076366425, "learning_rate": 7.938926261462366e-05, "loss": 0.109, "step": 37 }, { "epoch": 0.1484375, "grad_norm": 0.1517653465270996, "learning_rate": 7.795964517353735e-05, "loss": 0.0798, "step": 38 }, { "epoch": 0.15234375, "grad_norm": 0.16995568573474884, "learning_rate": 7.649596321166024e-05, "loss": 0.1015, "step": 39 }, { "epoch": 0.15625, "grad_norm": 0.09856864809989929, "learning_rate": 7.500000000000001e-05, "loss": 0.1026, "step": 40 }, { "epoch": 0.16015625, "grad_norm": 0.11969555914402008, "learning_rate": 7.347357813929454e-05, "loss": 0.099, "step": 41 }, { "epoch": 0.1640625, "grad_norm": 0.1942385733127594, "learning_rate": 7.191855733945387e-05, "loss": 0.069, "step": 42 }, { "epoch": 0.16796875, "grad_norm": 0.12041034549474716, "learning_rate": 7.033683215379002e-05, "loss": 0.1015, "step": 43 }, { "epoch": 0.171875, "grad_norm": 0.1598462611436844, "learning_rate": 6.873032967079561e-05, "loss": 0.0893, "step": 44 }, { "epoch": 0.17578125, "grad_norm": 0.17382952570915222, "learning_rate": 6.710100716628344e-05, "loss": 0.1179, "step": 45 }, { "epoch": 0.17578125, "eval_loss": 0.09511082619428635, "eval_runtime": 37.6196, "eval_samples_per_second": 5.742, "eval_steps_per_second": 0.718, "step": 45 }, { "epoch": 0.1796875, "grad_norm": 0.1521601378917694, "learning_rate": 6.545084971874738e-05, "loss": 0.0992, "step": 46 }, { "epoch": 0.18359375, "grad_norm": 0.18973639607429504, "learning_rate": 6.378186779084995e-05, "loss": 0.0867, "step": 47 }, { "epoch": 0.1875, "grad_norm": 0.1595524698495865, "learning_rate": 6.209609477998338e-05, "loss": 0.1163, "step": 48 }, { "epoch": 0.19140625, "grad_norm": 0.2963557243347168, "learning_rate": 
6.0395584540887963e-05, "loss": 0.0805, "step": 49 }, { "epoch": 0.1953125, "grad_norm": 0.09695882350206375, "learning_rate": 5.868240888334653e-05, "loss": 0.0806, "step": 50 } ], "logging_steps": 1, "max_steps": 100, "num_input_tokens_seen": 0, "num_train_epochs": 1, "save_steps": 25, "stateful_callbacks": { "TrainerControl": { "args": { "should_epoch_stop": false, "should_evaluate": false, "should_log": false, "should_save": true, "should_training_stop": false }, "attributes": {} } }, "total_flos": 5.487081170141184e+16, "train_batch_size": 8, "trial_name": null, "trial_params": null }
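
The state above is the trainer_state.json that a Hugging Face Trainer writes into a checkpoint directory, here at global step 50 of 100, logging every step and evaluating every 9 steps. A minimal sketch of how the train/eval loss curves in "log_history" could be read back and plotted, using only json and matplotlib; the checkpoint path is an assumption for illustration, not part of the file:

# Sketch only: load a trainer_state.json and plot its loss curves.
import json

import matplotlib.pyplot as plt

# Hypothetical path; point this at the actual checkpoint directory.
with open("checkpoint-50/trainer_state.json") as f:
    state = json.load(f)

# Training entries carry "loss"; evaluation entries carry "eval_loss".
train = [e for e in state["log_history"] if "loss" in e]
evals = [e for e in state["log_history"] if "eval_loss" in e]

plt.plot([e["step"] for e in train], [e["loss"] for e in train], label="train loss")
plt.plot([e["step"] for e in evals], [e["eval_loss"] for e in evals], label="eval loss")
plt.xlabel("step")
plt.ylabel("loss")
plt.legend()
plt.show()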