{
  "best_metric": 1.1528342962265015,
  "best_model_checkpoint": "miner_id_24/checkpoint-500",
  "epoch": 0.008418783990840364,
  "eval_steps": 50,
  "global_step": 500,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    { "epoch": 1.6837567981680726e-05, "eval_loss": 2.296457529067993, "eval_runtime": 1687.1865, "eval_samples_per_second": 14.822, "eval_steps_per_second": 3.706, "step": 1 },
    { "epoch": 0.00016837567981680727, "grad_norm": 1.795365571975708, "learning_rate": 4.2800000000000004e-05, "loss": 1.902, "step": 10 },
    { "epoch": 0.00033675135963361454, "grad_norm": 2.1661720275878906, "learning_rate": 8.560000000000001e-05, "loss": 1.3571, "step": 20 },
    { "epoch": 0.0005051270394504217, "grad_norm": 1.6164090633392334, "learning_rate": 0.0001284, "loss": 1.2606, "step": 30 },
    { "epoch": 0.0006735027192672291, "grad_norm": 2.0214858055114746, "learning_rate": 0.00017120000000000001, "loss": 1.3816, "step": 40 },
    { "epoch": 0.0008418783990840363, "grad_norm": 3.338027238845825, "learning_rate": 0.000214, "loss": 1.4541, "step": 50 },
    { "epoch": 0.0008418783990840363, "eval_loss": 1.289824366569519, "eval_runtime": 1685.1162, "eval_samples_per_second": 14.84, "eval_steps_per_second": 3.71, "step": 50 },
    { "epoch": 0.0010102540789008435, "grad_norm": 3.690471887588501, "learning_rate": 0.00021373935337780118, "loss": 1.1769, "step": 60 },
    { "epoch": 0.0011786297587176508, "grad_norm": 1.5708606243133545, "learning_rate": 0.00021295868335534802, "loss": 1.0838, "step": 70 },
    { "epoch": 0.0013470054385344582, "grad_norm": 1.608247995376587, "learning_rate": 0.0002116617932785172, "loss": 1.1606, "step": 80 },
    { "epoch": 0.0015153811183512653, "grad_norm": 2.022498607635498, "learning_rate": 0.00020985500146540012, "loss": 1.2215, "step": 90 },
    { "epoch": 0.0016837567981680727, "grad_norm": 4.2700910568237305, "learning_rate": 0.0002075471104240922, "loss": 1.4665, "step": 100 },
    { "epoch": 0.0016837567981680727, "eval_loss": 1.3164697885513306, "eval_runtime": 1686.7665, "eval_samples_per_second": 14.825, "eval_steps_per_second": 3.707, "step": 100 },
    { "epoch": 0.0018521324779848798, "grad_norm": 1.7091002464294434, "learning_rate": 0.00020474936396775828, "loss": 1.293, "step": 110 },
    { "epoch": 0.002020508157801687, "grad_norm": 1.684004783630371, "learning_rate": 0.00020147539243590517, "loss": 1.0287, "step": 120 },
    { "epoch": 0.0021888838376184945, "grad_norm": 1.815836787223816, "learning_rate": 0.00019774114628873756, "loss": 1.129, "step": 130 },
    { "epoch": 0.0023572595174353016, "grad_norm": 1.905654788017273, "learning_rate": 0.00019356481839811937, "loss": 1.1745, "step": 140 },
    { "epoch": 0.0025256351972521088, "grad_norm": 3.854990005493164, "learning_rate": 0.00018896675541373064, "loss": 1.538, "step": 150 },
    { "epoch": 0.0025256351972521088, "eval_loss": 1.274539589881897, "eval_runtime": 1688.0862, "eval_samples_per_second": 14.814, "eval_steps_per_second": 3.704, "step": 150 },
    { "epoch": 0.0026940108770689163, "grad_norm": 1.3403651714324951, "learning_rate": 0.00018396935863623567, "loss": 1.2173, "step": 160 },
    { "epoch": 0.0028623865568857235, "grad_norm": 1.405836820602417, "learning_rate": 0.00017859697488039784, "loss": 1.0537, "step": 170 },
    { "epoch": 0.0030307622367025306, "grad_norm": 1.8490408658981323, "learning_rate": 0.00017287577785984542, "loss": 1.2706, "step": 180 },
    { "epoch": 0.0031991379165193378, "grad_norm": 2.120272397994995, "learning_rate": 0.0001668336406713699, "loss": 1.3493, "step": 190 },
    { "epoch": 0.0033675135963361453, "grad_norm": 2.9566562175750732, "learning_rate": 0.0001605, "loss": 1.4803, "step": 200 },
    { "epoch": 0.0033675135963361453, "eval_loss": 1.2688231468200684, "eval_runtime": 1688.2003, "eval_samples_per_second": 14.813, "eval_steps_per_second": 3.703, "step": 200 },
    { "epoch": 0.0035358892761529525, "grad_norm": 1.4451912641525269, "learning_rate": 0.00015390571270643128, "loss": 1.0993, "step": 210 },
    { "epoch": 0.0037042649559697596, "grad_norm": 1.5819648504257202, "learning_rate": 0.0001470829054955026, "loss": 1.113, "step": 220 },
    { "epoch": 0.003872640635786567, "grad_norm": 2.1583080291748047, "learning_rate": 0.00014006481839811937, "loss": 1.2436, "step": 230 },
    { "epoch": 0.004041016315603374, "grad_norm": 2.5022051334381104, "learning_rate": 0.00013288564282916442, "loss": 1.3693, "step": 240 },
    { "epoch": 0.004209391995420182, "grad_norm": 4.059680461883545, "learning_rate": 0.00012558035501036158, "loss": 1.63, "step": 250 },
    { "epoch": 0.004209391995420182, "eval_loss": 1.2357275485992432, "eval_runtime": 1686.4416, "eval_samples_per_second": 14.828, "eval_steps_per_second": 3.707, "step": 250 },
    { "epoch": 0.004377767675236989, "grad_norm": 1.4801981449127197, "learning_rate": 0.00011818454556963892, "loss": 1.0041, "step": 260 },
    { "epoch": 0.004546143355053796, "grad_norm": 1.3664003610610962, "learning_rate": 0.00011073424614716762, "loss": 1.0114, "step": 270 },
    { "epoch": 0.004714519034870603, "grad_norm": 1.5936243534088135, "learning_rate": 0.00010326575385283242, "loss": 1.1728, "step": 280 },
    { "epoch": 0.00488289471468741, "grad_norm": 1.537528157234192, "learning_rate": 9.58154544303611e-05, "loss": 1.3018, "step": 290 },
    { "epoch": 0.0050512703945042176, "grad_norm": 2.965649127960205, "learning_rate": 8.841964498963846e-05, "loss": 1.5497, "step": 300 },
    { "epoch": 0.0050512703945042176, "eval_loss": 1.223321795463562, "eval_runtime": 1686.5734, "eval_samples_per_second": 14.827, "eval_steps_per_second": 3.707, "step": 300 },
    { "epoch": 0.005219646074321025, "grad_norm": 1.483461618423462, "learning_rate": 8.111435717083556e-05, "loss": 1.1607, "step": 310 },
    { "epoch": 0.005388021754137833, "grad_norm": 1.2739410400390625, "learning_rate": 7.393518160188063e-05, "loss": 1.0222, "step": 320 },
    { "epoch": 0.00555639743395464, "grad_norm": 1.644224762916565, "learning_rate": 6.69170945044974e-05, "loss": 1.1464, "step": 330 },
    { "epoch": 0.005724773113771447, "grad_norm": 2.106903076171875, "learning_rate": 6.009428729356871e-05, "loss": 1.2647, "step": 340 },
    { "epoch": 0.005893148793588254, "grad_norm": 2.470841646194458, "learning_rate": 5.3500000000000026e-05, "loss": 1.5781, "step": 350 },
    { "epoch": 0.005893148793588254, "eval_loss": 1.1787070035934448, "eval_runtime": 1687.9624, "eval_samples_per_second": 14.815, "eval_steps_per_second": 3.704, "step": 350 },
    { "epoch": 0.006061524473405061, "grad_norm": 1.565023422241211, "learning_rate": 4.7166359328630106e-05, "loss": 1.0986, "step": 360 },
    { "epoch": 0.006229900153221868, "grad_norm": 1.3808709383010864, "learning_rate": 4.112422214015456e-05, "loss": 1.078, "step": 370 },
    { "epoch": 0.0063982758330386755, "grad_norm": 1.5624812841415405, "learning_rate": 3.5403025119602206e-05, "loss": 1.153, "step": 380 },
    { "epoch": 0.0065666515128554835, "grad_norm": 1.8611598014831543, "learning_rate": 3.0030641363764346e-05, "loss": 1.2869, "step": 390 },
    { "epoch": 0.006735027192672291, "grad_norm": 1.9800591468811035, "learning_rate": 2.5033244586269365e-05, "loss": 1.3724, "step": 400 },
    { "epoch": 0.006735027192672291, "eval_loss": 1.1627603769302368, "eval_runtime": 1687.9689, "eval_samples_per_second": 14.815, "eval_steps_per_second": 3.704, "step": 400 },
    { "epoch": 0.006903402872489098, "grad_norm": 1.244630217552185, "learning_rate": 2.0435181601880635e-05, "loss": 1.0196, "step": 410 },
    { "epoch": 0.007071778552305905, "grad_norm": 1.495737075805664, "learning_rate": 1.625885371126242e-05, "loss": 1.0804, "step": 420 },
    { "epoch": 0.007240154232122712, "grad_norm": 1.6196967363357544, "learning_rate": 1.2524607564094813e-05, "loss": 1.1342, "step": 430 },
    { "epoch": 0.007408529911939519, "grad_norm": 1.6709822416305542, "learning_rate": 9.250636032241695e-06, "loss": 1.2707, "step": 440 },
    { "epoch": 0.007576905591756326, "grad_norm": 2.335101366043091, "learning_rate": 6.45288957590781e-06, "loss": 1.346, "step": 450 },
    { "epoch": 0.007576905591756326, "eval_loss": 1.1538912057876587, "eval_runtime": 1694.9946, "eval_samples_per_second": 14.753, "eval_steps_per_second": 3.689, "step": 450 },
    { "epoch": 0.007745281271573134, "grad_norm": 1.5919936895370483, "learning_rate": 4.144998534599878e-06, "loss": 1.0085, "step": 460 },
    { "epoch": 0.00791365695138994, "grad_norm": 1.8804110288619995, "learning_rate": 2.3382067214827915e-06, "loss": 1.0371, "step": 470 },
    { "epoch": 0.008082032631206748, "grad_norm": 1.4158278703689575, "learning_rate": 1.0413166446519713e-06, "loss": 1.1094, "step": 480 },
    { "epoch": 0.008250408311023557, "grad_norm": 1.5951461791992188, "learning_rate": 2.6064662219881083e-07, "loss": 1.2787, "step": 490 },
    { "epoch": 0.008418783990840364, "grad_norm": 2.344301223754883, "learning_rate": 0.0, "loss": 1.2756, "step": 500 },
    { "epoch": 0.008418783990840364, "eval_loss": 1.1528342962265015, "eval_runtime": 1688.0738, "eval_samples_per_second": 14.814, "eval_steps_per_second": 3.704, "step": 500 }
  ],
  "logging_steps": 10,
  "max_steps": 500,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 50,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 3,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 1.021628442036142e+17,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}