{ "best_metric": null, "best_model_checkpoint": null, "epoch": 0.033819488480236734, "eval_steps": 50, "global_step": 200, "is_hyper_param_search": false, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 0.0001690974424011837, "eval_loss": NaN, "eval_runtime": 224.3391, "eval_samples_per_second": 11.099, "eval_steps_per_second": 5.55, "step": 1 }, { "epoch": 0.0016909744240118367, "grad_norm": NaN, "learning_rate": 0.0002, "loss": 100.9861, "step": 10 }, { "epoch": 0.0033819488480236735, "grad_norm": NaN, "learning_rate": 0.0002, "loss": 100.8249, "step": 20 }, { "epoch": 0.0050729232720355105, "grad_norm": NaN, "learning_rate": 0.0002, "loss": 16.6635, "step": 30 }, { "epoch": 0.006763897696047347, "grad_norm": NaN, "learning_rate": 0.0002, "loss": 4.2478, "step": 40 }, { "epoch": 0.008454872120059184, "grad_norm": NaN, "learning_rate": 0.0002, "loss": 9.0062, "step": 50 }, { "epoch": 0.008454872120059184, "eval_loss": NaN, "eval_runtime": 223.9149, "eval_samples_per_second": 11.12, "eval_steps_per_second": 5.56, "step": 50 }, { "epoch": 0.010145846544071021, "grad_norm": NaN, "learning_rate": 0.0002, "loss": 0.3404, "step": 60 }, { "epoch": 0.011836820968082858, "grad_norm": NaN, "learning_rate": 0.0002, "loss": 4.908, "step": 70 }, { "epoch": 0.013527795392094694, "grad_norm": NaN, "learning_rate": 0.0002, "loss": 62.4184, "step": 80 }, { "epoch": 0.015218769816106531, "grad_norm": NaN, "learning_rate": 0.0002, "loss": 75.0879, "step": 90 }, { "epoch": 0.016909744240118367, "grad_norm": NaN, "learning_rate": 0.0002, "loss": 46.0364, "step": 100 }, { "epoch": 0.016909744240118367, "eval_loss": NaN, "eval_runtime": 223.9595, "eval_samples_per_second": 11.118, "eval_steps_per_second": 5.559, "step": 100 }, { "epoch": 0.018600718664130204, "grad_norm": NaN, "learning_rate": 0.0002, "loss": 166.2225, "step": 110 }, { "epoch": 0.020291693088142042, "grad_norm": NaN, "learning_rate": 0.0002, "loss": 110.6994, "step": 120 }, { "epoch": 0.02198266751215388, "grad_norm": NaN, "learning_rate": 0.0002, "loss": 5.273, "step": 130 }, { "epoch": 0.023673641936165717, "grad_norm": NaN, "learning_rate": 0.0002, "loss": 203.2636, "step": 140 }, { "epoch": 0.025364616360177554, "grad_norm": NaN, "learning_rate": 0.0002, "loss": 148.0058, "step": 150 }, { "epoch": 0.025364616360177554, "eval_loss": NaN, "eval_runtime": 224.0057, "eval_samples_per_second": 11.116, "eval_steps_per_second": 5.558, "step": 150 }, { "epoch": 0.027055590784189388, "grad_norm": NaN, "learning_rate": 0.0002, "loss": 67.0935, "step": 160 }, { "epoch": 0.028746565208201225, "grad_norm": NaN, "learning_rate": 0.0002, "loss": 0.2994, "step": 170 }, { "epoch": 0.030437539632213063, "grad_norm": NaN, "learning_rate": 0.0002, "loss": 79.1237, "step": 180 }, { "epoch": 0.0321285140562249, "grad_norm": NaN, "learning_rate": 0.0002, "loss": 0.7457, "step": 190 }, { "epoch": 0.033819488480236734, "grad_norm": NaN, "learning_rate": 0.0002, "loss": 72.2216, "step": 200 }, { "epoch": 0.033819488480236734, "eval_loss": NaN, "eval_runtime": 224.0523, "eval_samples_per_second": 11.113, "eval_steps_per_second": 5.557, "step": 200 } ], "logging_steps": 10, "max_steps": 200, "num_input_tokens_seen": 0, "num_train_epochs": 1, "save_steps": 50, "stateful_callbacks": { "TrainerControl": { "args": { "should_epoch_stop": false, "should_evaluate": false, "should_log": false, "should_save": true, "should_training_stop": true }, "attributes": {} } }, "total_flos": 1.268523024777216e+17, "train_batch_size": 2, 
"trial_name": null, "trial_params": null }