eddysang committed
Commit 892f183 · verified · 1 Parent(s): cc20e98

Training in progress, step 102, checkpoint

last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:f220adf6445faa3084119deeca55d2d287f74f40538df7e9218a2aeee61a809a
+oid sha256:409506c2e743c0962472a80fbb6dcaf2cbc9c90ce5fb8329688b39a5b3cfa456
 size 640009682
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:7ee335a62849ee2439378e9094b922b6e35c167d78196a6c3c8df14be4850d15
+oid sha256:609ac74381de893b35193cf51e9a6fce221b22031433fbd3b97539f9bac6436f
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:b383ad1d61ff4e9bbd86bd276c043e414782d2bb7de68ada3e289a786eb79681
+oid sha256:70670c442607259270e13afbef3aac28e38a58ddad6998414f76ed43ab7f41d4
 size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.0934001785591649,
+  "epoch": 0.11208021427099787,
   "eval_steps": 50,
-  "global_step": 85,
+  "global_step": 102,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -618,6 +618,133 @@
       "learning_rate": 0.00010669636963055245,
       "loss": 0.0,
       "step": 85
+    },
+    {
+      "epoch": 0.09449900418927272,
+      "grad_norm": NaN,
+      "learning_rate": 0.00010550524823068502,
+      "loss": 0.0,
+      "step": 86
+    },
+    {
+      "epoch": 0.09559782981938053,
+      "grad_norm": NaN,
+      "learning_rate": 0.00010430483463669551,
+      "loss": 0.0,
+      "step": 87
+    },
+    {
+      "epoch": 0.09669665544948836,
+      "grad_norm": NaN,
+      "learning_rate": 0.0001030954945061934,
+      "loss": 0.0,
+      "step": 88
+    },
+    {
+      "epoch": 0.09779548107959618,
+      "grad_norm": NaN,
+      "learning_rate": 0.0001018775962158975,
+      "loss": 0.0,
+      "step": 89
+    },
+    {
+      "epoch": 0.098894306709704,
+      "grad_norm": NaN,
+      "learning_rate": 0.00010065151074942516,
+      "loss": 0.0,
+      "step": 90
+    },
+    {
+      "epoch": 0.09999313233981183,
+      "grad_norm": NaN,
+      "learning_rate": 9.941761158428674e-05,
+      "loss": 0.0,
+      "step": 91
+    },
+    {
+      "epoch": 0.10109195796991965,
+      "grad_norm": NaN,
+      "learning_rate": 9.817627457812105e-05,
+      "loss": 0.0,
+      "step": 92
+    },
+    {
+      "epoch": 0.10219078360002747,
+      "grad_norm": NaN,
+      "learning_rate": 9.692787785420525e-05,
+      "loss": 0.0,
+      "step": 93
+    },
+    {
+      "epoch": 0.10328960923013529,
+      "grad_norm": NaN,
+      "learning_rate": 9.567280168627493e-05,
+      "loss": 0.0,
+      "step": 94
+    },
+    {
+      "epoch": 0.10438843486024312,
+      "grad_norm": NaN,
+      "learning_rate": 9.441142838268905e-05,
+      "loss": 0.0,
+      "step": 95
+    },
+    {
+      "epoch": 0.10548726049035094,
+      "grad_norm": NaN,
+      "learning_rate": 9.314414216997507e-05,
+      "loss": 0.0,
+      "step": 96
+    },
+    {
+      "epoch": 0.10658608612045876,
+      "grad_norm": NaN,
+      "learning_rate": 9.187132907578987e-05,
+      "loss": 0.0,
+      "step": 97
+    },
+    {
+      "epoch": 0.10768491175056658,
+      "grad_norm": NaN,
+      "learning_rate": 9.059337681133192e-05,
+      "loss": 0.0,
+      "step": 98
+    },
+    {
+      "epoch": 0.1087837373806744,
+      "grad_norm": NaN,
+      "learning_rate": 8.931067465324085e-05,
+      "loss": 0.0,
+      "step": 99
+    },
+    {
+      "epoch": 0.10988256301078222,
+      "grad_norm": NaN,
+      "learning_rate": 8.802361332501978e-05,
+      "loss": 0.0,
+      "step": 100
+    },
+    {
+      "epoch": 0.10988256301078222,
+      "eval_loss": NaN,
+      "eval_runtime": 637.0136,
+      "eval_samples_per_second": 4.813,
+      "eval_steps_per_second": 2.407,
+      "step": 100
+    },
+    {
+      "epoch": 0.11098138864089005,
+      "grad_norm": NaN,
+      "learning_rate": 8.673258487801731e-05,
+      "loss": 0.0,
+      "step": 101
+    },
+    {
+      "epoch": 0.11208021427099787,
+      "grad_norm": NaN,
+      "learning_rate": 8.54379825720049e-05,
+      "loss": 0.0,
+      "step": 102
     }
   ],
   "logging_steps": 1,
@@ -637,7 +764,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 4.978120149832827e+17,
+  "total_flos": 5.979003336114831e+17,
   "train_batch_size": 2,
   "trial_name": null,
   "trial_params": null