eddysang committed (verified) · Commit 1500f09 · Parent(s): 99a5607

Training in progress, step 99, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:c582e34b740c2ab84777b72c509a0e97c269aac69112da8e8698713a54feeae8
+oid sha256:66bb7c33619160a5e48850e0b483d5a81d1997be023b014dbf19adda41b6beca
 size 319876032
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:e5ccefb6bcb213e033c6d906dfab3f18aaf2958fcc017bb013b20f4d401e0fe3
+oid sha256:ae0e93e739b1f1ad537a3aac7e3afc2fbd66a15c103bba4b06e45f4891d59c55
 size 640009682
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:d5134bfc96a8a009684ec48127ff4fc03b2f2e61a0fbe126a34bef4ab7025231
+oid sha256:b29318a353e9804a8c1ab05eea178ef167bea993ccfea56e96ee92575c188598
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:429c9f0a4ff1909f8cc1cafc3bf6eb6ff29e4592139b5344f1aee9b75eae7379
+oid sha256:1eef4c60eef56c59d148e4191bea64e9a3bf517b29f5a413c536dcdc8e61edf1
 size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 1.936785474108944,
+  "epoch": 2.1304640215198387,
   "eval_steps": 50,
-  "global_step": 90,
+  "global_step": 99,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -653,6 +653,69 @@
       "learning_rate": 5.709035061653494e-06,
       "loss": 23.4895,
       "step": 90
+    },
+    {
+      "epoch": 1.9583053127101546,
+      "grad_norm": 18.024465560913086,
+      "learning_rate": 4.635649805813696e-06,
+      "loss": 26.1946,
+      "step": 91
+    },
+    {
+      "epoch": 1.9798251513113652,
+      "grad_norm": 25.951337814331055,
+      "learning_rate": 3.670761277863485e-06,
+      "loss": 24.1037,
+      "step": 92
+    },
+    {
+      "epoch": 2.0013449899125755,
+      "grad_norm": 27.450637817382812,
+      "learning_rate": 2.815857265976462e-06,
+      "loss": 21.865,
+      "step": 93
+    },
+    {
+      "epoch": 2.0228648285137862,
+      "grad_norm": 17.74875259399414,
+      "learning_rate": 2.072255970174258e-06,
+      "loss": 19.4987,
+      "step": 94
+    },
+    {
+      "epoch": 2.0443846671149966,
+      "grad_norm": 20.335710525512695,
+      "learning_rate": 1.4411039697577175e-06,
+      "loss": 15.9349,
+      "step": 95
+    },
+    {
+      "epoch": 2.0659045057162073,
+      "grad_norm": 19.2858829498291,
+      "learning_rate": 9.233744553646754e-07,
+      "loss": 18.5274,
+      "step": 96
+    },
+    {
+      "epoch": 2.0874243443174176,
+      "grad_norm": 16.66928482055664,
+      "learning_rate": 5.198657283805279e-07,
+      "loss": 16.2938,
+      "step": 97
+    },
+    {
+      "epoch": 2.108944182918628,
+      "grad_norm": 16.087203979492188,
+      "learning_rate": 2.311999700154027e-07,
+      "loss": 18.4087,
+      "step": 98
+    },
+    {
+      "epoch": 2.1304640215198387,
+      "grad_norm": 17.201932907104492,
+      "learning_rate": 5.7822281945782424e-08,
+      "loss": 23.99,
+      "step": 99
     }
   ],
   "logging_steps": 1,
@@ -672,7 +735,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 4.7332406838951936e+17,
+  "total_flos": 5.206564752284713e+17,
   "train_batch_size": 2,
   "trial_name": null,
   "trial_params": null