leixa committed (verified)
Commit df2a176 · 1 Parent(s): 981e361

Training in progress, step 340, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:a8527b967128133645e100c466a08cc181daa69f21ade7b0c7d2c2340369ecee
+oid sha256:f2d25b88e084ebd1782d201ceb195fe6db0730594ab101aeca0c6eef087fe8aa
 size 692136856
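The adapter weights are stored through Git LFS, so the diff above only touches the three-line pointer file (spec version, sha256 oid, byte size), not the ~692 MB safetensors blob itself. A minimal sketch, assuming the blob has already been fetched with `git lfs pull` and using only the Python standard library, that recomputes the sha256 and compares it against the new oid:

import hashlib

def sha256_of(path, chunk_size=1 << 20):
    """Stream the file in 1 MiB chunks and return its hex sha256 digest."""
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            h.update(chunk)
    return h.hexdigest()

# Expected digest is the "oid sha256:" value from the pointer above.
expected = "f2d25b88e084ebd1782d201ceb195fe6db0730594ab101aeca0c6eef087fe8aa"
print(sha256_of("last-checkpoint/adapter_model.safetensors") == expected)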
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:a030a746ecbed1a27675003fabd902feca0e845682af282e9619dd3883d2dd16
+oid sha256:2450dc03895aa4a5edc22e7d6710bfb27b0462b4e19005486a1604c4a7f4ceee
 size 85723732
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:232340abfd6d132ef8ac9c5ef9374bc62390215c131732dba5d9141001cfc574
+oid sha256:bd3ee42edb81b08784cce5af0d531cc06157d25956a649c4a46b5f301140f33b
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:542eeb761eff9bd2c88163850a5018d7ed947bdab57ea917e6e376b6cb0c0259
+oid sha256:e6999f9aad8d44fbf7db1d80d56ad86630abb8e28a7187e80ed24f8546462146
 size 1064
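The optimizer, scheduler, and RNG-state files updated above are what allow training to continue from this exact point. A minimal sketch of how such a checkpoint is typically resumed with the transformers Trainer; the actual training script, model, and datasets are not part of this commit, so the names below are placeholders, and the only values taken from the repo are the batch size and logging cadence recorded in last-checkpoint/trainer_state.json (diffed below):

from transformers import Trainer, TrainingArguments

def resume_from_last_checkpoint(model, train_ds, eval_ds):
    # Placeholder arguments; batch size and logging cadence mirror
    # "train_batch_size": 8 and "logging_steps": 3 in trainer_state.json.
    args = TrainingArguments(
        output_dir=".",
        per_device_train_batch_size=8,
        logging_steps=3,
    )
    trainer = Trainer(model=model, args=args,
                      train_dataset=train_ds, eval_dataset=eval_ds)
    # Passing the checkpoint directory reloads optimizer.pt, scheduler.pt,
    # rng_state.pth, and trainer_state.json before training continues.
    trainer.train(resume_from_checkpoint="last-checkpoint")
    return trainer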
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.020618556701030927,
+  "epoch": 0.022909507445589918,
   "eval_steps": 34,
-  "global_step": 306,
+  "global_step": 340,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -801,6 +801,91 @@
       "eval_samples_per_second": 14.037,
       "eval_steps_per_second": 1.755,
       "step": 306
+    },
+    {
+      "epoch": 0.020820699413786132,
+      "grad_norm": 0.8132256269454956,
+      "learning_rate": 6.421379363065142e-06,
+      "loss": 0.8069,
+      "step": 309
+    },
+    {
+      "epoch": 0.02102284212654134,
+      "grad_norm": 0.7123071551322937,
+      "learning_rate": 6.022586521156715e-06,
+      "loss": 0.7624,
+      "step": 312
+    },
+    {
+      "epoch": 0.021224984839296544,
+      "grad_norm": 0.6497386693954468,
+      "learning_rate": 5.634875954308638e-06,
+      "loss": 0.7902,
+      "step": 315
+    },
+    {
+      "epoch": 0.02142712755205175,
+      "grad_norm": 0.6508458256721497,
+      "learning_rate": 5.258474074573877e-06,
+      "loss": 0.8201,
+      "step": 318
+    },
+    {
+      "epoch": 0.021629270264806953,
+      "grad_norm": 0.9117996096611023,
+      "learning_rate": 4.893600690050579e-06,
+      "loss": 0.8328,
+      "step": 321
+    },
+    {
+      "epoch": 0.021831412977562158,
+      "grad_norm": 0.693020761013031,
+      "learning_rate": 4.540468876520323e-06,
+      "loss": 0.7926,
+      "step": 324
+    },
+    {
+      "epoch": 0.022033555690317366,
+      "grad_norm": 0.6869902014732361,
+      "learning_rate": 4.199284853017896e-06,
+      "loss": 0.805,
+      "step": 327
+    },
+    {
+      "epoch": 0.02223569840307257,
+      "grad_norm": 0.7282816171646118,
+      "learning_rate": 3.8702478614051355e-06,
+      "loss": 0.8067,
+      "step": 330
+    },
+    {
+      "epoch": 0.022437841115827774,
+      "grad_norm": 0.6699129343032837,
+      "learning_rate": 3.5535500500193357e-06,
+      "loss": 0.8041,
+      "step": 333
+    },
+    {
+      "epoch": 0.02263998382858298,
+      "grad_norm": 0.6829515695571899,
+      "learning_rate": 3.249376361464021e-06,
+      "loss": 0.8149,
+      "step": 336
+    },
+    {
+      "epoch": 0.022842126541338183,
+      "grad_norm": 0.7807720303535461,
+      "learning_rate": 2.957904424607652e-06,
+      "loss": 0.825,
+      "step": 339
+    },
+    {
+      "epoch": 0.022909507445589918,
+      "eval_loss": 0.8090208768844604,
+      "eval_runtime": 1781.7524,
+      "eval_samples_per_second": 14.028,
+      "eval_steps_per_second": 1.754,
+      "step": 340
     }
   ],
   "logging_steps": 3,
@@ -820,7 +905,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 4.3031549961673114e+17,
+  "total_flos": 4.7812833290747904e+17,
   "train_batch_size": 8,
   "trial_name": null,
   "trial_params": null