ErrorAI committed
Commit c0d7144 · verified · parent 2fa0284

Training in progress, step 159, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:7d2592144216914132e86bd5a601f8e758f85749ba90940e5be447bc46137bc2
+ oid sha256:b4035eb3a15d3e5448fa28916cd6f49e43837caaff57d6d8f42386f32521b896
  size 36981072
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:0cb4b95fed23122522581814dfc54cd9ba36a4a88b61c364a46622c0ffed4d78
+ oid sha256:5f3e64a11f6ea47c0343abd2040f7093d7c4bfe06345ca4bb64d6ad732f03072
  size 19859140
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:eb7ee2ca041eb973281be8bb6e98884779e97cdbdae58cb160ddd1e05697d7a6
+ oid sha256:299bec1be8e3127922a76464b06da5f7e4edb847830ce04c77eb12728fc77775
  size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:c620d55c80c663393c9d19b56755001f0197cc6593e37fbd3941c05d6429b0d4
+ oid sha256:7c6151dd7288eb98bf063d6d1d782a1bec4b1d6191cf4071752734d494723af6
  size 1064
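
The four checkpoint binaries above are stored through Git LFS, so the diffs only touch the three-line pointer stubs: the spec version, the sha256 object id, and the byte size. Only the oid changes between checkpoints; the sizes stay fixed. As a minimal sketch (not part of this repo), after running git lfs pull to materialize the real files, the new adapter weights can be checked against the pointer above, using the oid and size copied from the adapter_model.safetensors diff:

import hashlib
from pathlib import Path

def sha256_of(path: Path, chunk_size: int = 1 << 20) -> str:
    # Stream the file so a large checkpoint never has to fit in memory.
    digest = hashlib.sha256()
    with path.open("rb") as fh:
        for chunk in iter(lambda: fh.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

# Values copied from the LFS pointer diff above.
path = Path("last-checkpoint/adapter_model.safetensors")
expected_oid = "b4035eb3a15d3e5448fa28916cd6f49e43837caaff57d6d8f42386f32521b896"
expected_size = 36981072

assert path.stat().st_size == expected_size, "size does not match the LFS pointer"
assert sha256_of(path) == expected_oid, "sha256 does not match the pointer oid"
print("pointer and file agree")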
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
  {
  "best_metric": null,
  "best_model_checkpoint": null,
- "epoch": 0.507177033492823,
+ "epoch": 0.7607655502392344,
  "eval_steps": 500,
- "global_step": 106,
+ "global_step": 159,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
@@ -749,6 +749,377 @@
  "learning_rate": 5.0769967764450345e-05,
  "loss": 1.4559,
  "step": 106
+ },
+ {
+ "epoch": 0.5119617224880383,
+ "grad_norm": 0.3313244581222534,
+ "learning_rate": 5e-05,
+ "loss": 1.4098,
+ "step": 107
+ },
+ {
+ "epoch": 0.5167464114832536,
+ "grad_norm": 0.2960905432701111,
+ "learning_rate": 4.9230032235549667e-05,
+ "loss": 1.5134,
+ "step": 108
+ },
+ {
+ "epoch": 0.5215311004784688,
+ "grad_norm": 0.32173770666122437,
+ "learning_rate": 4.8460247072191496e-05,
+ "loss": 1.5187,
+ "step": 109
+ },
+ {
+ "epoch": 0.5263157894736842,
+ "grad_norm": 0.3100273311138153,
+ "learning_rate": 4.7690827067713035e-05,
+ "loss": 1.4619,
+ "step": 110
+ },
+ {
+ "epoch": 0.5311004784688995,
+ "grad_norm": 0.31192484498023987,
+ "learning_rate": 4.692195469330286e-05,
+ "loss": 1.5889,
+ "step": 111
+ },
+ {
+ "epoch": 0.5358851674641149,
+ "grad_norm": 0.3112650215625763,
+ "learning_rate": 4.6153812290276813e-05,
+ "loss": 1.4634,
+ "step": 112
+ },
+ {
+ "epoch": 0.5406698564593302,
+ "grad_norm": 0.31642478704452515,
+ "learning_rate": 4.5386582026834906e-05,
+ "loss": 1.6003,
+ "step": 113
+ },
+ {
+ "epoch": 0.5454545454545454,
+ "grad_norm": 0.30912068486213684,
+ "learning_rate": 4.462044585485944e-05,
+ "loss": 1.5149,
+ "step": 114
+ },
+ {
+ "epoch": 0.5502392344497608,
+ "grad_norm": 0.3080631196498871,
+ "learning_rate": 4.38555854667643e-05,
+ "loss": 1.3925,
+ "step": 115
+ },
+ {
+ "epoch": 0.5550239234449761,
+ "grad_norm": 0.32206517457962036,
+ "learning_rate": 4.30921822524059e-05,
+ "loss": 1.549,
+ "step": 116
+ },
+ {
+ "epoch": 0.5598086124401914,
+ "grad_norm": 0.31533685326576233,
+ "learning_rate": 4.233041725606572e-05,
+ "loss": 1.4718,
+ "step": 117
+ },
+ {
+ "epoch": 0.5645933014354066,
+ "grad_norm": 0.3389468491077423,
+ "learning_rate": 4.157047113351504e-05,
+ "loss": 1.4761,
+ "step": 118
+ },
+ {
+ "epoch": 0.569377990430622,
+ "grad_norm": 0.33409473299980164,
+ "learning_rate": 4.0812524109171476e-05,
+ "loss": 1.3456,
+ "step": 119
+ },
+ {
+ "epoch": 0.5741626794258373,
+ "grad_norm": 0.345503568649292,
+ "learning_rate": 4.0056755933358184e-05,
+ "loss": 1.6427,
+ "step": 120
+ },
+ {
+ "epoch": 0.5789473684210527,
+ "grad_norm": 0.34861519932746887,
+ "learning_rate": 3.930334583967514e-05,
+ "loss": 1.6905,
+ "step": 121
+ },
+ {
+ "epoch": 0.583732057416268,
+ "grad_norm": 0.36137065291404724,
+ "learning_rate": 3.855247250249331e-05,
+ "loss": 1.4516,
+ "step": 122
+ },
+ {
+ "epoch": 0.5885167464114832,
+ "grad_norm": 0.375118225812912,
+ "learning_rate": 3.780431399458114e-05,
+ "loss": 1.7034,
+ "step": 123
+ },
+ {
+ "epoch": 0.5933014354066986,
+ "grad_norm": 0.3760150074958801,
+ "learning_rate": 3.705904774487396e-05,
+ "loss": 1.5659,
+ "step": 124
+ },
+ {
+ "epoch": 0.5980861244019139,
+ "grad_norm": 0.3561256229877472,
+ "learning_rate": 3.631685049639586e-05,
+ "loss": 1.6253,
+ "step": 125
+ },
+ {
+ "epoch": 0.6028708133971292,
+ "grad_norm": 0.392549604177475,
+ "learning_rate": 3.557789826434439e-05,
+ "loss": 1.6744,
+ "step": 126
+ },
+ {
+ "epoch": 0.6076555023923444,
+ "grad_norm": 0.3839954733848572,
+ "learning_rate": 3.484236629434783e-05,
+ "loss": 1.5695,
+ "step": 127
+ },
+ {
+ "epoch": 0.6124401913875598,
+ "grad_norm": 0.39694321155548096,
+ "learning_rate": 3.411042902090492e-05,
+ "loss": 1.5518,
+ "step": 128
+ },
+ {
+ "epoch": 0.6172248803827751,
+ "grad_norm": 0.39925724267959595,
+ "learning_rate": 3.338226002601703e-05,
+ "loss": 1.723,
+ "step": 129
+ },
+ {
+ "epoch": 0.6220095693779905,
+ "grad_norm": 0.45537644624710083,
+ "learning_rate": 3.265803199802237e-05,
+ "loss": 1.8658,
+ "step": 130
+ },
+ {
+ "epoch": 0.6267942583732058,
+ "grad_norm": 0.4865153729915619,
+ "learning_rate": 3.1937916690642356e-05,
+ "loss": 1.9033,
+ "step": 131
+ },
+ {
+ "epoch": 0.631578947368421,
+ "grad_norm": 0.4416426718235016,
+ "learning_rate": 3.1222084882249375e-05,
+ "loss": 1.6994,
+ "step": 132
+ },
+ {
+ "epoch": 0.6363636363636364,
+ "grad_norm": 0.4817049503326416,
+ "learning_rate": 3.0510706335366035e-05,
+ "loss": 1.8274,
+ "step": 133
+ },
+ {
+ "epoch": 0.6411483253588517,
+ "grad_norm": 0.4855473041534424,
+ "learning_rate": 2.980394975640526e-05,
+ "loss": 1.7856,
+ "step": 134
+ },
+ {
+ "epoch": 0.645933014354067,
+ "grad_norm": 0.5136681199073792,
+ "learning_rate": 2.910198275566085e-05,
+ "loss": 1.7244,
+ "step": 135
+ },
+ {
+ "epoch": 0.6507177033492823,
+ "grad_norm": 0.5696388483047485,
+ "learning_rate": 2.8404971807557957e-05,
+ "loss": 1.6867,
+ "step": 136
+ },
+ {
+ "epoch": 0.6555023923444976,
+ "grad_norm": 0.5509561896324158,
+ "learning_rate": 2.771308221117309e-05,
+ "loss": 1.9106,
+ "step": 137
+ },
+ {
+ "epoch": 0.6602870813397129,
+ "grad_norm": 0.6965003609657288,
+ "learning_rate": 2.7026478051032623e-05,
+ "loss": 2.0887,
+ "step": 138
+ },
+ {
+ "epoch": 0.6650717703349283,
+ "grad_norm": 0.6352129578590393,
+ "learning_rate": 2.6345322158199503e-05,
+ "loss": 1.8677,
+ "step": 139
+ },
+ {
+ "epoch": 0.6698564593301436,
+ "grad_norm": 0.7062574625015259,
+ "learning_rate": 2.5669776071657192e-05,
+ "loss": 1.94,
+ "step": 140
+ },
+ {
+ "epoch": 0.6746411483253588,
+ "grad_norm": 0.6748818159103394,
+ "learning_rate": 2.500000000000001e-05,
+ "loss": 1.7784,
+ "step": 141
+ },
+ {
+ "epoch": 0.6794258373205742,
+ "grad_norm": 0.9134660959243774,
+ "learning_rate": 2.4336152783438982e-05,
+ "loss": 2.0779,
+ "step": 142
+ },
+ {
+ "epoch": 0.6842105263157895,
+ "grad_norm": 0.8519887924194336,
+ "learning_rate": 2.3678391856132204e-05,
+ "loss": 2.0711,
+ "step": 143
+ },
+ {
+ "epoch": 0.6889952153110048,
+ "grad_norm": 0.8988573551177979,
+ "learning_rate": 2.302687320884876e-05,
+ "loss": 1.9372,
+ "step": 144
+ },
+ {
+ "epoch": 0.69377990430622,
+ "grad_norm": 0.9712769389152527,
+ "learning_rate": 2.238175135197471e-05,
+ "loss": 2.0032,
+ "step": 145
+ },
+ {
+ "epoch": 0.6985645933014354,
+ "grad_norm": 1.3098163604736328,
+ "learning_rate": 2.1743179278870407e-05,
+ "loss": 2.4697,
+ "step": 146
+ },
+ {
+ "epoch": 0.7033492822966507,
+ "grad_norm": 1.3078206777572632,
+ "learning_rate": 2.1111308429587444e-05,
+ "loss": 1.8509,
+ "step": 147
+ },
+ {
+ "epoch": 0.7081339712918661,
+ "grad_norm": 1.5340124368667603,
+ "learning_rate": 2.0486288654954028e-05,
+ "loss": 1.687,
+ "step": 148
+ },
+ {
+ "epoch": 0.7129186602870813,
+ "grad_norm": 1.5090495347976685,
+ "learning_rate": 1.9868268181037185e-05,
+ "loss": 1.4265,
+ "step": 149
+ },
+ {
+ "epoch": 0.7177033492822966,
+ "grad_norm": 2.595984697341919,
+ "learning_rate": 1.925739357399038e-05,
+ "loss": 1.6117,
+ "step": 150
+ },
+ {
+ "epoch": 0.722488038277512,
+ "grad_norm": 0.20171192288398743,
+ "learning_rate": 1.8653809705294688e-05,
+ "loss": 1.5114,
+ "step": 151
+ },
+ {
+ "epoch": 0.7272727272727273,
+ "grad_norm": 0.22950993478298187,
+ "learning_rate": 1.8057659717401947e-05,
+ "loss": 1.4048,
+ "step": 152
+ },
+ {
+ "epoch": 0.7320574162679426,
+ "grad_norm": 0.22736592590808868,
+ "learning_rate": 1.746908498978791e-05,
+ "loss": 1.4318,
+ "step": 153
+ },
+ {
+ "epoch": 0.7368421052631579,
+ "grad_norm": 0.22380989789962769,
+ "learning_rate": 1.6888225105423507e-05,
+ "loss": 1.3012,
+ "step": 154
+ },
+ {
+ "epoch": 0.7416267942583732,
+ "grad_norm": 0.2610418498516083,
+ "learning_rate": 1.631521781767214e-05,
+ "loss": 1.5871,
+ "step": 155
+ },
+ {
+ "epoch": 0.7464114832535885,
+ "grad_norm": 0.24435089528560638,
+ "learning_rate": 1.575019901762097e-05,
+ "loss": 1.3297,
+ "step": 156
+ },
+ {
+ "epoch": 0.7511961722488039,
+ "grad_norm": 0.27125418186187744,
+ "learning_rate": 1.5193302701853673e-05,
+ "loss": 1.3504,
+ "step": 157
+ },
+ {
+ "epoch": 0.7559808612440191,
+ "grad_norm": 0.2774478495121002,
+ "learning_rate": 1.4644660940672627e-05,
+ "loss": 1.5265,
+ "step": 158
+ },
+ {
+ "epoch": 0.7607655502392344,
+ "grad_norm": 0.28624045848846436,
+ "learning_rate": 1.4104403846777909e-05,
+ "loss": 1.4913,
+ "step": 159
  }
  ],
  "logging_steps": 1,
@@ -768,7 +1139,7 @@
  "attributes": {}
  }
  },
- "total_flos": 1.4139404071206912e+16,
+ "total_flos": 2.1176676280958976e+16,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null