eddysang committed (verified)
Commit 70b2e04 · Parent: 020c718

Training in progress, step 170, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:b745462f26110d8f5e6c0bcef91055e14492965f4fda21faf63549fad0fcc448
+oid sha256:63d64bb4252a56462612ce8ea325bbb4f7eb1e476a12210a5f3bc4adabecffe5
 size 335604696
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:aedeeacab12f5e39f365b4749f7ef795c5b68cd1bab0b91369367d075cadfc66
+oid sha256:4beda4b81812c494724c944ce42a1384386d898438a92682ca41d1363ea7cd53
 size 671466706
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:afb210574debeca01beef086494f1b0da6d8ee8853b3831ec7094b554157981c
+oid sha256:2d2cc8b4da1c1e828f3ecd6fdcd0ddcc7555dae3d2812a0ace74e33cfe7a8a86
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:e76ff8614026ec7c5c2d9793615ca4e2f707e550ce0b5a4376af475431afe3f1
+oid sha256:c9a138d58efee4722b14f409e14731dcd0935c0eaa8a6078a4918744b9529bab
 size 1064
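Each of the four binaries above is stored as a Git LFS pointer: the "oid sha256:" line is the SHA-256 digest of the actual file contents, so a new oid with an unchanged size means the file was overwritten in place by this checkpoint. Below is a minimal sketch of checking a downloaded file against its pointer; the local path is an assumption for illustration, not part of this commit.

# Minimal sketch: verify a downloaded checkpoint file against the
# "oid sha256:..." value recorded in its Git LFS pointer.
# The local path below is an assumption for illustration.
import hashlib

def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

expected = "63d64bb4252a56462612ce8ea325bbb4f7eb1e476a12210a5f3bc4adabecffe5"
actual = sha256_of("last-checkpoint/adapter_model.safetensors")
print("adapter_model.safetensors OK" if actual == expected else "mismatch")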
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.7090514120202752,
+  "epoch": 0.7878349022447502,
   "eval_steps": 50,
-  "global_step": 153,
+  "global_step": 170,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -1110,6 +1110,125 @@
       "learning_rate": 2.385012299531262e-05,
       "loss": 0.3123,
       "step": 153
+    },
+    {
+      "epoch": 0.713685734974656,
+      "grad_norm": 1.3699779510498047,
+      "learning_rate": 2.2900622215575197e-05,
+      "loss": 0.2744,
+      "step": 154
+    },
+    {
+      "epoch": 0.718320057929037,
+      "grad_norm": 1.4394081830978394,
+      "learning_rate": 2.1966991411008938e-05,
+      "loss": 0.2854,
+      "step": 155
+    },
+    {
+      "epoch": 0.7229543808834178,
+      "grad_norm": 1.1516926288604736,
+      "learning_rate": 2.1049514974601175e-05,
+      "loss": 0.232,
+      "step": 156
+    },
+    {
+      "epoch": 0.7275887038377987,
+      "grad_norm": 1.226991891860962,
+      "learning_rate": 2.0148472378562215e-05,
+      "loss": 0.2174,
+      "step": 157
+    },
+    {
+      "epoch": 0.7322230267921795,
+      "grad_norm": 1.3236987590789795,
+      "learning_rate": 1.926413808919542e-05,
+      "loss": 0.2859,
+      "step": 158
+    },
+    {
+      "epoch": 0.7368573497465605,
+      "grad_norm": 1.3909859657287598,
+      "learning_rate": 1.8396781483292098e-05,
+      "loss": 0.1741,
+      "step": 159
+    },
+    {
+      "epoch": 0.7414916727009413,
+      "grad_norm": 1.3062829971313477,
+      "learning_rate": 1.7546666766076655e-05,
+      "loss": 0.2003,
+      "step": 160
+    },
+    {
+      "epoch": 0.7461259956553222,
+      "grad_norm": 1.4208601713180542,
+      "learning_rate": 1.671405289072718e-05,
+      "loss": 0.2306,
+      "step": 161
+    },
+    {
+      "epoch": 0.7507603186097032,
+      "grad_norm": 1.130900502204895,
+      "learning_rate": 1.5899193479495857e-05,
+      "loss": 0.1712,
+      "step": 162
+    },
+    {
+      "epoch": 0.755394641564084,
+      "grad_norm": 1.3274939060211182,
+      "learning_rate": 1.5102336746453053e-05,
+      "loss": 0.2057,
+      "step": 163
+    },
+    {
+      "epoch": 0.7600289645184649,
+      "grad_norm": 1.5153155326843262,
+      "learning_rate": 1.4323725421878949e-05,
+      "loss": 0.2792,
+      "step": 164
+    },
+    {
+      "epoch": 0.7646632874728457,
+      "grad_norm": 1.2106071710586548,
+      "learning_rate": 1.3563596678325606e-05,
+      "loss": 0.213,
+      "step": 165
+    },
+    {
+      "epoch": 0.7692976104272267,
+      "grad_norm": 1.3996082544326782,
+      "learning_rate": 1.2822182058371878e-05,
+      "loss": 0.2113,
+      "step": 166
+    },
+    {
+      "epoch": 0.7739319333816075,
+      "grad_norm": 1.1402256488800049,
+      "learning_rate": 1.2099707404093203e-05,
+      "loss": 0.1599,
+      "step": 167
+    },
+    {
+      "epoch": 0.7785662563359884,
+      "grad_norm": 1.6671884059906006,
+      "learning_rate": 1.1396392788268052e-05,
+      "loss": 0.301,
+      "step": 168
+    },
+    {
+      "epoch": 0.7832005792903693,
+      "grad_norm": 1.1182959079742432,
+      "learning_rate": 1.0712452447341582e-05,
+      "loss": 0.1367,
+      "step": 169
+    },
+    {
+      "epoch": 0.7878349022447502,
+      "grad_norm": 1.0370293855667114,
+      "learning_rate": 1.0048094716167095e-05,
+      "loss": 0.1441,
+      "step": 170
     }
   ],
   "logging_steps": 1,
@@ -1129,7 +1248,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 9.131167934494802e+17,
+  "total_flos": 1.0145742149438669e+18,
   "train_batch_size": 2,
   "trial_name": null,
   "trial_params": null