eddysang committed
Commit 93abf0e · verified · 1 Parent(s): 47a179f

Training in progress, step 34, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:2be6c741c16462462566383c58c96fe7f6bc28d7566b13220c3dffb9ed417700
+oid sha256:c1b54cccbbed2f75eaef804b9662f6ef8e1c82290e1144dee6ea63fb77aeb26f
 size 335604696
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:34444611993aa5826fae6429f620abde590eb090402c17731e486c9d73095492
+oid sha256:90bcb817a546831a005a9504935df07250d9cc27fd981d0add1cf5aa973ee4f9
 size 671466706
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:5bc7d6365f55f6979e3f98089ca1eee6a5f1d967b9c10fb47e70f23c0169a194
+oid sha256:7abcd57538b0647768495b9aeb3b444e111ce136e810d289cc8fe830dca41b95
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:d0abe1a027b3fea2bf654a1c387b6eb2241fa486bab4a282d3a0e829c4308c91
+oid sha256:2e1983b20d7ce0214623b79adb071ed1f5c168cabcab4cc0ff2c0c61c63ddce9
 size 1064
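
All four files above are stored through Git LFS, so each diff only replaces the pointer: per the spec referenced in the pointers (https://git-lfs.github.com/spec/v1), oid is the SHA-256 of the actual file contents and size its byte length. A minimal sketch for checking a locally pulled checkpoint file against its new pointer; the local path mirrors the repo layout and is an assumption, not part of this commit:

import hashlib
import os

def verify_lfs_pointer(path: str, expected_oid: str, expected_size: int) -> bool:
    """Check a pulled file against the oid/size recorded in its Git LFS pointer."""
    if os.path.getsize(path) != expected_size:
        return False
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
            digest.update(chunk)
    return digest.hexdigest() == expected_oid

# Values from the new pointer for last-checkpoint/adapter_model.safetensors
print(verify_lfs_pointer(
    "last-checkpoint/adapter_model.safetensors",
    "c1b54cccbbed2f75eaef804b9662f6ef8e1c82290e1144dee6ea63fb77aeb26f",
    335604696,
))
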
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.025028755463538072,
+  "epoch": 0.050057510927076145,
   "eval_steps": 50,
-  "global_step": 17,
+  "global_step": 34,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -134,6 +134,125 @@
       "learning_rate": 0.00012749999999999998,
       "loss": 0.1309,
       "step": 17
+    },
+    {
+      "epoch": 0.02650103519668737,
+      "grad_norm": 0.18677189946174622,
+      "learning_rate": 0.000135,
+      "loss": 0.181,
+      "step": 18
+    },
+    {
+      "epoch": 0.02797331492983667,
+      "grad_norm": 0.1576640009880066,
+      "learning_rate": 0.0001425,
+      "loss": 0.1562,
+      "step": 19
+    },
+    {
+      "epoch": 0.029445594662985967,
+      "grad_norm": 0.19666102528572083,
+      "learning_rate": 0.00015,
+      "loss": 0.2439,
+      "step": 20
+    },
+    {
+      "epoch": 0.030917874396135265,
+      "grad_norm": 0.12512515485286713,
+      "learning_rate": 0.00014998857713672935,
+      "loss": 0.144,
+      "step": 21
+    },
+    {
+      "epoch": 0.03239015412928457,
+      "grad_norm": 0.1219751164317131,
+      "learning_rate": 0.00014995431202643217,
+      "loss": 0.1047,
+      "step": 22
+    },
+    {
+      "epoch": 0.033862433862433865,
+      "grad_norm": 0.1390693038702011,
+      "learning_rate": 0.000149897215106593,
+      "loss": 0.1022,
+      "step": 23
+    },
+    {
+      "epoch": 0.03533471359558316,
+      "grad_norm": 0.19809921085834503,
+      "learning_rate": 0.0001498173037694868,
+      "loss": 0.24,
+      "step": 24
+    },
+    {
+      "epoch": 0.03680699332873246,
+      "grad_norm": 0.17452572286128998,
+      "learning_rate": 0.0001497146023568809,
+      "loss": 0.2497,
+      "step": 25
+    },
+    {
+      "epoch": 0.03827927306188176,
+      "grad_norm": 0.10941721498966217,
+      "learning_rate": 0.00014958914215262048,
+      "loss": 0.0914,
+      "step": 26
+    },
+    {
+      "epoch": 0.03975155279503106,
+      "grad_norm": 0.12793776392936707,
+      "learning_rate": 0.00014944096137309914,
+      "loss": 0.114,
+      "step": 27
+    },
+    {
+      "epoch": 0.041223832528180356,
+      "grad_norm": 0.14925755560398102,
+      "learning_rate": 0.00014927010515561776,
+      "loss": 0.1931,
+      "step": 28
+    },
+    {
+      "epoch": 0.042696112261329654,
+      "grad_norm": 0.12749770283699036,
+      "learning_rate": 0.00014907662554463532,
+      "loss": 0.1342,
+      "step": 29
+    },
+    {
+      "epoch": 0.04416839199447895,
+      "grad_norm": 0.12666098773479462,
+      "learning_rate": 0.0001488605814759156,
+      "loss": 0.1404,
+      "step": 30
+    },
+    {
+      "epoch": 0.04564067172762825,
+      "grad_norm": 0.12451935559511185,
+      "learning_rate": 0.00014862203875857477,
+      "loss": 0.1297,
+      "step": 31
+    },
+    {
+      "epoch": 0.04711295146077755,
+      "grad_norm": 0.12354013323783875,
+      "learning_rate": 0.0001483610700550354,
+      "loss": 0.0667,
+      "step": 32
+    },
+    {
+      "epoch": 0.048585231193926846,
+      "grad_norm": 0.11861127614974976,
+      "learning_rate": 0.00014807775485889264,
+      "loss": 0.1057,
+      "step": 33
+    },
+    {
+      "epoch": 0.050057510927076145,
+      "grad_norm": 0.11489235609769821,
+      "learning_rate": 0.0001477721794706997,
+      "loss": 0.0717,
+      "step": 34
     }
   ],
   "logging_steps": 1,
@@ -153,7 +272,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 1.807210320368763e+17,
+  "total_flos": 3.597635405563822e+17,
   "train_batch_size": 2,
   "trial_name": null,
   "trial_params": null