error577 committed on
Commit aba6b74 · verified · 1 Parent(s): ec5311f

Training in progress, step 40, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:2b8526645aa10c8c370c36943130d1cefbfc8f02dbcdbe98fd4c9bd8b917e57f
+oid sha256:4e326cab99b9185673026663619315b66fbc144ff1c4a4cc0f73758d0a7e97df
 size 1579384
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:21615e5cb8507075c0003e1546245cfedd277c9465b3b44f7587a57f804e30af
+oid sha256:acb1b63fea4a66c08312e4978f73870c35ab3335f4b1705b84dcf6d655978dc5
 size 857274
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:b8eff25987b5fd2c04395d3be2d6b618db1a3880a66cb60c0a2d3cd458dd41db
+oid sha256:f720d721c792d0a9412a85ed8fc6e2eb685bfd50ae210c270b08af2ed177b79e
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:db8e23e4b636e6d4cfbe5cdf1b076811f8e1f9d4c97603e891aee3c95c35c66b
+oid sha256:5a75dcc5ec09eada6641b366eac390a2a47e7ec4306b94cfdb718bc9a73ac9b0
 size 1064
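
The four files above are Git LFS pointer files: each records only the `version`, the `oid sha256:` digest of the stored blob, and its `size`, so a binary update shows up in the diff as a single changed hash line. Below is a minimal Python sketch, not part of this commit, for checking that a locally downloaded blob matches the new pointer oid; the local path is an assumption for illustration.

```python
import hashlib

def lfs_sha256(path: str, chunk_size: int = 1 << 20) -> str:
    """Return the hex SHA-256 of a file, streamed in chunks."""
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

# Hypothetical local path to the updated adapter weights; compare against
# the new oid recorded in this commit.
expected = "4e326cab99b9185673026663619315b66fbc144ff1c4a4cc0f73758d0a7e97df"
actual = lfs_sha256("last-checkpoint/adapter_model.safetensors")
print("match" if actual == expected else f"mismatch: {actual}")
```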
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.013206223432792704,
+  "epoch": 0.026412446865585408,
   "eval_steps": 20,
-  "global_step": 20,
+  "global_step": 40,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -163,6 +163,154 @@
       "eval_samples_per_second": 75.565,
       "eval_steps_per_second": 75.565,
       "step": 20
+    },
+    {
+      "epoch": 0.013866534604432339,
+      "grad_norm": 361644.25,
+      "learning_rate": 0.00029893625775634835,
+      "loss": 97.0436,
+      "step": 21
+    },
+    {
+      "epoch": 0.014526845776071974,
+      "grad_norm": 1564964.125,
+      "learning_rate": 0.0002987343436093454,
+      "loss": 279.3873,
+      "step": 22
+    },
+    {
+      "epoch": 0.015187156947711609,
+      "grad_norm": 1539204.875,
+      "learning_rate": 0.00029851497482766547,
+      "loss": 668.731,
+      "step": 23
+    },
+    {
+      "epoch": 0.015847468119351244,
+      "grad_norm": 2246419.0,
+      "learning_rate": 0.00029827817715520773,
+      "loss": 758.2188,
+      "step": 24
+    },
+    {
+      "epoch": 0.01650777929099088,
+      "grad_norm": 2776799.5,
+      "learning_rate": 0.0002980239783812289,
+      "loss": 546.4688,
+      "step": 25
+    },
+    {
+      "epoch": 0.017168090462630514,
+      "grad_norm": 1617533.25,
+      "learning_rate": 0.0002977524083370822,
+      "loss": 422.7344,
+      "step": 26
+    },
+    {
+      "epoch": 0.01782840163427015,
+      "grad_norm": 450934.75,
+      "learning_rate": 0.00029746349889271645,
+      "loss": 339.3945,
+      "step": 27
+    },
+    {
+      "epoch": 0.018488712805909784,
+      "grad_norm": 2286499.5,
+      "learning_rate": 0.0002971572839529358,
+      "loss": 236.2812,
+      "step": 28
+    },
+    {
+      "epoch": 0.01914902397754942,
+      "grad_norm": 3999992.25,
+      "learning_rate": 0.00029683379945342125,
+      "loss": 159.8123,
+      "step": 29
+    },
+    {
+      "epoch": 0.019809335149189054,
+      "grad_norm": 308621.125,
+      "learning_rate": 0.000296493083356513,
+      "loss": 124.3508,
+      "step": 30
+    },
+    {
+      "epoch": 0.02046964632082869,
+      "grad_norm": 497120.59375,
+      "learning_rate": 0.00029613517564675565,
+      "loss": 138.8941,
+      "step": 31
+    },
+    {
+      "epoch": 0.021129957492468324,
+      "grad_norm": 99928.7890625,
+      "learning_rate": 0.0002957601183262058,
+      "loss": 151.8223,
+      "step": 32
+    },
+    {
+      "epoch": 0.02179026866410796,
+      "grad_norm": 405539.0625,
+      "learning_rate": 0.000295367955409503,
+      "loss": 1203.2074,
+      "step": 33
+    },
+    {
+      "epoch": 0.022450579835747594,
+      "grad_norm": 463994.21875,
+      "learning_rate": 0.00029495873291870436,
+      "loss": 1256.5367,
+      "step": 34
+    },
+    {
+      "epoch": 0.02311089100738723,
+      "grad_norm": 953879.5625,
+      "learning_rate": 0.0002945324988778834,
+      "loss": 938.6675,
+      "step": 35
+    },
+    {
+      "epoch": 0.023771202179026868,
+      "grad_norm": 178221.28125,
+      "learning_rate": 0.00029408930330749477,
+      "loss": 356.9532,
+      "step": 36
+    },
+    {
+      "epoch": 0.0244315133506665,
+      "grad_norm": 146182.625,
+      "learning_rate": 0.0002936291982185036,
+      "loss": 238.3703,
+      "step": 37
+    },
+    {
+      "epoch": 0.025091824522306138,
+      "grad_norm": 116065.7421875,
+      "learning_rate": 0.00029315223760628217,
+      "loss": 212.9555,
+      "step": 38
+    },
+    {
+      "epoch": 0.02575213569394577,
+      "grad_norm": 129193.03125,
+      "learning_rate": 0.00029265847744427303,
+      "loss": 227.929,
+      "step": 39
+    },
+    {
+      "epoch": 0.026412446865585408,
+      "grad_norm": 152996.75,
+      "learning_rate": 0.00029214797567742035,
+      "loss": 220.4361,
+      "step": 40
+    },
+    {
+      "epoch": 0.026412446865585408,
+      "eval_loss": 16.9798526763916,
+      "eval_runtime": 6.5479,
+      "eval_samples_per_second": 75.597,
+      "eval_steps_per_second": 75.597,
+      "step": 40
     }
   ],
   "logging_steps": 1,
@@ -182,7 +330,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 15771925217280.0,
+  "total_flos": 28561970823168.0,
   "train_batch_size": 1,
   "trial_name": null,
   "trial_params": null