leixa committed (verified)
Commit 9897fb5 · Parent(s): fb2f151

Training in progress, step 117, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:c2c8aa53823ec3979ff7e1d30c047e5acabcd3fa8498d84f6505bf010dca950e
+oid sha256:c5798fb376cb521b651dd28099ce2c086647fdcbbb595c40f31e09f0bca4fabf
 size 167832240
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:96de6f303d3e97c3b7742b2a9b1b32fd5b6504ff4598532cec8774dd143d3b3f
+oid sha256:47df0430f355abf76b457d4820080696300cbebbc336555910b86ea1f907aefe
 size 85723284
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:907075ac1b403dbc4ff0a0072e837127222d83d6490ad3ff06d167e2f6175a5e
+oid sha256:70bd3f5a04474750befb510a828eb2569e420cfa0abc940751ff6ccd74605249
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:2ea77b645dadc8ac2aadc6fa983d5a64d4e6a08d5eac1b852910f8408f0833c7
+oid sha256:96bbb5cc30267ffbb6fcba5b12ec3b2b908df1e2e422c7d381d682f082ea4159
 size 1064
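
Each of the four files above is tracked with Git LFS, so the commit only rewrites the three-line pointer (version, oid, size); the binary payloads themselves (~168 MB of adapter weights, ~86 MB of optimizer state, plus the small RNG and scheduler states) live in LFS storage. As a minimal sketch, assuming huggingface_hub is installed and substituting the real model repository for the placeholder repo id below, the updated checkpoint files can be fetched like this:

# Sketch: download the adapter weights updated in this commit.
# REPO_ID is a placeholder; the repository name is not shown in the diff above.
from huggingface_hub import hf_hub_download

REPO_ID = "leixa/<model-repo>"  # hypothetical

adapter_path = hf_hub_download(
    repo_id=REPO_ID,
    filename="last-checkpoint/adapter_model.safetensors",
)
print(adapter_path)  # local cache path to the ~168 MB safetensors file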
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.11754732975416785,
+  "epoch": 0.13224074597343882,
   "eval_steps": 13,
-  "global_step": 104,
+  "global_step": 117,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -317,6 +317,49 @@
       "eval_samples_per_second": 6.62,
       "eval_steps_per_second": 0.831,
       "step": 104
+    },
+    {
+      "epoch": 0.11867759254026561,
+      "grad_norm": 1.0611317157745361,
+      "learning_rate": 1.1699198087116589e-05,
+      "loss": 0.5579,
+      "step": 105
+    },
+    {
+      "epoch": 0.12206838089855891,
+      "grad_norm": 1.7777539491653442,
+      "learning_rate": 1.0305368692688174e-05,
+      "loss": 0.5391,
+      "step": 108
+    },
+    {
+      "epoch": 0.12545916925685222,
+      "grad_norm": 1.0344315767288208,
+      "learning_rate": 8.978109650374397e-06,
+      "loss": 0.4931,
+      "step": 111
+    },
+    {
+      "epoch": 0.12884995761514553,
+      "grad_norm": 1.0285758972167969,
+      "learning_rate": 7.723433775328384e-06,
+      "loss": 0.5289,
+      "step": 114
+    },
+    {
+      "epoch": 0.13224074597343882,
+      "grad_norm": 0.9961230158805847,
+      "learning_rate": 6.547025062816486e-06,
+      "loss": 0.5086,
+      "step": 117
+    },
+    {
+      "epoch": 0.13224074597343882,
+      "eval_loss": 0.12818647921085358,
+      "eval_runtime": 224.9913,
+      "eval_samples_per_second": 6.622,
+      "eval_steps_per_second": 0.831,
+      "step": 117
     }
   ],
   "logging_steps": 3,
@@ -336,7 +379,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 2.925022064588882e+17,
+  "total_flos": 3.290649822662492e+17,
   "train_batch_size": 8,
   "trial_name": null,
   "trial_params": null