leixa committed
Commit 6838a93 · verified · 1 Parent(s): 1fb90b0

Training in progress, step 78, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:974949f77f0a14b4199397c8ba5e2711f830212568ab0bfb2ed01e6c7250c7e0
+ oid sha256:b9f43328dd2d8116603704d5da077f1cdfcadf0ca60d4f6c67042c456d651e7e
  size 167832240
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:e0bc4aa4048af27fec57240e1206a51560d0aa8ae57d2f6ef5d4a90bcda27d0b
+ oid sha256:7ddfc9cb48a268339574aadd00a883430d46df5729370a923adb4c5bc610f51c
  size 85723284
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:c150e7a0df1bbdf6fa35b33f81ebbc56fd08d0eb855b36785312a07efa2d1cde
+ oid sha256:589530963918166f3856b646184357baa64b7df7b77ae1064a2164e37ebf14e7
  size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:15a153ae523f6f915c8c717e9164535eda8efd532a3b2e2efaf3c667d39d242b
+ oid sha256:6e14b0e4bebf6d66c9099b7587b528a3013e8c4759510f4562172108e92d4284
  size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
  {
  "best_metric": null,
  "best_model_checkpoint": null,
- "epoch": 0.0734670810963549,
+ "epoch": 0.08816049731562588,
  "eval_steps": 13,
- "global_step": 65,
+ "global_step": 78,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
@@ -202,6 +202,49 @@
  "eval_samples_per_second": 6.628,
  "eval_steps_per_second": 0.832,
  "step": 65
+ },
+ {
+ "epoch": 0.07459734388245268,
+ "grad_norm": 0.9956369996070862,
+ "learning_rate": 3.272542485937369e-05,
+ "loss": 0.5392,
+ "step": 66
+ },
+ {
+ "epoch": 0.07798813224074597,
+ "grad_norm": 1.1790428161621094,
+ "learning_rate": 3.110851015344735e-05,
+ "loss": 0.5354,
+ "step": 69
+ },
+ {
+ "epoch": 0.08137892059903927,
+ "grad_norm": 1.041911005973816,
+ "learning_rate": 2.9463922369965917e-05,
+ "loss": 0.5366,
+ "step": 72
+ },
+ {
+ "epoch": 0.08476970895733257,
+ "grad_norm": 1.101559042930603,
+ "learning_rate": 2.7799111902582696e-05,
+ "loss": 0.5158,
+ "step": 75
+ },
+ {
+ "epoch": 0.08816049731562588,
+ "grad_norm": 0.9680448174476624,
+ "learning_rate": 2.6121620758762877e-05,
+ "loss": 0.4675,
+ "step": 78
+ },
+ {
+ "epoch": 0.08816049731562588,
+ "eval_loss": 0.13185884058475494,
+ "eval_runtime": 224.7208,
+ "eval_samples_per_second": 6.63,
+ "eval_steps_per_second": 0.832,
+ "step": 78
  }
  ],
  "logging_steps": 3,
@@ -221,7 +264,7 @@
  "attributes": {}
  }
  },
- "total_flos": 1.8281387903680512e+17,
+ "total_flos": 2.1937665484416614e+17,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null