mamung committed
Commit 51078f9 · verified · 1 Parent(s): c47ba95

Training in progress, step 100, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:039db4adeb8cd1b6bb0ec007ed3c006bcbdcea9b09c0987fd0ea3a02fc671feb
+oid sha256:1fcf40764db523771739328a3bd8edf0f09346f5cf80ca2b3b503e156dcb44db
 size 671149168
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:b81944db0af75ec0e56e629a9109e10e3fd144e4ac69ffa21c621a23bb16c945
+oid sha256:9726494e019dd6a5135b6f19f889f17403a6938f4aa6c091cdb8602e409c7dd5
 size 1342555602
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:68529ee711356fd9d47647e5bd3dd9d92d8d90801ebf2381a18d532f68111077
+oid sha256:3bf26d2680b85dcd667f80e96b5cc6e90865b5e4d06d5ef11eb1400f3de4c7b7
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:1eef4c60eef56c59d148e4191bea64e9a3bf517b29f5a413c536dcdc8e61edf1
+oid sha256:5416093c744b622c4436f38b8545546a70241fb2d36e53a533c7ae67d7ced8b6
 size 1064
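
Note: each checkpoint file above is stored as a Git LFS pointer (version / oid sha256 / size), so only the sha256 digest changes between checkpoints while the byte size stays fixed. A minimal Python sketch for checking a downloaded file against such a pointer, with placeholder paths (not part of this repo):

# Minimal sketch: verify a downloaded checkpoint file against its Git LFS pointer.
# Both paths below are placeholders; adjust to your local copies.
import hashlib

def read_lfs_pointer(pointer_path):
    # Parse the "oid sha256:<hash>" and "size <bytes>" fields of an LFS pointer file.
    fields = {}
    with open(pointer_path, "r") as f:
        for line in f:
            key, _, value = line.strip().partition(" ")
            fields[key] = value
    return fields["oid"].split(":", 1)[1], int(fields["size"])

def matches_pointer(file_path, expected_oid, expected_size):
    # Stream in 1 MiB chunks so large files (the optimizer state here is ~1.3 GB) fit in memory.
    digest = hashlib.sha256()
    total = 0
    with open(file_path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            digest.update(chunk)
            total += len(chunk)
    return digest.hexdigest() == expected_oid and total == expected_size

oid, size = read_lfs_pointer("adapter_model.safetensors.pointer")  # hypothetical pointer path
print(matches_pointer("last-checkpoint/adapter_model.safetensors", oid, size))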
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.07775377969762419,
+  "epoch": 0.0785391714117416,
   "eval_steps": 9,
-  "global_step": 99,
+  "global_step": 100,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -236,6 +236,13 @@
       "eval_samples_per_second": 19.379,
       "eval_steps_per_second": 2.422,
       "step": 99
+    },
+    {
+      "epoch": 0.0785391714117416,
+      "grad_norm": 4.896818161010742,
+      "learning_rate": 0.0,
+      "loss": 2.9994,
+      "step": 100
     }
   ],
   "logging_steps": 5,
@@ -250,12 +257,12 @@
         "should_evaluate": false,
         "should_log": false,
         "should_save": true,
-        "should_training_stop": false
+        "should_training_stop": true
       },
       "attributes": {}
     }
   },
-  "total_flos": 1.4934284429583974e+17,
+  "total_flos": 1.508513578745856e+17,
   "train_batch_size": 8,
   "trial_name": null,
   "trial_params": null