ardaspear committed
Commit 62ce89c · verified · 1 Parent(s): 89bf0c1

Training in progress, step 63, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:a61782b8acdd212d7f8e54236156cc8fbbc9ae201b2eb5a5047396a24f5adde5
+oid sha256:6bf8ae972af11f668559bb08b04015ffd3644ebdc724085a829c79571270a204
 size 167832240
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:61bf198ee1140b5436dba92a7b4c340adab529125f702643df4be9ef075c8b85
+oid sha256:8491a1e6f11d6229fe72932a0fe7bb4e797c61efe7bd5dc9ba48b7c851bf6f6c
 size 85723284
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:87951bd1d28cfc789427636c05788fc21db0e7e40be21589b4278413c44c66bd
+oid sha256:49644ad8f5452e53758ca52013eb3650a3941bcec6083d72c46c935a677f7d88
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:2011b1e019073e4bafc29de9703ff0a6e7c1252c3a53d804807bd1c99d390d1c
+oid sha256:e53b95eff99f2334cbf7ed4c962db83cb42e931305982518e928382563b5670d
 size 1064
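
The four binary checkpoint files above are tracked with Git LFS, so the diff only shows their pointer files changing: a spec version line, a `sha256` object id, and the byte size. As a minimal sketch (not part of this repo), the snippet below shows how one might verify that a locally downloaded file matches the pointer committed here; the pointer text and path are taken from the adapter diff above, the helper functions are illustrative assumptions.

```python
import hashlib
from pathlib import Path

def lfs_pointer_fields(pointer_text: str) -> dict:
    """Parse the 'key value' lines of a Git LFS pointer file into a dict."""
    fields = {}
    for line in pointer_text.strip().splitlines():
        key, _, value = line.partition(" ")
        fields[key] = value
    return fields

def matches_pointer(data_path: str, pointer_text: str) -> bool:
    """Check that a file's size and sha256 digest match its LFS pointer."""
    fields = lfs_pointer_fields(pointer_text)
    expected_oid = fields["oid"].removeprefix("sha256:")
    expected_size = int(fields["size"])
    blob = Path(data_path).read_bytes()
    return len(blob) == expected_size and hashlib.sha256(blob).hexdigest() == expected_oid

# Pointer text as committed above for the adapter weights.
pointer = """version https://git-lfs.github.com/spec/v1
oid sha256:6bf8ae972af11f668559bb08b04015ffd3644ebdc724085a829c79571270a204
size 167832240
"""
print(matches_pointer("last-checkpoint/adapter_model.safetensors", pointer))
```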
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.015902230729588455,
+  "epoch": 0.0185526025178532,
   "eval_steps": 9,
-  "global_step": 54,
+  "global_step": 63,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -189,6 +189,35 @@
       "eval_samples_per_second": 14.502,
       "eval_steps_per_second": 1.813,
       "step": 54
+    },
+    {
+      "epoch": 0.01678568799234337,
+      "grad_norm": 6.218847751617432,
+      "learning_rate": 4.6512176312793736e-05,
+      "loss": 3.5507,
+      "step": 57
+    },
+    {
+      "epoch": 0.017669145255098283,
+      "grad_norm": 6.713202953338623,
+      "learning_rate": 4.131759111665349e-05,
+      "loss": 3.2378,
+      "step": 60
+    },
+    {
+      "epoch": 0.0185526025178532,
+      "grad_norm": 6.8192548751831055,
+      "learning_rate": 3.6218132209150045e-05,
+      "loss": 3.4102,
+      "step": 63
+    },
+    {
+      "epoch": 0.0185526025178532,
+      "eval_loss": 0.8609820604324341,
+      "eval_runtime": 394.5997,
+      "eval_samples_per_second": 14.496,
+      "eval_steps_per_second": 1.812,
+      "step": 63
     }
   ],
   "logging_steps": 3,
@@ -208,7 +237,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 7.597138352327885e+16,
+  "total_flos": 8.863328077715866e+16,
   "train_batch_size": 8,
   "trial_name": null,
   "trial_params": null