Nexspear committed
Commit 0a92ae0 · verified · 1 Parent(s): b715b97

Training in progress, step 54, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:75410168de9c9bc220b54027abcffb1122fbd9afb0f31cdb49c905ab20aadb19
+oid sha256:d15cf64ecec0a56de9c6510b1d7afea5016131d9594db248bb116cb482893b41
 size 167832240
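
Each changed binary above is stored as a Git LFS pointer: the `oid sha256:` field is the SHA-256 digest of the real file and `size` is its length in bytes (per the git-lfs spec referenced in the pointer itself). A minimal sketch for checking a downloaded checkpoint file against its pointer; the helper name and local path are illustrative, not part of this repo:

import hashlib
from pathlib import Path

def verify_lfs_pointer(file_path: str, expected_oid: str, expected_size: int) -> bool:
    """Compare a local file against the oid/size recorded in its Git LFS pointer."""
    data = Path(file_path).read_bytes()
    return hashlib.sha256(data).hexdigest() == expected_oid and len(data) == expected_size

# Checked against the new adapter_model.safetensors pointer from this commit.
ok = verify_lfs_pointer(
    "last-checkpoint/adapter_model.safetensors",
    "d15cf64ecec0a56de9c6510b1d7afea5016131d9594db248bb116cb482893b41",
    167832240,
)
print("pointer matches file:", ok)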
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:7012dc9d46f0f15c8164eb131494a13eaae25387fcd97b772839bbacb59cdbf8
+oid sha256:9911c34e1d05b715a51919ef9bdb30451b021909e53efaa150490585ecee6f8a
 size 85723284
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:a15f8b7e107d1944aa714244b7981525070751630348b2f99259785cab9c85cb
+oid sha256:87951bd1d28cfc789427636c05788fc21db0e7e40be21589b4278413c44c66bd
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:9776c8df8be516beee92201f32f4a3f5d90539bf47f8ef27e1636cdc132a851e
+oid sha256:ec430a8fba90f7f39f74e916eb32712c363a0fd20bb4904251fce0eb82f2b9cf
 size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.033482142857142856,
+  "epoch": 0.04017857142857143,
   "eval_steps": 9,
-  "global_step": 45,
+  "global_step": 54,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -160,6 +160,35 @@
       "eval_samples_per_second": 13.985,
       "eval_steps_per_second": 1.748,
       "step": 45
+    },
+    {
+      "epoch": 0.03571428571428571,
+      "grad_norm": 6.039599418640137,
+      "learning_rate": 3.104804738999169e-05,
+      "loss": 4.1585,
+      "step": 48
+    },
+    {
+      "epoch": 0.03794642857142857,
+      "grad_norm": 5.91121244430542,
+      "learning_rate": 2.8479327524001636e-05,
+      "loss": 4.1427,
+      "step": 51
+    },
+    {
+      "epoch": 0.04017857142857143,
+      "grad_norm": 5.7630181312561035,
+      "learning_rate": 2.587248741756253e-05,
+      "loss": 3.8608,
+      "step": 54
+    },
+    {
+      "epoch": 0.04017857142857143,
+      "eval_loss": 4.20692253112793,
+      "eval_runtime": 162.0191,
+      "eval_samples_per_second": 13.974,
+      "eval_steps_per_second": 1.747,
+      "step": 54
     }
   ],
   "logging_steps": 3,
@@ -179,7 +208,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 6.676985552044032e+16,
+  "total_flos": 8.012382662452838e+16,
   "train_batch_size": 8,
   "trial_name": null,
   "trial_params": null