Nexspear committed
Commit 78f2221 · verified · 1 Parent(s): 5ef6d8e

Training in progress, step 203, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:ad666b86b139114843e6387e24af26157b938f210b5a90a0dbd7dfc69288645b
+oid sha256:808c4f782273009bc7d5e123764f8173007ba1f183c7c31c2f68850ccdddbcba
 size 335604696
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:83760bdb6e06b4f72fc799a47fae8075e9ef8eaa08f01a1a7080fef1d2770585
+oid sha256:1a01986773b4b8ceab6052d090c3081d1e961e06bbf9fa0803428990272b5a1a
 size 170920084
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:2077fbebb3b57119eb881a10a206c01b69c507aed3d8aa947064d01b97b8ccc1
+oid sha256:de5239eff1f2d8ff451112658f26d1b5c61b81da50d6460962c3cb9ebe1a6793
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:ad9d713402818babdb8f091baee35ed4ccf44ceb7da5916ccb017adc10bc53d3
+oid sha256:0a1bd27e7159fdb46bc339fa4287a931a87599457f6b0651c1f3f5ef3ac6ecd2
 size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": 0.007178535684943199,
   "best_model_checkpoint": "miner_id_24/checkpoint-150",
-  "epoch": 2.2222222222222223,
+  "epoch": 3.0074074074074075,
   "eval_steps": 50,
-  "global_step": 150,
+  "global_step": 203,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -144,6 +144,49 @@
       "eval_samples_per_second": 13.368,
       "eval_steps_per_second": 3.401,
       "step": 150
+    },
+    {
+      "epoch": 2.3703703703703702,
+      "grad_norm": 0.1319075971841812,
+      "learning_rate": 2.3511937499075508e-05,
+      "loss": 0.0143,
+      "step": 160
+    },
+    {
+      "epoch": 2.5185185185185186,
+      "grad_norm": 0.2327166497707367,
+      "learning_rate": 1.4083642978043277e-05,
+      "loss": 0.0191,
+      "step": 170
+    },
+    {
+      "epoch": 2.6666666666666665,
+      "grad_norm": 0.19548346102237701,
+      "learning_rate": 6.9267914241362585e-06,
+      "loss": 0.0226,
+      "step": 180
+    },
+    {
+      "epoch": 2.814814814814815,
+      "grad_norm": 0.1185711920261383,
+      "learning_rate": 2.230594539204489e-06,
+      "loss": 0.0134,
+      "step": 190
+    },
+    {
+      "epoch": 2.962962962962963,
+      "grad_norm": 0.09321712702512741,
+      "learning_rate": 1.192096287513711e-07,
+      "loss": 0.0152,
+      "step": 200
+    },
+    {
+      "epoch": 2.962962962962963,
+      "eval_loss": 0.006627695634961128,
+      "eval_runtime": 8.5391,
+      "eval_samples_per_second": 13.35,
+      "eval_steps_per_second": 3.396,
+      "step": 200
     }
   ],
   "logging_steps": 10,
@@ -167,12 +210,12 @@
       "should_evaluate": false,
       "should_log": false,
       "should_save": true,
-      "should_training_stop": false
+      "should_training_stop": true
     },
     "attributes": {}
   }
 },
- "total_flos": 2.1456277793773978e+17,
+ "total_flos": 2.90017796878762e+17,
 "train_batch_size": 8,
 "trial_name": null,
 "trial_params": null