tarabukinivan committed
Commit 4d3cf6e · verified · 1 Parent(s): b33d477

Training in progress, step 50, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:b447d7d316addbc66a21bd7e8321f06e652a336ffe5142dffca235ed075e07b1
+oid sha256:57828a60a5abd1d3030f940f71191f400e9cc5d666e8191bdcc71f698151f178
 size 167832240
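
The adapter_model.safetensors update above is only a Git LFS pointer change for the 167832240-byte adapter weight file. Below is a minimal sketch of loading this checkpoint's adapter, assuming it is a PEFT/LoRA adapter on top of a causal LM; BASE_MODEL is a placeholder, since the commit does not name the base model.

# Minimal sketch (not part of the commit): attach the adapter saved in last-checkpoint/
# to its base model. Assumes a PEFT/LoRA adapter for a causal LM.
from peft import PeftModel
from transformers import AutoModelForCausalLM

BASE_MODEL = "base-model-id"     # placeholder: the base model is not named in this diff
ADAPTER_DIR = "last-checkpoint"  # directory holding adapter_model.safetensors

base = AutoModelForCausalLM.from_pretrained(BASE_MODEL)
model = PeftModel.from_pretrained(base, ADAPTER_DIR)  # loads adapter_model.safetensors
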
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:1761c6539343a88505b90894c93e1ccd5e70fd95fbdcbbe107accb5a732afcc5
+oid sha256:2b8b4dc57484753ae86395042d266733bd8022cd38c159cb336f87055483d41d
 size 335922386
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:3b5a1d05b4449fe5c23c9940b89b45ac69280f93864f11d840efc4f7cef005ef
+oid sha256:3014d12eaf385de1d820412b759a4ad2e53da090792e88792b3b17a6608572fa
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:7c792918044964431737f4cb39f3769dbfd230048b1125ac69a6439eb6c8534b
+oid sha256:e69e2b49ea642509f0c688c16fb190b7cf27dac0a18903a5e2d1467d0343d8b8
 size 1064
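
Each of the binary files above is stored through Git LFS, so the diff only touches the three-line pointer: oid is the SHA-256 of the object's contents and size is its byte count. Below is a small sketch for checking a downloaded object against its pointer; the helper name is ours, not from the repo.

import hashlib
import os

def verify_lfs_object(path: str, expected_oid: str, expected_size: int) -> bool:
    """Return True if the file matches the oid/size recorded in its LFS pointer."""
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            digest.update(chunk)
    return digest.hexdigest() == expected_oid and os.path.getsize(path) == expected_size

# Example with the new optimizer.pt pointer from this commit:
verify_lfs_object(
    "last-checkpoint/optimizer.pt",
    "2b8b4dc57484753ae86395042d266733bd8022cd38c159cb336f87055483d41d",
    335922386,
)
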
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.016974326331423723,
+  "epoch": 0.021217907914279653,
   "eval_steps": 13,
-  "global_step": 40,
+  "global_step": 50,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -130,6 +130,27 @@
       "eval_samples_per_second": 11.753,
       "eval_steps_per_second": 5.883,
       "step": 39
+    },
+    {
+      "epoch": 0.01782304264799491,
+      "grad_norm": 0.48636701703071594,
+      "learning_rate": 1.9098300562505266e-05,
+      "loss": 1.0753,
+      "step": 42
+    },
+    {
+      "epoch": 0.019096117122851686,
+      "grad_norm": 0.5368080139160156,
+      "learning_rate": 7.612046748871327e-06,
+      "loss": 1.1679,
+      "step": 45
+    },
+    {
+      "epoch": 0.020369191597708464,
+      "grad_norm": 0.5309668779373169,
+      "learning_rate": 1.231165940486234e-06,
+      "loss": 1.1657,
+      "step": 48
     }
   ],
   "logging_steps": 3,
@@ -144,12 +165,12 @@
         "should_evaluate": false,
         "should_log": false,
         "should_save": true,
-        "should_training_stop": false
+        "should_training_stop": true
       },
       "attributes": {}
     }
   },
-  "total_flos": 1.4930481581654016e+16,
+  "total_flos": 1.8639917999456256e+16,
   "train_batch_size": 2,
   "trial_name": null,
   "trial_params": null