shibajustfor committed (verified)
Commit 5a06bab · Parent(s): 07fc174

Training in progress, step 150, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:6ffe6dfd3fda6166569dca8fd4206441fab84382574468a7d016dfff75a13ffa
+oid sha256:bed6dbe9a463afbdaa420d23f53fbeefe0bd5abe4bae29fe559d3f9cb28b9eab
 size 80013120
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:61ee0a22e7754951c6a5e53b9f0e7234ea3abf0eac9e4ec5d3f4d7f0044a86a5
+oid sha256:bbed3428b24e1c58e48c953eaea8913eb17ef0ed962516d13f3ea9bf5fa5c0c7
 size 41119636
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:a782246aff15a3409595f4a10bd4fc20fe0486446db75db049245365ae75220e
+oid sha256:15a65fb9fd056a32de3901e991c48a566cc7007cf9d229006a3b1a882eda11f4
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:0fd8212ec5ec3406d74a7f927b717dd30ea8a06115ee6582e14976f7b84b4b58
+oid sha256:b489c13e2d776471e9a48162938854a24466ce6713df9c0fb59abd9a09fb226b
 size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.1088139281828074,
+  "epoch": 0.1632208922742111,
   "eval_steps": 50,
-  "global_step": 100,
+  "global_step": 150,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -101,6 +101,49 @@
       "eval_samples_per_second": 15.949,
       "eval_steps_per_second": 7.995,
       "step": 100
+    },
+    {
+      "epoch": 0.11969532100108814,
+      "grad_norm": 1.7176902294158936,
+      "learning_rate": 8.79463319744677e-05,
+      "loss": 0.4559,
+      "step": 110
+    },
+    {
+      "epoch": 0.1305767138193689,
+      "grad_norm": 0.39822494983673096,
+      "learning_rate": 7.217825360835473e-05,
+      "loss": 0.4364,
+      "step": 120
+    },
+    {
+      "epoch": 0.14145810663764963,
+      "grad_norm": 0.42606645822525024,
+      "learning_rate": 5.713074385969457e-05,
+      "loss": 0.3936,
+      "step": 130
+    },
+    {
+      "epoch": 0.15233949945593037,
+      "grad_norm": 0.992786169052124,
+      "learning_rate": 4.3193525326884435e-05,
+      "loss": 0.4474,
+      "step": 140
+    },
+    {
+      "epoch": 0.1632208922742111,
+      "grad_norm": 0.35880205035209656,
+      "learning_rate": 3.072756464904006e-05,
+      "loss": 0.3639,
+      "step": 150
+    },
+    {
+      "epoch": 0.1632208922742111,
+      "eval_loss": 0.09264663606882095,
+      "eval_runtime": 24.2837,
+      "eval_samples_per_second": 15.937,
+      "eval_steps_per_second": 7.989,
+      "step": 150
     }
   ],
   "logging_steps": 10,
@@ -120,7 +163,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 3.017257970879693e+16,
+  "total_flos": 4.511635400451686e+16,
   "train_batch_size": 2,
   "trial_name": null,
   "trial_params": null