dimasik87 committed
Commit 3f6b5df · verified · 1 Parent(s): 4a18bcd

Training in progress, step 36, checkpoint

last-checkpoint/adapter_config.json CHANGED
@@ -10,23 +10,23 @@
   "layers_pattern": null,
   "layers_to_transform": null,
   "loftq_config": {},
-  "lora_alpha": 16,
-  "lora_dropout": 0.1,
+  "lora_alpha": 32,
+  "lora_dropout": 0.05,
   "megatron_config": null,
   "megatron_core": "megatron.core",
   "modules_to_save": null,
   "peft_type": "LORA",
-  "r": 8,
+  "r": 16,
   "rank_pattern": {},
   "revision": null,
   "target_modules": [
-    "o_proj",
+    "v_proj",
+    "down_proj",
     "up_proj",
     "k_proj",
-    "q_proj",
-    "down_proj",
+    "o_proj",
     "gate_proj",
-    "v_proj"
+    "q_proj"
   ],
   "task_type": "CAUSAL_LM",
   "use_dora": false,
last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:2c646f392aaf5ffe251d79f16f9c5aefbee754426cb4708fe2d5d34551bcc0e6
-size 83945296
+oid sha256:3b6453bdb0395fa8c4d11b15fd7f743cf902cda76f9c56fd48148e48a6c60e32
+size 167832240
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:842d68b94184abc7a320914c5753b95302a3a4c76261a2400b65001a749e8d5f
-size 168149074
+oid sha256:2874ab5eb82d434bfc0b3aa8ae48bb0ba8ef59b0416fb0a82bc3ca044821102e
+size 85723284
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:c03e971f2de1fda980f54f3289028b715b1671cbcd9e283c00c849bd70f82934
+oid sha256:a59f4bef5f627d7afe8f2103773befa851585a000baf07cc5f352d4521942c00
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:4521b8db9cc205e54aa606d85e707c024abd2d8ad4a20bec4b2cff365dc59cdf
+oid sha256:0aa0f75a1f3e346be25756b578158b09a68943f0b9f1cfe29f97939687f864ef
 size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,266 +1,351 @@
  {
  "best_metric": null,
  "best_model_checkpoint": null,
- "epoch": 0.010778184953653805,
- "eval_steps": 3,
- "global_step": 25,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
  {
- "epoch": 0.0004311273981461522,
- "grad_norm": 14.988656044006348,
  "learning_rate": 2e-05,
- "loss": 11.9265,
  "step": 1
  },
  {
- "epoch": 0.0004311273981461522,
- "eval_loss": 12.78434944152832,
- "eval_runtime": 58.2929,
- "eval_samples_per_second": 8.389,
- "eval_steps_per_second": 8.389,
  "step": 1
  },
  {
- "epoch": 0.0008622547962923044,
- "grad_norm": 15.124715805053711,
  "learning_rate": 4e-05,
- "loss": 12.5452,
  "step": 2
  },
  {
- "epoch": 0.0012933821944384565,
- "grad_norm": 12.851057052612305,
  "learning_rate": 6e-05,
- "loss": 13.0835,
  "step": 3
  },
  {
- "epoch": 0.0012933821944384565,
- "eval_loss": 12.626995086669922,
- "eval_runtime": 58.3375,
- "eval_samples_per_second": 8.382,
- "eval_steps_per_second": 8.382,
- "step": 3
  },
  {
- "epoch": 0.0017245095925846087,
- "grad_norm": 15.81566047668457,
- "learning_rate": 8e-05,
- "loss": 12.3524,
  "step": 4
  },
  {
- "epoch": 0.002155636990730761,
- "grad_norm": 18.372833251953125,
  "learning_rate": 0.0001,
- "loss": 11.0613,
  "step": 5
  },
  {
- "epoch": 0.002586764388876913,
- "grad_norm": 21.02024269104004,
  "learning_rate": 0.00012,
- "loss": 11.0406,
  "step": 6
  },
  {
- "epoch": 0.002586764388876913,
- "eval_loss": 8.352932929992676,
- "eval_runtime": 58.4155,
- "eval_samples_per_second": 8.371,
- "eval_steps_per_second": 8.371,
- "step": 6
- },
- {
- "epoch": 0.003017891787023065,
- "grad_norm": 20.353683471679688,
  "learning_rate": 0.00014,
- "loss": 9.0451,
  "step": 7
  },
  {
- "epoch": 0.0034490191851692174,
- "grad_norm": 35.790313720703125,
  "learning_rate": 0.00016,
- "loss": 7.4442,
  "step": 8
  },
  {
- "epoch": 0.0038801465833153696,
- "grad_norm": 30.771913528442383,
- "learning_rate": 0.00018,
- "loss": 6.2842,
- "step": 9
  },
  {
- "epoch": 0.0038801465833153696,
- "eval_loss": 4.413941860198975,
- "eval_runtime": 58.3924,
- "eval_samples_per_second": 8.374,
- "eval_steps_per_second": 8.374,
  "step": 9
  },
  {
- "epoch": 0.004311273981461522,
- "grad_norm": 31.092329025268555,
  "learning_rate": 0.0002,
- "loss": 4.2657,
  "step": 10
  },
  {
- "epoch": 0.004742401379607674,
- "grad_norm": 24.426212310791016,
- "learning_rate": 0.00019781476007338058,
- "loss": 1.2179,
  "step": 11
  },
  {
- "epoch": 0.005173528777753826,
- "grad_norm": 26.97491455078125,
- "learning_rate": 0.0001913545457642601,
- "loss": 1.8928,
  "step": 12
  },
  {
- "epoch": 0.005173528777753826,
- "eval_loss": 1.3277825117111206,
- "eval_runtime": 58.4038,
- "eval_samples_per_second": 8.373,
- "eval_steps_per_second": 8.373,
  "step": 12
  },
  {
- "epoch": 0.005604656175899978,
- "grad_norm": 24.090688705444336,
- "learning_rate": 0.00018090169943749476,
- "loss": 2.1082,
  "step": 13
  },
  {
- "epoch": 0.00603578357404613,
- "grad_norm": 54.64119338989258,
- "learning_rate": 0.00016691306063588583,
- "loss": 1.25,
  "step": 14
  },
  {
- "epoch": 0.006466910972192283,
- "grad_norm": 26.83595848083496,
- "learning_rate": 0.00015000000000000001,
- "loss": 1.1507,
  "step": 15
  },
  {
- "epoch": 0.006466910972192283,
- "eval_loss": 0.8006857633590698,
- "eval_runtime": 58.359,
- "eval_samples_per_second": 8.379,
- "eval_steps_per_second": 8.379,
- "step": 15
  },
  {
- "epoch": 0.006898038370338435,
- "grad_norm": 18.02296257019043,
- "learning_rate": 0.00013090169943749476,
- "loss": 0.9997,
  "step": 16
  },
  {
- "epoch": 0.007329165768484587,
- "grad_norm": 21.693586349487305,
- "learning_rate": 0.00011045284632676536,
- "loss": 0.8036,
  "step": 17
  },
  {
- "epoch": 0.007760293166630739,
- "grad_norm": 24.909374237060547,
- "learning_rate": 8.954715367323468e-05,
- "loss": 0.8566,
- "step": 18
- },
- {
- "epoch": 0.007760293166630739,
- "eval_loss": 0.6211725473403931,
- "eval_runtime": 58.3093,
- "eval_samples_per_second": 8.386,
- "eval_steps_per_second": 8.386,
  "step": 18
  },
  {
- "epoch": 0.008191420564776892,
- "grad_norm": 16.71841049194336,
- "learning_rate": 6.909830056250527e-05,
- "loss": 0.3249,
  "step": 19
  },
  {
- "epoch": 0.008622547962923045,
- "grad_norm": 11.01509952545166,
- "learning_rate": 5.000000000000002e-05,
- "loss": 0.3442,
  "step": 20
  },
  {
- "epoch": 0.009053675361069196,
- "grad_norm": 10.443573951721191,
- "learning_rate": 3.308693936411421e-05,
- "loss": 0.4075,
- "step": 21
  },
  {
- "epoch": 0.009053675361069196,
- "eval_loss": 0.8346744775772095,
- "eval_runtime": 58.344,
- "eval_samples_per_second": 8.381,
- "eval_steps_per_second": 8.381,
  "step": 21
  },
  {
- "epoch": 0.009484802759215347,
- "grad_norm": 19.735132217407227,
- "learning_rate": 1.9098300562505266e-05,
- "loss": 1.2175,
  "step": 22
  },
  {
- "epoch": 0.0099159301573615,
- "grad_norm": 15.83383846282959,
- "learning_rate": 8.645454235739903e-06,
- "loss": 0.9296,
  "step": 23
  },
  {
- "epoch": 0.010347057555507652,
- "grad_norm": 8.399575233459473,
- "learning_rate": 2.1852399266194314e-06,
- "loss": 0.8743,
  "step": 24
  },
  {
- "epoch": 0.010347057555507652,
- "eval_loss": 0.8398098349571228,
- "eval_runtime": 58.4574,
- "eval_samples_per_second": 8.365,
- "eval_steps_per_second": 8.365,
  "step": 24
  },
  {
- "epoch": 0.010778184953653805,
- "grad_norm": 19.13532066345215,
- "learning_rate": 0.0,
- "loss": 0.8357,
  "step": 25
  }
  ],
  "logging_steps": 1,
- "max_steps": 25,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
- "save_steps": 3,
  "stateful_callbacks": {
  "TrainerControl": {
  "args": {
@@ -268,13 +353,13 @@
  "should_evaluate": false,
  "should_log": false,
  "should_save": true,
- "should_training_stop": true
  },
  "attributes": {}
  }
  },
- "total_flos": 9247821240729600.0,
- "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
  }

  {
  "best_metric": null,
  "best_model_checkpoint": null,
+ "epoch": 0.031041172666522957,
+ "eval_steps": 4,
+ "global_step": 36,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
  {
+ "epoch": 0.0008622547962923044,
+ "grad_norm": 18.349796295166016,
  "learning_rate": 2e-05,
+ "loss": 12.2382,
  "step": 1
  },
  {
+ "epoch": 0.0008622547962923044,
+ "eval_loss": 12.783363342285156,
+ "eval_runtime": 42.0314,
+ "eval_samples_per_second": 11.634,
+ "eval_steps_per_second": 5.829,
  "step": 1
  },
  {
+ "epoch": 0.0017245095925846087,
+ "grad_norm": 18.404314041137695,
  "learning_rate": 4e-05,
+ "loss": 12.818,
  "step": 2
  },
  {
+ "epoch": 0.002586764388876913,
+ "grad_norm": 18.81452178955078,
  "learning_rate": 6e-05,
+ "loss": 12.2314,
  "step": 3
  },
  {
+ "epoch": 0.0034490191851692174,
+ "grad_norm": 18.826942443847656,
+ "learning_rate": 8e-05,
+ "loss": 12.0138,
+ "step": 4
  },
  {
+ "epoch": 0.0034490191851692174,
+ "eval_loss": 10.487162590026855,
+ "eval_runtime": 41.3199,
+ "eval_samples_per_second": 11.834,
+ "eval_steps_per_second": 5.929,
  "step": 4
  },
  {
+ "epoch": 0.004311273981461522,
+ "grad_norm": 23.916542053222656,
  "learning_rate": 0.0001,
+ "loss": 10.8756,
  "step": 5
  },
  {
+ "epoch": 0.005173528777753826,
+ "grad_norm": 25.37953758239746,
  "learning_rate": 0.00012,
+ "loss": 7.9933,
  "step": 6
  },
  {
+ "epoch": 0.00603578357404613,
+ "grad_norm": 36.603450775146484,
  "learning_rate": 0.00014,
+ "loss": 6.8124,
  "step": 7
  },
  {
+ "epoch": 0.006898038370338435,
+ "grad_norm": 39.93467712402344,
  "learning_rate": 0.00016,
+ "loss": 4.8259,
  "step": 8
  },
  {
+ "epoch": 0.006898038370338435,
+ "eval_loss": 1.5156302452087402,
+ "eval_runtime": 41.4263,
+ "eval_samples_per_second": 11.804,
+ "eval_steps_per_second": 5.914,
+ "step": 8
  },
  {
+ "epoch": 0.007760293166630739,
+ "grad_norm": 24.483657836914062,
+ "learning_rate": 0.00018,
+ "loss": 1.6681,
  "step": 9
  },
  {
+ "epoch": 0.008622547962923045,
+ "grad_norm": 20.87933921813965,
  "learning_rate": 0.0002,
+ "loss": 1.6076,
  "step": 10
  },
  {
+ "epoch": 0.009484802759215347,
+ "grad_norm": 28.462326049804688,
+ "learning_rate": 0.0001996917333733128,
+ "loss": 0.9284,
  "step": 11
  },
  {
+ "epoch": 0.010347057555507652,
+ "grad_norm": 7.382307529449463,
+ "learning_rate": 0.00019876883405951377,
+ "loss": 0.5719,
  "step": 12
  },
  {
+ "epoch": 0.010347057555507652,
+ "eval_loss": 0.8780114650726318,
+ "eval_runtime": 41.4971,
+ "eval_samples_per_second": 11.784,
+ "eval_steps_per_second": 5.904,
  "step": 12
  },
  {
+ "epoch": 0.011209312351799956,
+ "grad_norm": 23.78351402282715,
+ "learning_rate": 0.00019723699203976766,
+ "loss": 1.0036,
  "step": 13
  },
  {
+ "epoch": 0.01207156714809226,
+ "grad_norm": 6.90433406829834,
+ "learning_rate": 0.00019510565162951537,
+ "loss": 0.6748,
  "step": 14
  },
  {
+ "epoch": 0.012933821944384565,
+ "grad_norm": 12.185566902160645,
+ "learning_rate": 0.0001923879532511287,
+ "loss": 0.6046,
  "step": 15
  },
  {
+ "epoch": 0.01379607674067687,
+ "grad_norm": 19.96064567565918,
+ "learning_rate": 0.0001891006524188368,
+ "loss": 0.9423,
+ "step": 16
  },
  {
+ "epoch": 0.01379607674067687,
+ "eval_loss": 0.5733368992805481,
+ "eval_runtime": 41.5222,
+ "eval_samples_per_second": 11.777,
+ "eval_steps_per_second": 5.9,
  "step": 16
  },
  {
+ "epoch": 0.014658331536969174,
+ "grad_norm": 13.482269287109375,
+ "learning_rate": 0.00018526401643540922,
+ "loss": 0.4834,
  "step": 17
  },
  {
+ "epoch": 0.015520586333261479,
+ "grad_norm": 22.92808723449707,
+ "learning_rate": 0.00018090169943749476,
+ "loss": 0.9821,
  "step": 18
  },
  {
+ "epoch": 0.016382841129553783,
+ "grad_norm": 19.03671646118164,
+ "learning_rate": 0.0001760405965600031,
+ "loss": 0.8987,
  "step": 19
  },
  {
+ "epoch": 0.01724509592584609,
+ "grad_norm": 7.670022487640381,
+ "learning_rate": 0.00017071067811865476,
+ "loss": 0.4543,
  "step": 20
  },
  {
+ "epoch": 0.01724509592584609,
+ "eval_loss": 0.8335480093955994,
+ "eval_runtime": 41.5035,
+ "eval_samples_per_second": 11.782,
+ "eval_steps_per_second": 5.903,
+ "step": 20
  },
  {
+ "epoch": 0.018107350722138392,
+ "grad_norm": 16.6032657623291,
+ "learning_rate": 0.00016494480483301836,
+ "loss": 0.8973,
  "step": 21
  },
  {
+ "epoch": 0.018969605518430695,
+ "grad_norm": 7.4585185050964355,
+ "learning_rate": 0.00015877852522924732,
+ "loss": 0.4621,
  "step": 22
  },
  {
+ "epoch": 0.019831860314723,
+ "grad_norm": 6.364231586456299,
+ "learning_rate": 0.0001522498564715949,
+ "loss": 0.6574,
  "step": 23
  },
  {
+ "epoch": 0.020694115111015304,
+ "grad_norm": 20.26422882080078,
+ "learning_rate": 0.00014539904997395468,
+ "loss": 1.3692,
  "step": 24
  },
  {
+ "epoch": 0.020694115111015304,
+ "eval_loss": 1.0001345872879028,
+ "eval_runtime": 41.524,
+ "eval_samples_per_second": 11.776,
+ "eval_steps_per_second": 5.9,
  "step": 24
  },
  {
+ "epoch": 0.02155636990730761,
+ "grad_norm": 7.076972961425781,
+ "learning_rate": 0.000138268343236509,
+ "loss": 0.3212,
  "step": 25
+ },
+ {
+ "epoch": 0.022418624703599913,
+ "grad_norm": 6.530824661254883,
+ "learning_rate": 0.00013090169943749476,
+ "loss": 0.6942,
+ "step": 26
+ },
+ {
+ "epoch": 0.02328087949989222,
+ "grad_norm": 7.959903717041016,
+ "learning_rate": 0.00012334453638559057,
+ "loss": 0.4833,
+ "step": 27
+ },
+ {
+ "epoch": 0.02414313429618452,
+ "grad_norm": 10.797367095947266,
+ "learning_rate": 0.0001156434465040231,
+ "loss": 0.9116,
+ "step": 28
+ },
+ {
+ "epoch": 0.02414313429618452,
+ "eval_loss": 0.6466512680053711,
+ "eval_runtime": 41.5138,
+ "eval_samples_per_second": 11.779,
+ "eval_steps_per_second": 5.902,
+ "step": 28
+ },
+ {
+ "epoch": 0.025005389092476828,
+ "grad_norm": 7.114670276641846,
+ "learning_rate": 0.0001078459095727845,
+ "loss": 0.5407,
+ "step": 29
+ },
+ {
+ "epoch": 0.02586764388876913,
+ "grad_norm": 3.2049789428710938,
+ "learning_rate": 0.0001,
+ "loss": 0.4781,
+ "step": 30
+ },
+ {
+ "epoch": 0.026729898685061437,
+ "grad_norm": 9.187115669250488,
+ "learning_rate": 9.215409042721552e-05,
+ "loss": 0.591,
+ "step": 31
+ },
+ {
+ "epoch": 0.02759215348135374,
+ "grad_norm": 5.867708206176758,
+ "learning_rate": 8.435655349597689e-05,
+ "loss": 0.7579,
+ "step": 32
+ },
+ {
+ "epoch": 0.02759215348135374,
+ "eval_loss": 0.8243234753608704,
+ "eval_runtime": 41.5067,
+ "eval_samples_per_second": 11.781,
+ "eval_steps_per_second": 5.903,
+ "step": 32
+ },
+ {
+ "epoch": 0.028454408277646046,
+ "grad_norm": 10.666952133178711,
+ "learning_rate": 7.66554636144095e-05,
+ "loss": 0.9866,
+ "step": 33
+ },
+ {
+ "epoch": 0.029316663073938348,
+ "grad_norm": 4.174076080322266,
+ "learning_rate": 6.909830056250527e-05,
+ "loss": 0.6182,
+ "step": 34
+ },
+ {
+ "epoch": 0.030178917870230654,
+ "grad_norm": 3.677931070327759,
+ "learning_rate": 6.173165676349103e-05,
+ "loss": 0.5182,
+ "step": 35
+ },
+ {
+ "epoch": 0.031041172666522957,
+ "grad_norm": 3.110746145248413,
+ "learning_rate": 5.4600950026045326e-05,
+ "loss": 0.4857,
+ "step": 36
+ },
+ {
+ "epoch": 0.031041172666522957,
+ "eval_loss": 0.6007174849510193,
+ "eval_runtime": 41.5227,
+ "eval_samples_per_second": 11.777,
+ "eval_steps_per_second": 5.9,
+ "step": 36
  }
  ],
  "logging_steps": 1,
+ "max_steps": 50,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
+ "save_steps": 4,
  "stateful_callbacks": {
  "TrainerControl": {
  "args": {
  "should_evaluate": false,
  "should_log": false,
  "should_save": true,
+ "should_training_stop": false
  },
  "attributes": {}
  }
  },
+ "total_flos": 1.3353971104088064e+16,
+ "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
  }
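The trainer_state.json changes reflect a re-launched run rather than a continuation of the old one: max_steps goes from 25 to 50, the eval/save interval from 3 to 4 steps, the per-device train batch size from 1 to 2, and training is no longer flagged to stop. A minimal TrainingArguments sketch consistent with the new state is shown below; the output directory, scheduler type, and warmup are inferred from the logged learning-rate values (linear ramp to 2e-4 over the first 10 steps, then cosine decay), not read from training_args.bin.

# Sketch of TrainingArguments consistent with the new trainer_state.json (values inferred, not from training_args.bin).
from transformers import TrainingArguments

training_args = TrainingArguments(
    output_dir="last-checkpoint",    # placeholder path
    max_steps=50,                    # was 25
    per_device_train_batch_size=2,   # was 1
    logging_steps=1,
    eval_strategy="steps",           # "evaluation_strategy" on older transformers releases
    eval_steps=4,                    # was 3
    save_steps=4,                    # was 3
    learning_rate=2e-4,              # peak LR seen in the log history
    lr_scheduler_type="cosine",      # inferred from the logged LR values
    warmup_steps=10,                 # inferred: LR ramps 2e-05 -> 0.0002 over the first 10 steps
)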
last-checkpoint/training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:c1c25b60f410e4ec79e489229656c7b2a433ea4b96521a9df498c8e899096118
+oid sha256:2068f42f79a870a63a6397e16dda8eebb4ed67ad19583a418d7733a958a03a3d
 size 6776