eddysang committed
Commit 1a59d14 · verified · 1 Parent(s): 8b4712a

Training in progress, step 187, checkpoint
last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:63d64bb4252a56462612ce8ea325bbb4f7eb1e476a12210a5f3bc4adabecffe5
+oid sha256:f7cf0bd9a85d6e3b1dfba727a272bb5ec1b34ad233dbee1759f9c3dc7db4c616
 size 335604696
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:4beda4b81812c494724c944ce42a1384386d898438a92682ca41d1363ea7cd53
+oid sha256:130097d8b9a1531563be49666bf68e868a3488e44a4aa76718c4310d857b5354
 size 671466706
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:2d2cc8b4da1c1e828f3ecd6fdcd0ddcc7555dae3d2812a0ace74e33cfe7a8a86
+oid sha256:cfcd98a6fb5e6805c0c88270935cb6243951ff273b1a434ad4ff4ae8b2aadba3
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:c9a138d58efee4722b14f409e14731dcd0935c0eaa8a6078a4918744b9529bab
+oid sha256:9f9ab26294f369ba5d99cf6e2514a5d05f3df1a90b36a045191f6b23077ba365
 size 1064
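Each binary above is tracked with Git LFS, so the diff only records the new pointer: a sha256 oid and a byte size. A minimal sketch for checking a downloaded file against its pointer, assuming the checkpoint files are already on disk locally (the helper name verify_lfs_object is illustrative, not part of this repo):

```python
import hashlib
from pathlib import Path

def verify_lfs_object(path: str, expected_oid: str, expected_size: int) -> bool:
    """Compare a local file against the sha256 oid and size from its Git LFS pointer."""
    p = Path(path)
    if p.stat().st_size != expected_size:
        return False
    digest = hashlib.sha256()
    with p.open("rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
            digest.update(chunk)
    return digest.hexdigest() == expected_oid

# oid and size taken from the adapter_model.safetensors pointer in this commit
print(verify_lfs_object(
    "last-checkpoint/adapter_model.safetensors",
    "f7cf0bd9a85d6e3b1dfba727a272bb5ec1b34ad233dbee1759f9c3dc7db4c616",
    335604696,
))
```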
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.7878349022447502,
+  "epoch": 0.8666183924692252,
   "eval_steps": 50,
-  "global_step": 170,
+  "global_step": 187,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -1229,6 +1229,125 @@
       "learning_rate": 1.0048094716167095e-05,
       "loss": 0.1441,
       "step": 170
+    },
+    {
+      "epoch": 0.7924692251991311,
+      "grad_norm": 1.155311942100525,
+      "learning_rate": 9.40352196454532e-06,
+      "loss": 0.1666,
+      "step": 171
+    },
+    {
+      "epoch": 0.7971035481535119,
+      "grad_norm": 1.0911647081375122,
+      "learning_rate": 8.778930535580474e-06,
+      "loss": 0.185,
+      "step": 172
+    },
+    {
+      "epoch": 0.8017378711078929,
+      "grad_norm": 0.9174829721450806,
+      "learning_rate": 8.174510685872415e-06,
+      "loss": 0.1147,
+      "step": 173
+    },
+    {
+      "epoch": 0.8063721940622737,
+      "grad_norm": 1.362585425376892,
+      "learning_rate": 7.5904465275624884e-06,
+      "loss": 0.2452,
+      "step": 174
+    },
+    {
+      "epoch": 0.8110065170166546,
+      "grad_norm": 1.197021722793579,
+      "learning_rate": 7.026915972251254e-06,
+      "loss": 0.2385,
+      "step": 175
+    },
+    {
+      "epoch": 0.8156408399710355,
+      "grad_norm": 1.0614386796951294,
+      "learning_rate": 6.484090676804926e-06,
+      "loss": 0.1824,
+      "step": 176
+    },
+    {
+      "epoch": 0.8202751629254164,
+      "grad_norm": 1.1456388235092163,
+      "learning_rate": 5.962135991066971e-06,
+      "loss": 0.1921,
+      "step": 177
+    },
+    {
+      "epoch": 0.8249094858797973,
+      "grad_norm": 1.2095783948898315,
+      "learning_rate": 5.461210907490951e-06,
+      "loss": 0.1803,
+      "step": 178
+    },
+    {
+      "epoch": 0.8295438088341781,
+      "grad_norm": 1.2167255878448486,
+      "learning_rate": 4.981468012709877e-06,
+      "loss": 0.1777,
+      "step": 179
+    },
+    {
+      "epoch": 0.834178131788559,
+      "grad_norm": 1.2779210805892944,
+      "learning_rate": 4.523053441056876e-06,
+      "loss": 0.2648,
+      "step": 180
+    },
+    {
+      "epoch": 0.8388124547429399,
+      "grad_norm": 1.1896816492080688,
+      "learning_rate": 4.086106830051236e-06,
+      "loss": 0.1531,
+      "step": 181
+    },
+    {
+      "epoch": 0.8434467776973208,
+      "grad_norm": 1.0845763683319092,
+      "learning_rate": 3.670761277863485e-06,
+      "loss": 0.1953,
+      "step": 182
+    },
+    {
+      "epoch": 0.8480811006517016,
+      "grad_norm": 1.338794231414795,
+      "learning_rate": 3.277143302772342e-06,
+      "loss": 0.1878,
+      "step": 183
+    },
+    {
+      "epoch": 0.8527154236060825,
+      "grad_norm": 1.036252737045288,
+      "learning_rate": 2.9053728046260825e-06,
+      "loss": 0.1352,
+      "step": 184
+    },
+    {
+      "epoch": 0.8573497465604635,
+      "grad_norm": 1.3027642965316772,
+      "learning_rate": 2.555563028319885e-06,
+      "loss": 0.2213,
+      "step": 185
+    },
+    {
+      "epoch": 0.8619840695148443,
+      "grad_norm": 1.0671361684799194,
+      "learning_rate": 2.227820529300264e-06,
+      "loss": 0.2172,
+      "step": 186
+    },
+    {
+      "epoch": 0.8666183924692252,
+      "grad_norm": 1.0457696914672852,
+      "learning_rate": 1.9222451411073645e-06,
+      "loss": 0.1704,
+      "step": 187
     }
   ],
   "logging_steps": 1,
@@ -1248,7 +1367,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 1.0145742149438669e+18,
+  "total_flos": 1.1160316364382536e+18,
   "train_batch_size": 2,
   "trial_name": null,
   "trial_params": null