error577 committed
Commit d12a398 · verified · 1 Parent(s): ab7e226

Training in progress, step 220, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:cfd977cf7243d9c1f34b62b57b65ae2c467137aae625dd14db8a6cd1df7e4639
+oid sha256:379e4bf4bb490ea0b90d9b33347844b50130394e712de1211c8399dedb57115c
 size 1579384
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:171b376fd04807724ef479a4221c327ea4709dcca15adfcf9297e572211e8b8b
+oid sha256:c3ce2007ab70312728b054080dcbdfaa104c9b8bebf880c9e3901634ded32319
 size 857274
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:76709219306dc47d70628efaf5fcbc74b4416a92813b7b4f1d01168bd10c9f7c
+oid sha256:76842eb4fcb2a51cac39c00a5e5559ba4d358a8862a4a20b442cc1d7861de163
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:c822dec639f0641927f6a0448fd2ae65913fdbdae3d08ed0701aa491ca071f0a
+oid sha256:0fc887555ce5286f8ed809bc469842a587a9d7ef2ff84cb87e5fa06e8f663830
 size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.13206223432792705,
+  "epoch": 0.14526845776071973,
   "eval_steps": 20,
-  "global_step": 200,
+  "global_step": 220,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -1495,6 +1495,154 @@
       "eval_samples_per_second": 75.365,
       "eval_steps_per_second": 75.365,
       "step": 200
+    },
+    {
+      "epoch": 0.13272254549956666,
+      "grad_norm": 3305.528076171875,
+      "learning_rate": 7.830721146206451e-05,
+      "loss": 92.4619,
+      "step": 201
+    },
+    {
+      "epoch": 0.1333828566712063,
+      "grad_norm": 22266.435546875,
+      "learning_rate": 7.688410249570214e-05,
+      "loss": 90.5834,
+      "step": 202
+    },
+    {
+      "epoch": 0.13404316784284595,
+      "grad_norm": 26988.037109375,
+      "learning_rate": 7.54695740040912e-05,
+      "loss": 86.903,
+      "step": 203
+    },
+    {
+      "epoch": 0.13470347901448557,
+      "grad_norm": 31513.767578125,
+      "learning_rate": 7.406379198842189e-05,
+      "loss": 95.0657,
+      "step": 204
+    },
+    {
+      "epoch": 0.1353637901861252,
+      "grad_norm": 28028.66796875,
+      "learning_rate": 7.266692142344672e-05,
+      "loss": 94.906,
+      "step": 205
+    },
+    {
+      "epoch": 0.13602410135776485,
+      "grad_norm": 36479.84375,
+      "learning_rate": 7.127912623811993e-05,
+      "loss": 101.7062,
+      "step": 206
+    },
+    {
+      "epoch": 0.13668441252940447,
+      "grad_norm": 17603.3515625,
+      "learning_rate": 6.990056929635957e-05,
+      "loss": 97.2723,
+      "step": 207
+    },
+    {
+      "epoch": 0.1373447237010441,
+      "grad_norm": 29691.853515625,
+      "learning_rate": 6.853141237793506e-05,
+      "loss": 103.2447,
+      "step": 208
+    },
+    {
+      "epoch": 0.13800503487268376,
+      "grad_norm": 37476.609375,
+      "learning_rate": 6.717181615948126e-05,
+      "loss": 96.3164,
+      "step": 209
+    },
+    {
+      "epoch": 0.1386653460443234,
+      "grad_norm": 34801.02734375,
+      "learning_rate": 6.582194019564266e-05,
+      "loss": 92.4449,
+      "step": 210
+    },
+    {
+      "epoch": 0.13932565721596302,
+      "grad_norm": 16438.6015625,
+      "learning_rate": 6.448194290034848e-05,
+      "loss": 98.2269,
+      "step": 211
+    },
+    {
+      "epoch": 0.13998596838760266,
+      "grad_norm": 39508.171875,
+      "learning_rate": 6.315198152822272e-05,
+      "loss": 86.2956,
+      "step": 212
+    },
+    {
+      "epoch": 0.1406462795592423,
+      "grad_norm": 25744.484375,
+      "learning_rate": 6.183221215612904e-05,
+      "loss": 92.17,
+      "step": 213
+    },
+    {
+      "epoch": 0.14130659073088192,
+      "grad_norm": 23628.517578125,
+      "learning_rate": 6.052278966485491e-05,
+      "loss": 84.7765,
+      "step": 214
+    },
+    {
+      "epoch": 0.14196690190252156,
+      "grad_norm": 58147.26953125,
+      "learning_rate": 5.922386772093526e-05,
+      "loss": 98.1311,
+      "step": 215
+    },
+    {
+      "epoch": 0.1426272130741612,
+      "grad_norm": 39249.59765625,
+      "learning_rate": 5.793559875861938e-05,
+      "loss": 85.3396,
+      "step": 216
+    },
+    {
+      "epoch": 0.14328752424580082,
+      "grad_norm": 29692.048828125,
+      "learning_rate": 5.6658133961981894e-05,
+      "loss": 91.5496,
+      "step": 217
+    },
+    {
+      "epoch": 0.14394783541744047,
+      "grad_norm": 49323.46875,
+      "learning_rate": 5.5391623247180744e-05,
+      "loss": 84.2652,
+      "step": 218
+    },
+    {
+      "epoch": 0.1446081465890801,
+      "grad_norm": 43771.90234375,
+      "learning_rate": 5.413621524486363e-05,
+      "loss": 97.1517,
+      "step": 219
+    },
+    {
+      "epoch": 0.14526845776071973,
+      "grad_norm": 37146.6796875,
+      "learning_rate": 5.289205728272586e-05,
+      "loss": 88.1685,
+      "step": 220
+    },
+    {
+      "epoch": 0.14526845776071973,
+      "eval_loss": 9.690956115722656,
+      "eval_runtime": 6.5198,
+      "eval_samples_per_second": 75.923,
+      "eval_steps_per_second": 75.923,
+      "step": 220
     }
   ],
   "logging_steps": 1,
@@ -1514,7 +1662,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 129748728545280.0,
+  "total_flos": 145520653762560.0,
   "train_batch_size": 1,
   "trial_name": null,
   "trial_params": null