eddysang committed on
Commit cc20e98 · verified · 1 Parent(s): 2b8dac9

Training in progress, step 85, checkpoint

last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:5fec14f6a01d61272bacee525bac8b5fbe0d1cc5cfe555534176434a00372067
+oid sha256:f220adf6445faa3084119deeca55d2d287f74f40538df7e9218a2aeee61a809a
 size 640009682
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:9c72d9f3ca2f0dee1e321d24a425433f913801087616a40c32e828d954463375
+oid sha256:7ee335a62849ee2439378e9094b922b6e35c167d78196a6c3c8df14be4850d15
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:3f4e77fd2a3bb3f08929494d77da2f57f8781f91a45852bcf8f71a5777dd088c
+oid sha256:b383ad1d61ff4e9bbd86bd276c043e414782d2bb7de68ada3e289a786eb79681
 size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.07472014284733192,
+  "epoch": 0.0934001785591649,
   "eval_steps": 50,
-  "global_step": 68,
+  "global_step": 85,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -499,6 +499,125 @@
       "learning_rate": 0.00012518479547691435,
       "loss": 0.0,
       "step": 68
+    },
+    {
+      "epoch": 0.07581896847743974,
+      "grad_norm": NaN,
+      "learning_rate": 0.00012420442717428804,
+      "loss": 0.0,
+      "step": 69
+    },
+    {
+      "epoch": 0.07691779410754755,
+      "grad_norm": NaN,
+      "learning_rate": 0.00012320907072649044,
+      "loss": 0.0,
+      "step": 70
+    },
+    {
+      "epoch": 0.07801661973765538,
+      "grad_norm": NaN,
+      "learning_rate": 0.0001221990293287378,
+      "loss": 0.0,
+      "step": 71
+    },
+    {
+      "epoch": 0.0791154453677632,
+      "grad_norm": NaN,
+      "learning_rate": 0.00012117461064942435,
+      "loss": 0.0,
+      "step": 72
+    },
+    {
+      "epoch": 0.08021427099787103,
+      "grad_norm": NaN,
+      "learning_rate": 0.00012013612673640363,
+      "loss": 0.0,
+      "step": 73
+    },
+    {
+      "epoch": 0.08131309662797885,
+      "grad_norm": NaN,
+      "learning_rate": 0.00011908389392193547,
+      "loss": 0.0,
+      "step": 74
+    },
+    {
+      "epoch": 0.08241192225808668,
+      "grad_norm": NaN,
+      "learning_rate": 0.00011801823272632844,
+      "loss": 0.0,
+      "step": 75
+    },
+    {
+      "epoch": 0.08351074788819449,
+      "grad_norm": NaN,
+      "learning_rate": 0.00011693946776030599,
+      "loss": 0.0,
+      "step": 76
+    },
+    {
+      "epoch": 0.08460957351830231,
+      "grad_norm": NaN,
+      "learning_rate": 0.00011584792762612703,
+      "loss": 0.0,
+      "step": 77
+    },
+    {
+      "epoch": 0.08570839914841014,
+      "grad_norm": NaN,
+      "learning_rate": 0.00011474394481749035,
+      "loss": 0.0,
+      "step": 78
+    },
+    {
+      "epoch": 0.08680722477851796,
+      "grad_norm": NaN,
+      "learning_rate": 0.00011362785561825406,
+      "loss": 0.0,
+      "step": 79
+    },
+    {
+      "epoch": 0.08790605040862579,
+      "grad_norm": NaN,
+      "learning_rate": 0.0001125,
+      "loss": 0.0,
+      "step": 80
+    },
+    {
+      "epoch": 0.08900487603873361,
+      "grad_norm": NaN,
+      "learning_rate": 0.00011136072151847529,
+      "loss": 0.0,
+      "step": 81
+    },
+    {
+      "epoch": 0.09010370166884142,
+      "grad_norm": NaN,
+      "learning_rate": 0.00011021036720894179,
+      "loss": 0.0,
+      "step": 82
+    },
+    {
+      "epoch": 0.09120252729894925,
+      "grad_norm": NaN,
+      "learning_rate": 0.00010904928748046599,
+      "loss": 0.0,
+      "step": 83
+    },
+    {
+      "epoch": 0.09230135292905707,
+      "grad_norm": NaN,
+      "learning_rate": 0.0001078778360091808,
+      "loss": 0.0,
+      "step": 84
+    },
+    {
+      "epoch": 0.0934001785591649,
+      "grad_norm": NaN,
+      "learning_rate": 0.00010669636963055245,
+      "loss": 0.0,
+      "step": 85
     }
   ],
   "logging_steps": 1,
@@ -518,7 +637,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 4.0051762314765926e+17,
+  "total_flos": 4.978120149832827e+17,
   "train_batch_size": 2,
   "trial_name": null,
   "trial_params": null