Commit 5420456 (verified), committed by jssky · Parent: e5815e5

Training in progress, step 100, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:ce9ab012b2435054515222b428ef387b9e6f98c46c724ee8ef8953c6f898169f
+ oid sha256:eeb86b74d105d4e79f949e4b506c0e24e56db7c18257a55591ab699b32fabfbe
  size 671149168
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:a6705d8e13ba90074ee3767cc501c627581d4484441b6ed891b473051fbfd165
+ oid sha256:f2beac5dd7166dec0027b98a7ee7c28b0845069d773aff1ba2a4f41b502d8097
  size 341314196
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:9139ef683d0432374ed32f1de3527bc4c0bf626fb2c0c40bfc8ea85176ed9e89
+ oid sha256:e850f5fdf20368267b5e472efa301d9f55b0207ff3a6b5471fc8ce196639b8b4
  size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:1d359eb5d29e75fb2bbe5b7026981da69b95b8ad1fea469302d13cde104f7e8a
+ oid sha256:0ddb9588ea654e56e83effcf81a2bc03480954babcf6415cb44d41d3bfb8039f
  size 1064
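
All four files above are Git LFS pointers rather than the binaries themselves: each pointer records only the spec version, the sha256 oid of the stored object, and its size in bytes, so this commit changes nothing but the oids (the sizes are unchanged). A minimal sketch, assuming the actual binaries have been downloaded locally, for checking a file against its pointer; the helper names and the example path are illustrative, not part of this repository:

import hashlib
from pathlib import Path

def parse_lfs_pointer(pointer_text: str) -> dict:
    # Each pointer line is "key value"; collect them into a dict.
    fields = {}
    for line in pointer_text.strip().splitlines():
        key, _, value = line.partition(" ")
        fields[key] = value
    return fields

def sha256_of(path: Path, chunk_size: int = 1 << 20) -> str:
    # Stream the file so multi-hundred-MB checkpoints need not fit in memory.
    digest = hashlib.sha256()
    with path.open("rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

pointer = parse_lfs_pointer(
    "version https://git-lfs.github.com/spec/v1\n"
    "oid sha256:eeb86b74d105d4e79f949e4b506c0e24e56db7c18257a55591ab699b32fabfbe\n"
    "size 671149168\n"
)
local_file = Path("last-checkpoint/adapter_model.safetensors")  # illustrative path
if local_file.exists():
    expected_oid = pointer["oid"].removeprefix("sha256:")
    print("oid matches:", sha256_of(local_file) == expected_oid)
    print("size matches:", local_file.stat().st_size == int(pointer["size"]))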
last-checkpoint/trainer_state.json CHANGED
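The diff below replaces the step-50 training summary with the step-100 one: best_metric improves from 0.937868058681488 (checkpoint-50) to 0.8648684620857239 (checkpoint-100), global_step advances from 50 to 100, and total_flos doubles from 7.15499809800192e+16 to 1.430999619600384e+17, consistent with twice as many optimizer steps. A minimal sketch, assuming a local copy of last-checkpoint/trainer_state.json with the log_history layout shown in the diff below, for pulling out the train-loss curve and the eval points:

import json
from pathlib import Path

state = json.loads(Path("last-checkpoint/trainer_state.json").read_text())

# Per-step training entries carry "loss"; evaluation entries carry "eval_loss".
train_points = [(e["step"], e["loss"]) for e in state["log_history"] if "loss" in e]
eval_points = [(e["step"], e["eval_loss"]) for e in state["log_history"] if "eval_loss" in e]

print(f"best_metric={state['best_metric']} at {state['best_model_checkpoint']}")
print(f"final train loss: step {train_points[-1][0]} -> {train_points[-1][1]}")
for step, loss in eval_points:
    print(f"eval at step {step}: eval_loss={loss:.4f}")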
@@ -1,9 +1,9 @@
  {
- "best_metric": 0.937868058681488,
- "best_model_checkpoint": "miner_id_24/checkpoint-50",
- "epoch": 0.1890359168241966,
+ "best_metric": 0.8648684620857239,
+ "best_model_checkpoint": "miner_id_24/checkpoint-100",
+ "epoch": 0.3780718336483932,
  "eval_steps": 50,
- "global_step": 50,
+ "global_step": 100,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
@@ -365,6 +365,364 @@
  "eval_samples_per_second": 18.669,
  "eval_steps_per_second": 4.688,
  "step": 50
+ },
+ {
+ "epoch": 0.19281663516068054,
+ "grad_norm": 19.0587158203125,
+ "learning_rate": 8.894386393810563e-05,
+ "loss": 1.0027,
+ "step": 51
+ },
+ {
+ "epoch": 0.19659735349716445,
+ "grad_norm": 10.542956352233887,
+ "learning_rate": 8.842005554284296e-05,
+ "loss": 0.9294,
+ "step": 52
+ },
+ {
+ "epoch": 0.2003780718336484,
+ "grad_norm": 10.02282428741455,
+ "learning_rate": 8.788574348801675e-05,
+ "loss": 0.9812,
+ "step": 53
+ },
+ {
+ "epoch": 0.20415879017013233,
+ "grad_norm": 15.777267456054688,
+ "learning_rate": 8.73410738492077e-05,
+ "loss": 0.8996,
+ "step": 54
+ },
+ {
+ "epoch": 0.20793950850661624,
+ "grad_norm": 9.673592567443848,
+ "learning_rate": 8.678619553365659e-05,
+ "loss": 0.9667,
+ "step": 55
+ },
+ {
+ "epoch": 0.21172022684310018,
+ "grad_norm": 8.31556510925293,
+ "learning_rate": 8.622126023955446e-05,
+ "loss": 0.8437,
+ "step": 56
+ },
+ {
+ "epoch": 0.21550094517958412,
+ "grad_norm": 8.866250038146973,
+ "learning_rate": 8.564642241456986e-05,
+ "loss": 0.8598,
+ "step": 57
+ },
+ {
+ "epoch": 0.21928166351606806,
+ "grad_norm": 8.169628143310547,
+ "learning_rate": 8.506183921362443e-05,
+ "loss": 0.8421,
+ "step": 58
+ },
+ {
+ "epoch": 0.22306238185255198,
+ "grad_norm": 7.955894947052002,
+ "learning_rate": 8.44676704559283e-05,
+ "loss": 0.78,
+ "step": 59
+ },
+ {
+ "epoch": 0.22684310018903592,
+ "grad_norm": 9.585834503173828,
+ "learning_rate": 8.386407858128706e-05,
+ "loss": 0.8945,
+ "step": 60
+ },
+ {
+ "epoch": 0.23062381852551986,
+ "grad_norm": 10.034743309020996,
+ "learning_rate": 8.32512286056924e-05,
+ "loss": 0.866,
+ "step": 61
+ },
+ {
+ "epoch": 0.23440453686200377,
+ "grad_norm": 9.564702033996582,
+ "learning_rate": 8.262928807620843e-05,
+ "loss": 0.9144,
+ "step": 62
+ },
+ {
+ "epoch": 0.2381852551984877,
+ "grad_norm": 8.519525527954102,
+ "learning_rate": 8.199842702516583e-05,
+ "loss": 0.7744,
+ "step": 63
+ },
+ {
+ "epoch": 0.24196597353497165,
+ "grad_norm": 9.579198837280273,
+ "learning_rate": 8.135881792367686e-05,
+ "loss": 0.9486,
+ "step": 64
+ },
+ {
+ "epoch": 0.24574669187145556,
+ "grad_norm": 8.82320785522461,
+ "learning_rate": 8.07106356344834e-05,
+ "loss": 0.834,
+ "step": 65
+ },
+ {
+ "epoch": 0.2495274102079395,
+ "grad_norm": 25.461153030395508,
+ "learning_rate": 8.005405736415126e-05,
+ "loss": 0.8452,
+ "step": 66
+ },
+ {
+ "epoch": 0.2533081285444234,
+ "grad_norm": 9.499732971191406,
+ "learning_rate": 7.938926261462366e-05,
+ "loss": 0.8343,
+ "step": 67
+ },
+ {
+ "epoch": 0.2570888468809074,
+ "grad_norm": 9.04475212097168,
+ "learning_rate": 7.871643313414718e-05,
+ "loss": 0.8387,
+ "step": 68
+ },
+ {
+ "epoch": 0.2608695652173913,
+ "grad_norm": 9.186306953430176,
+ "learning_rate": 7.803575286758364e-05,
+ "loss": 0.8606,
+ "step": 69
+ },
+ {
+ "epoch": 0.2646502835538752,
+ "grad_norm": 8.687911033630371,
+ "learning_rate": 7.734740790612136e-05,
+ "loss": 0.8632,
+ "step": 70
+ },
+ {
+ "epoch": 0.2684310018903592,
+ "grad_norm": 9.258337020874023,
+ "learning_rate": 7.66515864363997e-05,
+ "loss": 0.9112,
+ "step": 71
+ },
+ {
+ "epoch": 0.2722117202268431,
+ "grad_norm": 9.268692016601562,
+ "learning_rate": 7.594847868906076e-05,
+ "loss": 0.7968,
+ "step": 72
+ },
+ {
+ "epoch": 0.27599243856332706,
+ "grad_norm": 9.389679908752441,
+ "learning_rate": 7.52382768867422e-05,
+ "loss": 0.8604,
+ "step": 73
+ },
+ {
+ "epoch": 0.27977315689981097,
+ "grad_norm": 8.71633243560791,
+ "learning_rate": 7.452117519152542e-05,
+ "loss": 0.8564,
+ "step": 74
+ },
+ {
+ "epoch": 0.2835538752362949,
+ "grad_norm": 8.666472434997559,
+ "learning_rate": 7.379736965185368e-05,
+ "loss": 0.8845,
+ "step": 75
+ },
+ {
+ "epoch": 0.28733459357277885,
+ "grad_norm": 8.605114936828613,
+ "learning_rate": 7.30670581489344e-05,
+ "loss": 0.857,
+ "step": 76
+ },
+ {
+ "epoch": 0.29111531190926276,
+ "grad_norm": 9.577071189880371,
+ "learning_rate": 7.233044034264034e-05,
+ "loss": 0.8482,
+ "step": 77
+ },
+ {
+ "epoch": 0.2948960302457467,
+ "grad_norm": 9.829218864440918,
+ "learning_rate": 7.158771761692464e-05,
+ "loss": 0.8762,
+ "step": 78
+ },
+ {
+ "epoch": 0.29867674858223064,
+ "grad_norm": 8.901524543762207,
+ "learning_rate": 7.083909302476453e-05,
+ "loss": 0.8257,
+ "step": 79
+ },
+ {
+ "epoch": 0.30245746691871456,
+ "grad_norm": 8.700294494628906,
+ "learning_rate": 7.008477123264848e-05,
+ "loss": 0.8635,
+ "step": 80
+ },
+ {
+ "epoch": 0.30623818525519847,
+ "grad_norm": 8.539888381958008,
+ "learning_rate": 6.932495846462261e-05,
+ "loss": 0.845,
+ "step": 81
+ },
+ {
+ "epoch": 0.31001890359168244,
+ "grad_norm": 8.737793922424316,
+ "learning_rate": 6.855986244591104e-05,
+ "loss": 0.7761,
+ "step": 82
+ },
+ {
+ "epoch": 0.31379962192816635,
+ "grad_norm": 10.041154861450195,
+ "learning_rate": 6.778969234612584e-05,
+ "loss": 0.78,
+ "step": 83
+ },
+ {
+ "epoch": 0.31758034026465026,
+ "grad_norm": 10.532041549682617,
+ "learning_rate": 6.701465872208216e-05,
+ "loss": 0.8732,
+ "step": 84
+ },
+ {
+ "epoch": 0.32136105860113423,
+ "grad_norm": 10.783872604370117,
+ "learning_rate": 6.623497346023418e-05,
+ "loss": 0.8432,
+ "step": 85
+ },
+ {
+ "epoch": 0.32514177693761814,
+ "grad_norm": 8.684202194213867,
+ "learning_rate": 6.545084971874738e-05,
+ "loss": 0.7461,
+ "step": 86
+ },
+ {
+ "epoch": 0.32892249527410206,
+ "grad_norm": 12.615586280822754,
+ "learning_rate": 6.466250186922325e-05,
+ "loss": 0.852,
+ "step": 87
+ },
+ {
+ "epoch": 0.332703213610586,
+ "grad_norm": 13.347943305969238,
+ "learning_rate": 6.387014543809223e-05,
+ "loss": 0.9457,
+ "step": 88
+ },
+ {
+ "epoch": 0.33648393194706994,
+ "grad_norm": 11.482415199279785,
+ "learning_rate": 6.307399704769099e-05,
+ "loss": 0.847,
+ "step": 89
+ },
+ {
+ "epoch": 0.34026465028355385,
+ "grad_norm": 9.020286560058594,
+ "learning_rate": 6.227427435703997e-05,
+ "loss": 0.8173,
+ "step": 90
+ },
+ {
+ "epoch": 0.3440453686200378,
+ "grad_norm": 9.47546100616455,
+ "learning_rate": 6.147119600233758e-05,
+ "loss": 0.8804,
+ "step": 91
+ },
+ {
+ "epoch": 0.34782608695652173,
+ "grad_norm": 9.721048355102539,
+ "learning_rate": 6.066498153718735e-05,
+ "loss": 0.8527,
+ "step": 92
+ },
+ {
+ "epoch": 0.3516068052930057,
+ "grad_norm": 10.509419441223145,
+ "learning_rate": 5.985585137257401e-05,
+ "loss": 0.8958,
+ "step": 93
+ },
+ {
+ "epoch": 0.3553875236294896,
+ "grad_norm": 10.52464771270752,
+ "learning_rate": 5.90440267166055e-05,
+ "loss": 0.9308,
+ "step": 94
+ },
+ {
+ "epoch": 0.3591682419659735,
+ "grad_norm": 12.20820426940918,
+ "learning_rate": 5.8229729514036705e-05,
+ "loss": 0.7969,
+ "step": 95
+ },
+ {
+ "epoch": 0.3629489603024575,
+ "grad_norm": 10.435171127319336,
+ "learning_rate": 5.74131823855921e-05,
+ "loss": 0.8355,
+ "step": 96
+ },
+ {
+ "epoch": 0.3667296786389414,
+ "grad_norm": 10.847406387329102,
+ "learning_rate": 5.6594608567103456e-05,
+ "loss": 0.8236,
+ "step": 97
+ },
+ {
+ "epoch": 0.3705103969754253,
+ "grad_norm": 10.55009937286377,
+ "learning_rate": 5.577423184847932e-05,
+ "loss": 0.7918,
+ "step": 98
+ },
+ {
+ "epoch": 0.3742911153119093,
+ "grad_norm": 10.11078929901123,
+ "learning_rate": 5.495227651252315e-05,
+ "loss": 0.7335,
+ "step": 99
+ },
+ {
+ "epoch": 0.3780718336483932,
+ "grad_norm": 15.384814262390137,
+ "learning_rate": 5.4128967273616625e-05,
+ "loss": 0.8123,
+ "step": 100
+ },
+ {
+ "epoch": 0.3780718336483932,
+ "eval_loss": 0.8648684620857239,
+ "eval_runtime": 23.8735,
+ "eval_samples_per_second": 18.682,
+ "eval_steps_per_second": 4.691,
+ "step": 100
  }
  ],
  "logging_steps": 1,
@@ -393,7 +751,7 @@
  "attributes": {}
  }
  },
- "total_flos": 7.15499809800192e+16,
+ "total_flos": 1.430999619600384e+17,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null