0x1202 committed on
Commit 36d3d92 · verified · 1 Parent(s): d40cd3f

Training in progress, step 150, checkpoint
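This is an automated checkpoint commit: four LFS-tracked binaries are swapped and trainer_state.json is extended, as diffed below. As a reading aid, a minimal sketch — assuming the repository is cloned locally, and assuming the standard Hugging Face Trainer layout of trainer_state.json (including its log_history list, which this page does not show verbatim) — of loading the state file and printing the fields this commit changes:

```python
import json

# Assumes a local clone of this repo; the path comes from the file list below.
with open("last-checkpoint/trainer_state.json", "r", encoding="utf-8") as fh:
    state = json.load(fh)

print(state["global_step"])            # 150 after this commit (previously 50)
print(state["epoch"])                  # ~0.0158806, i.e. ~1.059e-4 epoch per optimizer step
print(state["best_metric"])            # 11.5
print(state["best_model_checkpoint"])  # "miner_id_24/checkpoint-50" (unchanged by this commit)
print(state["total_flos"])             # 24239441313792.0 (was 8079813771264.0)
print(len(state["log_history"]))       # per-step loss records plus the eval entries at steps 50/100/150
```

The bookkeeping is self-consistent: the new header implies 0.01588057805304113 / 150 ≈ 1.0587e-4 epoch per step, the same rate as the old 0.005293526017680377 / 50, and the eval loss staying at 11.5 at steps 100 and 150 is consistent with early_stopping_patience_counter moving from 0 to 2 further down in the diff.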

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:b9d7de647e4c6f332e094c49c5d6b795a3f4aa967451300864dabddc85eadd0c
+ oid sha256:9cac74c2c7e9d67fed29cdc6a69799bdf778b9535cf087abcb36af8e5495a2f6
  size 34456
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:3d67f7312ee1a2ac669e07cd702a9d826ced6b957bcc380eef00e2085c188f24
+ oid sha256:59cd3fbebec7d703af29a90d4a07ddebefd5c333596d59f4b53ca2975c87a71d
  size 73222
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:da883aff0d21feb833dee03c00a1a3377cc9508141e1acf34a259db62540bcb0
+ oid sha256:8611bed02f3437ea1d683eea6dbfa256e3e6311ca35f6d10da816f6e661eebf9
  size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:1d359eb5d29e75fb2bbe5b7026981da69b95b8ad1fea469302d13cde104f7e8a
+ oid sha256:d8ce05761f46e7cf72fb17a02e3a0ca15c9d25ce3babf590eeb40568923b8bac
  size 1064
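The four files above are Git LFS pointer files: each stores only the spec version, the sha256 oid of the real payload, and its size in bytes, so the commit replaces checkpoint contents without embedding them in the repo. A minimal sketch, assuming the payloads have been fetched locally (e.g. with git lfs pull) and using illustrative paths, of checking a file against the oid and size recorded in its pointer:

```python
import hashlib
import os

def matches_lfs_pointer(pointer_path: str, payload_path: str) -> bool:
    """Return True if payload_path matches the oid/size in a Git LFS pointer file."""
    fields = {}
    with open(pointer_path, "r", encoding="utf-8") as fh:
        for line in fh:
            key, _, value = line.strip().partition(" ")
            fields[key] = value

    expected_oid = fields["oid"].split(":", 1)[1]   # "sha256:<hex>" -> "<hex>"
    expected_size = int(fields["size"])

    digest = hashlib.sha256()
    with open(payload_path, "rb") as fh:
        for chunk in iter(lambda: fh.read(1 << 20), b""):  # hash in 1 MiB chunks
            digest.update(chunk)

    return (digest.hexdigest() == expected_oid
            and os.path.getsize(payload_path) == expected_size)
```

For example, the optimizer.pt payload in this commit should hash to 59cd3fbe… and be 73222 bytes.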
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
  {
  "best_metric": 11.5,
  "best_model_checkpoint": "miner_id_24/checkpoint-50",
- "epoch": 0.005293526017680377,
+ "epoch": 0.01588057805304113,
  "eval_steps": 50,
- "global_step": 50,
+ "global_step": 150,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
@@ -373,6 +373,722 @@
  "eval_samples_per_second": 220.929,
  "eval_steps_per_second": 55.232,
  "step": 50
+ },
+ {
+ "epoch": 0.0053993965380339844,
+ "grad_norm": 0.00015319549129344523,
+ "learning_rate": 8.894386393810563e-05,
+ "loss": 46.0,
+ "step": 51
+ },
+ {
+ "epoch": 0.005505267058387592,
+ "grad_norm": 0.00019435409922152758,
+ "learning_rate": 8.842005554284296e-05,
+ "loss": 46.0,
+ "step": 52
+ },
+ {
+ "epoch": 0.0056111375787411995,
+ "grad_norm": 0.00020844223035965115,
+ "learning_rate": 8.788574348801675e-05,
+ "loss": 46.0,
+ "step": 53
+ },
+ {
+ "epoch": 0.005717008099094807,
+ "grad_norm": 0.00020513297931756824,
+ "learning_rate": 8.73410738492077e-05,
+ "loss": 46.0,
+ "step": 54
+ },
+ {
+ "epoch": 0.0058228786194484145,
+ "grad_norm": 0.0001638914691284299,
+ "learning_rate": 8.678619553365659e-05,
+ "loss": 46.0,
+ "step": 55
+ },
+ {
+ "epoch": 0.005928749139802022,
+ "grad_norm": 0.00020096043590456247,
+ "learning_rate": 8.622126023955446e-05,
+ "loss": 46.0,
+ "step": 56
+ },
+ {
+ "epoch": 0.0060346196601556296,
+ "grad_norm": 0.00023176470131147653,
+ "learning_rate": 8.564642241456986e-05,
+ "loss": 46.0,
+ "step": 57
+ },
+ {
+ "epoch": 0.006140490180509237,
+ "grad_norm": 0.0001855547889135778,
+ "learning_rate": 8.506183921362443e-05,
+ "loss": 46.0,
+ "step": 58
+ },
+ {
+ "epoch": 0.006246360700862845,
+ "grad_norm": 0.0001982215471798554,
+ "learning_rate": 8.44676704559283e-05,
+ "loss": 46.0,
+ "step": 59
+ },
+ {
+ "epoch": 0.006352231221216452,
+ "grad_norm": 0.0001941195223480463,
+ "learning_rate": 8.386407858128706e-05,
+ "loss": 46.0,
+ "step": 60
+ },
+ {
+ "epoch": 0.00645810174157006,
+ "grad_norm": 0.00019479637558106333,
+ "learning_rate": 8.32512286056924e-05,
+ "loss": 46.0,
+ "step": 61
+ },
+ {
+ "epoch": 0.006563972261923667,
+ "grad_norm": 0.0001838829048210755,
+ "learning_rate": 8.262928807620843e-05,
+ "loss": 46.0,
+ "step": 62
+ },
+ {
+ "epoch": 0.006669842782277275,
+ "grad_norm": 0.00016903660434763879,
+ "learning_rate": 8.199842702516583e-05,
+ "loss": 46.0,
+ "step": 63
+ },
+ {
+ "epoch": 0.006775713302630882,
+ "grad_norm": 0.00019123349920846522,
+ "learning_rate": 8.135881792367686e-05,
+ "loss": 46.0,
+ "step": 64
+ },
+ {
+ "epoch": 0.00688158382298449,
+ "grad_norm": 0.0002044424181804061,
+ "learning_rate": 8.07106356344834e-05,
+ "loss": 46.0,
+ "step": 65
+ },
+ {
+ "epoch": 0.006987454343338097,
+ "grad_norm": 0.00026173770311288536,
+ "learning_rate": 8.005405736415126e-05,
+ "loss": 46.0,
+ "step": 66
+ },
+ {
+ "epoch": 0.007093324863691705,
+ "grad_norm": 0.00011461394024081528,
+ "learning_rate": 7.938926261462366e-05,
+ "loss": 46.0,
+ "step": 67
+ },
+ {
+ "epoch": 0.007199195384045312,
+ "grad_norm": 0.0001901618525153026,
+ "learning_rate": 7.871643313414718e-05,
+ "loss": 46.0,
+ "step": 68
+ },
+ {
+ "epoch": 0.00730506590439892,
+ "grad_norm": 0.00021410213958006352,
+ "learning_rate": 7.803575286758364e-05,
+ "loss": 46.0,
+ "step": 69
+ },
+ {
+ "epoch": 0.007410936424752527,
+ "grad_norm": 0.00018753486801870167,
+ "learning_rate": 7.734740790612136e-05,
+ "loss": 46.0,
+ "step": 70
+ },
+ {
+ "epoch": 0.007516806945106135,
+ "grad_norm": 0.0002480893163010478,
+ "learning_rate": 7.66515864363997e-05,
+ "loss": 46.0,
+ "step": 71
+ },
+ {
+ "epoch": 0.007622677465459742,
+ "grad_norm": 0.0002523521543480456,
+ "learning_rate": 7.594847868906076e-05,
+ "loss": 46.0,
+ "step": 72
+ },
+ {
+ "epoch": 0.00772854798581335,
+ "grad_norm": 0.00020747345115523785,
+ "learning_rate": 7.52382768867422e-05,
+ "loss": 46.0,
+ "step": 73
+ },
+ {
+ "epoch": 0.007834418506166958,
+ "grad_norm": 0.00020068875164724886,
+ "learning_rate": 7.452117519152542e-05,
+ "loss": 46.0,
+ "step": 74
+ },
+ {
+ "epoch": 0.007940289026520565,
+ "grad_norm": 0.00020946303266100585,
+ "learning_rate": 7.379736965185368e-05,
+ "loss": 46.0,
+ "step": 75
+ },
+ {
+ "epoch": 0.008046159546874173,
+ "grad_norm": 0.00015278464707080275,
+ "learning_rate": 7.30670581489344e-05,
+ "loss": 46.0,
+ "step": 76
+ },
+ {
+ "epoch": 0.00815203006722778,
+ "grad_norm": 0.00016136947670020163,
+ "learning_rate": 7.233044034264034e-05,
+ "loss": 46.0,
+ "step": 77
+ },
+ {
+ "epoch": 0.008257900587581388,
+ "grad_norm": 0.0001631510822335258,
+ "learning_rate": 7.158771761692464e-05,
+ "loss": 46.0,
+ "step": 78
+ },
+ {
+ "epoch": 0.008363771107934995,
+ "grad_norm": 0.0001458908518543467,
+ "learning_rate": 7.083909302476453e-05,
+ "loss": 46.0,
+ "step": 79
+ },
+ {
+ "epoch": 0.008469641628288603,
+ "grad_norm": 0.00016608608711976558,
+ "learning_rate": 7.008477123264848e-05,
+ "loss": 46.0,
+ "step": 80
+ },
+ {
+ "epoch": 0.00857551214864221,
+ "grad_norm": 0.00013964880781713873,
+ "learning_rate": 6.932495846462261e-05,
+ "loss": 46.0,
+ "step": 81
+ },
+ {
+ "epoch": 0.008681382668995818,
+ "grad_norm": 0.00013575328921433538,
+ "learning_rate": 6.855986244591104e-05,
+ "loss": 46.0,
+ "step": 82
+ },
+ {
+ "epoch": 0.008787253189349425,
+ "grad_norm": 0.00019137118943035603,
+ "learning_rate": 6.778969234612584e-05,
+ "loss": 46.0,
+ "step": 83
+ },
+ {
+ "epoch": 0.008893123709703033,
+ "grad_norm": 0.00020755836158059537,
+ "learning_rate": 6.701465872208216e-05,
+ "loss": 46.0,
+ "step": 84
+ },
+ {
+ "epoch": 0.00899899423005664,
+ "grad_norm": 0.00029875306063331664,
+ "learning_rate": 6.623497346023418e-05,
+ "loss": 46.0,
+ "step": 85
+ },
+ {
+ "epoch": 0.009104864750410249,
+ "grad_norm": 0.0003985251532867551,
+ "learning_rate": 6.545084971874738e-05,
+ "loss": 46.0,
+ "step": 86
+ },
+ {
+ "epoch": 0.009210735270763855,
+ "grad_norm": 0.0007180428947322071,
+ "learning_rate": 6.466250186922325e-05,
+ "loss": 46.0,
+ "step": 87
+ },
+ {
+ "epoch": 0.009316605791117464,
+ "grad_norm": 0.002547956071794033,
+ "learning_rate": 6.387014543809223e-05,
+ "loss": 46.0,
+ "step": 88
+ },
+ {
+ "epoch": 0.00942247631147107,
+ "grad_norm": 0.002583778463304043,
+ "learning_rate": 6.307399704769099e-05,
+ "loss": 46.0,
+ "step": 89
+ },
+ {
+ "epoch": 0.009528346831824679,
+ "grad_norm": 0.0024923228193074465,
+ "learning_rate": 6.227427435703997e-05,
+ "loss": 46.0,
+ "step": 90
+ },
+ {
+ "epoch": 0.009634217352178285,
+ "grad_norm": 0.0021992200054228306,
+ "learning_rate": 6.147119600233758e-05,
+ "loss": 46.0,
+ "step": 91
+ },
+ {
+ "epoch": 0.009740087872531894,
+ "grad_norm": 0.001998371910303831,
+ "learning_rate": 6.066498153718735e-05,
+ "loss": 46.0,
+ "step": 92
+ },
+ {
+ "epoch": 0.0098459583928855,
+ "grad_norm": 0.002456718124449253,
+ "learning_rate": 5.985585137257401e-05,
+ "loss": 46.0,
+ "step": 93
+ },
+ {
+ "epoch": 0.009951828913239109,
+ "grad_norm": 0.0027659297920763493,
+ "learning_rate": 5.90440267166055e-05,
+ "loss": 46.0,
+ "step": 94
+ },
+ {
+ "epoch": 0.010057699433592715,
+ "grad_norm": 0.002772964769974351,
+ "learning_rate": 5.8229729514036705e-05,
+ "loss": 46.0,
+ "step": 95
+ },
+ {
+ "epoch": 0.010163569953946324,
+ "grad_norm": 0.0028121722862124443,
+ "learning_rate": 5.74131823855921e-05,
+ "loss": 46.0,
+ "step": 96
+ },
+ {
+ "epoch": 0.01026944047429993,
+ "grad_norm": 0.002786785364151001,
+ "learning_rate": 5.6594608567103456e-05,
+ "loss": 46.0,
+ "step": 97
+ },
+ {
+ "epoch": 0.010375310994653539,
+ "grad_norm": 0.0026966389268636703,
+ "learning_rate": 5.577423184847932e-05,
+ "loss": 46.0,
+ "step": 98
+ },
+ {
+ "epoch": 0.010481181515007145,
+ "grad_norm": 0.003058412577956915,
+ "learning_rate": 5.495227651252315e-05,
+ "loss": 46.0,
+ "step": 99
+ },
+ {
+ "epoch": 0.010587052035360754,
+ "grad_norm": 0.0024895125534385443,
+ "learning_rate": 5.4128967273616625e-05,
+ "loss": 46.0,
+ "step": 100
+ },
+ {
+ "epoch": 0.010587052035360754,
+ "eval_loss": 11.5,
+ "eval_runtime": 72.0658,
+ "eval_samples_per_second": 220.743,
+ "eval_steps_per_second": 55.186,
+ "step": 100
+ },
+ {
+ "epoch": 0.01069292255571436,
+ "grad_norm": 0.0003552868729457259,
+ "learning_rate": 5.330452921628497e-05,
+ "loss": 46.0,
+ "step": 101
+ },
+ {
+ "epoch": 0.010798793076067969,
+ "grad_norm": 0.0003323951968923211,
+ "learning_rate": 5.247918773366112e-05,
+ "loss": 46.0,
+ "step": 102
+ },
+ {
+ "epoch": 0.010904663596421577,
+ "grad_norm": 0.0002966799074783921,
+ "learning_rate": 5.165316846586541e-05,
+ "loss": 46.0,
+ "step": 103
+ },
+ {
+ "epoch": 0.011010534116775184,
+ "grad_norm": 0.0003673431056085974,
+ "learning_rate": 5.0826697238317935e-05,
+ "loss": 46.0,
+ "step": 104
+ },
+ {
+ "epoch": 0.011116404637128792,
+ "grad_norm": 0.00031036778818815947,
+ "learning_rate": 5e-05,
+ "loss": 46.0,
+ "step": 105
+ },
+ {
+ "epoch": 0.011222275157482399,
+ "grad_norm": 0.00028706042212434113,
+ "learning_rate": 4.917330276168208e-05,
+ "loss": 46.0,
+ "step": 106
+ },
+ {
+ "epoch": 0.011328145677836007,
+ "grad_norm": 0.0002792067243717611,
+ "learning_rate": 4.834683153413459e-05,
+ "loss": 46.0,
+ "step": 107
+ },
+ {
+ "epoch": 0.011434016198189614,
+ "grad_norm": 0.00030040412093512714,
+ "learning_rate": 4.7520812266338885e-05,
+ "loss": 46.0,
+ "step": 108
+ },
+ {
+ "epoch": 0.011539886718543222,
+ "grad_norm": 0.00028762564761564136,
+ "learning_rate": 4.669547078371504e-05,
+ "loss": 46.0,
+ "step": 109
+ },
+ {
+ "epoch": 0.011645757238896829,
+ "grad_norm": 0.0003190697461832315,
+ "learning_rate": 4.5871032726383386e-05,
+ "loss": 46.0,
+ "step": 110
+ },
+ {
+ "epoch": 0.011751627759250437,
+ "grad_norm": 0.0003825913299806416,
+ "learning_rate": 4.504772348747687e-05,
+ "loss": 46.0,
+ "step": 111
+ },
+ {
+ "epoch": 0.011857498279604044,
+ "grad_norm": 0.0003889522049576044,
+ "learning_rate": 4.4225768151520694e-05,
+ "loss": 46.0,
+ "step": 112
+ },
+ {
+ "epoch": 0.011963368799957652,
+ "grad_norm": 0.000495815824251622,
+ "learning_rate": 4.3405391432896555e-05,
+ "loss": 46.0,
+ "step": 113
+ },
+ {
+ "epoch": 0.012069239320311259,
+ "grad_norm": 0.0003524029743857682,
+ "learning_rate": 4.2586817614407895e-05,
+ "loss": 46.0,
+ "step": 114
+ },
+ {
+ "epoch": 0.012175109840664868,
+ "grad_norm": 0.0003622378862928599,
+ "learning_rate": 4.17702704859633e-05,
+ "loss": 46.0,
+ "step": 115
+ },
+ {
+ "epoch": 0.012280980361018474,
+ "grad_norm": 0.00025719942641444504,
+ "learning_rate": 4.095597328339452e-05,
+ "loss": 46.0,
+ "step": 116
+ },
+ {
+ "epoch": 0.012386850881372083,
+ "grad_norm": 0.0003259727091062814,
+ "learning_rate": 4.0144148627425993e-05,
+ "loss": 46.0,
+ "step": 117
+ },
+ {
+ "epoch": 0.01249272140172569,
+ "grad_norm": 0.0002925765002146363,
+ "learning_rate": 3.933501846281267e-05,
+ "loss": 46.0,
+ "step": 118
+ },
+ {
+ "epoch": 0.012598591922079298,
+ "grad_norm": 0.0003228446585126221,
+ "learning_rate": 3.852880399766243e-05,
+ "loss": 46.0,
+ "step": 119
+ },
+ {
+ "epoch": 0.012704462442432904,
+ "grad_norm": 0.0003316564834676683,
+ "learning_rate": 3.772572564296005e-05,
+ "loss": 46.0,
+ "step": 120
+ },
+ {
+ "epoch": 0.012810332962786513,
+ "grad_norm": 0.0002721836499404162,
+ "learning_rate": 3.6926002952309016e-05,
+ "loss": 46.0,
+ "step": 121
+ },
+ {
+ "epoch": 0.01291620348314012,
+ "grad_norm": 0.00028851881506852806,
+ "learning_rate": 3.612985456190778e-05,
+ "loss": 46.0,
+ "step": 122
+ },
+ {
+ "epoch": 0.013022074003493728,
+ "grad_norm": 0.0002666217915248126,
+ "learning_rate": 3.533749813077677e-05,
+ "loss": 46.0,
+ "step": 123
+ },
+ {
+ "epoch": 0.013127944523847334,
+ "grad_norm": 0.000259630149230361,
+ "learning_rate": 3.4549150281252636e-05,
+ "loss": 46.0,
+ "step": 124
+ },
+ {
+ "epoch": 0.013233815044200943,
+ "grad_norm": 0.0003889918443746865,
+ "learning_rate": 3.3765026539765834e-05,
+ "loss": 46.0,
+ "step": 125
+ },
+ {
+ "epoch": 0.01333968556455455,
+ "grad_norm": 0.0003328875172883272,
+ "learning_rate": 3.298534127791785e-05,
+ "loss": 46.0,
+ "step": 126
+ },
+ {
+ "epoch": 0.013445556084908158,
+ "grad_norm": 0.00018043341697193682,
+ "learning_rate": 3.221030765387417e-05,
+ "loss": 46.0,
+ "step": 127
+ },
+ {
+ "epoch": 0.013551426605261764,
+ "grad_norm": 0.0003149941621813923,
+ "learning_rate": 3.144013755408895e-05,
+ "loss": 46.0,
+ "step": 128
+ },
+ {
+ "epoch": 0.013657297125615373,
+ "grad_norm": 0.00025322509463876486,
+ "learning_rate": 3.0675041535377405e-05,
+ "loss": 46.0,
+ "step": 129
+ },
+ {
+ "epoch": 0.01376316764596898,
+ "grad_norm": 0.00027760243392549455,
+ "learning_rate": 2.991522876735154e-05,
+ "loss": 46.0,
+ "step": 130
+ },
+ {
+ "epoch": 0.013869038166322588,
+ "grad_norm": 0.00023040082305669785,
+ "learning_rate": 2.916090697523549e-05,
+ "loss": 46.0,
+ "step": 131
+ },
+ {
+ "epoch": 0.013974908686676194,
+ "grad_norm": 0.0002616914571262896,
+ "learning_rate": 2.8412282383075363e-05,
+ "loss": 46.0,
+ "step": 132
+ },
+ {
+ "epoch": 0.014080779207029803,
+ "grad_norm": 0.00021648740221280605,
+ "learning_rate": 2.766955965735968e-05,
+ "loss": 46.0,
+ "step": 133
+ },
+ {
+ "epoch": 0.01418664972738341,
+ "grad_norm": 0.00030988408252596855,
+ "learning_rate": 2.693294185106562e-05,
+ "loss": 46.0,
+ "step": 134
+ },
+ {
+ "epoch": 0.014292520247737018,
+ "grad_norm": 0.000268768664682284,
+ "learning_rate": 2.6202630348146324e-05,
+ "loss": 46.0,
+ "step": 135
+ },
+ {
+ "epoch": 0.014398390768090625,
+ "grad_norm": 0.0004775602137669921,
+ "learning_rate": 2.547882480847461e-05,
+ "loss": 46.0,
+ "step": 136
+ },
+ {
+ "epoch": 0.014504261288444233,
+ "grad_norm": 0.0006808377802371979,
+ "learning_rate": 2.476172311325783e-05,
+ "loss": 46.0,
+ "step": 137
+ },
+ {
+ "epoch": 0.01461013180879784,
+ "grad_norm": 0.0011323445942252874,
+ "learning_rate": 2.405152131093926e-05,
+ "loss": 46.0,
+ "step": 138
+ },
+ {
+ "epoch": 0.014716002329151448,
+ "grad_norm": 0.00297493115067482,
+ "learning_rate": 2.3348413563600325e-05,
+ "loss": 46.0,
+ "step": 139
+ },
+ {
+ "epoch": 0.014821872849505055,
+ "grad_norm": 0.0038632601499557495,
+ "learning_rate": 2.2652592093878666e-05,
+ "loss": 46.0,
+ "step": 140
+ },
+ {
+ "epoch": 0.014927743369858663,
+ "grad_norm": 0.003049037652090192,
+ "learning_rate": 2.196424713241637e-05,
+ "loss": 46.0,
+ "step": 141
+ },
+ {
+ "epoch": 0.01503361389021227,
+ "grad_norm": 0.004032890312373638,
+ "learning_rate": 2.128356686585282e-05,
+ "loss": 46.0,
+ "step": 142
+ },
+ {
+ "epoch": 0.015139484410565878,
+ "grad_norm": 0.003995910286903381,
+ "learning_rate": 2.061073738537635e-05,
+ "loss": 46.0,
+ "step": 143
+ },
+ {
+ "epoch": 0.015245354930919485,
+ "grad_norm": 0.003903838573023677,
+ "learning_rate": 1.9945942635848748e-05,
+ "loss": 46.0,
+ "step": 144
+ },
+ {
+ "epoch": 0.015351225451273093,
+ "grad_norm": 0.0038895534817129374,
+ "learning_rate": 1.928936436551661e-05,
+ "loss": 46.0,
+ "step": 145
+ },
+ {
+ "epoch": 0.0154570959716267,
+ "grad_norm": 0.003806990571320057,
+ "learning_rate": 1.8641182076323148e-05,
+ "loss": 46.0,
+ "step": 146
+ },
+ {
+ "epoch": 0.015562966491980308,
+ "grad_norm": 0.003817790187895298,
+ "learning_rate": 1.800157297483417e-05,
+ "loss": 46.0,
+ "step": 147
+ },
+ {
+ "epoch": 0.015668837012333917,
+ "grad_norm": 0.003742322325706482,
+ "learning_rate": 1.7370711923791567e-05,
+ "loss": 46.0,
+ "step": 148
+ },
+ {
+ "epoch": 0.01577470753268752,
+ "grad_norm": 0.0038418227341026068,
+ "learning_rate": 1.6748771394307585e-05,
+ "loss": 46.0,
+ "step": 149
+ },
+ {
+ "epoch": 0.01588057805304113,
+ "grad_norm": 0.0033004693686962128,
+ "learning_rate": 1.6135921418712956e-05,
+ "loss": 46.0,
+ "step": 150
+ },
+ {
+ "epoch": 0.01588057805304113,
+ "eval_loss": 11.5,
+ "eval_runtime": 72.372,
+ "eval_samples_per_second": 219.809,
+ "eval_steps_per_second": 54.952,
+ "step": 150
  }
  ],
  "logging_steps": 1,
@@ -387,7 +1103,7 @@
  "early_stopping_threshold": 0.0
  },
  "attributes": {
- "early_stopping_patience_counter": 0
+ "early_stopping_patience_counter": 2
  }
  },
  "TrainerControl": {
@@ -401,7 +1117,7 @@
  "attributes": {}
  }
  },
- "total_flos": 8079813771264.0,
+ "total_flos": 24239441313792.0,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null