Commit ac59fd8 (verified) by jssky
Parent(s): 7ca1a55

Training in progress, step 100, checkpoint
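For context, a minimal sketch (not part of this commit) of how the updated trainer_state.json in this checkpoint can be inspected; the path is an assumption (a local clone of the repository), and the noted values reflect the state after this commit.

```python
# Hypothetical inspection snippet, not part of this commit.
# Assumes a local clone of the repository with last-checkpoint/ present.
import json

with open("last-checkpoint/trainer_state.json") as f:
    state = json.load(f)

# After this commit: global_step 100, epoch ~0.0588, best_metric still 11.5 from checkpoint-50.
print(state["global_step"], state["epoch"], state["best_metric"])
# log_history grows by 51 entries here: steps 51-100 plus one eval entry at step 100.
print(len(state["log_history"]), "log entries")
```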

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:461591db7872e4ecdac6196c56f8b6aab48af94f0ab6069bd761f487d07ed4c1
+oid sha256:04b362914b5c7133654dd72d57d63919b5339b71eccdd33826214a9ef17af52e
 size 34456
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:89078b1e97eb47b64ad0e935c8987fc35837152950f33d05c08bc6270bc524dc
+oid sha256:ea2b0e35c088e4605efffb6802b52801b63d56dafc30f79b9910fe1b00ec7949
 size 73222
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:1e4fb6b0178c3bc83b9dd957e80de85a87e5d5a5ed1d8c5a5e294f4a1ecab8c7
+oid sha256:a741ae0fb1c2fb6cc8d6684f5de03756005725769f6fd5dd9a2e8095d556c2c1
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:1d359eb5d29e75fb2bbe5b7026981da69b95b8ad1fea469302d13cde104f7e8a
+oid sha256:0ddb9588ea654e56e83effcf81a2bc03480954babcf6415cb44d41d3bfb8039f
 size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": 11.5,
   "best_model_checkpoint": "miner_id_24/checkpoint-50",
-  "epoch": 0.029394473838918283,
+  "epoch": 0.058788947677836566,
   "eval_steps": 50,
-  "global_step": 50,
+  "global_step": 100,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -365,6 +365,364 @@
       "eval_samples_per_second": 208.315,
       "eval_steps_per_second": 52.133,
       "step": 50
+    },
+    {
+      "epoch": 0.029982363315696647,
+      "grad_norm": 8.085326408036053e-05,
+      "learning_rate": 8.894386393810563e-05,
+      "loss": 11.5,
+      "step": 51
+    },
+    {
+      "epoch": 0.030570252792475015,
+      "grad_norm": 6.59500656183809e-05,
+      "learning_rate": 8.842005554284296e-05,
+      "loss": 11.5,
+      "step": 52
+    },
+    {
+      "epoch": 0.03115814226925338,
+      "grad_norm": 9.271319868275896e-05,
+      "learning_rate": 8.788574348801675e-05,
+      "loss": 11.5,
+      "step": 53
+    },
+    {
+      "epoch": 0.031746031746031744,
+      "grad_norm": 7.636727241333574e-05,
+      "learning_rate": 8.73410738492077e-05,
+      "loss": 11.5,
+      "step": 54
+    },
+    {
+      "epoch": 0.03233392122281011,
+      "grad_norm": 8.564830932300538e-05,
+      "learning_rate": 8.678619553365659e-05,
+      "loss": 11.5,
+      "step": 55
+    },
+    {
+      "epoch": 0.03292181069958848,
+      "grad_norm": 6.765264697605744e-05,
+      "learning_rate": 8.622126023955446e-05,
+      "loss": 11.5,
+      "step": 56
+    },
+    {
+      "epoch": 0.03350970017636684,
+      "grad_norm": 8.938118844525889e-05,
+      "learning_rate": 8.564642241456986e-05,
+      "loss": 11.5,
+      "step": 57
+    },
+    {
+      "epoch": 0.03409758965314521,
+      "grad_norm": 7.598628872074187e-05,
+      "learning_rate": 8.506183921362443e-05,
+      "loss": 11.5,
+      "step": 58
+    },
+    {
+      "epoch": 0.03468547912992358,
+      "grad_norm": 9.138462337432429e-05,
+      "learning_rate": 8.44676704559283e-05,
+      "loss": 11.5,
+      "step": 59
+    },
+    {
+      "epoch": 0.03527336860670194,
+      "grad_norm": 9.17932266020216e-05,
+      "learning_rate": 8.386407858128706e-05,
+      "loss": 11.5,
+      "step": 60
+    },
+    {
+      "epoch": 0.035861258083480306,
+      "grad_norm": 0.00011113197251688689,
+      "learning_rate": 8.32512286056924e-05,
+      "loss": 11.5,
+      "step": 61
+    },
+    {
+      "epoch": 0.036449147560258674,
+      "grad_norm": 0.0001215208467328921,
+      "learning_rate": 8.262928807620843e-05,
+      "loss": 11.5,
+      "step": 62
+    },
+    {
+      "epoch": 0.037037037037037035,
+      "grad_norm": 0.00011370470747351646,
+      "learning_rate": 8.199842702516583e-05,
+      "loss": 11.5,
+      "step": 63
+    },
+    {
+      "epoch": 0.0376249265138154,
+      "grad_norm": 0.00010293946252204478,
+      "learning_rate": 8.135881792367686e-05,
+      "loss": 11.5,
+      "step": 64
+    },
+    {
+      "epoch": 0.03821281599059377,
+      "grad_norm": 0.00010239002585876733,
+      "learning_rate": 8.07106356344834e-05,
+      "loss": 11.5,
+      "step": 65
+    },
+    {
+      "epoch": 0.03880070546737213,
+      "grad_norm": 0.00011284730135230348,
+      "learning_rate": 8.005405736415126e-05,
+      "loss": 11.5,
+      "step": 66
+    },
+    {
+      "epoch": 0.0393885949441505,
+      "grad_norm": 0.00010800938616739586,
+      "learning_rate": 7.938926261462366e-05,
+      "loss": 11.5,
+      "step": 67
+    },
+    {
+      "epoch": 0.03997648442092887,
+      "grad_norm": 0.00012077321298420429,
+      "learning_rate": 7.871643313414718e-05,
+      "loss": 11.5,
+      "step": 68
+    },
+    {
+      "epoch": 0.04056437389770723,
+      "grad_norm": 9.716762724565342e-05,
+      "learning_rate": 7.803575286758364e-05,
+      "loss": 11.5,
+      "step": 69
+    },
+    {
+      "epoch": 0.0411522633744856,
+      "grad_norm": 9.754186612553895e-05,
+      "learning_rate": 7.734740790612136e-05,
+      "loss": 11.5,
+      "step": 70
+    },
+    {
+      "epoch": 0.041740152851263965,
+      "grad_norm": 0.00013152298924978822,
+      "learning_rate": 7.66515864363997e-05,
+      "loss": 11.5,
+      "step": 71
+    },
+    {
+      "epoch": 0.042328042328042326,
+      "grad_norm": 0.00013756412954535335,
+      "learning_rate": 7.594847868906076e-05,
+      "loss": 11.5,
+      "step": 72
+    },
+    {
+      "epoch": 0.042915931804820694,
+      "grad_norm": 0.00011569081107154489,
+      "learning_rate": 7.52382768867422e-05,
+      "loss": 11.5,
+      "step": 73
+    },
+    {
+      "epoch": 0.04350382128159906,
+      "grad_norm": 0.00012273195898160338,
+      "learning_rate": 7.452117519152542e-05,
+      "loss": 11.5,
+      "step": 74
+    },
+    {
+      "epoch": 0.04409171075837742,
+      "grad_norm": 0.00012811228225473315,
+      "learning_rate": 7.379736965185368e-05,
+      "loss": 11.5,
+      "step": 75
+    },
+    {
+      "epoch": 0.04467960023515579,
+      "grad_norm": 0.00012965736095793545,
+      "learning_rate": 7.30670581489344e-05,
+      "loss": 11.5,
+      "step": 76
+    },
+    {
+      "epoch": 0.04526748971193416,
+      "grad_norm": 0.00012737682845909148,
+      "learning_rate": 7.233044034264034e-05,
+      "loss": 11.5,
+      "step": 77
+    },
+    {
+      "epoch": 0.04585537918871252,
+      "grad_norm": 0.00015797096421010792,
+      "learning_rate": 7.158771761692464e-05,
+      "loss": 11.5,
+      "step": 78
+    },
+    {
+      "epoch": 0.04644326866549089,
+      "grad_norm": 0.00022033862478565425,
+      "learning_rate": 7.083909302476453e-05,
+      "loss": 11.5,
+      "step": 79
+    },
+    {
+      "epoch": 0.047031158142269255,
+      "grad_norm": 0.0001866580860223621,
+      "learning_rate": 7.008477123264848e-05,
+      "loss": 11.5,
+      "step": 80
+    },
+    {
+      "epoch": 0.047619047619047616,
+      "grad_norm": 0.00018126929353456944,
+      "learning_rate": 6.932495846462261e-05,
+      "loss": 11.5,
+      "step": 81
+    },
+    {
+      "epoch": 0.048206937095825984,
+      "grad_norm": 0.00021677513723261654,
+      "learning_rate": 6.855986244591104e-05,
+      "loss": 11.5,
+      "step": 82
+    },
+    {
+      "epoch": 0.04879482657260435,
+      "grad_norm": 0.0002002787950914353,
+      "learning_rate": 6.778969234612584e-05,
+      "loss": 11.5,
+      "step": 83
+    },
+    {
+      "epoch": 0.04938271604938271,
+      "grad_norm": 0.0001375633291900158,
+      "learning_rate": 6.701465872208216e-05,
+      "loss": 11.5,
+      "step": 84
+    },
+    {
+      "epoch": 0.04997060552616108,
+      "grad_norm": 0.0001406855444656685,
+      "learning_rate": 6.623497346023418e-05,
+      "loss": 11.5,
+      "step": 85
+    },
+    {
+      "epoch": 0.05055849500293945,
+      "grad_norm": 0.00016629204037599266,
+      "learning_rate": 6.545084971874738e-05,
+      "loss": 11.5,
+      "step": 86
+    },
+    {
+      "epoch": 0.05114638447971781,
+      "grad_norm": 0.0001735340483719483,
+      "learning_rate": 6.466250186922325e-05,
+      "loss": 11.5,
+      "step": 87
+    },
+    {
+      "epoch": 0.05173427395649618,
+      "grad_norm": 0.0001282371231354773,
+      "learning_rate": 6.387014543809223e-05,
+      "loss": 11.5,
+      "step": 88
+    },
+    {
+      "epoch": 0.052322163433274546,
+      "grad_norm": 0.00019439813331700861,
+      "learning_rate": 6.307399704769099e-05,
+      "loss": 11.5,
+      "step": 89
+    },
+    {
+      "epoch": 0.05291005291005291,
+      "grad_norm": 0.0002183008036809042,
+      "learning_rate": 6.227427435703997e-05,
+      "loss": 11.5,
+      "step": 90
+    },
+    {
+      "epoch": 0.053497942386831275,
+      "grad_norm": 0.0002012772747548297,
+      "learning_rate": 6.147119600233758e-05,
+      "loss": 11.5,
+      "step": 91
+    },
+    {
+      "epoch": 0.05408583186360964,
+      "grad_norm": 0.00022741183056496084,
+      "learning_rate": 6.066498153718735e-05,
+      "loss": 11.5,
+      "step": 92
+    },
+    {
+      "epoch": 0.054673721340388004,
+      "grad_norm": 0.00018610736879054457,
+      "learning_rate": 5.985585137257401e-05,
+      "loss": 11.5,
+      "step": 93
+    },
+    {
+      "epoch": 0.05526161081716637,
+      "grad_norm": 0.00023018394131213427,
+      "learning_rate": 5.90440267166055e-05,
+      "loss": 11.5,
+      "step": 94
+    },
+    {
+      "epoch": 0.05584950029394474,
+      "grad_norm": 0.0001871351123554632,
+      "learning_rate": 5.8229729514036705e-05,
+      "loss": 11.5,
+      "step": 95
+    },
+    {
+      "epoch": 0.0564373897707231,
+      "grad_norm": 0.00014184312021825463,
+      "learning_rate": 5.74131823855921e-05,
+      "loss": 11.5,
+      "step": 96
+    },
+    {
+      "epoch": 0.05702527924750147,
+      "grad_norm": 0.00019797985441982746,
+      "learning_rate": 5.6594608567103456e-05,
+      "loss": 11.5,
+      "step": 97
+    },
+    {
+      "epoch": 0.05761316872427984,
+      "grad_norm": 0.0002441562246531248,
+      "learning_rate": 5.577423184847932e-05,
+      "loss": 11.5,
+      "step": 98
+    },
+    {
+      "epoch": 0.0582010582010582,
+      "grad_norm": 0.0001735905243549496,
+      "learning_rate": 5.495227651252315e-05,
+      "loss": 11.5,
+      "step": 99
+    },
+    {
+      "epoch": 0.058788947677836566,
+      "grad_norm": 0.00039850923349149525,
+      "learning_rate": 5.4128967273616625e-05,
+      "loss": 11.5,
+      "step": 100
+    },
+    {
+      "epoch": 0.058788947677836566,
+      "eval_loss": 11.5,
+      "eval_runtime": 14.1119,
+      "eval_samples_per_second": 203.02,
+      "eval_steps_per_second": 50.808,
+      "step": 100
     }
   ],
   "logging_steps": 1,
@@ -379,7 +737,7 @@
         "early_stopping_threshold": 0.0
       },
       "attributes": {
-        "early_stopping_patience_counter": 0
+        "early_stopping_patience_counter": 1
      }
    },
    "TrainerControl": {
@@ -393,7 +751,7 @@
      "attributes": {}
    }
  },
-  "total_flos": 9285756125184.0,
+  "total_flos": 18531314171904.0,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null