lesso05 committed (verified)
Commit 042e006 · Parent(s): ba3e638

Training in progress, step 75, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:13b612a7965f275dc5396b23fd0bd952952c11c2200906b97934bf059371a7f7
+oid sha256:a7b2726c067ceb015977b793b11e76c3dd71c19e7f1770c1abf477e6dcc87497
 size 159712
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:8c3770920a8fd14d121f571323bd06444bc617f6d9c5508d08f99888c54f4d79
+oid sha256:80aafa1870be89def46775c0f33f2aa72591701074aae27a7137bfd15797bcc8
 size 336202
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:a080d3277fb6ccb5ed989cb90ca2c5dbb10923ca8748d5a82c16ca3dd1e9dbd3
+oid sha256:7390d69c035960909a3614dd1bb0738527d72c78eaafed1ea04f6c83d7885b4d
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:8d8615f1aeccd0f9873fc0cf2c0322fde20ac202421c015654a471730a5fa755
+oid sha256:feed204db996a9f7ac3167cc4686c04dccd6605b9b2a7d539da6ff57c605983d
 size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.08163265306122448,
+  "epoch": 0.12244897959183673,
   "eval_steps": 9,
-  "global_step": 50,
+  "global_step": 75,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -405,6 +405,205 @@
       "learning_rate": 1.1736481776669307e-05,
       "loss": 10.8299,
       "step": 50
+    },
+    {
+      "epoch": 0.08326530612244898,
+      "grad_norm": 0.4201260507106781,
+      "learning_rate": 1.1391731009600655e-05,
+      "loss": 10.8119,
+      "step": 51
+    },
+    {
+      "epoch": 0.08489795918367347,
+      "grad_norm": 0.39234814047813416,
+      "learning_rate": 1.1045284632676535e-05,
+      "loss": 10.8114,
+      "step": 52
+    },
+    {
+      "epoch": 0.08653061224489796,
+      "grad_norm": 0.39883697032928467,
+      "learning_rate": 1.0697564737441254e-05,
+      "loss": 10.8213,
+      "step": 53
+    },
+    {
+      "epoch": 0.08816326530612245,
+      "grad_norm": 0.3016551434993744,
+      "learning_rate": 1.0348994967025012e-05,
+      "loss": 10.8257,
+      "step": 54
+    },
+    {
+      "epoch": 0.08816326530612245,
+      "eval_loss": 10.823841094970703,
+      "eval_runtime": 3.2735,
+      "eval_samples_per_second": 157.631,
+      "eval_steps_per_second": 19.857,
+      "step": 54
+    },
+    {
+      "epoch": 0.08979591836734693,
+      "grad_norm": 0.3536202907562256,
+      "learning_rate": 1e-05,
+      "loss": 10.8218,
+      "step": 55
+    },
+    {
+      "epoch": 0.09142857142857143,
+      "grad_norm": 0.3456736207008362,
+      "learning_rate": 9.651005032974994e-06,
+      "loss": 10.8277,
+      "step": 56
+    },
+    {
+      "epoch": 0.09306122448979592,
+      "grad_norm": 0.3298264145851135,
+      "learning_rate": 9.302435262558748e-06,
+      "loss": 10.8243,
+      "step": 57
+    },
+    {
+      "epoch": 0.0946938775510204,
+      "grad_norm": 0.4061192274093628,
+      "learning_rate": 8.954715367323468e-06,
+      "loss": 10.8157,
+      "step": 58
+    },
+    {
+      "epoch": 0.0963265306122449,
+      "grad_norm": 0.404811829328537,
+      "learning_rate": 8.60826899039935e-06,
+      "loss": 10.8105,
+      "step": 59
+    },
+    {
+      "epoch": 0.09795918367346938,
+      "grad_norm": 0.40592458844184875,
+      "learning_rate": 8.263518223330698e-06,
+      "loss": 10.8179,
+      "step": 60
+    },
+    {
+      "epoch": 0.09959183673469388,
+      "grad_norm": 0.3541053831577301,
+      "learning_rate": 7.92088309182241e-06,
+      "loss": 10.8242,
+      "step": 61
+    },
+    {
+      "epoch": 0.10122448979591837,
+      "grad_norm": 0.322318971157074,
+      "learning_rate": 7.580781044003324e-06,
+      "loss": 10.827,
+      "step": 62
+    },
+    {
+      "epoch": 0.10285714285714286,
+      "grad_norm": 0.36938780546188354,
+      "learning_rate": 7.243626441830009e-06,
+      "loss": 10.8152,
+      "step": 63
+    },
+    {
+      "epoch": 0.10285714285714286,
+      "eval_loss": 10.82233715057373,
+      "eval_runtime": 3.9005,
+      "eval_samples_per_second": 132.29,
+      "eval_steps_per_second": 16.664,
+      "step": 63
+    },
+    {
+      "epoch": 0.10448979591836735,
+      "grad_norm": 0.3522907495498657,
+      "learning_rate": 6.909830056250527e-06,
+      "loss": 10.813,
+      "step": 64
+    },
+    {
+      "epoch": 0.10612244897959183,
+      "grad_norm": 0.4052838385105133,
+      "learning_rate": 6.579798566743314e-06,
+      "loss": 10.8225,
+      "step": 65
+    },
+    {
+      "epoch": 0.10775510204081633,
+      "grad_norm": 0.3478013575077057,
+      "learning_rate": 6.25393406584088e-06,
+      "loss": 10.8207,
+      "step": 66
+    },
+    {
+      "epoch": 0.10938775510204081,
+      "grad_norm": 0.3661433160305023,
+      "learning_rate": 5.932633569242e-06,
+      "loss": 10.8141,
+      "step": 67
+    },
+    {
+      "epoch": 0.1110204081632653,
+      "grad_norm": 0.30546510219573975,
+      "learning_rate": 5.616288532109225e-06,
+      "loss": 10.8194,
+      "step": 68
+    },
+    {
+      "epoch": 0.1126530612244898,
+      "grad_norm": 0.3616853952407837,
+      "learning_rate": 5.305284372141095e-06,
+      "loss": 10.8135,
+      "step": 69
+    },
+    {
+      "epoch": 0.11428571428571428,
+      "grad_norm": 0.38204777240753174,
+      "learning_rate": 5.000000000000003e-06,
+      "loss": 10.8198,
+      "step": 70
+    },
+    {
+      "epoch": 0.11591836734693878,
+      "grad_norm": 0.347457617521286,
+      "learning_rate": 4.700807357667953e-06,
+      "loss": 10.8235,
+      "step": 71
+    },
+    {
+      "epoch": 0.11755102040816326,
+      "grad_norm": 0.30413001775741577,
+      "learning_rate": 4.408070965292534e-06,
+      "loss": 10.8379,
+      "step": 72
+    },
+    {
+      "epoch": 0.11755102040816326,
+      "eval_loss": 10.821290969848633,
+      "eval_runtime": 3.9229,
+      "eval_samples_per_second": 131.536,
+      "eval_steps_per_second": 16.569,
+      "step": 72
+    },
+    {
+      "epoch": 0.11918367346938775,
+      "grad_norm": 0.3408275246620178,
+      "learning_rate": 4.12214747707527e-06,
+      "loss": 10.823,
+      "step": 73
+    },
+    {
+      "epoch": 0.12081632653061225,
+      "grad_norm": 0.37985777854919434,
+      "learning_rate": 3.8433852467434175e-06,
+      "loss": 10.8096,
+      "step": 74
+    },
+    {
+      "epoch": 0.12244897959183673,
+      "grad_norm": 0.34624186158180237,
+      "learning_rate": 3.5721239031346067e-06,
+      "loss": 10.8258,
+      "step": 75
     }
   ],
   "logging_steps": 1,
@@ -424,7 +623,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 16501859942400.0,
+  "total_flos": 24752789913600.0,
   "train_batch_size": 8,
   "trial_name": null,
   "trial_params": null