cimol committed
Commit 90d98d4 · verified · 1 Parent(s): f98ac44

Training in progress, step 600, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:d95501e2faf37a10f7cb7244f116d5a577a5f4513b331af0fba71fd09a3745e0
+ oid sha256:2ca5cb65a0c8e41dd5840a249884f0a782952af47dbd3f265b29b20b9d8b2566
  size 335604696
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:b7a9674e60a0ad3042fddf59880d19e9e2cbd62559bc2b394764b467f3fd6380
+ oid sha256:143ee8a41179d4f9d6ccf1444b100a5f799c6768d739a8343243896814935414
  size 170920532
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:17f8f9f4387f81b40a6eb494a35dbbd9471508cfba03a93ad0463a107021279f
+ oid sha256:4e50df1ff4672c4a578ad9199d83d6e53829b74f0e3358db91be8155fd418448
  size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:70f0f789b56065211b8c0b1a5e2a97dd0b5b08a816bbbe288fb6f9c677282af9
+ oid sha256:5ad54995b081fae25638228c5d9c8f38ca277e5c5ad00bc3e49897b543f84405
  size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
  {
- "best_metric": 2.2555510997772217,
- "best_model_checkpoint": "miner_id_24/checkpoint-450",
- "epoch": 0.31452035645640397,
+ "best_metric": 2.206881284713745,
+ "best_model_checkpoint": "miner_id_24/checkpoint-600",
+ "epoch": 0.4193604752752053,
  "eval_steps": 50,
- "global_step": 450,
+ "global_step": 600,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
@@ -402,6 +402,135 @@
  "eval_samples_per_second": 13.578,
  "eval_steps_per_second": 3.397,
  "step": 450
+ },
+ {
+ "epoch": 0.32150969771099075,
+ "grad_norm": 6.887442111968994,
+ "learning_rate": 2.6522584913693294e-05,
+ "loss": 9.1238,
+ "step": 460
+ },
+ {
+ "epoch": 0.3284990389655775,
+ "grad_norm": 10.633115768432617,
+ "learning_rate": 2.301660165700936e-05,
+ "loss": 8.9099,
+ "step": 470
+ },
+ {
+ "epoch": 0.33548838022016425,
+ "grad_norm": 11.0469331741333,
+ "learning_rate": 1.9728836206903656e-05,
+ "loss": 8.8348,
+ "step": 480
+ },
+ {
+ "epoch": 0.34247772147475103,
+ "grad_norm": 11.225386619567871,
+ "learning_rate": 1.6668608091748495e-05,
+ "loss": 8.9982,
+ "step": 490
+ },
+ {
+ "epoch": 0.34946706272933775,
+ "grad_norm": 24.77507972717285,
+ "learning_rate": 1.3844591860619383e-05,
+ "loss": 9.5045,
+ "step": 500
+ },
+ {
+ "epoch": 0.34946706272933775,
+ "eval_loss": 2.2151849269866943,
+ "eval_runtime": 177.4559,
+ "eval_samples_per_second": 13.581,
+ "eval_steps_per_second": 3.398,
+ "step": 500
+ },
+ {
+ "epoch": 0.35645640398392453,
+ "grad_norm": 8.592710494995117,
+ "learning_rate": 1.1264792494342857e-05,
+ "loss": 8.9247,
+ "step": 510
+ },
+ {
+ "epoch": 0.36344574523851125,
+ "grad_norm": 10.218486785888672,
+ "learning_rate": 8.936522714508678e-06,
+ "loss": 8.7595,
+ "step": 520
+ },
+ {
+ "epoch": 0.37043508649309803,
+ "grad_norm": 13.771211624145508,
+ "learning_rate": 6.866382254766157e-06,
+ "loss": 8.7061,
+ "step": 530
+ },
+ {
+ "epoch": 0.37742442774768475,
+ "grad_norm": 12.511350631713867,
+ "learning_rate": 5.060239153161872e-06,
+ "loss": 8.9121,
+ "step": 540
+ },
+ {
+ "epoch": 0.38441376900227153,
+ "grad_norm": 19.870609283447266,
+ "learning_rate": 3.5232131185484076e-06,
+ "loss": 9.5899,
+ "step": 550
+ },
+ {
+ "epoch": 0.38441376900227153,
+ "eval_loss": 2.205038070678711,
+ "eval_runtime": 177.3353,
+ "eval_samples_per_second": 13.59,
+ "eval_steps_per_second": 3.4,
+ "step": 550
+ },
+ {
+ "epoch": 0.3914031102568583,
+ "grad_norm": 7.469477653503418,
+ "learning_rate": 2.259661018213333e-06,
+ "loss": 9.0318,
+ "step": 560
+ },
+ {
+ "epoch": 0.39839245151144503,
+ "grad_norm": 9.499835968017578,
+ "learning_rate": 1.2731645278655445e-06,
+ "loss": 8.7828,
+ "step": 570
+ },
+ {
+ "epoch": 0.4053817927660318,
+ "grad_norm": 14.364631652832031,
+ "learning_rate": 5.665199789862907e-07,
+ "loss": 8.7563,
+ "step": 580
+ },
+ {
+ "epoch": 0.41237113402061853,
+ "grad_norm": 12.063446998596191,
+ "learning_rate": 1.4173043232380557e-07,
+ "loss": 8.691,
+ "step": 590
+ },
+ {
+ "epoch": 0.4193604752752053,
+ "grad_norm": 20.08045768737793,
+ "learning_rate": 0.0,
+ "loss": 9.6758,
+ "step": 600
+ },
+ {
+ "epoch": 0.4193604752752053,
+ "eval_loss": 2.206881284713745,
+ "eval_runtime": 177.3758,
+ "eval_samples_per_second": 13.587,
+ "eval_steps_per_second": 3.4,
+ "step": 600
  }
  ],
  "logging_steps": 10,
@@ -425,12 +554,12 @@
  "should_evaluate": false,
  "should_log": false,
  "should_save": true,
- "should_training_stop": false
+ "should_training_stop": true
  },
  "attributes": {}
  }
  },
- "total_flos": 6.365274005569536e+17,
+ "total_flos": 8.487032007426048e+17,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null