tarabukinivan committed
Commit e479265 · verified · 1 Parent(s): 9cb1ae9

Training in progress, step 30, checkpoint

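All files in this commit live under last-checkpoint/ in the model repo. A minimal sketch for pulling exactly this revision with huggingface_hub (the repo id below is a placeholder; only the commit hash e479265 comes from this page):

# Sketch: fetch only the checkpoint files touched by commit e479265.
# The repo_id is a placeholder; this page does not state the repository name.
from huggingface_hub import snapshot_download

local_dir = snapshot_download(
    repo_id="tarabukinivan/<repo-name>",   # hypothetical placeholder
    revision="e479265",                    # the commit shown above
    allow_patterns=["last-checkpoint/*"],  # adapter, optimizer, scheduler, RNG state, trainer state
)
print("checkpoint files downloaded to", local_dir)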
last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:b1ba11c3907ff10759f1f55fb0e5e40ac436fd17caff80cc794a59b2888a4fe4
+oid sha256:8ed090d487cdc9b5aa3341481357254b238cfd291f94ab79b0f143526da57202
 size 159967880
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:ef656700a5944accddc894e83b805b77aa296b66c0db854deb35c56279f1e958
+oid sha256:72c21c0ece7af4b4a9cf8ca83046e312bc388078c867b0c13c7a7fce691a9e90
 size 320194002
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:cb665841853cc42c910cb63819d00b335b3d08b5c5ab669fcca3e270facf08f4
+oid sha256:cb8f742328bfffa68d3697fe4655cdfe31ad19b761f081134e1ec9c8c44b0fce
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:5d605401690d7669ff16aeaca6820cbd8d0d605afe748c51045ce90888810a22
+oid sha256:baae720e33260fead254c87141d85e241b839ae924033bfd9652fb777f3f1bf0
 size 1064
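The four files above are Git LFS pointers: the repo itself stores only a spec version, a sha256 oid, and a byte size, while the real blobs live in LFS storage. A small sketch for checking a downloaded blob against its pointer (the paths in the usage comment are hypothetical; Python 3.9+ for str.removeprefix):

# Sketch: check a downloaded blob against the oid/size in its LFS pointer.
import hashlib
import os

def parse_lfs_pointer(pointer_text: str) -> dict:
    """Split the 'key value' lines of a Git LFS pointer into a dict."""
    fields = {}
    for line in pointer_text.strip().splitlines():
        key, _, value = line.partition(" ")
        fields[key] = value
    return fields

def verify_blob(pointer_text: str, blob_path: str) -> bool:
    fields = parse_lfs_pointer(pointer_text)
    expected_oid = fields["oid"].removeprefix("sha256:")
    if os.path.getsize(blob_path) != int(fields["size"]):
        return False
    digest = hashlib.sha256()
    with open(blob_path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            digest.update(chunk)
    return digest.hexdigest() == expected_oid

# Hypothetical usage:
# pointer = "version https://git-lfs.github.com/spec/v1\noid sha256:8ed0...\nsize 159967880"
# print(verify_blob(pointer, "last-checkpoint/adapter_model.safetensors"))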
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.0008633155634213196,
+  "epoch": 0.0012949733451319795,
   "eval_steps": 13,
-  "global_step": 20,
+  "global_step": 30,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -65,6 +65,42 @@
       "learning_rate": 0.00018090169943749476,
       "loss": 6.7296,
       "step": 18
+    },
+    {
+      "epoch": 0.0009064813415923856,
+      "grad_norm": 3.6164493560791016,
+      "learning_rate": 0.00016494480483301836,
+      "loss": 6.2438,
+      "step": 21
+    },
+    {
+      "epoch": 0.0010359786761055835,
+      "grad_norm": 4.651533603668213,
+      "learning_rate": 0.00014539904997395468,
+      "loss": 6.3792,
+      "step": 24
+    },
+    {
+      "epoch": 0.0011223102324477154,
+      "eval_loss": 1.5663001537322998,
+      "eval_runtime": 956.4981,
+      "eval_samples_per_second": 10.199,
+      "eval_steps_per_second": 5.1,
+      "step": 26
+    },
+    {
+      "epoch": 0.0011654760106187814,
+      "grad_norm": 3.52091383934021,
+      "learning_rate": 0.00012334453638559057,
+      "loss": 6.5441,
+      "step": 27
+    },
+    {
+      "epoch": 0.0012949733451319795,
+      "grad_norm": 4.742680549621582,
+      "learning_rate": 0.0001,
+      "loss": 6.3389,
+      "step": 30
     }
   ],
   "logging_steps": 3,
@@ -84,7 +120,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 6534646443540480.0,
+  "total_flos": 9801969665310720.0,
   "train_batch_size": 2,
   "trial_name": null,
   "trial_params": null
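trainer_state.json is plain JSON, so the logged metrics above (training loss every logging_steps=3 steps, eval loss every eval_steps=13 steps) can be recovered directly. A minimal sketch, assuming the file has been downloaded to last-checkpoint/ locally:

# Sketch: recover the logged metrics from the checkpoint's trainer_state.json.
import json

with open("last-checkpoint/trainer_state.json") as f:
    state = json.load(f)

print(f"resumable from global_step {state['global_step']} (epoch {state['epoch']})")

# log_history mixes training logs and evaluation logs,
# distinguishable by whether they carry "loss" or "eval_loss".
for entry in state["log_history"]:
    if "loss" in entry:
        print(f"step {entry['step']:>3}  loss {entry['loss']:.4f}  lr {entry['learning_rate']:.2e}")
    elif "eval_loss" in entry:
        print(f"step {entry['step']:>3}  eval_loss {entry['eval_loss']:.4f}")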