ardaspear committed
Commit d8383f6 · verified · 1 Parent(s): 880dcd7

Training in progress, step 25, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:98e73bbb56ed33df02becfea3375596f6d64b44a1dbbf2432e64797751172d8e
+oid sha256:111764613685ffc037603e88edea7c8fa029974a28056f2a0b4441f6a62cd001
 size 159967880
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:1271ea4d6fa8467eced8f934f22c530c32f8468a964095e9dd5cc3ea3dcf8996
+oid sha256:94951bde0f3f2d33e885e6c14f4e1ab0c5117bce5c17dd7b8283d7d2a2c03889
 size 81730196
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:8532b81b442a52e7641152d52819f99f24bb427b98bc8d1a82e25a081ba985ff
+oid sha256:295f0a621346f38e97a347d06bb9927816fe04926f6efca6f6b2fe56f30407d9
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:eee81c5e75673c3b9060a012dcd6537b28a2cc9b4464a9db8908848717dda896
+oid sha256:361be27873b08d40e317526a465204192a8fb0defda2b993ddf9efe3fb3de7d7
 size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.02942258183155572,
+  "epoch": 0.036778227289444645,
   "eval_steps": 5,
-  "global_step": 20,
+  "global_step": 25,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -89,6 +89,28 @@
       "eval_samples_per_second": 11.03,
       "eval_steps_per_second": 1.387,
       "step": 20
+    },
+    {
+      "epoch": 0.030893710923133505,
+      "grad_norm": 1.5845534801483154,
+      "learning_rate": 4.123620120825459e-05,
+      "loss": 8.3277,
+      "step": 21
+    },
+    {
+      "epoch": 0.035307098197866864,
+      "grad_norm": 1.4348173141479492,
+      "learning_rate": 3.634976249348867e-05,
+      "loss": 7.9879,
+      "step": 24
+    },
+    {
+      "epoch": 0.036778227289444645,
+      "eval_loss": 1.9180830717086792,
+      "eval_runtime": 103.7801,
+      "eval_samples_per_second": 11.033,
+      "eval_steps_per_second": 1.388,
+      "step": 25
     }
   ],
   "logging_steps": 3,
@@ -108,7 +130,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 2.613858577416192e+16,
+  "total_flos": 3.26732322177024e+16,
   "train_batch_size": 8,
   "trial_name": null,
   "trial_params": null