ardaspear committed (verified)
Commit b4cb939 · 1 Parent(s): 84fbc1e

Training in progress, step 20, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:252b097295c8cb658a95186584c8c191bbf681e89c901ff7fe67c5936bb25b00
+oid sha256:98e73bbb56ed33df02becfea3375596f6d64b44a1dbbf2432e64797751172d8e
 size 159967880
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:f6ab08126725be321b919615ac92cfbe2e14395147018761831431bcaf5a7ee0
+oid sha256:1271ea4d6fa8467eced8f934f22c530c32f8468a964095e9dd5cc3ea3dcf8996
 size 81730196
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:ab22ddf51bfbb0e52394ea82ce6def3703300cb6b77357d2a920d3ed8bca3047
+oid sha256:8532b81b442a52e7641152d52819f99f24bb427b98bc8d1a82e25a081ba985ff
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:964730355c317bebf1d8ccbe8c0becdecfcea59207dec1596c5fd980e1b6084d
+oid sha256:eee81c5e75673c3b9060a012dcd6537b28a2cc9b4464a9db8908848717dda896
 size 1064
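
Each of the binary checkpoint files above is stored as a Git LFS pointer, so the diff only shows the sha256 oid changing while the byte size stays the same. A minimal sketch of checking a downloaded file against its pointer values; the paths and oid below come from this commit, but the verification helper itself is illustrative, not part of the repository:

import hashlib
import os

def verify_lfs_pointer(local_path, expected_oid, expected_size):
    """Check a downloaded file against the oid/size recorded in its LFS pointer."""
    if os.path.getsize(local_path) != expected_size:
        return False
    sha = hashlib.sha256()
    with open(local_path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            sha.update(chunk)
    return sha.hexdigest() == expected_oid

# Values taken from the updated scheduler.pt pointer in this commit.
ok = verify_lfs_pointer(
    "last-checkpoint/scheduler.pt",
    "eee81c5e75673c3b9060a012dcd6537b28a2cc9b4464a9db8908848717dda896",
    1064,
)
print("scheduler.pt matches pointer:", ok)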
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.02206693637366679,
+  "epoch": 0.02942258183155572,
   "eval_steps": 5,
-  "global_step": 15,
+  "global_step": 20,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -74,6 +74,21 @@
       "eval_samples_per_second": 11.03,
       "eval_steps_per_second": 1.387,
       "step": 15
+    },
+    {
+      "epoch": 0.026480323648400146,
+      "grad_norm": 1.6007380485534668,
+      "learning_rate": 4.522542485937369e-05,
+      "loss": 8.6175,
+      "step": 18
+    },
+    {
+      "epoch": 0.02942258183155572,
+      "eval_loss": 2.0439629554748535,
+      "eval_runtime": 103.8112,
+      "eval_samples_per_second": 11.03,
+      "eval_steps_per_second": 1.387,
+      "step": 20
     }
   ],
   "logging_steps": 3,
@@ -93,7 +108,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 1.960393933062144e+16,
+  "total_flos": 2.613858577416192e+16,
   "train_batch_size": 8,
   "trial_name": null,
   "trial_params": null