gokulsrinivasagan committed
Commit 38be482 · verified · 1 Parent(s): 7efe5e2

End of training

README.md CHANGED
@@ -1,14 +1,29 @@
 ---
 library_name: transformers
+language:
+- en
 license: apache-2.0
 base_model: google/bert_uncased_L-4_H-256_A-4
 tags:
 - generated_from_trainer
+datasets:
+- glue
 metrics:
 - accuracy
 model-index:
 - name: bert_uncased_L-4_H-256_A-4_mnli
-  results: []
+  results:
+  - task:
+      name: Text Classification
+      type: text-classification
+    dataset:
+      name: GLUE MNLI
+      type: glue
+      args: mnli
+    metrics:
+    - name: Accuracy
+      type: accuracy
+      value: 0.7651545972335232
 ---
 
 <!-- This model card has been generated automatically according to the information the Trainer had access to. You
@@ -16,10 +31,10 @@ should probably proofread and complete it, then remove this comment. -->
 
 # bert_uncased_L-4_H-256_A-4_mnli
 
-This model is a fine-tuned version of [google/bert_uncased_L-4_H-256_A-4](https://huggingface.co/google/bert_uncased_L-4_H-256_A-4) on an unknown dataset.
+This model is a fine-tuned version of [google/bert_uncased_L-4_H-256_A-4](https://huggingface.co/google/bert_uncased_L-4_H-256_A-4) on the GLUE MNLI dataset.
 It achieves the following results on the evaluation set:
-- Loss: 0.6584
-- Accuracy: 0.7608
+- Loss: 0.5852
+- Accuracy: 0.7652
 
 ## Model description
 
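For context, a minimal inference sketch against the fine-tuned checkpoint described in the card above. The repo id below is assumed from the committing user and the model name (it is not stated in this diff), and the MNLI label names may surface as LABEL_0/1/2 if id2label was not customized:

```python
# Hedged usage sketch: the repo id is an assumption (committer namespace + model name).
from transformers import pipeline

nli = pipeline(
    "text-classification",
    model="gokulsrinivasagan/bert_uncased_L-4_H-256_A-4_mnli",  # assumed repo id
)

# MNLI is premise/hypothesis classification, so pass a sentence pair.
print(nli({"text": "A man is playing a guitar on stage.",
           "text_pair": "Someone is performing music."}))
# The prediction is one of the three MNLI classes; it may print as
# LABEL_0/LABEL_1/LABEL_2 if the config does not define readable label names.
```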
all_results.json ADDED
@@ -0,0 +1,22 @@
+{
+    "epoch": 9.0,
+    "epoch_mm": 9.0,
+    "eval_accuracy": 0.7552725420275089,
+    "eval_accuracy_mm": 0.7651545972335232,
+    "eval_loss": 0.6099433302879333,
+    "eval_loss_mm": 0.5852423310279846,
+    "eval_runtime": 3.4113,
+    "eval_runtime_mm": 3.4446,
+    "eval_samples": 9815,
+    "eval_samples_mm": 9832,
+    "eval_samples_per_second": 2877.217,
+    "eval_samples_per_second_mm": 2854.319,
+    "eval_steps_per_second": 11.433,
+    "eval_steps_per_second_mm": 11.322,
+    "total_flos": 1.751365065917952e+16,
+    "train_loss": 0.5555856487258144,
+    "train_runtime": 1686.0242,
+    "train_samples": 392702,
+    "train_samples_per_second": 11645.8,
+    "train_steps_per_second": 45.492
+}
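The plain and `_mm`-suffixed keys above appear to correspond to MNLI's matched and mismatched validation splits. A hedged sketch of how such paired numbers could be reproduced with `Trainer.evaluate` (repo id assumed as before; the original run's exact key naming may differ):

```python
# Hedged reproduction sketch for the matched / mismatched ("_mm") eval metrics.
# The repo id is an assumption; the original training script is not part of this commit.
import numpy as np
import evaluate
from datasets import load_dataset
from transformers import (AutoModelForSequenceClassification, AutoTokenizer,
                          DataCollatorWithPadding, Trainer, TrainingArguments)

model_id = "gokulsrinivasagan/bert_uncased_L-4_H-256_A-4_mnli"  # assumed repo id
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForSequenceClassification.from_pretrained(model_id)
accuracy = evaluate.load("accuracy")

def tokenize(batch):
    # MNLI examples are (premise, hypothesis) pairs.
    return tokenizer(batch["premise"], batch["hypothesis"], truncation=True, max_length=128)

def compute_metrics(p):
    preds = np.argmax(p.predictions, axis=-1)
    return accuracy.compute(predictions=preds, references=p.label_ids)

glue = load_dataset("glue", "mnli")
matched = glue["validation_matched"].map(tokenize, batched=True)      # 9815 examples
mismatched = glue["validation_mismatched"].map(tokenize, batched=True)  # 9832 examples

trainer = Trainer(
    model=model,
    args=TrainingArguments(output_dir="eval_tmp", per_device_eval_batch_size=256,
                           report_to="none"),
    data_collator=DataCollatorWithPadding(tokenizer),
    compute_metrics=compute_metrics,
)
print(trainer.evaluate(matched))      # eval_accuracy / eval_loss on the matched split
print(trainer.evaluate(mismatched))   # the "_mm" numbers above come from this split
```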
eval_results.json ADDED
@@ -0,0 +1,16 @@
+{
+    "epoch": 9.0,
+    "epoch_mm": 9.0,
+    "eval_accuracy": 0.7552725420275089,
+    "eval_accuracy_mm": 0.7651545972335232,
+    "eval_loss": 0.6099433302879333,
+    "eval_loss_mm": 0.5852423310279846,
+    "eval_runtime": 3.4113,
+    "eval_runtime_mm": 3.4446,
+    "eval_samples": 9815,
+    "eval_samples_mm": 9832,
+    "eval_samples_per_second": 2877.217,
+    "eval_samples_per_second_mm": 2854.319,
+    "eval_steps_per_second": 11.433,
+    "eval_steps_per_second_mm": 11.322
+}
logs/events.out.tfevents.1733334846.ki-g0008.1761130.17 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:406e4e4ad18e425c9abe356ced792229b427b2115468124db1665432e613d153
+size 734
train_results.json ADDED
@@ -0,0 +1,9 @@
+{
+    "epoch": 9.0,
+    "total_flos": 1.751365065917952e+16,
+    "train_loss": 0.5555856487258144,
+    "train_runtime": 1686.0242,
+    "train_samples": 392702,
+    "train_samples_per_second": 11645.8,
+    "train_steps_per_second": 45.492
+}
trainer_state.json ADDED
@@ -0,0 +1,195 @@
+{
+  "best_metric": 0.6099433302879333,
+  "best_model_checkpoint": "bert_uncased_L-4_H-256_A-4_mnli/checkpoint-6136",
+  "epoch": 9.0,
+  "eval_steps": 500,
+  "global_step": 13806,
+  "is_hyper_param_search": false,
+  "is_local_process_zero": true,
+  "is_world_process_zero": true,
+  "log_history": [
+    {
+      "epoch": 1.0,
+      "grad_norm": 2.477856159210205,
+      "learning_rate": 4.9e-05,
+      "loss": 0.7878,
+      "step": 1534
+    },
+    {
+      "epoch": 1.0,
+      "eval_accuracy": 0.7006622516556291,
+      "eval_loss": 0.7087482213973999,
+      "eval_runtime": 3.4685,
+      "eval_samples_per_second": 2829.716,
+      "eval_steps_per_second": 11.244,
+      "step": 1534
+    },
+    {
+      "epoch": 2.0,
+      "grad_norm": 2.7286040782928467,
+      "learning_rate": 4.8e-05,
+      "loss": 0.6683,
+      "step": 3068
+    },
+    {
+      "epoch": 2.0,
+      "eval_accuracy": 0.7295975547631177,
+      "eval_loss": 0.6437011361122131,
+      "eval_runtime": 3.5697,
+      "eval_samples_per_second": 2749.523,
+      "eval_steps_per_second": 10.925,
+      "step": 3068
+    },
+    {
+      "epoch": 3.0,
+      "grad_norm": 2.3989334106445312,
+      "learning_rate": 4.7e-05,
+      "loss": 0.6112,
+      "step": 4602
+    },
+    {
+      "epoch": 3.0,
+      "eval_accuracy": 0.7465104431991849,
+      "eval_loss": 0.6203623414039612,
+      "eval_runtime": 3.4599,
+      "eval_samples_per_second": 2836.766,
+      "eval_steps_per_second": 11.272,
+      "step": 4602
+    },
+    {
+      "epoch": 4.0,
+      "grad_norm": 2.6142916679382324,
+      "learning_rate": 4.600000000000001e-05,
+      "loss": 0.5683,
+      "step": 6136
+    },
+    {
+      "epoch": 4.0,
+      "eval_accuracy": 0.7552725420275089,
+      "eval_loss": 0.6099433302879333,
+      "eval_runtime": 3.4644,
+      "eval_samples_per_second": 2833.094,
+      "eval_steps_per_second": 11.257,
+      "step": 6136
+    },
+    {
+      "epoch": 5.0,
+      "grad_norm": 3.1504721641540527,
+      "learning_rate": 4.5e-05,
+      "loss": 0.532,
+      "step": 7670
+    },
+    {
+      "epoch": 5.0,
+      "eval_accuracy": 0.757208354559348,
+      "eval_loss": 0.6147148013114929,
+      "eval_runtime": 3.3611,
+      "eval_samples_per_second": 2920.179,
+      "eval_steps_per_second": 11.603,
+      "step": 7670
+    },
+    {
+      "epoch": 6.0,
+      "grad_norm": 3.1118323802948,
+      "learning_rate": 4.4000000000000006e-05,
+      "loss": 0.4997,
+      "step": 9204
+    },
+    {
+      "epoch": 6.0,
+      "eval_accuracy": 0.7551706571574122,
+      "eval_loss": 0.6381344199180603,
+      "eval_runtime": 3.4815,
+      "eval_samples_per_second": 2819.177,
+      "eval_steps_per_second": 11.202,
+      "step": 9204
+    },
+    {
+      "epoch": 7.0,
+      "grad_norm": 2.8937153816223145,
+      "learning_rate": 4.3e-05,
+      "loss": 0.4707,
+      "step": 10738
+    },
+    {
+      "epoch": 7.0,
+      "eval_accuracy": 0.7588385124808966,
+      "eval_loss": 0.6195704340934753,
+      "eval_runtime": 3.458,
+      "eval_samples_per_second": 2838.345,
+      "eval_steps_per_second": 11.278,
+      "step": 10738
+    },
+    {
+      "epoch": 8.0,
+      "grad_norm": 3.1744437217712402,
+      "learning_rate": 4.2e-05,
+      "loss": 0.4436,
+      "step": 12272
+    },
+    {
+      "epoch": 8.0,
+      "eval_accuracy": 0.7589403973509934,
+      "eval_loss": 0.6404411196708679,
+      "eval_runtime": 3.4687,
+      "eval_samples_per_second": 2829.597,
+      "eval_steps_per_second": 11.243,
+      "step": 12272
+    },
+    {
+      "epoch": 9.0,
+      "grad_norm": 2.90157151222229,
+      "learning_rate": 4.1e-05,
+      "loss": 0.4187,
+      "step": 13806
+    },
+    {
+      "epoch": 9.0,
+      "eval_accuracy": 0.7607743250127356,
+      "eval_loss": 0.6584197282791138,
+      "eval_runtime": 3.4328,
+      "eval_samples_per_second": 2859.191,
+      "eval_steps_per_second": 11.361,
+      "step": 13806
+    },
+    {
+      "epoch": 9.0,
+      "step": 13806,
+      "total_flos": 1.751365065917952e+16,
+      "train_loss": 0.5555856487258144,
+      "train_runtime": 1686.0242,
+      "train_samples_per_second": 11645.8,
+      "train_steps_per_second": 45.492
+    }
+  ],
+  "logging_steps": 1,
+  "max_steps": 76700,
+  "num_input_tokens_seen": 0,
+  "num_train_epochs": 50,
+  "save_steps": 500,
+  "stateful_callbacks": {
+    "EarlyStoppingCallback": {
+      "args": {
+        "early_stopping_patience": 5,
+        "early_stopping_threshold": 0.0
+      },
+      "attributes": {
+        "early_stopping_patience_counter": 5
+      }
+    },
+    "TrainerControl": {
+      "args": {
+        "should_epoch_stop": false,
+        "should_evaluate": false,
+        "should_log": false,
+        "should_save": true,
+        "should_training_stop": true
+      },
+      "attributes": {}
+    }
+  },
+  "total_flos": 1.751365065917952e+16,
+  "train_batch_size": 256,
+  "trial_name": null,
+  "trial_params": null
+}
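trainer_state.json pins down most of the training configuration: per-device batch size 256 (1534 steps per epoch over 392,702 examples), a learning rate consistent with 5e-5 decayed linearly over the planned 76,700 steps, up to 50 epochs with per-epoch evaluation, and early stopping with patience 5 that halted training after epoch 9 (best eval loss 0.6099 at step 6136). Below is a hedged reconstruction of that setup; anything not logged above is an assumption, and the exact script used for this run is not part of the commit:

```python
# Hedged reconstruction of the training setup implied by trainer_state.json.
import numpy as np
import evaluate
from datasets import load_dataset
from transformers import (AutoModelForSequenceClassification, AutoTokenizer,
                          DataCollatorWithPadding, EarlyStoppingCallback,
                          Trainer, TrainingArguments)

base = "google/bert_uncased_L-4_H-256_A-4"
tokenizer = AutoTokenizer.from_pretrained(base)
model = AutoModelForSequenceClassification.from_pretrained(base, num_labels=3)

glue = load_dataset("glue", "mnli")
def tokenize(batch):
    return tokenizer(batch["premise"], batch["hypothesis"], truncation=True, max_length=128)
encoded = glue.map(tokenize, batched=True)

accuracy = evaluate.load("accuracy")
def compute_metrics(p):
    return accuracy.compute(predictions=np.argmax(p.predictions, axis=-1),
                            references=p.label_ids)

args = TrainingArguments(
    output_dir="bert_uncased_L-4_H-256_A-4_mnli",
    learning_rate=5e-5,                 # matches the logged 4.9e-5 after 1534/76700 steps
    per_device_train_batch_size=256,    # "train_batch_size": 256
    per_device_eval_batch_size=256,
    num_train_epochs=50,                # "num_train_epochs": 50, "max_steps": 76700
    eval_strategy="epoch",              # older transformers: evaluation_strategy="epoch"
    save_strategy="epoch",
    logging_strategy="epoch",           # assumed from the per-epoch log_history
    load_best_model_at_end=True,
    metric_for_best_model="eval_loss",  # "best_metric" above is an eval loss
    greater_is_better=False,
)

trainer = Trainer(
    model=model,
    args=args,
    train_dataset=encoded["train"],
    eval_dataset=encoded["validation_matched"],
    data_collator=DataCollatorWithPadding(tokenizer),
    compute_metrics=compute_metrics,
    callbacks=[EarlyStoppingCallback(early_stopping_patience=5)],
)
trainer.train()
```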