Upload 9 files

- config.json +26 -0
- eval_results.txt +11 -0
- merges.txt +0 -0
- run_unsup_example.sh +29 -0
- special_tokens_map.json +1 -0
- tokenizer_config.json +1 -0
- train_results.txt +3 -0
- trainer_state.json +208 -0
- vocab.json +0 -0
config.json
ADDED
@@ -0,0 +1,26 @@
+{
+  "_name_or_path": "simcse-roberta-large-with-mask",
+  "architectures": [
+    "RobertaForCL"
+  ],
+  "attention_probs_dropout_prob": 0.1,
+  "bos_token_id": 0,
+  "eos_token_id": 2,
+  "gradient_checkpointing": false,
+  "hidden_act": "gelu",
+  "hidden_dropout_prob": 0.1,
+  "hidden_size": 1024,
+  "initializer_range": 0.02,
+  "intermediate_size": 4096,
+  "layer_norm_eps": 1e-05,
+  "max_position_embeddings": 514,
+  "model_type": "roberta",
+  "num_attention_heads": 16,
+  "num_hidden_layers": 24,
+  "pad_token_id": 1,
+  "position_embedding_type": "absolute",
+  "transformers_version": "4.2.1",
+  "type_vocab_size": 1,
+  "use_cache": true,
+  "vocab_size": 50265
+}
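
The config describes a standard RoBERTa-large encoder (24 layers, hidden size 1024, 16 heads); "RobertaForCL" is SimCSE's training-time wrapper class, so for plain inference the weights load into a stock RoBERTa model. A minimal sketch, assuming the files in this commit sit in a local directory named simcse-roberta-large-with-mask:

from transformers import AutoConfig, AutoModel

path = "simcse-roberta-large-with-mask"  # hypothetical local checkout of this repo

config = AutoConfig.from_pretrained(path)
assert config.model_type == "roberta"
assert (config.hidden_size, config.num_hidden_layers) == (1024, 24)  # RoBERTa-large

# AutoModel dispatches on model_type, not on the custom "RobertaForCL"
# architecture name, so this loads the weights into a plain RobertaModel.
model = AutoModel.from_pretrained(path)
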
eval_results.txt
ADDED
@@ -0,0 +1,11 @@
+------ test ------
++-------+-------+-------+-------+-------+--------------+-----------------+-------+
+| STS12 | STS13 | STS14 | STS15 | STS16 | STSBenchmark | SICKRelatedness | Avg.  |
++-------+-------+-------+-------+-------+--------------+-----------------+-------+
+| 62.53 | 75.35 | 68.75 | 80.49 | 76.69 |    76.67     |      69.24      | 72.82 |
++-------+-------+-------+-------+-------+--------------+-----------------+-------+
++-------+-------+-------+-------+-------+-------+-------+-------+
+|  MR   |  CR   | SUBJ  | MPQA  | SST2  | TREC  | MRPC  | Avg.  |
++-------+-------+-------+-------+-------+-------+-------+-------+
+| 84.85 | 88.29 | 95.20 | 88.35 | 89.29 | 91.40 | 71.94 | 87.05 |
++-------+-------+-------+-------+-------+-------+-------+-------+
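
These tables are in the format printed by SimCSE's SentEval-based evaluation: seven STS benchmarks (presumably Spearman correlation x100, as SimCSE reports) and seven transfer tasks (accuracy). Each Avg. column is the unweighted mean of its row, which can be checked directly:

sts = [62.53, 75.35, 68.75, 80.49, 76.69, 76.67, 69.24]
transfer = [84.85, 88.29, 95.20, 88.35, 89.29, 91.40, 71.94]

print(round(sum(sts) / len(sts), 2))            # 72.82, the reported STS Avg.
print(round(sum(transfer) / len(transfer), 2))  # 87.05, the reported transfer Avg.
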
merges.txt
ADDED
The diff for this file is too large to render.
run_unsup_example.sh
ADDED
@@ -0,0 +1,29 @@
+#!/bin/bash
+
+# In this example, we show how to train SimCSE on unsupervised Wikipedia data.
+# If you want to train it with multiple GPU cards, see "run_sup_example.sh"
+# about how to use PyTorch's distributed data parallel.
+
+python train.py \
+    --model_name_or_path $BC_ROBERTA_LARGE_PATH \
+    --train_file $BC_WIKI1M_PATH \
+    --output_dir result/simcse-roberta-large-with-mask \
+    --num_train_epochs 1 \
+    --per_device_train_batch_size 64 \
+    --learning_rate 5e-6 \
+    --max_seq_length 32 \
+    --pooler_type cls \
+    --mlp_only_train \
+    --overwrite_output_dir \
+    --temp 0.05 \
+    --do_train \
+    --fp16 \
+    --do_mlm \
+    --save_steps 5000 \
+
+# --load_best_model_at_end \
+# --eval_steps 125 \
+# --evaluation_strategy steps \
+# --metric_for_best_model stsb_spearman \
+# --do_eval \
+# "$@"
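
The script reads two environment variables: $BC_ROBERTA_LARGE_PATH (a local RoBERTa-large checkpoint) and $BC_WIKI1M_PATH (the training corpus, presumably SimCSE's 1M-sentence Wikipedia dump). Note that the trailing backslash after --save_steps 5000 is followed by a blank line, which terminates the command, so the commented evaluation flags below it are inert. A minimal sketch of driving the script from Python, with placeholder paths:

import os
import subprocess

env = dict(os.environ)
env["BC_ROBERTA_LARGE_PATH"] = "/path/to/roberta-large"   # placeholder
env["BC_WIKI1M_PATH"] = "/path/to/wiki1m_for_simcse.txt"  # placeholder

# Runs train.py with the hyperparameters above (1 epoch, batch 64, lr 5e-6,
# CLS pooling with the MLP used only at training time, MLM auxiliary loss).
subprocess.run(["bash", "run_unsup_example.sh"], env=env, check=True)
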
special_tokens_map.json
ADDED
@@ -0,0 +1 @@
+{"bos_token": "<s>", "eos_token": "</s>", "unk_token": "<unk>", "sep_token": "</s>", "pad_token": "<pad>", "cls_token": "<s>", "mask_token": {"content": "<mask>", "single_word": false, "lstrip": true, "rstrip": false, "normalized": false}}
tokenizer_config.json
ADDED
@@ -0,0 +1 @@
+{"unk_token": "<unk>", "bos_token": "<s>", "eos_token": "</s>", "add_prefix_space": false, "errors": "replace", "sep_token": "</s>", "cls_token": "<s>", "pad_token": "<pad>", "mask_token": "<mask>", "special_tokens_map_file": null, "name_or_path": "/mnt/nfs-storage-pvc-n26-20241218/rizejin/wzl/model-files/roberta-large"}
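
Both files describe the stock RoBERTa BPE tokenizer (note "lstrip": true on <mask>, the usual RoBERTa convention, and the absolute "name_or_path" left over from the training machine). Together with vocab.json and merges.txt they load as usual; a minimal sketch of encoding a sentence and taking the <s> (CLS) representation, matching --pooler_type cls in the training script:

import torch
from transformers import AutoModel, AutoTokenizer

path = "simcse-roberta-large-with-mask"  # hypothetical local checkout
tokenizer = AutoTokenizer.from_pretrained(path)
model = AutoModel.from_pretrained(path)
model.eval()

batch = tokenizer(["A man is playing a guitar."], return_tensors="pt")
with torch.no_grad():
    embedding = model(**batch).last_hidden_state[:, 0]  # <s>/CLS position
print(embedding.shape)  # torch.Size([1, 1024])
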
train_results.txt
ADDED
@@ -0,0 +1,3 @@
+epoch = 1.0
+train_runtime = 3654.5011
+train_samples_per_second = 4.276
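
One caveat when reading the throughput figure: 4.276 matches global steps per second (15625 steps over 3654.5 s in trainer_state.json below), not sentences per second; early transformers releases such as the 4.2.1 recorded in config.json computed "train_samples_per_second" from steps. A quick check, assuming the batch size of 64 from run_unsup_example.sh:

steps, runtime, batch_size = 15625, 3654.5011, 64

print(round(steps / runtime, 3))               # 4.276, the reported figure
print(round(steps * batch_size / runtime, 1))  # ~273.6 actual sentences/second
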
trainer_state.json
ADDED
@@ -0,0 +1,208 @@
+{
+  "best_metric": null,
+  "best_model_checkpoint": null,
+  "epoch": 1.0,
+  "global_step": 15625,
+  "is_hyper_param_search": false,
+  "is_local_process_zero": true,
+  "is_world_process_zero": true,
+  "log_history": [
+    {
+      "epoch": 0.03,
+      "learning_rate": 4.84e-06,
+      "loss": 0.6713,
+      "step": 500
+    },
+    {
+      "epoch": 0.06,
+      "learning_rate": 4.680000000000001e-06,
+      "loss": 0.1946,
+      "step": 1000
+    },
+    {
+      "epoch": 0.1,
+      "learning_rate": 4.520000000000001e-06,
+      "loss": 0.1884,
+      "step": 1500
+    },
+    {
+      "epoch": 0.13,
+      "learning_rate": 4.360000000000001e-06,
+      "loss": 0.1858,
+      "step": 2000
+    },
+    {
+      "epoch": 0.16,
+      "learning_rate": 4.2000000000000004e-06,
+      "loss": 0.1833,
+      "step": 2500
+    },
+    {
+      "epoch": 0.19,
+      "learning_rate": 4.04e-06,
+      "loss": 0.1839,
+      "step": 3000
+    },
+    {
+      "epoch": 0.22,
+      "learning_rate": 3.88e-06,
+      "loss": 0.1816,
+      "step": 3500
+    },
+    {
+      "epoch": 0.26,
+      "learning_rate": 3.7200000000000004e-06,
+      "loss": 0.1807,
+      "step": 4000
+    },
+    {
+      "epoch": 0.29,
+      "learning_rate": 3.5600000000000002e-06,
+      "loss": 0.1804,
+      "step": 4500
+    },
+    {
+      "epoch": 0.32,
+      "learning_rate": 3.4000000000000005e-06,
+      "loss": 0.1803,
+      "step": 5000
+    },
+    {
+      "epoch": 0.35,
+      "learning_rate": 3.2400000000000003e-06,
+      "loss": 0.179,
+      "step": 5500
+    },
+    {
+      "epoch": 0.38,
+      "learning_rate": 3.08e-06,
+      "loss": 0.18,
+      "step": 6000
+    },
+    {
+      "epoch": 0.42,
+      "learning_rate": 2.92e-06,
+      "loss": 0.1774,
+      "step": 6500
+    },
+    {
+      "epoch": 0.45,
+      "learning_rate": 2.7600000000000003e-06,
+      "loss": 0.178,
+      "step": 7000
+    },
+    {
+      "epoch": 0.48,
+      "learning_rate": 2.6e-06,
+      "loss": 0.178,
+      "step": 7500
+    },
+    {
+      "epoch": 0.51,
+      "learning_rate": 2.4400000000000004e-06,
+      "loss": 0.1775,
+      "step": 8000
+    },
+    {
+      "epoch": 0.54,
+      "learning_rate": 2.28e-06,
+      "loss": 0.1759,
+      "step": 8500
+    },
+    {
+      "epoch": 0.58,
+      "learning_rate": 2.12e-06,
+      "loss": 0.1773,
+      "step": 9000
+    },
+    {
+      "epoch": 0.61,
+      "learning_rate": 1.9600000000000003e-06,
+      "loss": 0.1747,
+      "step": 9500
+    },
+    {
+      "epoch": 0.64,
+      "learning_rate": 1.8000000000000001e-06,
+      "loss": 0.1756,
+      "step": 10000
+    },
+    {
+      "epoch": 0.67,
+      "learning_rate": 1.6400000000000002e-06,
+      "loss": 0.1768,
+      "step": 10500
+    },
+    {
+      "epoch": 0.7,
+      "learning_rate": 1.48e-06,
+      "loss": 0.1753,
+      "step": 11000
+    },
+    {
+      "epoch": 0.74,
+      "learning_rate": 1.32e-06,
+      "loss": 0.175,
+      "step": 11500
+    },
+    {
+      "epoch": 0.77,
+      "learning_rate": 1.1600000000000001e-06,
+      "loss": 0.1764,
+      "step": 12000
+    },
+    {
+      "epoch": 0.8,
+      "learning_rate": 1.0000000000000002e-06,
+      "loss": 0.1758,
+      "step": 12500
+    },
+    {
+      "epoch": 0.83,
+      "learning_rate": 8.400000000000001e-07,
+      "loss": 0.1752,
+      "step": 13000
+    },
+    {
+      "epoch": 0.86,
+      "learning_rate": 6.800000000000001e-07,
+      "loss": 0.1755,
+      "step": 13500
+    },
+    {
+      "epoch": 0.9,
+      "learning_rate": 5.2e-07,
+      "loss": 0.1751,
+      "step": 14000
+    },
+    {
+      "epoch": 0.93,
+      "learning_rate": 3.6e-07,
+      "loss": 0.1755,
+      "step": 14500
+    },
+    {
+      "epoch": 0.96,
+      "learning_rate": 2.0000000000000002e-07,
+      "loss": 0.1738,
+      "step": 15000
+    },
+    {
+      "epoch": 0.99,
+      "learning_rate": 4e-08,
+      "loss": 0.1747,
+      "step": 15500
+    },
+    {
+      "epoch": 1.0,
+      "step": 15625,
+      "train_runtime": 3654.5011,
+      "train_samples_per_second": 4.276
+    }
+  ],
+  "max_steps": 15625,
+  "num_train_epochs": 1,
+  "total_flos": 313292557056000000,
+  "trial_name": null,
+  "trial_params": null
+}
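
The log is consistent with the trainer defaults: 15625 steps x batch 64 covers the 1M-sentence corpus exactly once, and the learning rate decays linearly from the 5e-6 peak to zero with no warmup. A minimal sketch that reloads the file and verifies the schedule (path assumed local):

import json

with open("trainer_state.json") as f:
    state = json.load(f)

peak_lr, max_steps = 5e-6, state["max_steps"]
for entry in state["log_history"]:
    if "learning_rate" not in entry:
        continue  # the final entry carries only runtime statistics
    # Linear decay: lr(step) = peak * (1 - step / max_steps)
    expected = peak_lr * (1 - entry["step"] / max_steps)
    assert abs(entry["learning_rate"] - expected) < 1e-12
print("all", len(state["log_history"]) - 1, "logged learning rates match")
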
vocab.json
ADDED
The diff for this file is too large to render.