Commit bb37c53
Parent(s): 04a5c19
first commit

Files changed:
- config.json +41 -0
- log_history.json +156 -0
- merges.txt +0 -0
- pytorch_model.bin +3 -0
- special_tokens_map.json +1 -0
- tokenizer_config.json +1 -0
- training_args.bin +3 -0
- vocab.json +0 -0
config.json
ADDED
@@ -0,0 +1,41 @@
+{
+  "_num_labels": 1,
+  "activation_function": "gelu_new",
+  "architectures": [
+    "GPT2LMHeadModel"
+  ],
+  "attn_pdrop": 0.1,
+  "bos_token_id": 50256,
+  "embd_pdrop": 0.1,
+  "eos_token_id": 50256,
+  "gradient_checkpointing": false,
+  "id2label": {
+    "0": "LABEL_0"
+  },
+  "initializer_range": 0.02,
+  "label2id": {
+    "LABEL_0": 0
+  },
+  "layer_norm_epsilon": 1e-05,
+  "model_type": "gpt2",
+  "n_ctx": 1024,
+  "n_embd": 1280,
+  "n_head": 20,
+  "n_inner": null,
+  "n_layer": 36,
+  "n_positions": 1024,
+  "resid_pdrop": 0.1,
+  "summary_activation": null,
+  "summary_first_dropout": 0.1,
+  "summary_proj_to_labels": true,
+  "summary_type": "cls_index",
+  "summary_use_proj": true,
+  "task_specific_params": {
+    "text-generation": {
+      "do_sample": true,
+      "max_length": 50
+    }
+  },
+  "total_flos": 418496391413760000,
+  "vocab_size": 50257
+}
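The config describes a GPT-2 large-sized language model: 36 layers, 20 attention heads, hidden size 1280, and a 1024-token context. A minimal sketch of loading and sampling from the checkpoint with transformers, assuming the repository has been cloned (with LFS) to ./model; the path is illustrative:

```python
from transformers import GPT2LMHeadModel, GPT2Tokenizer

# "./model" is an assumed local clone of this repo; any path or hub id works.
model = GPT2LMHeadModel.from_pretrained("./model")
tokenizer = GPT2Tokenizer.from_pretrained("./model")

inputs = tokenizer("The committee decided that", return_tensors="pt")
# do_sample=True and max_length=50 mirror task_specific_params above.
output = model.generate(**inputs, do_sample=True, max_length=50)
print(tokenizer.decode(output[0], skip_special_tokens=True))
```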
log_history.json
ADDED
@@ -0,0 +1,156 @@
+[
+  {
+    "loss": 3.608123046875,
+    "learning_rate": 4.777024616482341e-05,
+    "epoch": 0.04459159671359932,
+    "total_flos": 19022563246080000,
+    "step": 500
+  },
+  {
+    "loss": 3.56192529296875,
+    "learning_rate": 4.554049232964681e-05,
+    "epoch": 0.08918319342719865,
+    "total_flos": 38045126492160000,
+    "step": 1000
+  },
+  {
+    "loss": 3.54396728515625,
+    "learning_rate": 4.331073849447021e-05,
+    "epoch": 0.13377479014079796,
+    "total_flos": 57067689738240000,
+    "step": 1500
+  },
+  {
+    "loss": 3.5339208984375,
+    "learning_rate": 4.1080984659293615e-05,
+    "epoch": 0.1783663868543973,
+    "total_flos": 76090252984320000,
+    "step": 2000
+  },
+  {
+    "loss": 3.5219326171875,
+    "learning_rate": 3.885123082411702e-05,
+    "epoch": 0.22295798356799662,
+    "total_flos": 95112816230400000,
+    "step": 2500
+  },
+  {
+    "loss": 3.508724609375,
+    "learning_rate": 3.6621476988940425e-05,
+    "epoch": 0.2675495802815959,
+    "total_flos": 114135379476480000,
+    "step": 3000
+  },
+  {
+    "loss": 3.505310546875,
+    "learning_rate": 3.439172315376383e-05,
+    "epoch": 0.3121411769951953,
+    "total_flos": 133157942722560000,
+    "step": 3500
+  },
+  {
+    "loss": 3.497248046875,
+    "learning_rate": 3.2161969318587235e-05,
+    "epoch": 0.3567327737087946,
+    "total_flos": 152180505968640000,
+    "step": 4000
+  },
+  {
+    "loss": 3.48703125,
+    "learning_rate": 2.993221548341063e-05,
+    "epoch": 0.4013243704223939,
+    "total_flos": 171203069214720000,
+    "step": 4500
+  },
+  {
+    "loss": 3.48241796875,
+    "learning_rate": 2.7702461648234034e-05,
+    "epoch": 0.44591596713599324,
+    "total_flos": 190225632460800000,
+    "step": 5000
+  },
+  {
+    "loss": 3.4734921875,
+    "learning_rate": 2.547270781305744e-05,
+    "epoch": 0.49050756384959254,
+    "total_flos": 209248195706880000,
+    "step": 5500
+  },
+  {
+    "loss": 3.47524609375,
+    "learning_rate": 2.3242953977880844e-05,
+    "epoch": 0.5350991605631918,
+    "total_flos": 228270758952960000,
+    "step": 6000
+  },
+  {
+    "loss": 3.4679453125,
+    "learning_rate": 2.1013200142704246e-05,
+    "epoch": 0.5796907572767912,
+    "total_flos": 247293322199040000,
+    "step": 6500
+  },
+  {
+    "loss": 3.45712890625,
+    "learning_rate": 1.878344630752765e-05,
+    "epoch": 0.6242823539903906,
+    "total_flos": 266315885445120000,
+    "step": 7000
+  },
+  {
+    "loss": 3.458171875,
+    "learning_rate": 1.6553692472351056e-05,
+    "epoch": 0.6688739507039898,
+    "total_flos": 285338448691200000,
+    "step": 7500
+  },
+  {
+    "loss": 3.45421875,
+    "learning_rate": 1.4323938637174455e-05,
+    "epoch": 0.7134655474175892,
+    "total_flos": 304361011937280000,
+    "step": 8000
+  },
+  {
+    "loss": 3.45114453125,
+    "learning_rate": 1.209418480199786e-05,
+    "epoch": 0.7580571441311885,
+    "total_flos": 323383575183360000,
+    "step": 8500
+  },
+  {
+    "loss": 3.4411015625,
+    "learning_rate": 9.864430966821263e-06,
+    "epoch": 0.8026487408447878,
+    "total_flos": 342406138429440000,
+    "step": 9000
+  },
+  {
+    "loss": 3.44307421875,
+    "learning_rate": 7.634677131644667e-06,
+    "epoch": 0.8472403375583871,
+    "total_flos": 361428701675520000,
+    "step": 9500
+  },
+  {
+    "loss": 3.44175,
+    "learning_rate": 5.40492329646807e-06,
+    "epoch": 0.8918319342719865,
+    "total_flos": 380451264921600000,
+    "step": 10000
+  },
+  {
+    "loss": 3.43734375,
+    "learning_rate": 3.175169461291474e-06,
+    "epoch": 0.9364235309855857,
+    "total_flos": 399473828167680000,
+    "step": 10500
+  },
+  {
+    "loss": 3.4352890625,
+    "learning_rate": 9.45415626114877e-07,
+    "epoch": 0.9810151276991851,
+    "total_flos": 418496391413760000,
+    "step": 11000
+  }
+]
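The log covers roughly one epoch of training: loss eases from about 3.61 at step 500 to about 3.44 at step 11000, while the learning rate decays linearly from ~4.78e-05 toward zero, consistent with a linear schedule. A small sketch for summarizing the curve from the committed file:

```python
import json

with open("log_history.json") as f:
    history = json.load(f)

# One record per logging interval (every 500 optimizer steps).
for rec in history:
    print(f"step {rec['step']:>6}  loss {rec['loss']:.4f}  lr {rec['learning_rate']:.2e}")
```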
merges.txt
ADDED
The diff for this file is too large to render. See raw diff.
pytorch_model.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c28843f24b1a5aae6def4ca20d454bfe889c4ef68547c69810655c110909ba6f
+size 3134064907
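What is committed here is a Git LFS pointer, not the weights themselves: the ~3.1 GB blob is addressed by the sha256 oid and fetched out of band by git lfs on checkout. A quick Python check (a sketch) for whether a local file is still an unresolved pointer:

```python
from pathlib import Path

def is_lfs_pointer(path: str) -> bool:
    """True if the file is a Git LFS pointer stub, not the real binary."""
    head = Path(path).read_bytes()[:64]
    return head.startswith(b"version https://git-lfs.github.com/spec/v1")

# Prints True after a clone without LFS; the stub is a few hundred bytes
# instead of the 3134064907 bytes recorded in the pointer.
print(is_lfs_pointer("pytorch_model.bin"))
```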
special_tokens_map.json
ADDED
@@ -0,0 +1 @@
+{"bos_token": {"content": "<|endoftext|>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, "eos_token": {"content": "<|endoftext|>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, "unk_token": {"content": "<|endoftext|>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}}
tokenizer_config.json
ADDED
@@ -0,0 +1 @@
+{"model_max_length": 1024}
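tokenizer_config.json pins only the tokenizer's length cap; everything else falls back to GPT-2 defaults, and the three special tokens above are all <|endoftext|>, as usual for GPT-2. A quick check, again assuming the repo files live at the illustrative path ./model:

```python
from transformers import GPT2Tokenizer

tok = GPT2Tokenizer.from_pretrained("./model")
print(tok.model_max_length)  # 1024, from tokenizer_config.json
print(tok.eos_token)         # "<|endoftext|>", from special_tokens_map.json
```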
training_args.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ebc1f4880f3c3a058ac3cd8262845d240baf128bde3e3b7cc5bfbd05b9b7be05
+size 1839
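training_args.bin is another LFS pointer; by Trainer convention the resolved 1839-byte blob is a pickled TrainingArguments object. A sketch of inspecting it once pulled (transformers must be importable for unpickling to succeed):

```python
import torch

# weights_only=False is needed on newer torch, since this is an arbitrary
# pickled object rather than a tensor state dict.
args = torch.load("training_args.bin", weights_only=False)
print(args)  # full TrainingArguments repr: learning rate, batch size, etc.
```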
vocab.json
ADDED
The diff for this file is too large to render. See raw diff.
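vocab.json and merges.txt together define the byte-level BPE vocabulary (50257 entries, matching vocab_size in config.json). They can also be handed to the slow tokenizer class directly, as a sketch:

```python
from transformers import GPT2Tokenizer

# Build the tokenizer straight from the two committed files.
tok = GPT2Tokenizer(vocab_file="vocab.json", merges_file="merges.txt")
ids = tok.encode("Hello world")
print(ids)              # token ids under the byte-level BPE
print(tok.decode(ids))  # round-trips to "Hello world"
```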