Add files using upload-large-folder tool
inference/convert.py
ADDED
@@ -0,0 +1,84 @@
import os
import shutil
from argparse import ArgumentParser
from glob import glob
from tqdm import tqdm, trange

import torch
from safetensors.torch import safe_open, save_file


# Maps each Hugging Face parameter name to (its name in this repo's format,
# the tensor dimension to shard across model-parallel ranks); None means the
# tensor is replicated on every rank rather than sliced.
mapping = {
    "embed_tokens": ("embed", 0),
    "input_layernorm": ("attn_norm", None),
    "post_attention_layernorm": ("ffn_norm", None),
    "q_proj": ("wq", 0),
    "q_a_proj": ("wq_a", None),
    "q_a_layernorm": ("q_norm", None),
    "q_b_proj": ("wq_b", 0),
    "kv_a_proj_with_mqa": ("wkv_a", None),
    "kv_a_layernorm": ("kv_norm", None),
    "kv_b_proj": ("wkv_b", 0),
    "o_proj": ("wo", 1),
    "gate": ("gate", None),
    "gate_proj": ("w1", 0),
    "down_proj": ("w2", 1),
    "up_proj": ("w3", 0),
    "norm": ("norm", None),
    "lm_head": ("head", 0),
    "scale": ("scale", None),
}
def main(hf_ckpt_path, save_path, n_experts, mp):
    """Convert a Hugging Face checkpoint into `mp` model-parallel shards
    named according to this repo's conventions."""
    torch.set_num_threads(8)
    n_local_experts = n_experts // mp
    state_dicts = [{} for _ in range(mp)]

    for file_path in tqdm(glob(os.path.join(hf_ckpt_path, "*.safetensors"))):
        with safe_open(file_path, framework="pt", device="cpu") as f:
            for name in f.keys():
                # Skip the extra layer 61 (the multi-token-prediction module),
                # which is not used for inference here.
                if "model.layers.61" in name:
                    continue
                param: torch.Tensor = f.get_tensor(name)
                # Rename the parameter to this repo's conventions.
                if name.startswith("model."):
                    name = name[len("model."):]
                name = name.replace("self_attn", "attn")
                name = name.replace("mlp", "ffn")
                name = name.replace("weight_scale_inv", "scale")
                name = name.replace("e_score_correction_bias", "bias")
                key = name.split(".")[-2]
                assert key in mapping
                new_key, dim = mapping[key]
                name = name.replace(key, new_key)
                for i in range(mp):
                    new_param = param
                    if "experts" in name and "shared_experts" not in name:
                        # Routed experts are assigned whole to the rank that
                        # owns them; every other rank skips this tensor.
                        idx = int(name.split(".")[-3])
                        if idx < i * n_local_experts or idx >= (i + 1) * n_local_experts:
                            continue
                    elif dim is not None:
                        # All other sharded tensors are split evenly along
                        # their shard dimension.
                        assert param.size(dim) % mp == 0
                        shard_size = param.size(dim) // mp
                        new_param = param.narrow(dim, i * shard_size, shard_size).contiguous()
                    state_dicts[i][name] = new_param

    os.makedirs(save_path, exist_ok=True)

    # Write one safetensors file per model-parallel rank.
    for i in trange(mp):
        save_file(state_dicts[i], os.path.join(save_path, f"model{i}-mp{mp}.safetensors"))

    # Copy the tokenizer files alongside the converted weights.
    for file_path in glob(os.path.join(hf_ckpt_path, "*token*")):
        new_file_path = os.path.join(save_path, os.path.basename(file_path))
        shutil.copyfile(file_path, new_file_path)

if __name__ == "__main__":
    parser = ArgumentParser()
    parser.add_argument("--hf-ckpt-path", type=str, required=True)
    parser.add_argument("--save-path", type=str, required=True)
    parser.add_argument("--n-experts", type=int, required=True)
    parser.add_argument("--model-parallel", type=int, default=1)
    args = parser.parse_args()
    # Experts must divide evenly across model-parallel ranks.
    assert args.n_experts % args.model_parallel == 0
    main(args.hf_ckpt_path, args.save_path, args.n_experts, args.model_parallel)
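For reference, a minimal sketch of how the script might be invoked, assuming a DeepSeek-V3-style checkpoint with 256 routed experts converted for 16-way model parallelism (the paths here are hypothetical):

python convert.py --hf-ckpt-path /path/to/DeepSeek-V3-hf --save-path /path/to/DeepSeek-V3-mp16 --n-experts 256 --model-parallel 16

Per the code above, this would write model0-mp16.safetensors through model15-mp16.safetensors into the save directory, one shard per rank, and copy the tokenizer files next to them.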