Update README.md
README.md CHANGED
@@ -34,6 +34,29 @@ pip install intel-extension-for-transformers
 ~~~python
 from auto_round import AutoRoundConfig ##must import for autoround format
 from transformers import AutoModelForCausalLM, AutoTokenizer
+import transformers
+# https://github.com/huggingface/transformers/pull/35493
+def set_initialized_submodules(model, state_dict_keys):
+    """
+    Sets the `_is_hf_initialized` flag in all submodules of a given model when all its weights are in the loaded state
+    dict.
+    """
+    state_dict_keys = set(state_dict_keys)
+    not_initialized_submodules = {}
+    for module_name, module in model.named_modules():
+        if module_name == "":
+            # When checking if the root module is loaded there's no need to prepend module_name.
+            module_keys = set(module.state_dict())
+        else:
+            module_keys = {f"{module_name}.{k}" for k in module.state_dict()}
+        if module_keys.issubset(state_dict_keys):
+            module._is_hf_initialized = True
+        else:
+            not_initialized_submodules[module_name] = module
+    return not_initialized_submodules
+
+transformers.modeling_utils.set_initialized_submodules = set_initialized_submodules
+
 import torch
 
 quantized_model_dir = "OPEA/DeepSeek-V3-int4-sym-awq-inc"
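Note: the added patch assigns to `transformers.modeling_utils`, so it only runs if the `transformers` module itself is imported first (the `import transformers` line above); without it, the assignment would raise a `NameError`. As a rough sketch of how the patched example is meant to be used, assuming the README continues with a standard `from_pretrained` loading flow; the arguments and prompt below are illustrative assumptions, not the README's verbatim continuation:

~~~python
# Hedged sketch: assumes a standard from_pretrained load of the int4
# checkpoint after the monkey patch; every argument below is an
# illustrative guess, not the README's exact code.
import torch
import transformers
from auto_round import AutoRoundConfig  ##must import for autoround format
from transformers import AutoModelForCausalLM, AutoTokenizer

# ... apply the set_initialized_submodules monkey patch from the diff above ...

quantized_model_dir = "OPEA/DeepSeek-V3-int4-sym-awq-inc"

model = AutoModelForCausalLM.from_pretrained(
    quantized_model_dir,
    torch_dtype=torch.float16,  # assumption: half precision for the int4 weights
    device_map="auto",          # assumption: shard across available devices
    trust_remote_code=True,     # DeepSeek-V3 ships custom modeling code
)
tokenizer = AutoTokenizer.from_pretrained(quantized_model_dir, trust_remote_code=True)

prompt = "There is a girl who likes adventure,"  # sample prompt, not from the diff
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
outputs = model.generate(**inputs, max_new_tokens=50)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
~~~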