remove reference to deprecated transformers code

#74 by winglian - opened
Files changed (1)
  1. modeling_deepseek.py +1 -7
modeling_deepseek.py CHANGED
@@ -41,10 +41,7 @@ from transformers.modeling_outputs import (
     SequenceClassifierOutputWithPast,
 )
 from transformers.modeling_utils import PreTrainedModel
-from transformers.pytorch_utils import (
-    ALL_LAYERNORM_LAYERS,
-    is_torch_greater_or_equal_than_1_13,
-)
+from transformers.pytorch_utils import ALL_LAYERNORM_LAYERS
 from transformers.utils import (
     add_start_docstrings,
     add_start_docstrings_to_model_forward,
@@ -66,9 +63,6 @@ if is_flash_attn_2_available():
 # This makes `_prepare_4d_causal_attention_mask` a leaf function in the FX graph.
 # It means that the function will not be traced through and simply appear as a node in the graph.
 if is_torch_fx_available():
-    if not is_torch_greater_or_equal_than_1_13:
-        import torch.fx
-
     _prepare_4d_causal_attention_mask = torch.fx.wrap(_prepare_4d_causal_attention_mask)
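
For context: is_torch_greater_or_equal_than_1_13 has been removed from transformers.pytorch_utils in recent transformers releases, presumably why importing it fails on up-to-date installs; dropping the reference, as this PR does, matches upstream transformers removing its own torch < 1.13 fallback. A minimal alternative sketch (not part of this PR, and assuming any environment whose transformers no longer exports the helper is also running torch >= 1.13) would keep the file loadable on older releases by importing the symbol defensively instead:

from transformers.pytorch_utils import ALL_LAYERNORM_LAYERS

try:
    # Older transformers releases still export this compatibility helper.
    from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_13
except ImportError:
    # Assumption: a transformers build new enough to have dropped the helper is
    # installed alongside torch >= 1.13, so the legacy `import torch.fx` branch
    # guarded by this flag is never taken.
    is_torch_greater_or_equal_than_1_13 = True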