Fill-Mask · Transformers · PyTorch · Safetensors · bert · custom_code
robinzixuan committed (verified) · Commit 9f4cfdc · 1 parent: 68311fa

Update modeling_bert.py

Files changed (1): modeling_bert.py (+5 −4)
modeling_bert.py CHANGED

@@ -4,6 +4,7 @@
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
+
 # You may obtain a copy of the License at
 #
 # http://www.apache.org/licenses/LICENSE-2.0

@@ -413,8 +414,8 @@ class BertOutEffHop(nn.Module):
         super().__init__()
         if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
             raise ValueError(
-                f"The hidden size ({
-                    config.hidden_size}) is not a multiple of the number of attention "
+                f'''The hidden size ({
+                    config.hidden_size}) is not a multiple of the number of attention '''
                 f"heads ({config.num_attention_heads})"
             )

@@ -820,8 +821,8 @@ class BertLayer(nn.Module):
         if self.is_decoder and encoder_hidden_states is not None:
             if not hasattr(self, "crossattention"):
                 raise ValueError(
-                    f"If `encoder_hidden_states` are passed, {
-                        self} has to be instantiated with cross-attention layers"
+                    f'''If `encoder_hidden_states` are passed, {
+                        self} has to be instantiated with cross-attention layers'''
                     " by setting `config.add_cross_attention=True`"
                 )
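
Note on the change: the previous revision wrapped double-quoted f-strings so that the line break fell inside the replacement field, which only parses on Python 3.12+ (PEP 701); on older interpreters the string literal cannot span lines, so importing the file raises a SyntaxError. Switching to triple-quoted f-strings keeps the wrapping but parses on older versions too, because the newline and indentation inside the braces belong to the expression, not to the rendered message. A minimal sketch with hypothetical stand-in values in place of the model config:

# Before Python 3.12, this wrapped double-quoted form is a SyntaxError:
#     msg = (
#         f"The hidden size ({
#             hidden_size}) is not a multiple of the number of attention "
#         f"heads ({num_attention_heads})"
#     )
# The triple-quoted f-string below parses on older versions as well.
hidden_size = 768           # hypothetical stand-in for config.hidden_size
num_attention_heads = 10    # hypothetical stand-in for config.num_attention_heads

msg = (
    f'''The hidden size ({
        hidden_size}) is not a multiple of the number of attention '''
    f"heads ({num_attention_heads})"
)
print(msg)
# -> The hidden size (768) is not a multiple of the number of attention heads (10)

The same substitution is applied to the cross-attention error message in BertLayer, so both messages render exactly as before once the file imports cleanly.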
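
For context on the first guard: BertOutEffHop appears to mirror the upstream BertSelfAttention check, where the hidden representation is split evenly across attention heads, so config.hidden_size must be divisible by config.num_attention_heads. A hedged sketch of the guard's intent, with hypothetical values:

hidden_size = 768
num_attention_heads = 12

if hidden_size % num_attention_heads != 0:
    raise ValueError(
        f"The hidden size ({hidden_size}) is not a multiple of the number of "
        f"attention heads ({num_attention_heads})"
    )

# In standard BERT, each head then receives an integer-sized slice.
attention_head_size = hidden_size // num_attention_heads
print(attention_head_size)  # 64 dimensions per head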
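
For context on the second guard: a decoder layer can only consume encoder_hidden_states if it was built with cross-attention layers. Assuming the standard transformers BertConfig API, which this custom modeling file appears to mirror, the configuration the error message asks for looks like this (a sketch, not taken from this repo):

from transformers import BertConfig

config = BertConfig(
    is_decoder=True,           # the layer acts as a decoder
    add_cross_attention=True,  # builds the `crossattention` sub-module the guard checks for
)
print(config.is_decoder, config.add_cross_attention)  # True True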