lanczos committed on
Commit bc8639a · verified · 1 Parent(s): 3e4bb7b

Upload folder using huggingface_hub

.gitignore ADDED
@@ -0,0 +1 @@
+ fabric*
config.json ADDED
@@ -0,0 +1,50 @@
+ {
+   "_name_or_path": "mistralai/mamba-codestral-7B-v0.1",
+   "architectures": [
+     "Mamba2ForCausalLM"
+   ],
+   "auto_map": {
+     "AutoConfig": "configuration_ibs2.IBS2Config",
+     "AutoModel": "modeling_ibs2.IBS2ForCausalLM",
+     "AutoModelForCausalLM": "modeling_ibs2.IBS2ForCausalLM"
+   },
+   "bos_token_id": 0,
+   "chunk_size": 256,
+   "conv_kernel": 4,
+   "eos_token_id": 0,
+   "expand": 2,
+   "head_dim": 64,
+   "hidden_act": "silu",
+   "hidden_size": 4096,
+   "ib_type": "gamma",
+   "initializer_range": 0.1,
+   "intermediate_size": 8192,
+   "layer_norm_epsilon": 1e-05,
+   "model_type": "ibs2",
+   "n_groups": 8,
+   "norm_before_gate": true,
+   "num_heads": 128,
+   "num_hidden_layers": 64,
+   "pad_token_id": 0,
+   "rescale_prenorm_residual": false,
+   "residual_in_fp32": true,
+   "rms_norm": true,
+   "state_size": 128,
+   "tie_word_embeddings": false,
+   "time_step_floor": 0.0001,
+   "time_step_init_scheme": "random",
+   "time_step_limit": [
+     0.0,
+     Infinity
+   ],
+   "time_step_max": 0.1,
+   "time_step_min": 0.001,
+   "time_step_rank": 256,
+   "time_step_scale": 1.0,
+   "torch_dtype": "float32",
+   "transformers_version": "4.43.3",
+   "use_bias": false,
+   "use_cache": true,
+   "use_conv_bias": true,
+   "vocab_size": 32768
+ }
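Since `config.json` routes the architecture through `auto_map` to the custom `configuration_ibs2.py` / `modeling_ibs2.py` classes below, loading requires `trust_remote_code`. A minimal loading sketch, assuming a placeholder repo path (substitute the id this repository is published under):

```python
from transformers import AutoConfig, AutoModelForCausalLM

# trust_remote_code=True makes transformers import configuration_ibs2.IBS2Config and
# modeling_ibs2.IBS2ForCausalLM from the repo instead of the built-in Mamba2 classes.
config = AutoConfig.from_pretrained("path/to/this-repo", trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained("path/to/this-repo", trust_remote_code=True)
```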
configuration_ibs2.py ADDED
@@ -0,0 +1,189 @@
+ # coding=utf-8
+ # Copyright 2024 The HuggingFace Inc. team.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ """MAMBA2 configuration"""
+
+ import math
+
+ from transformers.configuration_utils import PretrainedConfig
+ from transformers.utils import logging
+
+
+ logger = logging.get_logger(__name__)
+
+
+ class IBS2Config(PretrainedConfig):
+     """
+     This is the configuration class to store the configuration of a [`Mamba2Model`]. It is used to instantiate a MAMBA2
+     model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
+     defaults will yield a similar configuration to that of the MAMBA2
+     [state-spaces/mamba2-2.8b](https://huggingface.co/state-spaces/mamba2-2.8b) architecture.
+
+     Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+     documentation from [`PretrainedConfig`] for more information.
+
+
+     Args:
+         num_heads (`int`, *optional*, defaults to 128):
+             Number of heads for the evolution matrices of mamba 2.
+         head_dim (`int`, *optional*, defaults to 64):
+             Dimension of each head.
+         vocab_size (`int`, *optional*, defaults to 32768):
+             Vocabulary size of the MAMBA2 model. Defines the number of different tokens that can be represented by the
+             `inputs_ids` passed when calling [`Mamba2Model`].
+         hidden_size (`int`, *optional*, defaults to 4096):
+             Dimensionality of the embeddings and hidden states.
+         state_size (`int`, *optional*, defaults to 128): shape of the state space latents.
+         num_hidden_layers (`int`, *optional*, defaults to 64):
+             Number of hidden layers in the model.
+         layer_norm_epsilon (`float`, *optional*, defaults to 1e-05):
+             The epsilon to use in the layer normalization layers.
+         pad_token_id (`int`, *optional*, defaults to 1):
+             Padding token id.
+         bos_token_id (`int`, *optional*, defaults to 0):
+             The id of the beginning of sentence token in the vocabulary.
+         eos_token_id (`int`, *optional*, defaults to 2):
+             The id of the end of sentence token in the vocabulary.
+         expand (`int`, *optional*, defaults to 2): Expanding factor used to determine the intermediate size.
+         conv_kernel (`int`, *optional*, defaults to 4): Size of the convolution kernel.
+         n_groups (`int`, *optional*, defaults to 8):
+             Number of groups for the evolution matrices of mamba 2.
+         use_bias (`bool`, *optional*, defaults to `False`):
+             Whether or not to use bias in ["in_proj", "out_proj"] of the mixer block.
+         use_conv_bias (`bool`, *optional*, defaults to `True`):
+             Whether or not to use bias in the convolution layer of the mixer block.
+         hidden_act (`str`, *optional*, defaults to `"silu"`):
+             The non-linear activation function (function or string) in the decoder.
+         initializer_range (`float`, *optional*, defaults to 0.1):
+             The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+         residual_in_fp32 (`bool`, *optional*, defaults to `True`):
+             Whether or not residuals should be in `float32`. If set to `False` residuals will keep the same `dtype` as the rest of the model.
+         time_step_rank (`Union[int,str]`, *optional*, defaults to `"auto"`):
+             Rank of the discretization projection matrix. `"auto"` means that it will default to `math.ceil(self.hidden_size / 16)`.
+         time_step_min (`float`, *optional*, defaults to 0.001):
+             Minimum `time_step` used to bound `dt_proj.bias`.
+         time_step_max (`float`, *optional*, defaults to 0.1):
+             Maximum `time_step` used to bound `dt_proj.bias`.
+         time_step_floor (`float`, *optional*, defaults to 0.0001):
+             Minimum clamping value of the `dt_proj.bias` layer initialization.
+         time_step_limit (`tuple`, *optional*, defaults to `(0.0, inf)`):
+             Accepted range of time step values.
+         rescale_prenorm_residual (`bool`, *optional*, defaults to `False`):
+             Whether or not to rescale `out_proj` weights when initializing.
+         use_cache (`bool`, *optional*, defaults to `True`):
+             Whether or not the cache should be used.
+         rms_norm (`bool`, *optional*, defaults to `True`):
+             Whether to use RMS norm or not.
+         chunk_size (`int`, *optional*, defaults to 256):
+             Size of the chunks that will comprise the sequence.
+         tie_word_embeddings (`bool`, *optional*, defaults to `False`):
+             Whether to tie word embeddings or not.
+
+
+     Example:
+
+     ```python
+     >>> from transformers import Mamba2Config, Mamba2Model
+
+     >>> # Initializing a Mamba2 configuration
+     >>> configuration = Mamba2Config()
+
+     >>> # Initializing a model (with random weights) from the configuration
+     >>> model = Mamba2Model(configuration)
+
+     >>> # Accessing the model configuration
+     >>> configuration = model.config
+     ```"""
+
+     model_type = "ibs2"
+
+     def __init__(
+         self,
+         num_classes=1,
+         ib_type=None,
+         return_attn=False,
+         num_heads=128,
+         head_dim=64,
+         vocab_size=32768,
+         hidden_size=4096,
+         state_size=128,
+         num_hidden_layers=64,
+         layer_norm_epsilon=1e-5,
+         pad_token_id=1,
+         bos_token_id=0,
+         eos_token_id=2,
+         expand=2,
+         conv_kernel=4,
+         n_groups=8,
+         use_bias=False,
+         use_conv_bias=True,
+         hidden_act="silu",
+         initializer_range=0.1,
+         residual_in_fp32=True,
+         time_step_rank="auto",
+         time_step_min=0.001,
+         time_step_max=0.1,
+         time_step_floor=1e-4,
+         time_step_limit=(0.0, float("inf")),
+         rescale_prenorm_residual=False,
+         use_cache=True,
+         rms_norm=True,
+         chunk_size=256,
+         tie_word_embeddings=False,
+         **kwargs,
+     ):
+         self.num_classes = num_classes
+         self.ib_type = ib_type
+         self.return_attn = return_attn
+         self.vocab_size = vocab_size
+         self.hidden_size = hidden_size
+         self.state_size = state_size
+         self.num_hidden_layers = num_hidden_layers
+         self.layer_norm_epsilon = layer_norm_epsilon
+         self.conv_kernel = conv_kernel
+         self.expand = expand
+
+         self.bos_token_id = bos_token_id
+         self.eos_token_id = eos_token_id
+         self.pad_token_id = pad_token_id
+         self.use_bias = use_bias
+         self.use_conv_bias = use_conv_bias
+         self.hidden_act = hidden_act
+         self.initializer_range = initializer_range
+         self.time_step_rank = math.ceil(self.hidden_size / 16) if time_step_rank == "auto" else time_step_rank
+         self.time_step_min = time_step_min
+         self.time_step_max = time_step_max
+         self.time_step_floor = time_step_floor
+         self.rescale_prenorm_residual = rescale_prenorm_residual
+         self.residual_in_fp32 = residual_in_fp32
+         self.use_cache = use_cache
+         self.n_groups = n_groups
+         self.num_heads = num_heads
+         self.head_dim = head_dim
+         self.rms_norm = rms_norm
+         self.state_size = state_size
+         self.chunk_size = chunk_size
+         self.time_step_limit = time_step_limit
+         self.tie_word_embeddings = tie_word_embeddings
+
+         super().__init__(
+             bos_token_id=bos_token_id,
+             eos_token_id=eos_token_id,
+             pad_token_id=pad_token_id,
+             tie_word_embeddings=tie_word_embeddings,
+             **kwargs,
+         )
+
+
+ __all__ = ["IBS2Config"]
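The IBS-specific knobs here are `ib_type` and `return_attn`; note that the `ib_type=None` default must be overridden, since the mixer in `modeling_ibs2.py` asserts it is either `"bernoulli"` or `"gamma"`. A quick instantiation sketch, assuming the file is importable locally:

```python
from configuration_ibs2 import IBS2Config

# ib_type must be set explicitly; Mamba2Mixer asserts ib_type in ['bernoulli', 'gamma']
config = IBS2Config(ib_type="gamma")
print(config.time_step_rank)  # "auto" resolves to math.ceil(4096 / 16) = 256
```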
generation_config.json ADDED
@@ -0,0 +1,7 @@
+ {
+   "_from_model_config": true,
+   "bos_token_id": 0,
+   "eos_token_id": 2,
+   "pad_token_id": 1,
+   "transformers_version": "4.43.3"
+ }
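Note the token ids here (`bos=0, eos=2, pad=1`) follow the tokenizer convention, while `config.json` above keeps all three at 0. Reusing `model` from the loading sketch above, a hypothetical generation call (prompt and repo path are placeholders):

```python
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("path/to/this-repo")
inputs = tok("def fibonacci(n):", return_tensors="pt")
# eos_token_id=2 / pad_token_id=1 are picked up from generation_config.json by default
out = model.generate(inputs.input_ids, max_new_tokens=64)
print(tok.decode(out[0], skip_special_tokens=True))
```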
modeling_ibs2.py ADDED
@@ -0,0 +1,1457 @@
+ # coding=utf-8
+ # Copyright 2024 state-spaces/mamba2 org and HuggingFace Inc. team.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ """PyTorch MAMBA2 model."""
+
+ import math
+ from dataclasses import dataclass
+ from typing import Optional, Tuple, Union
+
+ import torch
+ import torch.utils.checkpoint
+ from torch import nn
+ from torch.nn import CrossEntropyLoss
+
+ from transformers.activations import ACT2FN
+ from transformers.generation import GenerationMixin
+ from transformers.modeling_utils import PreTrainedModel
+ from transformers.utils import (
+     ModelOutput,
+     add_code_sample_docstrings,
+     add_start_docstrings,
+     add_start_docstrings_to_model_forward,
+     logging,
+ )
+ from transformers.utils.import_utils import is_causal_conv1d_available, is_torch_available, _is_package_available, version
+ from .configuration_ibs2 import IBS2Config
+
+
+ def is_mamba_2_ssm_available():
+     if is_torch_available():
+         import torch
+
+         if not torch.cuda.is_available():
+             return False
+         else:
+             if _is_package_available("mamba_ssm"):
+                 import mamba_ssm
+
+                 if version.parse(mamba_ssm.__version__) >= version.parse("2.0.4"):
+                     return True
+     return False
+
+
+ logger = logging.get_logger(__name__)
+
+
+ if is_mamba_2_ssm_available():
+     from mamba_ssm.ops.triton.selective_state_update import selective_state_update
+     from mamba_ssm.ops.triton.ssd_combined import mamba_chunk_scan_combined, mamba_split_conv1d_scan_combined
+ else:
+     mamba_chunk_scan_combined, mamba_split_conv1d_scan_combined, selective_state_update = None, None, None
+
+ if is_causal_conv1d_available():
+     from causal_conv1d import causal_conv1d_fn, causal_conv1d_update
+ else:
+     causal_conv1d_update, causal_conv1d_fn = None, None
+
+ is_fast_path_available = all(
+     (
+         selective_state_update,
+         mamba_chunk_scan_combined,
+         mamba_split_conv1d_scan_combined,
+         causal_conv1d_fn,
+         causal_conv1d_update,
+     )
+ )
+
+ _CHECKPOINT_FOR_DOC = "mistralai/mamba-codestral-7B-v0.1"
+ _CONFIG_FOR_DOC = "Mamba2Config"
+
+
+ # Helper methods for segment sum computation
+
+
+ def pad_tensor_by_size(input_tensor: torch.Tensor, pad_size: int):
+     """
+     Padding x tensor with `pad_size` on the seq_len dim (dim=1)
+
+     Assumes that we only have tensors of either size 4 or 3
+     """
+     pad_shape = (0, 0, 0, 0, 0, pad_size, 0, 0) if len(input_tensor.shape) == 4 else (0, 0, 0, pad_size, 0, 0)
+
+     return torch.nn.functional.pad(input_tensor, pad_shape, mode="constant", value=0)
+
+
+ def reshape_into_chunks(input_tensor, pad_size, chunk_size):
+     """
+     Padding input_tensor with `pad_size` on the seq_len dim (dim=1) and
+     simultaneously splitting it into chunk sequences.
+
+     Assumes that we only have tensors of either size 4 or 3
+     """
+     # [bsz, seq_len, ...] -> [bsz, seq_len multiple of chunk_size, ...]
+     input_tensor = pad_tensor_by_size(input_tensor, pad_size)
+
+     if len(input_tensor.shape) == 3:
+         # [bsz, seq_len multiple of chunk_size, num_heads] -> [bsz, -1, chunk_size, num_heads]
+         return input_tensor.reshape(input_tensor.shape[0], -1, chunk_size, input_tensor.shape[2])
+     else:
+         # [bsz, seq_len multiple of chunk_size, num_heads, head_dim or state_size] -> [bsz, -1, chunk_size, num_heads, head_dim or state_size]
+         return input_tensor.reshape(
+             input_tensor.shape[0], -1, chunk_size, input_tensor.shape[2], input_tensor.shape[3]
+         )
+
+
+ def segment_sum(input_tensor):
+     """
+     More stable segment sum calculation. Uses cumulative sums and masking instead of direct subtractions.
+     """
+     chunk_size = input_tensor.size(-1)
+     # 1. expand input tensor to have an additional dimension and repeat along that dimension
+     # [..., chunk_size] -> [..., chunk_size, chunk_size]
+     input_tensor = input_tensor[..., None].expand(*input_tensor.size(), chunk_size)
+     # 2. create a lower triangular mask with the diagonal set to 0 to zero out elements above the diagonal
+     mask = torch.tril(torch.ones(chunk_size, chunk_size, device=input_tensor.device, dtype=torch.bool), diagonal=-1)
+     input_tensor = input_tensor.masked_fill(~mask, 0)
+     # 3. compute actual cumsum
+     tensor_segsum = torch.cumsum(input_tensor, dim=-2)
+
+     # 4. apply mask to keep only the lower triangular part of the cumulative sum result (incl diagonal this time)
+     mask = torch.tril(torch.ones(chunk_size, chunk_size, device=input_tensor.device, dtype=torch.bool), diagonal=0)
+     tensor_segsum = tensor_segsum.masked_fill(~mask, -torch.inf)
+     return tensor_segsum
+
+
+ def apply_mask_to_padding_states(hidden_states, attention_mask):
+     """
+     Tunes out the hidden states for padding tokens, see https://github.com/state-spaces/mamba/issues/66
+     """
+     if attention_mask is not None and attention_mask.shape[1] > 1 and attention_mask.shape[0] > 1:
+         dtype = hidden_states.dtype
+         hidden_states = (hidden_states * attention_mask[:, :, None]).to(dtype)
+
+     return hidden_states
+
+
+ class Mamba2Cache:
+     """
+     Arguments:
+         config: Mamba2Config
+         batch_size: int
+         dtype: torch.dtype
+         device: torch.device
+
+     Attributes:
+         dtype: (`torch.dtype`):
+             The default `dtype` used when initializing the cache.
+         conv_kernel_size: (`int`):
+             Model's convolution kernel size taken from config.
+         n_groups: (`int`):
+             Model's number of groups taken from the config - similar to tensor parallel in Transformer.
+         state_size: (`int`):
+             Model's SSM state size taken from config.
+         num_heads: (`int`):
+             The number of heads used in the linear attention / SSM.
+         head_dim: (`int`):
+             The respective dimension of the heads used in the linear attention / SSM.
+         intermediate_size: (`int`):
+             Model's intermediate_size based on (expand * hidden_dim) from config.
+         conv_states: (`torch.Tensor`):
+             A tensor of shape `[num_layers, batch_size, intermediate_size + 2 * n_groups * state_size, conv_kernel_size]` that holds convolutional states.
+         ssm_states: (`torch.Tensor`):
+             A tensor of shape `[num_layers, batch_size, num_heads, head_dim, state_size]` that holds ssm states.
+     """
+
+     def __init__(
+         self, config: IBS2Config, batch_size: int, dtype: torch.dtype = torch.float16, device: Optional[str] = None
+     ):
+         self.dtype = dtype
+         self.conv_kernel_size = config.conv_kernel
+         self.n_groups = config.n_groups
+         self.state_size = config.state_size
+         self.num_heads = config.num_heads
+         self.head_dim = config.head_dim
+         self.intermediate_size = int(config.expand * config.hidden_size)
+
+         self.conv_states = torch.zeros(
+             config.num_hidden_layers,
+             batch_size,
+             self.intermediate_size + 2 * self.n_groups * self.state_size,
+             self.conv_kernel_size,
+             device=device,
+             dtype=dtype,
+         )
+         self.ssm_states = torch.zeros(
+             config.num_hidden_layers,
+             batch_size,
+             self.num_heads,
+             self.head_dim,
+             self.state_size,
+             device=device,
+             dtype=dtype,
+         )
+
+     def update_conv_state(
+         self, layer_idx: int, new_conv_state: torch.Tensor, cache_init: bool = False
+     ) -> torch.Tensor:
+         if cache_init:
+             self.conv_states[layer_idx] = new_conv_state.to(self.conv_states.device)
+         else:
+             self.conv_states[layer_idx] = self.conv_states[layer_idx].roll(shifts=-1, dims=-1)
+             self.conv_states[layer_idx][:, :, -1] = new_conv_state[:, 0, :].to(self.conv_states.device)
+         return self.conv_states[layer_idx]
+
+     def update_ssm_state(self, layer_idx: int, new_ssm_state: torch.Tensor):
+         self.ssm_states[layer_idx] = new_ssm_state.to(self.ssm_states.device)
+         return self.ssm_states[layer_idx]
+
+     def reset(self):
+         self.conv_states.zero_()
+         self.ssm_states.zero_()
+
+
+ class MambaRMSNormGated(torch.nn.Module):
+     def __init__(self, hidden_size, eps=1e-6):
+         super().__init__()
+         self.weight = nn.Parameter(torch.ones(hidden_size))
+         self.variance_epsilon = eps
+
+     def forward(self, hidden_states, gate=None):
+         input_dtype = hidden_states.dtype
+         hidden_states = hidden_states.to(torch.float32)
+
+         if gate is not None:
+             hidden_states = hidden_states * nn.functional.silu(gate.to(torch.float32))
+         variance = hidden_states.pow(2).mean(-1, keepdim=True)
+         hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
+
+         return self.weight * hidden_states.to(input_dtype)
+
+
+ class Normalize(nn.Module):
+     def __init__(self, min_value=None, max_value=None):
+         super().__init__()
+         self.min_value = min_value
+         self.max_value = max_value
+
+     def forward(self, value_states):
+         # Compute the minimum and maximum of value_states
+         min_val = value_states.min(dim=-1, keepdim=True).values
+         max_val = value_states.max(dim=-1, keepdim=True).values
+
+         # If min_value or max_value is None, normalize using the other one
+         if self.min_value is None and self.max_value is not None:
+             # Normalize using max_value only
+             scale_factor = self.max_value / (max_val + 1e-6)
+             return value_states * scale_factor, scale_factor, None
+         elif self.max_value is None and self.min_value is not None:
+             # Normalize using min_value only
+             scale_factor = self.min_value / (min_val + 1e-6)
+             return value_states * scale_factor, scale_factor, None
+         elif self.min_value is not None and self.max_value is not None:
+             # Normalize using both min_value and max_value
+             scale_factor = (self.max_value - self.min_value) / (max_val - min_val + 1e-6)
+             shift_factor = self.min_value - min_val * scale_factor
+             normalized_value_states = value_states * scale_factor + shift_factor
+             return normalized_value_states, scale_factor, shift_factor
+         else:
+             # If both min_value and max_value are None, do not normalize
+             return value_states, None, None
+
+
+ # torch.lgamma(n) - torch.lgamma(theta) + (theta - n) * torch.special.digamma(theta)
+ class GammaIB(nn.Module):
+     def __init__(self, hidden_size, alphas=None, return_attn=False, **kwargs) -> None:
+         super().__init__()
+         self.alphas = alphas
+         # self.attributor = nn.Linear(hidden_size, 1)
+         self.hidden_size = hidden_size
+         self._auxiliary_loss = 0
+         self.epoch_frac = 0
+         self.epoch_threshold = -1
+         self.normalizer = Normalize(max_value=10, min_value=0.1)
+         self.return_attn = return_attn
+         self._attn = None
+
+     def get_auxiliary_loss(self):
+         loss = self._auxiliary_loss
+         self._auxiliary_loss = 0.0
+         # print("auxiliary_loss", loss)
+         return loss
+
+     def init_alphas(self, param_alphas):
+         # shape: [bsz, seq_len, 1]
+         if self.alphas is None:
+             maximum = 8
+         else:
+             maximum = param_alphas.size(1) / self.alphas.size(0) * self.alphas.max().item()
+         length = param_alphas.shape[1]
+         alphas = torch.linspace(maximum, 1, steps=length).float().to(param_alphas.device)  # distance-decay: torch.linspace(1, 2, steps=length)
+         self.alphas = alphas
+
+     def compute_loss(self, param_alphas, epsilon=1e-6):
+         if self.alphas is None:
+             self.init_alphas(param_alphas)  # length is the second dimension of att
+             print(f"Gamma prior alpha first: {self.alphas[0]}, last: {self.alphas[-1]}, size: {self.alphas.size(0)}")
+         if self.alphas.size(0) != param_alphas.size(1):
+             self.init_alphas(param_alphas)  # length is the second dimension of att
+             print(f"Gamma prior alpha first: {self.alphas[0]}, last: {self.alphas[-1]}, size: {self.alphas.size(0)}")
+
+         params = self.alphas.unsqueeze(-1).expand(param_alphas.shape)
+         reg_loss = (torch.lgamma(self.alphas) - torch.lgamma(params) + (params - self.alphas) * torch.digamma(params)).mean()
+
+         return reg_loss
+
+     def forward(self, states):
+         # hidden_states shape [bsz, seq_length, dimension]
+         hidden_states, alphas = torch.split(
+             states,
+             [self.hidden_size, 1],
+             dim=-1
+         )
+         if self.epoch_frac < self.epoch_threshold:
+             return hidden_states
+         # alphas = F.rms_norm(alphas, [hidden_states.shape[1], 1])
+         #! HACK: for finetuning we zero-init and reparametrize to ensure the initialization is one
+         alphas = nn.functional.softplus(alphas) - torch.log(torch.tensor(2.0)) + torch.tensor(1.0)
+         # if self.return_attn:
+         #     # print(alphas.shape)
+         #     if alphas.shape[-2] == 1:
+         #         pass  # seqlen == 1 indicates it is caching
+         #     else:
+         #         self._attn = alphas.detach().cpu()
+
+         shaped_alphas = alphas.expand(-1, -1, hidden_states.shape[2])
+         if self.training:
+             self._auxiliary_loss = self.compute_loss(alphas)
+             value_states = hidden_states.abs()  # ensure value_states is positive
+             normalized_value_states, scale_factor, shift_factor = self.normalizer(value_states)
+
+             betas = torch.reciprocal(normalized_value_states)
+             sign_states = torch.sign(hidden_states)
+             gamma_dist = torch.distributions.gamma.Gamma(shaped_alphas, betas)
+             samples = gamma_dist.rsample()
+             # Restore the original scale
+             if shift_factor is not None:
+                 time_states = samples * sign_states / scale_factor - shift_factor / scale_factor
+             else:
+                 time_states = samples * sign_states / scale_factor
+         else:
+             time_states = shaped_alphas * hidden_states  # directly use the expectation
+
+         if self.return_attn:
+             # print(alphas.shape)
+             if time_states.shape[-2] == 1:
+                 pass  # seqlen == 1 indicates it is caching
+             else:
+                 self._attn = torch.exp(-time_states).mean(dim=-1).detach().cpu()
+
+         return time_states
+
+ class BernoulliIB(nn.Module):
+     def __init__(self, hidden_size, temp=1, thetas=None, max_seqlen=1024, return_attn=False, **kwargs) -> None:
+         super().__init__()
+         self.epoch_frac = 0
+         self.epoch_threshold = 0
+         self.temp = temp
+         self.thetas = thetas
+         self.hidden_size = hidden_size
+         # self.attributor = nn.Linear(hidden_size, 1, bias=True)
+         self._auxiliary_loss = 0
+         self.max_seqlen = 4096
+         self.return_attn = return_attn
+         self._attn = None
+
+     def init_thetas(self, attn):
+         # Create a tensor with sequence positions [0, 1, ..., length-1]
+         # length = attn.shape[1]
+         seq_len = attn.shape[1]
+         if seq_len <= self.max_seqlen:
+             positions = torch.arange(self.max_seqlen).float().to(attn.device)
+         else:
+             positions = torch.arange(self.max_seqlen - seq_len, self.max_seqlen).float().to(attn.device)
+
+         # Define the exponential decay function
+         # decay_factor = 0.3 + 0.4 * torch.exp(positions / length - 1)  # extrapolable distance-decay (-\infty: 0.3, 0: 0.5, length: 0.7)
+         decay_factor = 0.8 - 0.6 * torch.exp(positions / self.max_seqlen - 1)  # extrapolable distance-balance (-\infty: 0.7, 0: 0.5, length: 0.3)
+
+         # Make the decay factor repeat across the batch dimension
+         self.thetas = decay_factor
+         return decay_factor
+
+     # def get_token_saliency(self):
+     #     attn = self._attn
+     #     self._attn = None
+     #     return attn
+
+     def get_auxiliary_loss(self):
+         loss = self._auxiliary_loss
+         self._auxiliary_loss = 0.0
+         return loss
+
+     def compute_loss(self, att, epsilon=1e-6):
+         if self.thetas is None:
+             thetas = self.init_thetas(att)  # length is the second dimension of att
+             print(f"Bernoulli prior theta first: {self.thetas[0]:.2f}, last: {self.thetas[-1]:.2f}, size: {self.thetas.size(0)}")
+
+         if self.thetas.size(0) >= att.size(1):
+             thetas = self.thetas[-att.size(1):]
+         elif self.thetas.size(0) < att.size(1):
+             thetas = self.init_thetas(att)
+             print(f"Bernoulli prior theta first: {self.thetas[0]:.2f}, last: {self.thetas[-1]:.2f}, size: {self.thetas.size(0)}")
+
+         thetas = thetas.unsqueeze(-1).expand(att.shape)
+         # Calculate the regularization loss
+         reg_loss = (att * torch.log(att / thetas + epsilon) +
+                     (1 - att) * torch.log((1 - att) / (1 - thetas + epsilon) + epsilon)).mean()
+
+         return reg_loss
+
+     def forward(self, states):
+         # hidden_states shape [bsz, seq_length, dimension]
+         hidden_states, attn = torch.split(
+             states,
+             [self.hidden_size, 1],
+             dim=-1
+         )
+         # attn = self.attributor(hidden_states)
+         #! HACK: for finetuning, we zero-init and re-parameterize as plus 1
+         attn = attn + torch.tensor(1.0)
+         if self.epoch_frac < self.epoch_threshold:
+             return hidden_states
+         if self.training:
+             # Gumbel softmax
+             random_noise = torch.empty_like(attn).uniform_(1e-10, 1 - 1e-10)
+             random_noise = torch.log(random_noise) - torch.log(1.0 - random_noise)
+             attn_bern = ((attn + random_noise) / self.temp).sigmoid()
+         else:
+             attn_bern = (attn).sigmoid()
+         self._auxiliary_loss = self.compute_loss(attn_bern)
+         if self.return_attn:
+             if attn_bern.shape[-2] == 1:
+                 pass  # seqlen == 1 indicates it is caching
+             else:
+                 self._attn = attn_bern.detach().cpu()
+         return hidden_states * attn_bern
+
+
+ class Mamba2Mixer(nn.Module):
+     """
+     Compute ∆, A, B, C, and D the state space parameters and compute the `contextualized_states`.
+     A, D are input independent (see Mamba paper [1] Section 3.5.2 "Interpretation of A" for why A isn't selective)
+     ∆, B, C are input-dependent (this is a key difference between Mamba and the linear time invariant S4,
+     and is why Mamba is called **selective** state spaces)
+     """
+
+     def __init__(self, config: IBS2Config, layer_idx: int):
+         super().__init__()
+         self.num_heads = config.num_heads
+         self.hidden_size = config.hidden_size
+         self.ssm_state_size = config.state_size
+         self.conv_kernel_size = config.conv_kernel
+         self.intermediate_size = int(config.expand * self.hidden_size)
+         self.time_step_rank = int(config.time_step_rank)
+         self.layer_idx = layer_idx
+         self.use_conv_bias = config.use_conv_bias
+         self.activation = config.hidden_act
+         self.act = ACT2FN[config.hidden_act]
+
+         self.layer_norm_epsilon = config.layer_norm_epsilon
+         self.rms_norm = config.rms_norm
+
+         self.n_groups = config.n_groups
+         self.head_dim = config.head_dim
+         self.chunk_size = config.chunk_size
+
+         self.time_step_limit = config.time_step_limit
+         self.time_step_min = config.time_step_min
+         self.time_step_max = config.time_step_max
+
+         self.conv_dim = self.intermediate_size + 2 * self.n_groups * self.ssm_state_size
+         self.conv1d = nn.Conv1d(
+             in_channels=self.conv_dim,
+             out_channels=self.conv_dim,
+             bias=config.use_conv_bias,
+             kernel_size=config.conv_kernel,
+             groups=self.conv_dim,
+             padding=config.conv_kernel - 1,
+         )
+
+         # projection of the input hidden states
+         #! HACK ib_dim
+         self._attn = None
+         self.return_attn = config.return_attn
+         assert config.ib_type in ['bernoulli', 'gamma'], "Invalid IB Prior."
+         IB_cls = BernoulliIB if config.ib_type == 'bernoulli' else GammaIB if config.ib_type == 'gamma' else None
+         self.ib4dt = IB_cls(self.num_heads, return_attn=config.return_attn) if self.layer_idx in [0, 31, 63] else None
+
+         self.ib_proj = nn.Linear(
+             self.hidden_size,
+             1,
+             bias=False,
+         ) if self.ib4dt else None
+         projection_size = self.intermediate_size + self.conv_dim + self.num_heads
+         self.in_proj = nn.Linear(
+             self.hidden_size,
+             projection_size,
+             bias=config.use_bias,
+         )
+         # selective projection used to make dt, B and C input-dependent
+
+         # time step projection (discretization)
+         # instantiate once and copy inv_dt in init_weights of PretrainedModel
+         self.dt_bias = nn.Parameter(torch.ones(self.num_heads))
+
+         # S4D real initialization. These are not discretized!
+         # The core is to load them, compute the discrete states, then write the updated state. Keeps the memory bounded
+         A = torch.arange(1, self.num_heads + 1)
+         self.A_log = nn.Parameter(torch.log(A))
+         self.A_log._no_weight_decay = True
+         self.norm = MambaRMSNormGated(self.intermediate_size, eps=self.layer_norm_epsilon)
+         self.D = nn.Parameter(torch.ones(self.num_heads))
+         self.D._no_weight_decay = True
+
+         self.out_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=config.use_bias)
+         self.use_bias = config.use_bias
+
+         if not is_fast_path_available:
+             logger.warning_once(
+                 "The fast path is not available because one of `(selective_state_update, causal_conv1d_fn, causal_conv1d_update)`"
+                 " is None. Falling back to the naive implementation. To install follow https://github.com/state-spaces/mamba/#installation and"
+                 " https://github.com/Dao-AILab/causal-conv1d"
+             )
+
+     def get_token_saliency(self):
+         attn = self._attn
+         self._attn = None
+         return attn
+
+     def cuda_kernels_forward(
+         self,
+         hidden_states: torch.Tensor,
+         cache_params: Optional[Mamba2Cache] = None,
+         cache_position: Optional[torch.LongTensor] = None,
+         attention_mask: Optional[torch.Tensor] = None,
+     ):
+         # 1. Gated MLP's linear projection
+         hidden_states = apply_mask_to_padding_states(hidden_states, attention_mask)
+         projected_states = self.in_proj(hidden_states)
+
+         #! HACK IBS apply
+         if self.ib4dt:
+             ib_state = self.ib_proj(hidden_states)
+             dim = self.head_dim * self.num_heads
+             zx, BC, dt = torch.split(projected_states, [dim * 2, self.n_groups * self.ssm_state_size * 2, self.num_heads], dim=-1)
+             dt = self.ib4dt(torch.cat([dt, ib_state], dim=-1))
+             projected_states = torch.cat([zx, BC, dt], dim=-1)
+             if self.return_attn and dt.shape[-2] != 1:
+                 dt_plus = nn.functional.softplus(dt + self.dt_bias)
+                 dA = (dt_plus * (-torch.exp(self.A_log.float())))
+                 # dA = torch.exp(dA)
+                 attn = dA.mean(dim=-1)  # - 0.1 * dA.std(dim=-1)  # attn shape [batch_size, seqlen]
+                 self._attn = attn
+
+         # Set up dimensions for reshapes later
+         batch_size, seq_len, _ = hidden_states.shape
+         groups_time_state_size = self.n_groups * self.ssm_state_size
+         d_mlp = (
+             projected_states.shape[-1]
+             - 2 * self.intermediate_size
+             - 2 * self.n_groups * self.ssm_state_size
+             - self.num_heads
+         ) // 2
+
+         # Single step calculations via cache
+         if cache_params is not None and cache_position is not None and cache_position[0] > 0:
+             _, _, gate, hidden_states_B_C, dt = projected_states.squeeze(1).split(
+                 [d_mlp, d_mlp, self.intermediate_size, self.conv_dim, self.num_heads], dim=-1
+             )
+
+             # 2. Convolution sequence transformation
+             hidden_states_B_C = causal_conv1d_update(
+                 hidden_states_B_C,
+                 cache_params.conv_states[self.layer_idx],
+                 self.conv1d.weight.squeeze(1),
+                 self.conv1d.bias,
+                 self.activation,
+             )
+
+             hidden_states, B, C = torch.split(
+                 hidden_states_B_C,
+                 [self.intermediate_size, groups_time_state_size, groups_time_state_size],
+                 dim=-1,
+             )
+
+             # 3. SSM transformation
+             A = -torch.exp(self.A_log.float())  # (nheads,)
+             A = A[:, None, ...][:, :, None].expand(-1, self.head_dim, self.ssm_state_size).to(dtype=torch.float32)
+             dt = dt[:, :, None].expand(-1, -1, self.head_dim)
+             dt_bias = self.dt_bias[:, None, ...].expand(-1, self.head_dim)
+             D = self.D[:, None, ...].expand(-1, self.head_dim)
+             B = B.view(batch_size, self.n_groups, B.shape[1] // self.n_groups)
+             C = C.view(batch_size, self.n_groups, C.shape[1] // self.n_groups)
+             hidden_states_reshaped = hidden_states.view(batch_size, self.num_heads, self.head_dim)
+             hidden_states = selective_state_update(
+                 cache_params.ssm_states[self.layer_idx],
+                 hidden_states_reshaped,
+                 dt,
+                 A,
+                 B,
+                 C,
+                 D,
+                 z=None,
+                 dt_bias=dt_bias,
+                 dt_softplus=True,
+             )
+             hidden_states = hidden_states.view(batch_size, self.num_heads * self.head_dim)
+             hidden_states = self.norm(hidden_states, gate)
+
+             # 4. Final linear projection
+             out = self.out_proj(hidden_states)[:, None, ...]
+
+         # Fused calculations or step by step if no initialized cache is found
+         else:
+             A = -torch.exp(self.A_log.float())  # (num_heads) or (intermediate_size, state_size)
+             dt_limit_kwargs = {} if self.time_step_limit == (0.0, float("inf")) else {"dt_limit": self.time_step_limit}
+
+             # 2-4. Fused kernel for conv1d, SSM, and the final projection
+             if self.training and cache_params is None:
+                 out = mamba_split_conv1d_scan_combined(
+                     projected_states,
+                     self.conv1d.weight.squeeze(1),
+                     self.conv1d.bias,
+                     self.dt_bias,
+                     A,
+                     D=self.D,
+                     chunk_size=self.chunk_size,
+                     seq_idx=None,  # was seq_idx
+                     activation=self.activation,
+                     rmsnorm_weight=self.norm.weight,
+                     rmsnorm_eps=self.norm.variance_epsilon,
+                     outproj_weight=self.out_proj.weight,
+                     outproj_bias=self.out_proj.bias,
+                     headdim=self.head_dim,
+                     ngroups=self.n_groups,
+                     norm_before_gate=False,
+                     return_final_states=False,
+                     **dt_limit_kwargs,
+                 )
+
+             else:
+                 _, _, gate, hidden_states_B_C, dt = projected_states.split(
+                     [d_mlp, d_mlp, self.intermediate_size, self.conv_dim, self.num_heads], dim=-1
+                 )
+
+                 # 2. Convolution sequence transformation
+                 # Init cache
+                 if cache_params is not None:
+                     hidden_states_B_C_transposed = hidden_states_B_C.transpose(1, 2)
+                     conv_states = nn.functional.pad(
+                         hidden_states_B_C_transposed,
+                         (cache_params.conv_kernel_size - hidden_states_B_C_transposed.shape[-1], 0),
+                     )
+                     cache_params.update_conv_state(
+                         layer_idx=self.layer_idx, new_conv_state=conv_states, cache_init=True
+                     )
+
+                 if self.activation not in ["silu", "swish"]:
+                     hidden_states_B_C = self.act(
+                         self.conv1d(hidden_states_B_C.transpose(1, 2))[..., :seq_len].transpose(1, 2)
+                     )
+                 else:
+                     hidden_states_B_C = causal_conv1d_fn(
+                         x=hidden_states_B_C.transpose(1, 2),
+                         weight=self.conv1d.weight.squeeze(1),
+                         bias=self.conv1d.bias,
+                         activation=self.activation,
+                     ).transpose(1, 2)
+
+                 hidden_states_B_C = apply_mask_to_padding_states(hidden_states_B_C, attention_mask)
+                 hidden_states, B, C = torch.split(
+                     hidden_states_B_C,
+                     [self.intermediate_size, groups_time_state_size, groups_time_state_size],
+                     dim=-1,
+                 )
+
+                 # 3. SSM transformation
+                 scan_output, ssm_state = mamba_chunk_scan_combined(
+                     hidden_states.view(batch_size, seq_len, -1, self.head_dim),
+                     dt,
+                     A,
+                     B.view(batch_size, seq_len, self.n_groups, -1),
+                     C.view(batch_size, seq_len, self.n_groups, -1),
+                     chunk_size=self.chunk_size,
+                     D=self.D,
+                     z=None,
+                     seq_idx=None,
+                     return_final_states=True,
+                     dt_bias=self.dt_bias,
+                     dt_softplus=True,
+                     **dt_limit_kwargs,
+                 )
+
+                 # Init cache
+                 if ssm_state is not None and cache_params is not None:
+                     cache_params.update_ssm_state(layer_idx=self.layer_idx, new_ssm_state=ssm_state)
+
+                 scan_output = scan_output.view(batch_size, seq_len, -1)
+                 # Multiply "gate" branch and apply extra normalization layer
+                 scan_output = self.norm(scan_output, gate)
+
+                 # 4. Final linear projection
+                 out = self.out_proj(scan_output)
+         return out
+
+     # fmt: off
+     def torch_forward(self, input_states, cache_params: Optional[Mamba2Cache] = None, cache_position: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.Tensor] = None):
+         batch_size, seq_len, _ = input_states.shape
+         dtype = input_states.dtype
+
+         # 1. Gated MLP's linear projection
+         input_states = apply_mask_to_padding_states(input_states, attention_mask)
+         projected_states = self.in_proj(input_states)
+
+         if self.ib4dt:
+             attn = self.ib_proj(input_states)
+             dim = self.head_dim * self.num_heads
+             zx, BC, dt = torch.split(projected_states, [dim * 2, self.n_groups * self.ssm_state_size * 2, self.num_heads], dim=-1)
+             dt = self.ib4dt(torch.cat([dt, attn], dim=-1))
+             projected_states = torch.cat([zx, BC, dt], dim=-1)
+
+         d_mlp = (projected_states.shape[-1] - 2 * self.intermediate_size - 2 * self.n_groups * self.ssm_state_size - self.num_heads) // 2
+         _, _, gate, hidden_states_B_C, dt = projected_states.split(
+             [d_mlp, d_mlp, self.intermediate_size, self.conv_dim, self.num_heads], dim=-1
+         )
+
+         # 2. Convolution sequence transformation
+         if cache_params is not None and cache_position is not None and cache_position[0] > 0:
+             cache_params.update_conv_state(layer_idx=self.layer_idx, new_conv_state=hidden_states_B_C, cache_init=False)
+
+             # We need to guarantee that anything regarding the cache is on the same device
+             conv_states = cache_params.conv_states[self.layer_idx].to(device=self.conv1d.weight.device)
+
+             hidden_states_B_C = torch.sum(
+                 conv_states * self.conv1d.weight.squeeze(1), dim=-1
+             )
+             if self.use_conv_bias:
+                 hidden_states_B_C = hidden_states_B_C + self.conv1d.bias
+             hidden_states_B_C = self.act(hidden_states_B_C)
+         else:
+             # Init cache
+             if cache_params is not None:
+                 hidden_states_B_C_transposed = hidden_states_B_C.transpose(1, 2)
+                 conv_states = nn.functional.pad(
+                     hidden_states_B_C_transposed, (cache_params.conv_kernel_size - hidden_states_B_C_transposed.shape[-1], 0)
+                 )
+                 cache_params.update_conv_state(layer_idx=self.layer_idx, new_conv_state=conv_states, cache_init=True)
+
+             hidden_states_B_C = self.act(self.conv1d(hidden_states_B_C.transpose(1, 2))[..., :seq_len].transpose(1, 2))
+
+         hidden_states_B_C = apply_mask_to_padding_states(hidden_states_B_C, attention_mask)
+         hidden_states, B, C = torch.split(
+             hidden_states_B_C,
+             [self.intermediate_size, self.n_groups * self.ssm_state_size, self.n_groups * self.ssm_state_size],
+             dim=-1
+         )
+
+         # 3. SSM transformation
+         A = -torch.exp(self.A_log.float())  # [num_heads]
+         if cache_params is not None and cache_position is not None and cache_position[0] > 0:
+             # We need to guarantee that anything regarding the cache is on the same device
+             cache_device = cache_params.ssm_states.device
+
+             # Note: there is no need to pad parameter matrices here, as there is just one new token
+             # for batched generation
+             dt = dt[:, 0, :][:, None, ...]
+             dt = dt.transpose(1, 2).expand(batch_size, dt.shape[-1], self.head_dim)
+             # [num_heads] -> [num_heads, head_dim]
+             dt_bias = self.dt_bias[..., None].expand(self.dt_bias.shape[0], self.head_dim)
+
+             dt = torch.nn.functional.softplus(dt + dt_bias.to(dt.dtype))
+             dt = torch.clamp(dt, self.time_step_limit[0], self.time_step_limit[1])
+             A = A[..., None, None].expand(self.num_heads, self.head_dim, self.ssm_state_size).to(dtype=torch.float32)
+             # [bsz, num_heads, head_dim, state_size]
+             dA = (torch.exp(dt[..., None] * A)).to(device=cache_device)
+
+             # Discretize B
+             # [bsz, n_groups * state_size] -> [bsz, n_groups, 1, state_size] ->
+             # -> [bsz, n_groups, group to head repetition factor, state_size] -> [bsz, num_heads, state_size]
+             B = B.reshape(batch_size, self.n_groups, -1)[..., None, :]
+             B = B.expand(batch_size, self.n_groups, self.num_heads // self.n_groups, B.shape[-1]).contiguous()
+             B = B.reshape(batch_size, -1, B.shape[-1])
+             # [bsz, num_heads, head_dim, state_size]
+             dB = dt[..., None] * B[..., None, :]
+
+             # Discretize x into dB
+             # [bsz, intermediate_size] -> [bsz, num_heads, head_dim]
+             hidden_states = hidden_states.reshape(batch_size, -1, self.head_dim)
+             dBx = (dB * hidden_states[..., None]).to(device=cache_device)
+
+             # State calculation
+             cache_params.update_ssm_state(
+                 layer_idx=self.layer_idx,
+                 new_ssm_state=cache_params.ssm_states[self.layer_idx] * dA + dBx
+             )
+
+             # Subsequent output
+             # [bsz, n_groups * state_size] -> [bsz, num_heads, state_size]
+             C = C.reshape(batch_size, self.n_groups, -1)[..., None, :]
+             C = C.expand(batch_size, self.n_groups, self.num_heads // self.n_groups, C.shape[-1]).contiguous()
+             C = C.reshape(batch_size, -1, C.shape[-1])
+             # [bsz, num_heads, head_dim]
+
+             ssm_states = cache_params.ssm_states[self.layer_idx].to(device=C.device, dtype=C.dtype)  # Shape: [b, h, d, n]
+             # Reshape ssm_states to merge the first two dimensions
+             ssm_states_reshaped = ssm_states.view(batch_size * self.num_heads, self.head_dim, self.ssm_state_size)  # Shape: [b*h, d, n]
+             C_reshaped = C.view(batch_size * self.num_heads, self.ssm_state_size, 1)  # Shape: [b*h, n, 1]
+             y = torch.bmm(ssm_states_reshaped, C_reshaped)
+             y = y.view(batch_size, self.num_heads, self.head_dim)
+
+             # D skip connection
+             # [num_heads] -> [num_heads, head_dim]
+             D = self.D[..., None].expand(self.D.shape[0], self.head_dim)
+             y = (y + hidden_states * D).to(y.dtype)
+
+             # [bsz, num_heads, head_dim] -> [bsz, 1, intermediate_size]
+             y = y.reshape(batch_size, -1)[:, None, ...]
+         else:
+             # begin ssd naive implementation without einsums
+             dt = nn.functional.softplus(dt + self.dt_bias)
+             dt = torch.clamp(dt, self.time_step_limit[0], self.time_step_limit[1])
+             hidden_states = hidden_states.reshape(batch_size, seq_len, -1, self.head_dim).float()
+             B = B.reshape(batch_size, seq_len, -1, self.ssm_state_size).float()
+             C = C.reshape(batch_size, seq_len, -1, self.ssm_state_size).float()
+             B = B.repeat(1, 1, self.num_heads // self.n_groups, 1)
+             C = C.repeat(1, 1, self.num_heads // self.n_groups, 1)
+             pad_size = (self.chunk_size - seq_len % self.chunk_size) % self.chunk_size
+
+             D_residual = self.D[..., None] * pad_tensor_by_size(hidden_states, pad_size)
+
+             # Discretize x and A
+             hidden_states = hidden_states * dt[..., None]
+             A = A.to(hidden_states.dtype) * dt
+
+             # Rearrange into blocks/chunks
+             hidden_states, A, B, C = [reshape_into_chunks(t, pad_size, self.chunk_size) for t in (hidden_states, A, B, C)]
+
+             # [bsz, -1, chunk_size, num_heads] -> [bsz, num_heads, -1, chunk_size]
+             A = A.permute(0, 3, 1, 2)
+             A_cumsum = torch.cumsum(A, dim=-1)
+
+             # 1. Compute the output for each intra-chunk (diagonal blocks)
+             # This is the analog of a causal mask
+             L = torch.exp(segment_sum(A))
+
+             # Contraction of C and B to get G (attention-weights like)
+             G_intermediate = C[:, :, :, None, :, :] * B[:, :, None, :, :, :]  # shape: (b, c, l, s, h, n)
+             G = G_intermediate.sum(dim=-1)  # shape: (b, c, l, s, h)
+
+             # Compute M, equivalent to applying attention mask to weights
+             M_intermediate = G[..., None] * L.permute(0, 2, 3, 4, 1)[..., None]
+             M = M_intermediate.sum(dim=-1)
+
+             # Compute Y_diag (apply to values)
+             Y_diag = (M[..., None] * hidden_states[:, :, None]).sum(dim=3)
+
+             # 2. Compute the state for each intra-chunk
+             # (right term of low-rank factorization of off-diagonal blocks; B terms)
+             decay_states = torch.exp((A_cumsum[:, :, :, -1:] - A_cumsum))
+             B_decay = B * decay_states.permute(0, -2, -1, 1)[..., None]
+             states = (B_decay[..., None, :] * hidden_states[..., None]).sum(dim=2)
+
+             # 3. Compute the inter-chunk SSM recurrence; produces correct SSM states at chunk boundaries
+             # (middle term of factorization of off-diag blocks; A terms)
+             if cache_params is not None and cache_position is not None and cache_position[0] > 0:
+                 previous_states = cache_params.ssm_states[self.layer_idx][:, None, ...].to(device=states.device)
+             else:
+                 previous_states = torch.zeros_like(states[:, :1])
+             states = torch.cat([previous_states, states], dim=1)
+             decay_chunk = torch.exp(segment_sum(nn.functional.pad(A_cumsum[:, :, :, -1], (1, 0))))
+             decay_chunk = decay_chunk.transpose(1, 3)
+             new_states = (decay_chunk[..., None, None] * states[:, :, None, ...]).sum(dim=1)
+             states, ssm_state = new_states[:, :-1], new_states[:, -1]
+
+             # 4. Compute state -> output conversion per chunk
+             # (left term of low-rank factorization of off-diagonal blocks; C terms)
+             state_decay_out = torch.exp(A_cumsum)
+             C_times_states = (C[..., None, :] * states[:, :, None, ...])
+             state_decay_out_permuted = state_decay_out.permute(0, 2, 3, 1)
+             Y_off = (C_times_states.sum(-1) * state_decay_out_permuted[..., None])
+
+             # Add output of intra-chunk and inter-chunk terms (diagonal and off-diagonal blocks)
+             y = Y_diag + Y_off
+             # [bsz, -1, self.chunk_size, num_heads, head_dim] -> [bsz, (padded) seq_len, num_heads, head_dim]
+             y = y.reshape(batch_size, -1, self.num_heads, self.head_dim)
+
+             y = y + D_residual
+             # Cutting off padded chunks
+             if pad_size > 0:
+                 y = y[:, :seq_len, :, :]
+             y = y.reshape(batch_size, seq_len, -1)
+
+             # Init cache
+             if ssm_state is not None and cache_params is not None:
+                 cache_params.update_ssm_state(layer_idx=self.layer_idx, new_ssm_state=ssm_state)
+
+         scan_output = self.norm(y, gate)
+
+         # end ssd naive
+
+         # 4. Final linear projection
+         contextualized_states = self.out_proj(scan_output.to(dtype))  # [batch, seq_len, hidden_size]
+         return contextualized_states
+     # fmt: on
+
+     def forward(
+         self,
+         hidden_states,
+         cache_params: Optional[Mamba2Cache] = None,
+         cache_position: Optional[torch.LongTensor] = None,
+         attention_mask: Optional[torch.Tensor] = None,
+     ):
+         if is_fast_path_available and "cuda" in self.in_proj.weight.device.type:
+             return self.cuda_kernels_forward(hidden_states, cache_params, cache_position, attention_mask)
+         dtype = hidden_states.dtype
+         if attention_mask is not None and attention_mask.shape[1] > 1 and attention_mask.shape[0] > 1:
+             # tune out hidden states for pad tokens, see https://github.com/state-spaces/mamba/issues/66
+             hidden_states = (hidden_states * attention_mask[:, :, None]).to(dtype)
+
+         return self.torch_forward(hidden_states, cache_params, cache_position, attention_mask)
+
+
+ class Mamba2RMSNorm(nn.Module):
+     def __init__(self, hidden_size, eps=1e-6):
+         """
+         Mamba2RMSNorm is equivalent to T5LayerNorm and LlamaRMSNorm
+         """
+         super().__init__()
+         self.weight = nn.Parameter(torch.ones(hidden_size))
+         self.variance_epsilon = eps
+
+     def forward(self, hidden_states):
+         input_dtype = hidden_states.dtype
+         hidden_states = hidden_states.to(torch.float32)
+         variance = hidden_states.pow(2).mean(-1, keepdim=True)
+         hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
+         return self.weight * hidden_states.to(input_dtype)
+
+
+ class IBS2Block(nn.Module):
+     def __init__(self, config, layer_idx):
+         super().__init__()
+         self.config = config
+         self.layer_idx = layer_idx
+         self.residual_in_fp32 = config.residual_in_fp32
+         self.norm = Mamba2RMSNorm(config.hidden_size, eps=config.layer_norm_epsilon)
+         self.mixer = Mamba2Mixer(config, layer_idx=layer_idx)
+
+     def forward(
+         self,
+         hidden_states,
+         cache_params: Optional[Mamba2Cache] = None,
+         cache_position: Optional[torch.LongTensor] = None,
+         attention_mask: Optional[torch.Tensor] = None,
+     ):
+         residual = hidden_states
+         hidden_states = self.norm(hidden_states.to(dtype=self.norm.weight.dtype))
+         if self.residual_in_fp32:
+             residual = residual.to(torch.float32)
+
+         hidden_states = self.mixer(
+             hidden_states, cache_params=cache_params, cache_position=cache_position, attention_mask=attention_mask
+         )
+         hidden_states = residual + hidden_states
+         return hidden_states
+
+
+ class Mamba2PreTrainedModel(PreTrainedModel):
+     """
+     An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
+     models.
+     """
+
+     config_class = IBS2Config
+     base_model_prefix = "backbone"
+     _no_split_modules = ["IBS2Block"]
+     supports_gradient_checkpointing = True
+     _is_stateful = True
+
+     def _init_weights(self, module):
+         """Initialize the weights."""
+         if isinstance(module, Mamba2Mixer):
+             #! HACK
+             if getattr(module, "ib_proj", None) is not None:
+                 nn.init.zeros_(module.ib_proj.weight)
+
+             module.A_log._no_weight_decay = True
+             module.D._no_weight_decay = True
+
+             dt = torch.exp(
+                 torch.rand(self.config.num_heads)
+                 * (math.log(self.config.time_step_max) - math.log(self.config.time_step_min))
+                 + math.log(self.config.time_step_min)
+             ).clamp(min=self.config.time_step_floor)
+
+             # Inverse of softplus: https://github.com/pytorch/pytorch/issues/72759
+             inv_dt = dt + torch.log(-torch.expm1(-dt))
+             with torch.no_grad():
+                 module.dt_bias.copy_(inv_dt)
+             module.dt_bias._no_reinit = True
+
+         if isinstance(module, nn.Linear):
+             if module.bias is not None:
+                 if not getattr(module.bias, "_no_reinit", False):
+                     nn.init.zeros_(module.bias)
+         elif isinstance(module, nn.Embedding):
+             nn.init.normal_(module.weight, std=self.config.initializer_range)
+
+         if self.config.rescale_prenorm_residual:
+             # Reinitialize selected weights subject to the OpenAI GPT-2 Paper Scheme:
+             # > A modified initialization which accounts for the accumulation on the residual path with model depth. Scale
+             # > the weights of residual layers at initialization by a factor of 1/√N where N is the # of residual layers.
+             # > -- GPT-2 :: https://openai.com/blog/better-language-models/
+             #
+             # Reference (Megatron-LM): https://github.com/NVIDIA/Megatron-LM/blob/main/megatron/model/gpt_model.py
+             for name, p in module.named_parameters():
+                 if name in ["out_proj.weight"]:
+                     # Special Scaled Initialization --> There are 2 Layer Norms per Transformer Block
+                     # Following Pytorch init, except scale by 1/sqrt(2 * n_layer)
+                     # We need to reinit p since this code could be called multiple times
+                     # Having just p *= scale would repeatedly scale it down
+                     nn.init.kaiming_uniform_(p, a=math.sqrt(5))
+                     with torch.no_grad():
+                         p /= math.sqrt(self.config.num_hidden_layers)
+
1037
+ @dataclass
+ # Copied from transformers.models.mamba.modeling_mamba.MambaOutput with MAMBA->MAMBA2,Mamba->Mamba2
+ class Mamba2Output(ModelOutput):
+     """
+     Class for the MAMBA2 model outputs.
+
+     Args:
+         last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
+             Sequence of hidden-states at the output of the last layer of the model.
+         cache_params (`Mamba2Cache`):
+             The state of the model at the last time step. Can be used in a forward method with the next `input_ids` to
+             avoid providing the old `input_ids`.
+
+             Includes both the state space model state matrices after the selective scan, and the convolutional states.
+         hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+             Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
+             one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
+
+             Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
+     """
+
+     last_hidden_state: Optional[torch.FloatTensor] = None
+     cache_params: Optional[Mamba2Cache] = None
+     hidden_states: Optional[Tuple[torch.FloatTensor]] = None
+
+
+ @dataclass
+ # Copied from transformers.models.mamba.modeling_mamba.MambaCausalLMOutput with Mamba->Mamba2
+ class Mamba2CausalLMOutput(ModelOutput):
+     """
+     Base class for causal language model (or autoregressive) outputs.
+
+     Args:
+         loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
+             Language modeling loss (for next-token prediction).
+         logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
+             Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
+         cache_params (`Mamba2Cache`):
+             The state of the model at the last time step. Can be used in a forward method with the next `input_ids` to
+             avoid providing the old `input_ids`.
+
+             Includes both the state space model state matrices after the selective scan, and the convolutional states.
+         hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+             Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
+             one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
+
+             Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
+     """
+
+     loss: Optional[torch.FloatTensor] = None
+     logits: Optional[torch.FloatTensor] = None
+     cache_params: Optional[Mamba2Cache] = None
+     hidden_states: Optional[Tuple[torch.FloatTensor]] = None
+
+
+ MAMBA2_START_DOCSTRING = r"""
+
+     This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
+     library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads,
+     etc.)
+
+     This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
+     Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage
+     and behavior.
+
+     Parameters:
+         config ([`Mamba2Config`]): Model configuration class with all the parameters of the model.
+             Initializing with a config file does not load the weights associated with the model, only the
+             configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
+ """
+
+ MAMBA2_INPUTS_DOCSTRING = r"""
+     Args:
+         input_ids (`torch.LongTensor` of shape `(batch_size, input_ids_length)`):
+             Indices of input sequence tokens in the vocabulary.
+
+             If `cache_params.seqlen_offset>0`, only `input_ids` that do not have their past calculated should be passed as
+             `input_ids`.
+
+             Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+             [`PreTrainedTokenizer.__call__`] for details.
+
+             [What are input IDs?](../glossary#input-ids)
+         inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
+             Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
+             is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
+             model's internal embedding lookup matrix.
+         cache_params (`Mamba2Cache`, *optional*):
+             If passed along, the model uses the previous state in all the blocks (which will give the output for the
+             `input_ids` provided as if the model adds `state_input_ids + input_ids` as context).
+         use_cache (`bool`, *optional*):
+             If set to `True`, the `cache_params` is returned and can be used to quickly generate the next logits.
+         output_hidden_states (`bool`, *optional*):
+             Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+             more detail.
+         return_dict (`bool`, *optional*):
+             Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+         cache_position (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+             The position of the current input in the cache. This is used to ensure that the cache is correctly updated.
+             If `cache_params` is passed, `cache_position` should also be passed.
+         attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
+             Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+             - 1 for tokens that are **not masked**,
+             - 0 for tokens that are **masked**.
+
+             [What are attention masks?](../glossary#attention-mask)
+ """
+
+
+ @add_start_docstrings(
+     "The bare MAMBA2 Model transformer outputting raw hidden-states without any specific head on top.",
+     MAMBA2_START_DOCSTRING,
+ )
+ class IBS2Model(Mamba2PreTrainedModel):
+     def __init__(self, config):
+         super().__init__(config)
+
+         self.embeddings = nn.Embedding(config.vocab_size, config.hidden_size)
+         self.layers = nn.ModuleList([IBS2Block(config, layer_idx=idx) for idx in range(config.num_hidden_layers)])
+
+         self.gradient_checkpointing = False
+         self.norm_f = Mamba2RMSNorm(config.hidden_size, eps=config.layer_norm_epsilon)
+         # Initialize weights and apply final processing
+         self._register_load_state_dict_pre_hook(self.load_hook)
+         self.post_init()
+
+     def load_hook(self, state_dict, prefix, *args):
+         for k in state_dict:
+             if "embedding." in k:
+                 state_dict[k.replace("embedding.", "embeddings.")] = state_dict.pop(k)
+                 break
+
+     def get_input_embeddings(self):
+         return self.embeddings
+
+     def set_input_embeddings(self, new_embeddings):
+         self.embeddings = new_embeddings
+
+     @add_start_docstrings_to_model_forward(MAMBA2_INPUTS_DOCSTRING)
+     @add_code_sample_docstrings(
+         checkpoint=_CHECKPOINT_FOR_DOC,
+         output_type=Mamba2Output,
+         config_class=_CONFIG_FOR_DOC,
+     )
+     def forward(
+         self,
+         input_ids: Optional[torch.LongTensor] = None,
+         inputs_embeds: Optional[torch.FloatTensor] = None,
+         cache_params: Optional[Mamba2Cache] = None,
+         use_cache: Optional[bool] = None,
+         output_hidden_states: Optional[bool] = None,
+         return_dict: Optional[bool] = None,
+         cache_position: Optional[torch.LongTensor] = None,
+         attention_mask: Optional[torch.Tensor] = None,
+         **kwargs,
+     ) -> Union[Tuple, Mamba2Output]:
+         output_hidden_states = (
+             output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+         )
+         use_cache = use_cache if use_cache is not None else (self.config.use_cache if not self.training else False)
+         return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+         if (input_ids is None) ^ (inputs_embeds is not None):  # ^ is Python's xor
+             raise ValueError("You must specify exactly one of input_ids or inputs_embeds")
+
+         if inputs_embeds is None:
+             inputs_embeds = self.embeddings(input_ids)
+
+         if self.gradient_checkpointing and self.training and use_cache:
+             use_cache = False
+
+         if use_cache:
+             if cache_params is None:
+                 cache_params = Mamba2Cache(
+                     self.config, inputs_embeds.size(0), device=inputs_embeds.device, dtype=inputs_embeds.dtype
+                 )
+                 cache_position = torch.arange(0, self.config.conv_kernel, device=inputs_embeds.device)
+             elif cache_position is None:
+                 # Covers manual forward calls made outside `model.generate`, which would
+                 # otherwise initialize `cache_position` and ensure it is not None; throw an
+                 # error here instead of using a hack to guess the current cache position.
+                 raise ValueError(
+                     "You have to specify the `cache_position` manually when `use_cache=True` and `cache_params` is passed, "
+                     "you don't have to pass a `cache_params` if you are in prefilling stage because in that case it will "
+                     "be initialized for you automatically"
+                 )
+         else:
+             cache_params = None
+
+         hidden_states = inputs_embeds
+         all_hidden_states = () if output_hidden_states else None
+         for mixer_block in self.layers:
+             if self.gradient_checkpointing and self.training:
+                 hidden_states = self._gradient_checkpointing_func(
+                     mixer_block.__call__, hidden_states, cache_params, cache_position, attention_mask
+                 )
+             else:
+                 hidden_states = mixer_block(
+                     hidden_states,
+                     cache_params=cache_params,
+                     cache_position=cache_position,
+                     attention_mask=attention_mask,
+                 )
+
+             if output_hidden_states:
+                 all_hidden_states = all_hidden_states + (hidden_states,)
+
+         hidden_states = self.norm_f(hidden_states)
+
+         if output_hidden_states:
+             all_hidden_states = all_hidden_states + (hidden_states,)
+
+         if not return_dict:
+             return tuple(v for v in [hidden_states, cache_params, all_hidden_states] if v is not None)
+
+         return Mamba2Output(
+             last_hidden_state=hidden_states,
+             cache_params=cache_params if use_cache else None,
+             hidden_states=all_hidden_states,
+         )
+
+
+ class IBS2ForClassification(Mamba2PreTrainedModel):
+     _tied_weights_keys = []
+
+     def __init__(self, config):
+         super().__init__(config)
+         self.backbone = IBS2Model(config)
+         self.cls_head = nn.Linear(config.hidden_size, config.num_classes, bias=False)
+         # Initialize weights and apply final processing
+         self.post_init()
+
+     def forward(
+         self,
+         input_ids: Optional[torch.LongTensor] = None,
+         inputs_embeds: Optional[torch.FloatTensor] = None,
+         cache_params: Optional[Mamba2Cache] = None,
+         labels: Optional[torch.LongTensor] = None,
+         output_hidden_states: Optional[bool] = None,
+         return_dict: Optional[bool] = None,
+         use_cache: Optional[bool] = None,
+         cache_position: Optional[torch.Tensor] = None,
+         attention_mask: Optional[torch.Tensor] = None,
+         **kwargs,  # for now we need this for generation
+     ):
+         return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+         mamba2_outputs = self.backbone(
+             input_ids,
+             cache_params=cache_params,
+             inputs_embeds=inputs_embeds,
+             output_hidden_states=output_hidden_states,
+             return_dict=return_dict,
+             use_cache=use_cache,
+             cache_position=cache_position,
+             attention_mask=attention_mask,
+         )
+         hidden_states = mamba2_outputs[0]
+
+         logits = self.cls_head(hidden_states.to(self.cls_head.weight.dtype)).float()
+
+         loss = None
+         if labels is not None:
+             labels = labels.to(logits.device)
+             loss_fct = CrossEntropyLoss()
+             loss = loss_fct(logits.view(-1, logits.size(-1)), labels.view(-1))
+
+         if not return_dict:
+             output = (logits,) + mamba2_outputs[1:]
+             return ((loss,) + output) if loss is not None else output
+
+         return Mamba2CausalLMOutput(
+             loss=loss,
+             logits=logits,
+             cache_params=mamba2_outputs.cache_params,
+             hidden_states=mamba2_outputs.hidden_states,
+         )
+
+
+ @add_start_docstrings(
+     """
+     The MAMBA2 Model transformer with a language modeling head on top (linear layer with weights not tied to the input
+     embeddings).
+     """,
+     MAMBA2_START_DOCSTRING,
+ )
+ class IBS2ForCausalLM(Mamba2PreTrainedModel, GenerationMixin):
+     _tied_weights_keys = []
+
+     def __init__(self, config):
+         super().__init__(config)
+         self.backbone = IBS2Model(config)
+         self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
+         # Initialize weights and apply final processing
+         self.post_init()
+
+     def get_output_embeddings(self):
+         return self.lm_head
+
+     def set_output_embeddings(self, new_embeddings):
+         self.lm_head = new_embeddings
+
+     def get_input_embeddings(self):
+         return self.backbone.get_input_embeddings()
+
+     def set_input_embeddings(self, new_embeddings):
+         return self.backbone.set_input_embeddings(new_embeddings)
+
+     def prepare_inputs_for_generation(
+         self,
+         input_ids,
+         inputs_embeds=None,
+         use_cache=None,
+         cache_params: Optional[Mamba2Cache] = None,
+         cache_position: Optional[torch.LongTensor] = None,
+         attention_mask: Optional[torch.Tensor] = None,
+         **kwargs,
+     ):
+         # Overwritten -- uses `cache_params` as opposed to `past_key_values`
+
+         if use_cache:
+             # `cache_position` should have been initialized in `generate`
+             if cache_position is None:
+                 raise ValueError(
+                     "`cache_position` should not be None as it should have been initialized in "
+                     "`model.generate`, you are responsible for passing in a valid `cache_position` if "
+                     "you are calling `prepare_inputs_for_generation` directly with `use_cache=True`"
+                 )
+             if cache_position[0] > 0:
+                 input_ids = input_ids[:, -1][..., None]
+
+                 if attention_mask is not None:
+                     attention_mask = None
+             else:
+                 # we initialize the `cache_position` to full size of `conv_states` at prefill stage
+                 # considering padding will be applied when input length is shorter, and truncation
+                 # will be applied when it is longer, so it will be equivalent to always have it match
+                 # the length of `cache_params.conv_states`, which is `config.conv_kernel`
+                 cache_position = torch.arange(0, self.config.conv_kernel, device=input_ids.device)
+
+         if inputs_embeds is not None and cache_params is None:
+             model_inputs = {"inputs_embeds": inputs_embeds}
+         else:
+             model_inputs = {"input_ids": input_ids}
+
+         model_inputs.update(
+             {
+                 "attention_mask": attention_mask,
+                 "cache_params": cache_params,
+                 "use_cache": use_cache,
+                 "cache_position": cache_position,
+             }
+         )
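+         # Note (added comment, not in the original file): after prefill,
+         # `cache_position[0] > 0`, so only the most recent token is fed forward;
+         # the conv and SSM states carried in `cache_params` already summarize
+         # the earlier tokens.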
+         return model_inputs
+
+     @add_start_docstrings_to_model_forward(MAMBA2_INPUTS_DOCSTRING)
+     @add_code_sample_docstrings(
+         checkpoint=_CHECKPOINT_FOR_DOC,
+         output_type=Mamba2CausalLMOutput,
+         config_class=_CONFIG_FOR_DOC,
+     )
+     def forward(
+         self,
+         input_ids: Optional[torch.LongTensor] = None,
+         inputs_embeds: Optional[torch.FloatTensor] = None,
+         cache_params: Optional[Mamba2Cache] = None,
+         labels: Optional[torch.LongTensor] = None,
+         output_hidden_states: Optional[bool] = None,
+         return_dict: Optional[bool] = None,
+         use_cache: Optional[bool] = None,
+         cache_position: Optional[torch.Tensor] = None,
+         attention_mask: Optional[torch.Tensor] = None,
+         **kwargs,  # for now we need this for generation
+     ) -> Union[Tuple, Mamba2CausalLMOutput]:
+         r"""
+         labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+             Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set
+             `labels = input_ids`. Indices are selected in `[-100, 0, ..., config.vocab_size]`. All labels set to `-100`
+             are ignored (masked); the loss is only computed for labels in `[0, ..., config.vocab_size]`.
+         """
+         return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+         mamba2_outputs = self.backbone(
+             input_ids,
+             cache_params=cache_params,
+             inputs_embeds=inputs_embeds,
+             output_hidden_states=output_hidden_states,
+             return_dict=return_dict,
+             use_cache=use_cache,
+             cache_position=cache_position,
+             attention_mask=attention_mask,
+         )
+         hidden_states = mamba2_outputs[0]
+
+         logits = self.lm_head(hidden_states.to(self.lm_head.weight.dtype)).float()
+
+         loss = None
+         if labels is not None:
+             # move labels to correct device to enable model parallelism
+             labels = labels.to(logits.device)
+             # Shift so that tokens < n predict n
+             shift_logits = logits[..., :-1, :].contiguous()
+             shift_labels = labels[..., 1:].contiguous()
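+             # Illustration (added comment, not in the original file): with labels
+             # [t0, t1, t2, t3], shift_logits keeps the predictions at positions 0..2
+             # and shift_labels is [t1, t2, t3], so the logit at position i is scored
+             # against token i + 1.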
+             # Flatten the tokens
+             loss_fct = CrossEntropyLoss()
+             loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1))
+
+         if not return_dict:
+             output = (logits,) + mamba2_outputs[1:]
+             return ((loss,) + output) if loss is not None else output
+
+         return Mamba2CausalLMOutput(
+             loss=loss,
+             logits=logits,
+             cache_params=mamba2_outputs.cache_params,
+             hidden_states=mamba2_outputs.hidden_states,
+         )
+
+
+ __all__ = ["IBS2ForCausalLM", "IBS2Model", "Mamba2PreTrainedModel", "IBS2Block", "IBS2ForClassification"]
+
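A minimal usage sketch (illustrative, not part of the commit): loading this checkpoint goes through the `auto_map` entries in its `config.json`, which point `AutoModelForCausalLM` at `IBS2ForCausalLM`, so `trust_remote_code=True` is required. The repo id below is a hypothetical placeholder, not the actual Hub path:

    from transformers import AutoModelForCausalLM, AutoTokenizer

    repo_id = "user/ibs2-checkpoint"  # placeholder: substitute this repo's Hub id
    tokenizer = AutoTokenizer.from_pretrained(repo_id)
    model = AutoModelForCausalLM.from_pretrained(repo_id, trust_remote_code=True)

    inputs = tokenizer("def fibonacci(n):", return_tensors="pt")
    output_ids = model.generate(**inputs, max_new_tokens=32)
    print(tokenizer.decode(output_ids[0]))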
pytorch_model-00001-of-00006.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7f87d5313fd2baea17acf628de1b2922c42563b4999c10e12dc3f8bb9073ee75
+ size 4922747462
pytorch_model-00002-of-00006.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:220fee199c4c00bd513b86585915fa89057a080aa9af6a0a6742a7b48711813a
+ size 4824198060
pytorch_model-00003-of-00006.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:245bf3b887b6ce842a330c220e26d5c5d74452a9a0feaa2dec460de4dea24242
+ size 4824214794
pytorch_model-00004-of-00006.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:834b44b5afc90469d12f4072c558db13aec273d6659a0fec88cd04d730917614
+ size 4824198060
pytorch_model-00005-of-00006.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6bf50bc90604dc0f7523a05dc68130e1e82ac89b9560208cccc5aca7cdc7814f
+ size 4824198060
pytorch_model-00006-of-00006.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8a981ad272e9ed171dd03a82b720ade1b1d35724dc0b61867c7a778fedc62f57
+ size 4922314420
pytorch_model.bin.index.json ADDED
@@ -0,0 +1,589 @@
+ {
+   "metadata": {
+     "total_size": 29141663744
+   },
+   "weight_map": {
+     "backbone.embeddings.weight": "pytorch_model-00001-of-00006.bin",
+     "backbone.layers.0.mixer.A_log": "pytorch_model-00001-of-00006.bin",
+     "backbone.layers.0.mixer.D": "pytorch_model-00001-of-00006.bin",
+     "backbone.layers.0.mixer.conv1d.bias": "pytorch_model-00001-of-00006.bin",
+     "backbone.layers.0.mixer.conv1d.weight": "pytorch_model-00001-of-00006.bin",
+     "backbone.layers.0.mixer.dt_bias": "pytorch_model-00001-of-00006.bin",
+     "backbone.layers.0.mixer.ib_proj.weight": "pytorch_model-00001-of-00006.bin",
+     "backbone.layers.0.mixer.in_proj.weight": "pytorch_model-00001-of-00006.bin",
+     "backbone.layers.0.mixer.norm.weight": "pytorch_model-00001-of-00006.bin",
+     "backbone.layers.0.mixer.out_proj.weight": "pytorch_model-00001-of-00006.bin",
+     "backbone.layers.0.norm.weight": "pytorch_model-00001-of-00006.bin",
+     "backbone.layers.1.mixer.A_log": "pytorch_model-00001-of-00006.bin",
+     "backbone.layers.1.mixer.D": "pytorch_model-00001-of-00006.bin",
+     "backbone.layers.1.mixer.conv1d.bias": "pytorch_model-00001-of-00006.bin",
+     "backbone.layers.1.mixer.conv1d.weight": "pytorch_model-00001-of-00006.bin",
+     "backbone.layers.1.mixer.dt_bias": "pytorch_model-00001-of-00006.bin",
+     "backbone.layers.1.mixer.in_proj.weight": "pytorch_model-00001-of-00006.bin",
+     "backbone.layers.1.mixer.norm.weight": "pytorch_model-00001-of-00006.bin",
+     "backbone.layers.1.mixer.out_proj.weight": "pytorch_model-00001-of-00006.bin",
+     "backbone.layers.1.norm.weight": "pytorch_model-00001-of-00006.bin",
+     "backbone.layers.10.mixer.A_log": "pytorch_model-00001-of-00006.bin",
+     "backbone.layers.10.mixer.D": "pytorch_model-00001-of-00006.bin",
+     "backbone.layers.10.mixer.conv1d.bias": "pytorch_model-00001-of-00006.bin",
+     "backbone.layers.10.mixer.conv1d.weight": "pytorch_model-00001-of-00006.bin",
+     "backbone.layers.10.mixer.dt_bias": "pytorch_model-00001-of-00006.bin",
+     "backbone.layers.10.mixer.in_proj.weight": "pytorch_model-00002-of-00006.bin",
+     "backbone.layers.10.mixer.norm.weight": "pytorch_model-00002-of-00006.bin",
+     "backbone.layers.10.mixer.out_proj.weight": "pytorch_model-00002-of-00006.bin",
+     "backbone.layers.10.norm.weight": "pytorch_model-00001-of-00006.bin",
+     "backbone.layers.11.mixer.A_log": "pytorch_model-00002-of-00006.bin",
+     "backbone.layers.11.mixer.D": "pytorch_model-00002-of-00006.bin",
+     "backbone.layers.11.mixer.conv1d.bias": "pytorch_model-00002-of-00006.bin",
+     "backbone.layers.11.mixer.conv1d.weight": "pytorch_model-00002-of-00006.bin",
+     "backbone.layers.11.mixer.dt_bias": "pytorch_model-00002-of-00006.bin",
+     "backbone.layers.11.mixer.in_proj.weight": "pytorch_model-00002-of-00006.bin",
+     "backbone.layers.11.mixer.norm.weight": "pytorch_model-00002-of-00006.bin",
+     "backbone.layers.11.mixer.out_proj.weight": "pytorch_model-00002-of-00006.bin",
+     "backbone.layers.11.norm.weight": "pytorch_model-00002-of-00006.bin",
+     "backbone.layers.12.mixer.A_log": "pytorch_model-00002-of-00006.bin",
+     "backbone.layers.12.mixer.D": "pytorch_model-00002-of-00006.bin",
+     "backbone.layers.12.mixer.conv1d.bias": "pytorch_model-00002-of-00006.bin",
+     "backbone.layers.12.mixer.conv1d.weight": "pytorch_model-00002-of-00006.bin",
+     "backbone.layers.12.mixer.dt_bias": "pytorch_model-00002-of-00006.bin",
+     "backbone.layers.12.mixer.in_proj.weight": "pytorch_model-00002-of-00006.bin",
+     "backbone.layers.12.mixer.norm.weight": "pytorch_model-00002-of-00006.bin",
+     "backbone.layers.12.mixer.out_proj.weight": "pytorch_model-00002-of-00006.bin",
+     "backbone.layers.12.norm.weight": "pytorch_model-00002-of-00006.bin",
+     "backbone.layers.13.mixer.A_log": "pytorch_model-00002-of-00006.bin",
+     "backbone.layers.13.mixer.D": "pytorch_model-00002-of-00006.bin",
+     "backbone.layers.13.mixer.conv1d.bias": "pytorch_model-00002-of-00006.bin",
+     "backbone.layers.13.mixer.conv1d.weight": "pytorch_model-00002-of-00006.bin",
+     "backbone.layers.13.mixer.dt_bias": "pytorch_model-00002-of-00006.bin",
+     "backbone.layers.13.mixer.in_proj.weight": "pytorch_model-00002-of-00006.bin",
+     "backbone.layers.13.mixer.norm.weight": "pytorch_model-00002-of-00006.bin",
+     "backbone.layers.13.mixer.out_proj.weight": "pytorch_model-00002-of-00006.bin",
+     "backbone.layers.13.norm.weight": "pytorch_model-00002-of-00006.bin",
+     "backbone.layers.14.mixer.A_log": "pytorch_model-00002-of-00006.bin",
+     "backbone.layers.14.mixer.D": "pytorch_model-00002-of-00006.bin",
+     "backbone.layers.14.mixer.conv1d.bias": "pytorch_model-00002-of-00006.bin",
+     "backbone.layers.14.mixer.conv1d.weight": "pytorch_model-00002-of-00006.bin",
+     "backbone.layers.14.mixer.dt_bias": "pytorch_model-00002-of-00006.bin",
+     "backbone.layers.14.mixer.in_proj.weight": "pytorch_model-00002-of-00006.bin",
+     "backbone.layers.14.mixer.norm.weight": "pytorch_model-00002-of-00006.bin",
+     "backbone.layers.14.mixer.out_proj.weight": "pytorch_model-00002-of-00006.bin",
+     "backbone.layers.14.norm.weight": "pytorch_model-00002-of-00006.bin",
+     "backbone.layers.15.mixer.A_log": "pytorch_model-00002-of-00006.bin",
+     "backbone.layers.15.mixer.D": "pytorch_model-00002-of-00006.bin",
+     "backbone.layers.15.mixer.conv1d.bias": "pytorch_model-00002-of-00006.bin",
+     "backbone.layers.15.mixer.conv1d.weight": "pytorch_model-00002-of-00006.bin",
+     "backbone.layers.15.mixer.dt_bias": "pytorch_model-00002-of-00006.bin",
+     "backbone.layers.15.mixer.in_proj.weight": "pytorch_model-00002-of-00006.bin",
+     "backbone.layers.15.mixer.norm.weight": "pytorch_model-00002-of-00006.bin",
+     "backbone.layers.15.mixer.out_proj.weight": "pytorch_model-00002-of-00006.bin",
+     "backbone.layers.15.norm.weight": "pytorch_model-00002-of-00006.bin",
+     "backbone.layers.16.mixer.A_log": "pytorch_model-00002-of-00006.bin",
+     "backbone.layers.16.mixer.D": "pytorch_model-00002-of-00006.bin",
+     "backbone.layers.16.mixer.conv1d.bias": "pytorch_model-00002-of-00006.bin",
+     "backbone.layers.16.mixer.conv1d.weight": "pytorch_model-00002-of-00006.bin",
+     "backbone.layers.16.mixer.dt_bias": "pytorch_model-00002-of-00006.bin",
+     "backbone.layers.16.mixer.in_proj.weight": "pytorch_model-00002-of-00006.bin",
+     "backbone.layers.16.mixer.norm.weight": "pytorch_model-00002-of-00006.bin",
+     "backbone.layers.16.mixer.out_proj.weight": "pytorch_model-00002-of-00006.bin",
+     "backbone.layers.16.norm.weight": "pytorch_model-00002-of-00006.bin",
+     "backbone.layers.17.mixer.A_log": "pytorch_model-00002-of-00006.bin",
+     "backbone.layers.17.mixer.D": "pytorch_model-00002-of-00006.bin",
+     "backbone.layers.17.mixer.conv1d.bias": "pytorch_model-00002-of-00006.bin",
+     "backbone.layers.17.mixer.conv1d.weight": "pytorch_model-00002-of-00006.bin",
+     "backbone.layers.17.mixer.dt_bias": "pytorch_model-00002-of-00006.bin",
+     "backbone.layers.17.mixer.in_proj.weight": "pytorch_model-00002-of-00006.bin",
+     "backbone.layers.17.mixer.norm.weight": "pytorch_model-00002-of-00006.bin",
+     "backbone.layers.17.mixer.out_proj.weight": "pytorch_model-00002-of-00006.bin",
+     "backbone.layers.17.norm.weight": "pytorch_model-00002-of-00006.bin",
+     "backbone.layers.18.mixer.A_log": "pytorch_model-00002-of-00006.bin",
+     "backbone.layers.18.mixer.D": "pytorch_model-00002-of-00006.bin",
+     "backbone.layers.18.mixer.conv1d.bias": "pytorch_model-00002-of-00006.bin",
+     "backbone.layers.18.mixer.conv1d.weight": "pytorch_model-00002-of-00006.bin",
+     "backbone.layers.18.mixer.dt_bias": "pytorch_model-00002-of-00006.bin",
+     "backbone.layers.18.mixer.in_proj.weight": "pytorch_model-00002-of-00006.bin",
+     "backbone.layers.18.mixer.norm.weight": "pytorch_model-00002-of-00006.bin",
+     "backbone.layers.18.mixer.out_proj.weight": "pytorch_model-00002-of-00006.bin",
+     "backbone.layers.18.norm.weight": "pytorch_model-00002-of-00006.bin",
+     "backbone.layers.19.mixer.A_log": "pytorch_model-00002-of-00006.bin",
+     "backbone.layers.19.mixer.D": "pytorch_model-00002-of-00006.bin",
+     "backbone.layers.19.mixer.conv1d.bias": "pytorch_model-00002-of-00006.bin",
+     "backbone.layers.19.mixer.conv1d.weight": "pytorch_model-00002-of-00006.bin",
+     "backbone.layers.19.mixer.dt_bias": "pytorch_model-00002-of-00006.bin",
+     "backbone.layers.19.mixer.in_proj.weight": "pytorch_model-00002-of-00006.bin",
+     "backbone.layers.19.mixer.norm.weight": "pytorch_model-00002-of-00006.bin",
+     "backbone.layers.19.mixer.out_proj.weight": "pytorch_model-00002-of-00006.bin",
+     "backbone.layers.19.norm.weight": "pytorch_model-00002-of-00006.bin",
+     "backbone.layers.2.mixer.A_log": "pytorch_model-00001-of-00006.bin",
+     "backbone.layers.2.mixer.D": "pytorch_model-00001-of-00006.bin",
+     "backbone.layers.2.mixer.conv1d.bias": "pytorch_model-00001-of-00006.bin",
+     "backbone.layers.2.mixer.conv1d.weight": "pytorch_model-00001-of-00006.bin",
+     "backbone.layers.2.mixer.dt_bias": "pytorch_model-00001-of-00006.bin",
+     "backbone.layers.2.mixer.in_proj.weight": "pytorch_model-00001-of-00006.bin",
+     "backbone.layers.2.mixer.norm.weight": "pytorch_model-00001-of-00006.bin",
+     "backbone.layers.2.mixer.out_proj.weight": "pytorch_model-00001-of-00006.bin",
+     "backbone.layers.2.norm.weight": "pytorch_model-00001-of-00006.bin",
+     "backbone.layers.20.mixer.A_log": "pytorch_model-00002-of-00006.bin",
+     "backbone.layers.20.mixer.D": "pytorch_model-00002-of-00006.bin",
+     "backbone.layers.20.mixer.conv1d.bias": "pytorch_model-00002-of-00006.bin",
+     "backbone.layers.20.mixer.conv1d.weight": "pytorch_model-00002-of-00006.bin",
+     "backbone.layers.20.mixer.dt_bias": "pytorch_model-00002-of-00006.bin",
+     "backbone.layers.20.mixer.in_proj.weight": "pytorch_model-00002-of-00006.bin",
+     "backbone.layers.20.mixer.norm.weight": "pytorch_model-00002-of-00006.bin",
+     "backbone.layers.20.mixer.out_proj.weight": "pytorch_model-00002-of-00006.bin",
+     "backbone.layers.20.norm.weight": "pytorch_model-00002-of-00006.bin",
+     "backbone.layers.21.mixer.A_log": "pytorch_model-00002-of-00006.bin",
+     "backbone.layers.21.mixer.D": "pytorch_model-00002-of-00006.bin",
+     "backbone.layers.21.mixer.conv1d.bias": "pytorch_model-00002-of-00006.bin",
+     "backbone.layers.21.mixer.conv1d.weight": "pytorch_model-00002-of-00006.bin",
+     "backbone.layers.21.mixer.dt_bias": "pytorch_model-00002-of-00006.bin",
+     "backbone.layers.21.mixer.in_proj.weight": "pytorch_model-00003-of-00006.bin",
+     "backbone.layers.21.mixer.norm.weight": "pytorch_model-00003-of-00006.bin",
+     "backbone.layers.21.mixer.out_proj.weight": "pytorch_model-00003-of-00006.bin",
+     "backbone.layers.21.norm.weight": "pytorch_model-00002-of-00006.bin",
+     "backbone.layers.22.mixer.A_log": "pytorch_model-00003-of-00006.bin",
+     "backbone.layers.22.mixer.D": "pytorch_model-00003-of-00006.bin",
+     "backbone.layers.22.mixer.conv1d.bias": "pytorch_model-00003-of-00006.bin",
+     "backbone.layers.22.mixer.conv1d.weight": "pytorch_model-00003-of-00006.bin",
+     "backbone.layers.22.mixer.dt_bias": "pytorch_model-00003-of-00006.bin",
+     "backbone.layers.22.mixer.in_proj.weight": "pytorch_model-00003-of-00006.bin",
+     "backbone.layers.22.mixer.norm.weight": "pytorch_model-00003-of-00006.bin",
+     "backbone.layers.22.mixer.out_proj.weight": "pytorch_model-00003-of-00006.bin",
+     "backbone.layers.22.norm.weight": "pytorch_model-00003-of-00006.bin",
+     "backbone.layers.23.mixer.A_log": "pytorch_model-00003-of-00006.bin",
+     "backbone.layers.23.mixer.D": "pytorch_model-00003-of-00006.bin",
+     "backbone.layers.23.mixer.conv1d.bias": "pytorch_model-00003-of-00006.bin",
+     "backbone.layers.23.mixer.conv1d.weight": "pytorch_model-00003-of-00006.bin",
+     "backbone.layers.23.mixer.dt_bias": "pytorch_model-00003-of-00006.bin",
+     "backbone.layers.23.mixer.in_proj.weight": "pytorch_model-00003-of-00006.bin",
+     "backbone.layers.23.mixer.norm.weight": "pytorch_model-00003-of-00006.bin",
+     "backbone.layers.23.mixer.out_proj.weight": "pytorch_model-00003-of-00006.bin",
+     "backbone.layers.23.norm.weight": "pytorch_model-00003-of-00006.bin",
+     "backbone.layers.24.mixer.A_log": "pytorch_model-00003-of-00006.bin",
+     "backbone.layers.24.mixer.D": "pytorch_model-00003-of-00006.bin",
+     "backbone.layers.24.mixer.conv1d.bias": "pytorch_model-00003-of-00006.bin",
+     "backbone.layers.24.mixer.conv1d.weight": "pytorch_model-00003-of-00006.bin",
+     "backbone.layers.24.mixer.dt_bias": "pytorch_model-00003-of-00006.bin",
+     "backbone.layers.24.mixer.in_proj.weight": "pytorch_model-00003-of-00006.bin",
+     "backbone.layers.24.mixer.norm.weight": "pytorch_model-00003-of-00006.bin",
+     "backbone.layers.24.mixer.out_proj.weight": "pytorch_model-00003-of-00006.bin",
+     "backbone.layers.24.norm.weight": "pytorch_model-00003-of-00006.bin",
+     "backbone.layers.25.mixer.A_log": "pytorch_model-00003-of-00006.bin",
+     "backbone.layers.25.mixer.D": "pytorch_model-00003-of-00006.bin",
+     "backbone.layers.25.mixer.conv1d.bias": "pytorch_model-00003-of-00006.bin",
+     "backbone.layers.25.mixer.conv1d.weight": "pytorch_model-00003-of-00006.bin",
+     "backbone.layers.25.mixer.dt_bias": "pytorch_model-00003-of-00006.bin",
+     "backbone.layers.25.mixer.in_proj.weight": "pytorch_model-00003-of-00006.bin",
+     "backbone.layers.25.mixer.norm.weight": "pytorch_model-00003-of-00006.bin",
+     "backbone.layers.25.mixer.out_proj.weight": "pytorch_model-00003-of-00006.bin",
+     "backbone.layers.25.norm.weight": "pytorch_model-00003-of-00006.bin",
+     "backbone.layers.26.mixer.A_log": "pytorch_model-00003-of-00006.bin",
+     "backbone.layers.26.mixer.D": "pytorch_model-00003-of-00006.bin",
+     "backbone.layers.26.mixer.conv1d.bias": "pytorch_model-00003-of-00006.bin",
+     "backbone.layers.26.mixer.conv1d.weight": "pytorch_model-00003-of-00006.bin",
+     "backbone.layers.26.mixer.dt_bias": "pytorch_model-00003-of-00006.bin",
+     "backbone.layers.26.mixer.in_proj.weight": "pytorch_model-00003-of-00006.bin",
+     "backbone.layers.26.mixer.norm.weight": "pytorch_model-00003-of-00006.bin",
+     "backbone.layers.26.mixer.out_proj.weight": "pytorch_model-00003-of-00006.bin",
+     "backbone.layers.26.norm.weight": "pytorch_model-00003-of-00006.bin",
+     "backbone.layers.27.mixer.A_log": "pytorch_model-00003-of-00006.bin",
+     "backbone.layers.27.mixer.D": "pytorch_model-00003-of-00006.bin",
+     "backbone.layers.27.mixer.conv1d.bias": "pytorch_model-00003-of-00006.bin",
+     "backbone.layers.27.mixer.conv1d.weight": "pytorch_model-00003-of-00006.bin",
+     "backbone.layers.27.mixer.dt_bias": "pytorch_model-00003-of-00006.bin",
+     "backbone.layers.27.mixer.in_proj.weight": "pytorch_model-00003-of-00006.bin",
+     "backbone.layers.27.mixer.norm.weight": "pytorch_model-00003-of-00006.bin",
+     "backbone.layers.27.mixer.out_proj.weight": "pytorch_model-00003-of-00006.bin",
+     "backbone.layers.27.norm.weight": "pytorch_model-00003-of-00006.bin",
+     "backbone.layers.28.mixer.A_log": "pytorch_model-00003-of-00006.bin",
+     "backbone.layers.28.mixer.D": "pytorch_model-00003-of-00006.bin",
+     "backbone.layers.28.mixer.conv1d.bias": "pytorch_model-00003-of-00006.bin",
+     "backbone.layers.28.mixer.conv1d.weight": "pytorch_model-00003-of-00006.bin",
+     "backbone.layers.28.mixer.dt_bias": "pytorch_model-00003-of-00006.bin",
+     "backbone.layers.28.mixer.in_proj.weight": "pytorch_model-00003-of-00006.bin",
+     "backbone.layers.28.mixer.norm.weight": "pytorch_model-00003-of-00006.bin",
+     "backbone.layers.28.mixer.out_proj.weight": "pytorch_model-00003-of-00006.bin",
+     "backbone.layers.28.norm.weight": "pytorch_model-00003-of-00006.bin",
+     "backbone.layers.29.mixer.A_log": "pytorch_model-00003-of-00006.bin",
+     "backbone.layers.29.mixer.D": "pytorch_model-00003-of-00006.bin",
+     "backbone.layers.29.mixer.conv1d.bias": "pytorch_model-00003-of-00006.bin",
+     "backbone.layers.29.mixer.conv1d.weight": "pytorch_model-00003-of-00006.bin",
+     "backbone.layers.29.mixer.dt_bias": "pytorch_model-00003-of-00006.bin",
+     "backbone.layers.29.mixer.in_proj.weight": "pytorch_model-00003-of-00006.bin",
+     "backbone.layers.29.mixer.norm.weight": "pytorch_model-00003-of-00006.bin",
+     "backbone.layers.29.mixer.out_proj.weight": "pytorch_model-00003-of-00006.bin",
+     "backbone.layers.29.norm.weight": "pytorch_model-00003-of-00006.bin",
+     "backbone.layers.3.mixer.A_log": "pytorch_model-00001-of-00006.bin",
+     "backbone.layers.3.mixer.D": "pytorch_model-00001-of-00006.bin",
+     "backbone.layers.3.mixer.conv1d.bias": "pytorch_model-00001-of-00006.bin",
+     "backbone.layers.3.mixer.conv1d.weight": "pytorch_model-00001-of-00006.bin",
+     "backbone.layers.3.mixer.dt_bias": "pytorch_model-00001-of-00006.bin",
+     "backbone.layers.3.mixer.in_proj.weight": "pytorch_model-00001-of-00006.bin",
+     "backbone.layers.3.mixer.norm.weight": "pytorch_model-00001-of-00006.bin",
+     "backbone.layers.3.mixer.out_proj.weight": "pytorch_model-00001-of-00006.bin",
+     "backbone.layers.3.norm.weight": "pytorch_model-00001-of-00006.bin",
+     "backbone.layers.30.mixer.A_log": "pytorch_model-00003-of-00006.bin",
+     "backbone.layers.30.mixer.D": "pytorch_model-00003-of-00006.bin",
+     "backbone.layers.30.mixer.conv1d.bias": "pytorch_model-00003-of-00006.bin",
+     "backbone.layers.30.mixer.conv1d.weight": "pytorch_model-00003-of-00006.bin",
+     "backbone.layers.30.mixer.dt_bias": "pytorch_model-00003-of-00006.bin",
+     "backbone.layers.30.mixer.in_proj.weight": "pytorch_model-00003-of-00006.bin",
+     "backbone.layers.30.mixer.norm.weight": "pytorch_model-00003-of-00006.bin",
+     "backbone.layers.30.mixer.out_proj.weight": "pytorch_model-00003-of-00006.bin",
+     "backbone.layers.30.norm.weight": "pytorch_model-00003-of-00006.bin",
+     "backbone.layers.31.mixer.A_log": "pytorch_model-00003-of-00006.bin",
+     "backbone.layers.31.mixer.D": "pytorch_model-00003-of-00006.bin",
+     "backbone.layers.31.mixer.conv1d.bias": "pytorch_model-00003-of-00006.bin",
+     "backbone.layers.31.mixer.conv1d.weight": "pytorch_model-00003-of-00006.bin",
+     "backbone.layers.31.mixer.dt_bias": "pytorch_model-00003-of-00006.bin",
+     "backbone.layers.31.mixer.ib_proj.weight": "pytorch_model-00003-of-00006.bin",
+     "backbone.layers.31.mixer.in_proj.weight": "pytorch_model-00003-of-00006.bin",
+     "backbone.layers.31.mixer.norm.weight": "pytorch_model-00003-of-00006.bin",
+     "backbone.layers.31.mixer.out_proj.weight": "pytorch_model-00003-of-00006.bin",
+     "backbone.layers.31.norm.weight": "pytorch_model-00003-of-00006.bin",
+     "backbone.layers.32.mixer.A_log": "pytorch_model-00003-of-00006.bin",
+     "backbone.layers.32.mixer.D": "pytorch_model-00003-of-00006.bin",
+     "backbone.layers.32.mixer.conv1d.bias": "pytorch_model-00003-of-00006.bin",
+     "backbone.layers.32.mixer.conv1d.weight": "pytorch_model-00003-of-00006.bin",
+     "backbone.layers.32.mixer.dt_bias": "pytorch_model-00003-of-00006.bin",
+     "backbone.layers.32.mixer.in_proj.weight": "pytorch_model-00004-of-00006.bin",
+     "backbone.layers.32.mixer.norm.weight": "pytorch_model-00004-of-00006.bin",
+     "backbone.layers.32.mixer.out_proj.weight": "pytorch_model-00004-of-00006.bin",
+     "backbone.layers.32.norm.weight": "pytorch_model-00003-of-00006.bin",
+     "backbone.layers.33.mixer.A_log": "pytorch_model-00004-of-00006.bin",
+     "backbone.layers.33.mixer.D": "pytorch_model-00004-of-00006.bin",
+     "backbone.layers.33.mixer.conv1d.bias": "pytorch_model-00004-of-00006.bin",
+     "backbone.layers.33.mixer.conv1d.weight": "pytorch_model-00004-of-00006.bin",
+     "backbone.layers.33.mixer.dt_bias": "pytorch_model-00004-of-00006.bin",
+     "backbone.layers.33.mixer.in_proj.weight": "pytorch_model-00004-of-00006.bin",
+     "backbone.layers.33.mixer.norm.weight": "pytorch_model-00004-of-00006.bin",
+     "backbone.layers.33.mixer.out_proj.weight": "pytorch_model-00004-of-00006.bin",
+     "backbone.layers.33.norm.weight": "pytorch_model-00004-of-00006.bin",
+     "backbone.layers.34.mixer.A_log": "pytorch_model-00004-of-00006.bin",
+     "backbone.layers.34.mixer.D": "pytorch_model-00004-of-00006.bin",
+     "backbone.layers.34.mixer.conv1d.bias": "pytorch_model-00004-of-00006.bin",
+     "backbone.layers.34.mixer.conv1d.weight": "pytorch_model-00004-of-00006.bin",
+     "backbone.layers.34.mixer.dt_bias": "pytorch_model-00004-of-00006.bin",
+     "backbone.layers.34.mixer.in_proj.weight": "pytorch_model-00004-of-00006.bin",
+     "backbone.layers.34.mixer.norm.weight": "pytorch_model-00004-of-00006.bin",
+     "backbone.layers.34.mixer.out_proj.weight": "pytorch_model-00004-of-00006.bin",
+     "backbone.layers.34.norm.weight": "pytorch_model-00004-of-00006.bin",
+     "backbone.layers.35.mixer.A_log": "pytorch_model-00004-of-00006.bin",
+     "backbone.layers.35.mixer.D": "pytorch_model-00004-of-00006.bin",
+     "backbone.layers.35.mixer.conv1d.bias": "pytorch_model-00004-of-00006.bin",
+     "backbone.layers.35.mixer.conv1d.weight": "pytorch_model-00004-of-00006.bin",
+     "backbone.layers.35.mixer.dt_bias": "pytorch_model-00004-of-00006.bin",
+     "backbone.layers.35.mixer.in_proj.weight": "pytorch_model-00004-of-00006.bin",
+     "backbone.layers.35.mixer.norm.weight": "pytorch_model-00004-of-00006.bin",
+     "backbone.layers.35.mixer.out_proj.weight": "pytorch_model-00004-of-00006.bin",
+     "backbone.layers.35.norm.weight": "pytorch_model-00004-of-00006.bin",
+     "backbone.layers.36.mixer.A_log": "pytorch_model-00004-of-00006.bin",
+     "backbone.layers.36.mixer.D": "pytorch_model-00004-of-00006.bin",
+     "backbone.layers.36.mixer.conv1d.bias": "pytorch_model-00004-of-00006.bin",
+     "backbone.layers.36.mixer.conv1d.weight": "pytorch_model-00004-of-00006.bin",
+     "backbone.layers.36.mixer.dt_bias": "pytorch_model-00004-of-00006.bin",
+     "backbone.layers.36.mixer.in_proj.weight": "pytorch_model-00004-of-00006.bin",
+     "backbone.layers.36.mixer.norm.weight": "pytorch_model-00004-of-00006.bin",
+     "backbone.layers.36.mixer.out_proj.weight": "pytorch_model-00004-of-00006.bin",
+     "backbone.layers.36.norm.weight": "pytorch_model-00004-of-00006.bin",
+     "backbone.layers.37.mixer.A_log": "pytorch_model-00004-of-00006.bin",
+     "backbone.layers.37.mixer.D": "pytorch_model-00004-of-00006.bin",
+     "backbone.layers.37.mixer.conv1d.bias": "pytorch_model-00004-of-00006.bin",
+     "backbone.layers.37.mixer.conv1d.weight": "pytorch_model-00004-of-00006.bin",
+     "backbone.layers.37.mixer.dt_bias": "pytorch_model-00004-of-00006.bin",
+     "backbone.layers.37.mixer.in_proj.weight": "pytorch_model-00004-of-00006.bin",
+     "backbone.layers.37.mixer.norm.weight": "pytorch_model-00004-of-00006.bin",
+     "backbone.layers.37.mixer.out_proj.weight": "pytorch_model-00004-of-00006.bin",
+     "backbone.layers.37.norm.weight": "pytorch_model-00004-of-00006.bin",
+     "backbone.layers.38.mixer.A_log": "pytorch_model-00004-of-00006.bin",
+     "backbone.layers.38.mixer.D": "pytorch_model-00004-of-00006.bin",
+     "backbone.layers.38.mixer.conv1d.bias": "pytorch_model-00004-of-00006.bin",
+     "backbone.layers.38.mixer.conv1d.weight": "pytorch_model-00004-of-00006.bin",
+     "backbone.layers.38.mixer.dt_bias": "pytorch_model-00004-of-00006.bin",
+     "backbone.layers.38.mixer.in_proj.weight": "pytorch_model-00004-of-00006.bin",
+     "backbone.layers.38.mixer.norm.weight": "pytorch_model-00004-of-00006.bin",
+     "backbone.layers.38.mixer.out_proj.weight": "pytorch_model-00004-of-00006.bin",
+     "backbone.layers.38.norm.weight": "pytorch_model-00004-of-00006.bin",
+     "backbone.layers.39.mixer.A_log": "pytorch_model-00004-of-00006.bin",
+     "backbone.layers.39.mixer.D": "pytorch_model-00004-of-00006.bin",
+     "backbone.layers.39.mixer.conv1d.bias": "pytorch_model-00004-of-00006.bin",
+     "backbone.layers.39.mixer.conv1d.weight": "pytorch_model-00004-of-00006.bin",
+     "backbone.layers.39.mixer.dt_bias": "pytorch_model-00004-of-00006.bin",
+     "backbone.layers.39.mixer.in_proj.weight": "pytorch_model-00004-of-00006.bin",
+     "backbone.layers.39.mixer.norm.weight": "pytorch_model-00004-of-00006.bin",
+     "backbone.layers.39.mixer.out_proj.weight": "pytorch_model-00004-of-00006.bin",
+     "backbone.layers.39.norm.weight": "pytorch_model-00004-of-00006.bin",
+     "backbone.layers.4.mixer.A_log": "pytorch_model-00001-of-00006.bin",
+     "backbone.layers.4.mixer.D": "pytorch_model-00001-of-00006.bin",
+     "backbone.layers.4.mixer.conv1d.bias": "pytorch_model-00001-of-00006.bin",
+     "backbone.layers.4.mixer.conv1d.weight": "pytorch_model-00001-of-00006.bin",
+     "backbone.layers.4.mixer.dt_bias": "pytorch_model-00001-of-00006.bin",
+     "backbone.layers.4.mixer.in_proj.weight": "pytorch_model-00001-of-00006.bin",
+     "backbone.layers.4.mixer.norm.weight": "pytorch_model-00001-of-00006.bin",
+     "backbone.layers.4.mixer.out_proj.weight": "pytorch_model-00001-of-00006.bin",
+     "backbone.layers.4.norm.weight": "pytorch_model-00001-of-00006.bin",
+     "backbone.layers.40.mixer.A_log": "pytorch_model-00004-of-00006.bin",
+     "backbone.layers.40.mixer.D": "pytorch_model-00004-of-00006.bin",
+     "backbone.layers.40.mixer.conv1d.bias": "pytorch_model-00004-of-00006.bin",
+     "backbone.layers.40.mixer.conv1d.weight": "pytorch_model-00004-of-00006.bin",
+     "backbone.layers.40.mixer.dt_bias": "pytorch_model-00004-of-00006.bin",
+     "backbone.layers.40.mixer.in_proj.weight": "pytorch_model-00004-of-00006.bin",
+     "backbone.layers.40.mixer.norm.weight": "pytorch_model-00004-of-00006.bin",
+     "backbone.layers.40.mixer.out_proj.weight": "pytorch_model-00004-of-00006.bin",
+     "backbone.layers.40.norm.weight": "pytorch_model-00004-of-00006.bin",
+     "backbone.layers.41.mixer.A_log": "pytorch_model-00004-of-00006.bin",
+     "backbone.layers.41.mixer.D": "pytorch_model-00004-of-00006.bin",
+     "backbone.layers.41.mixer.conv1d.bias": "pytorch_model-00004-of-00006.bin",
+     "backbone.layers.41.mixer.conv1d.weight": "pytorch_model-00004-of-00006.bin",
+     "backbone.layers.41.mixer.dt_bias": "pytorch_model-00004-of-00006.bin",
+     "backbone.layers.41.mixer.in_proj.weight": "pytorch_model-00004-of-00006.bin",
+     "backbone.layers.41.mixer.norm.weight": "pytorch_model-00004-of-00006.bin",
+     "backbone.layers.41.mixer.out_proj.weight": "pytorch_model-00004-of-00006.bin",
+     "backbone.layers.41.norm.weight": "pytorch_model-00004-of-00006.bin",
+     "backbone.layers.42.mixer.A_log": "pytorch_model-00004-of-00006.bin",
+     "backbone.layers.42.mixer.D": "pytorch_model-00004-of-00006.bin",
+     "backbone.layers.42.mixer.conv1d.bias": "pytorch_model-00004-of-00006.bin",
+     "backbone.layers.42.mixer.conv1d.weight": "pytorch_model-00004-of-00006.bin",
+     "backbone.layers.42.mixer.dt_bias": "pytorch_model-00004-of-00006.bin",
+     "backbone.layers.42.mixer.in_proj.weight": "pytorch_model-00004-of-00006.bin",
+     "backbone.layers.42.mixer.norm.weight": "pytorch_model-00004-of-00006.bin",
+     "backbone.layers.42.mixer.out_proj.weight": "pytorch_model-00004-of-00006.bin",
+     "backbone.layers.42.norm.weight": "pytorch_model-00004-of-00006.bin",
+     "backbone.layers.43.mixer.A_log": "pytorch_model-00004-of-00006.bin",
+     "backbone.layers.43.mixer.D": "pytorch_model-00004-of-00006.bin",
+     "backbone.layers.43.mixer.conv1d.bias": "pytorch_model-00004-of-00006.bin",
+     "backbone.layers.43.mixer.conv1d.weight": "pytorch_model-00004-of-00006.bin",
+     "backbone.layers.43.mixer.dt_bias": "pytorch_model-00004-of-00006.bin",
+     "backbone.layers.43.mixer.in_proj.weight": "pytorch_model-00005-of-00006.bin",
+     "backbone.layers.43.mixer.norm.weight": "pytorch_model-00005-of-00006.bin",
+     "backbone.layers.43.mixer.out_proj.weight": "pytorch_model-00005-of-00006.bin",
+     "backbone.layers.43.norm.weight": "pytorch_model-00004-of-00006.bin",
+     "backbone.layers.44.mixer.A_log": "pytorch_model-00005-of-00006.bin",
+     "backbone.layers.44.mixer.D": "pytorch_model-00005-of-00006.bin",
+     "backbone.layers.44.mixer.conv1d.bias": "pytorch_model-00005-of-00006.bin",
+     "backbone.layers.44.mixer.conv1d.weight": "pytorch_model-00005-of-00006.bin",
+     "backbone.layers.44.mixer.dt_bias": "pytorch_model-00005-of-00006.bin",
+     "backbone.layers.44.mixer.in_proj.weight": "pytorch_model-00005-of-00006.bin",
+     "backbone.layers.44.mixer.norm.weight": "pytorch_model-00005-of-00006.bin",
+     "backbone.layers.44.mixer.out_proj.weight": "pytorch_model-00005-of-00006.bin",
+     "backbone.layers.44.norm.weight": "pytorch_model-00005-of-00006.bin",
+     "backbone.layers.45.mixer.A_log": "pytorch_model-00005-of-00006.bin",
+     "backbone.layers.45.mixer.D": "pytorch_model-00005-of-00006.bin",
+     "backbone.layers.45.mixer.conv1d.bias": "pytorch_model-00005-of-00006.bin",
+     "backbone.layers.45.mixer.conv1d.weight": "pytorch_model-00005-of-00006.bin",
+     "backbone.layers.45.mixer.dt_bias": "pytorch_model-00005-of-00006.bin",
+     "backbone.layers.45.mixer.in_proj.weight": "pytorch_model-00005-of-00006.bin",
+     "backbone.layers.45.mixer.norm.weight": "pytorch_model-00005-of-00006.bin",
+     "backbone.layers.45.mixer.out_proj.weight": "pytorch_model-00005-of-00006.bin",
+     "backbone.layers.45.norm.weight": "pytorch_model-00005-of-00006.bin",
+     "backbone.layers.46.mixer.A_log": "pytorch_model-00005-of-00006.bin",
+     "backbone.layers.46.mixer.D": "pytorch_model-00005-of-00006.bin",
+     "backbone.layers.46.mixer.conv1d.bias": "pytorch_model-00005-of-00006.bin",
+     "backbone.layers.46.mixer.conv1d.weight": "pytorch_model-00005-of-00006.bin",
+     "backbone.layers.46.mixer.dt_bias": "pytorch_model-00005-of-00006.bin",
+     "backbone.layers.46.mixer.in_proj.weight": "pytorch_model-00005-of-00006.bin",
+     "backbone.layers.46.mixer.norm.weight": "pytorch_model-00005-of-00006.bin",
+     "backbone.layers.46.mixer.out_proj.weight": "pytorch_model-00005-of-00006.bin",
+     "backbone.layers.46.norm.weight": "pytorch_model-00005-of-00006.bin",
+     "backbone.layers.47.mixer.A_log": "pytorch_model-00005-of-00006.bin",
+     "backbone.layers.47.mixer.D": "pytorch_model-00005-of-00006.bin",
+     "backbone.layers.47.mixer.conv1d.bias": "pytorch_model-00005-of-00006.bin",
+     "backbone.layers.47.mixer.conv1d.weight": "pytorch_model-00005-of-00006.bin",
+     "backbone.layers.47.mixer.dt_bias": "pytorch_model-00005-of-00006.bin",
+     "backbone.layers.47.mixer.in_proj.weight": "pytorch_model-00005-of-00006.bin",
+     "backbone.layers.47.mixer.norm.weight": "pytorch_model-00005-of-00006.bin",
+     "backbone.layers.47.mixer.out_proj.weight": "pytorch_model-00005-of-00006.bin",
+     "backbone.layers.47.norm.weight": "pytorch_model-00005-of-00006.bin",
+     "backbone.layers.48.mixer.A_log": "pytorch_model-00005-of-00006.bin",
+     "backbone.layers.48.mixer.D": "pytorch_model-00005-of-00006.bin",
+     "backbone.layers.48.mixer.conv1d.bias": "pytorch_model-00005-of-00006.bin",
+     "backbone.layers.48.mixer.conv1d.weight": "pytorch_model-00005-of-00006.bin",
+     "backbone.layers.48.mixer.dt_bias": "pytorch_model-00005-of-00006.bin",
+     "backbone.layers.48.mixer.in_proj.weight": "pytorch_model-00005-of-00006.bin",
+     "backbone.layers.48.mixer.norm.weight": "pytorch_model-00005-of-00006.bin",
+     "backbone.layers.48.mixer.out_proj.weight": "pytorch_model-00005-of-00006.bin",
+     "backbone.layers.48.norm.weight": "pytorch_model-00005-of-00006.bin",
+     "backbone.layers.49.mixer.A_log": "pytorch_model-00005-of-00006.bin",
+     "backbone.layers.49.mixer.D": "pytorch_model-00005-of-00006.bin",
+     "backbone.layers.49.mixer.conv1d.bias": "pytorch_model-00005-of-00006.bin",
+     "backbone.layers.49.mixer.conv1d.weight": "pytorch_model-00005-of-00006.bin",
+     "backbone.layers.49.mixer.dt_bias": "pytorch_model-00005-of-00006.bin",
+     "backbone.layers.49.mixer.in_proj.weight": "pytorch_model-00005-of-00006.bin",
+     "backbone.layers.49.mixer.norm.weight": "pytorch_model-00005-of-00006.bin",
+     "backbone.layers.49.mixer.out_proj.weight": "pytorch_model-00005-of-00006.bin",
+     "backbone.layers.49.norm.weight": "pytorch_model-00005-of-00006.bin",
+     "backbone.layers.5.mixer.A_log": "pytorch_model-00001-of-00006.bin",
+     "backbone.layers.5.mixer.D": "pytorch_model-00001-of-00006.bin",
+     "backbone.layers.5.mixer.conv1d.bias": "pytorch_model-00001-of-00006.bin",
+     "backbone.layers.5.mixer.conv1d.weight": "pytorch_model-00001-of-00006.bin",
+     "backbone.layers.5.mixer.dt_bias": "pytorch_model-00001-of-00006.bin",
+     "backbone.layers.5.mixer.in_proj.weight": "pytorch_model-00001-of-00006.bin",
+     "backbone.layers.5.mixer.norm.weight": "pytorch_model-00001-of-00006.bin",
+     "backbone.layers.5.mixer.out_proj.weight": "pytorch_model-00001-of-00006.bin",
+     "backbone.layers.5.norm.weight": "pytorch_model-00001-of-00006.bin",
+     "backbone.layers.50.mixer.A_log": "pytorch_model-00005-of-00006.bin",
+     "backbone.layers.50.mixer.D": "pytorch_model-00005-of-00006.bin",
+     "backbone.layers.50.mixer.conv1d.bias": "pytorch_model-00005-of-00006.bin",
+     "backbone.layers.50.mixer.conv1d.weight": "pytorch_model-00005-of-00006.bin",
+     "backbone.layers.50.mixer.dt_bias": "pytorch_model-00005-of-00006.bin",
+     "backbone.layers.50.mixer.in_proj.weight": "pytorch_model-00005-of-00006.bin",
+     "backbone.layers.50.mixer.norm.weight": "pytorch_model-00005-of-00006.bin",
+     "backbone.layers.50.mixer.out_proj.weight": "pytorch_model-00005-of-00006.bin",
+     "backbone.layers.50.norm.weight": "pytorch_model-00005-of-00006.bin",
+     "backbone.layers.51.mixer.A_log": "pytorch_model-00005-of-00006.bin",
+     "backbone.layers.51.mixer.D": "pytorch_model-00005-of-00006.bin",
+     "backbone.layers.51.mixer.conv1d.bias": "pytorch_model-00005-of-00006.bin",
+     "backbone.layers.51.mixer.conv1d.weight": "pytorch_model-00005-of-00006.bin",
+     "backbone.layers.51.mixer.dt_bias": "pytorch_model-00005-of-00006.bin",
+     "backbone.layers.51.mixer.in_proj.weight": "pytorch_model-00005-of-00006.bin",
+     "backbone.layers.51.mixer.norm.weight": "pytorch_model-00005-of-00006.bin",
+     "backbone.layers.51.mixer.out_proj.weight": "pytorch_model-00005-of-00006.bin",
+     "backbone.layers.51.norm.weight": "pytorch_model-00005-of-00006.bin",
+     "backbone.layers.52.mixer.A_log": "pytorch_model-00005-of-00006.bin",
+     "backbone.layers.52.mixer.D": "pytorch_model-00005-of-00006.bin",
+     "backbone.layers.52.mixer.conv1d.bias": "pytorch_model-00005-of-00006.bin",
+     "backbone.layers.52.mixer.conv1d.weight": "pytorch_model-00005-of-00006.bin",
+     "backbone.layers.52.mixer.dt_bias": "pytorch_model-00005-of-00006.bin",
+     "backbone.layers.52.mixer.in_proj.weight": "pytorch_model-00005-of-00006.bin",
+     "backbone.layers.52.mixer.norm.weight": "pytorch_model-00005-of-00006.bin",
+     "backbone.layers.52.mixer.out_proj.weight": "pytorch_model-00005-of-00006.bin",
+     "backbone.layers.52.norm.weight": "pytorch_model-00005-of-00006.bin",
+     "backbone.layers.53.mixer.A_log": "pytorch_model-00005-of-00006.bin",
+     "backbone.layers.53.mixer.D": "pytorch_model-00005-of-00006.bin",
+     "backbone.layers.53.mixer.conv1d.bias": "pytorch_model-00005-of-00006.bin",
+     "backbone.layers.53.mixer.conv1d.weight": "pytorch_model-00005-of-00006.bin",
+     "backbone.layers.53.mixer.dt_bias": "pytorch_model-00005-of-00006.bin",
+     "backbone.layers.53.mixer.in_proj.weight": "pytorch_model-00005-of-00006.bin",
+     "backbone.layers.53.mixer.norm.weight": "pytorch_model-00005-of-00006.bin",
+     "backbone.layers.53.mixer.out_proj.weight": "pytorch_model-00005-of-00006.bin",
+     "backbone.layers.53.norm.weight": "pytorch_model-00005-of-00006.bin",
+     "backbone.layers.54.mixer.A_log": "pytorch_model-00005-of-00006.bin",
+     "backbone.layers.54.mixer.D": "pytorch_model-00005-of-00006.bin",
+     "backbone.layers.54.mixer.conv1d.bias": "pytorch_model-00005-of-00006.bin",
+     "backbone.layers.54.mixer.conv1d.weight": "pytorch_model-00005-of-00006.bin",
+     "backbone.layers.54.mixer.dt_bias": "pytorch_model-00005-of-00006.bin",
+     "backbone.layers.54.mixer.in_proj.weight": "pytorch_model-00006-of-00006.bin",
+     "backbone.layers.54.mixer.norm.weight": "pytorch_model-00006-of-00006.bin",
+     "backbone.layers.54.mixer.out_proj.weight": "pytorch_model-00006-of-00006.bin",
+     "backbone.layers.54.norm.weight": "pytorch_model-00005-of-00006.bin",
468
+ "backbone.layers.55.mixer.A_log": "pytorch_model-00006-of-00006.bin",
469
+ "backbone.layers.55.mixer.D": "pytorch_model-00006-of-00006.bin",
470
+ "backbone.layers.55.mixer.conv1d.bias": "pytorch_model-00006-of-00006.bin",
471
+ "backbone.layers.55.mixer.conv1d.weight": "pytorch_model-00006-of-00006.bin",
472
+ "backbone.layers.55.mixer.dt_bias": "pytorch_model-00006-of-00006.bin",
473
+ "backbone.layers.55.mixer.in_proj.weight": "pytorch_model-00006-of-00006.bin",
474
+ "backbone.layers.55.mixer.norm.weight": "pytorch_model-00006-of-00006.bin",
475
+ "backbone.layers.55.mixer.out_proj.weight": "pytorch_model-00006-of-00006.bin",
476
+ "backbone.layers.55.norm.weight": "pytorch_model-00006-of-00006.bin",
477
+ "backbone.layers.56.mixer.A_log": "pytorch_model-00006-of-00006.bin",
478
+ "backbone.layers.56.mixer.D": "pytorch_model-00006-of-00006.bin",
479
+ "backbone.layers.56.mixer.conv1d.bias": "pytorch_model-00006-of-00006.bin",
480
+ "backbone.layers.56.mixer.conv1d.weight": "pytorch_model-00006-of-00006.bin",
481
+ "backbone.layers.56.mixer.dt_bias": "pytorch_model-00006-of-00006.bin",
482
+ "backbone.layers.56.mixer.in_proj.weight": "pytorch_model-00006-of-00006.bin",
483
+ "backbone.layers.56.mixer.norm.weight": "pytorch_model-00006-of-00006.bin",
484
+ "backbone.layers.56.mixer.out_proj.weight": "pytorch_model-00006-of-00006.bin",
485
+ "backbone.layers.56.norm.weight": "pytorch_model-00006-of-00006.bin",
486
+ "backbone.layers.57.mixer.A_log": "pytorch_model-00006-of-00006.bin",
487
+ "backbone.layers.57.mixer.D": "pytorch_model-00006-of-00006.bin",
488
+ "backbone.layers.57.mixer.conv1d.bias": "pytorch_model-00006-of-00006.bin",
489
+ "backbone.layers.57.mixer.conv1d.weight": "pytorch_model-00006-of-00006.bin",
490
+ "backbone.layers.57.mixer.dt_bias": "pytorch_model-00006-of-00006.bin",
491
+ "backbone.layers.57.mixer.in_proj.weight": "pytorch_model-00006-of-00006.bin",
492
+ "backbone.layers.57.mixer.norm.weight": "pytorch_model-00006-of-00006.bin",
493
+ "backbone.layers.57.mixer.out_proj.weight": "pytorch_model-00006-of-00006.bin",
494
+ "backbone.layers.57.norm.weight": "pytorch_model-00006-of-00006.bin",
495
+ "backbone.layers.58.mixer.A_log": "pytorch_model-00006-of-00006.bin",
496
+ "backbone.layers.58.mixer.D": "pytorch_model-00006-of-00006.bin",
497
+ "backbone.layers.58.mixer.conv1d.bias": "pytorch_model-00006-of-00006.bin",
498
+ "backbone.layers.58.mixer.conv1d.weight": "pytorch_model-00006-of-00006.bin",
499
+ "backbone.layers.58.mixer.dt_bias": "pytorch_model-00006-of-00006.bin",
500
+ "backbone.layers.58.mixer.in_proj.weight": "pytorch_model-00006-of-00006.bin",
501
+ "backbone.layers.58.mixer.norm.weight": "pytorch_model-00006-of-00006.bin",
502
+ "backbone.layers.58.mixer.out_proj.weight": "pytorch_model-00006-of-00006.bin",
503
+ "backbone.layers.58.norm.weight": "pytorch_model-00006-of-00006.bin",
504
+ "backbone.layers.59.mixer.A_log": "pytorch_model-00006-of-00006.bin",
505
+ "backbone.layers.59.mixer.D": "pytorch_model-00006-of-00006.bin",
506
+ "backbone.layers.59.mixer.conv1d.bias": "pytorch_model-00006-of-00006.bin",
507
+ "backbone.layers.59.mixer.conv1d.weight": "pytorch_model-00006-of-00006.bin",
508
+ "backbone.layers.59.mixer.dt_bias": "pytorch_model-00006-of-00006.bin",
509
+ "backbone.layers.59.mixer.in_proj.weight": "pytorch_model-00006-of-00006.bin",
510
+ "backbone.layers.59.mixer.norm.weight": "pytorch_model-00006-of-00006.bin",
511
+ "backbone.layers.59.mixer.out_proj.weight": "pytorch_model-00006-of-00006.bin",
512
+ "backbone.layers.59.norm.weight": "pytorch_model-00006-of-00006.bin",
513
+ "backbone.layers.6.mixer.A_log": "pytorch_model-00001-of-00006.bin",
514
+ "backbone.layers.6.mixer.D": "pytorch_model-00001-of-00006.bin",
515
+ "backbone.layers.6.mixer.conv1d.bias": "pytorch_model-00001-of-00006.bin",
516
+ "backbone.layers.6.mixer.conv1d.weight": "pytorch_model-00001-of-00006.bin",
517
+ "backbone.layers.6.mixer.dt_bias": "pytorch_model-00001-of-00006.bin",
518
+ "backbone.layers.6.mixer.in_proj.weight": "pytorch_model-00001-of-00006.bin",
519
+ "backbone.layers.6.mixer.norm.weight": "pytorch_model-00001-of-00006.bin",
520
+ "backbone.layers.6.mixer.out_proj.weight": "pytorch_model-00001-of-00006.bin",
521
+ "backbone.layers.6.norm.weight": "pytorch_model-00001-of-00006.bin",
522
+ "backbone.layers.60.mixer.A_log": "pytorch_model-00006-of-00006.bin",
523
+ "backbone.layers.60.mixer.D": "pytorch_model-00006-of-00006.bin",
524
+ "backbone.layers.60.mixer.conv1d.bias": "pytorch_model-00006-of-00006.bin",
525
+ "backbone.layers.60.mixer.conv1d.weight": "pytorch_model-00006-of-00006.bin",
526
+ "backbone.layers.60.mixer.dt_bias": "pytorch_model-00006-of-00006.bin",
527
+ "backbone.layers.60.mixer.in_proj.weight": "pytorch_model-00006-of-00006.bin",
528
+ "backbone.layers.60.mixer.norm.weight": "pytorch_model-00006-of-00006.bin",
529
+ "backbone.layers.60.mixer.out_proj.weight": "pytorch_model-00006-of-00006.bin",
530
+ "backbone.layers.60.norm.weight": "pytorch_model-00006-of-00006.bin",
531
+ "backbone.layers.61.mixer.A_log": "pytorch_model-00006-of-00006.bin",
532
+ "backbone.layers.61.mixer.D": "pytorch_model-00006-of-00006.bin",
533
+ "backbone.layers.61.mixer.conv1d.bias": "pytorch_model-00006-of-00006.bin",
534
+ "backbone.layers.61.mixer.conv1d.weight": "pytorch_model-00006-of-00006.bin",
535
+ "backbone.layers.61.mixer.dt_bias": "pytorch_model-00006-of-00006.bin",
536
+ "backbone.layers.61.mixer.in_proj.weight": "pytorch_model-00006-of-00006.bin",
537
+ "backbone.layers.61.mixer.norm.weight": "pytorch_model-00006-of-00006.bin",
538
+ "backbone.layers.61.mixer.out_proj.weight": "pytorch_model-00006-of-00006.bin",
539
+ "backbone.layers.61.norm.weight": "pytorch_model-00006-of-00006.bin",
540
+ "backbone.layers.62.mixer.A_log": "pytorch_model-00006-of-00006.bin",
541
+ "backbone.layers.62.mixer.D": "pytorch_model-00006-of-00006.bin",
542
+ "backbone.layers.62.mixer.conv1d.bias": "pytorch_model-00006-of-00006.bin",
543
+ "backbone.layers.62.mixer.conv1d.weight": "pytorch_model-00006-of-00006.bin",
544
+ "backbone.layers.62.mixer.dt_bias": "pytorch_model-00006-of-00006.bin",
545
+ "backbone.layers.62.mixer.in_proj.weight": "pytorch_model-00006-of-00006.bin",
546
+ "backbone.layers.62.mixer.norm.weight": "pytorch_model-00006-of-00006.bin",
547
+ "backbone.layers.62.mixer.out_proj.weight": "pytorch_model-00006-of-00006.bin",
548
+ "backbone.layers.62.norm.weight": "pytorch_model-00006-of-00006.bin",
549
+ "backbone.layers.63.mixer.A_log": "pytorch_model-00006-of-00006.bin",
550
+ "backbone.layers.63.mixer.D": "pytorch_model-00006-of-00006.bin",
551
+ "backbone.layers.63.mixer.conv1d.bias": "pytorch_model-00006-of-00006.bin",
552
+ "backbone.layers.63.mixer.conv1d.weight": "pytorch_model-00006-of-00006.bin",
553
+ "backbone.layers.63.mixer.dt_bias": "pytorch_model-00006-of-00006.bin",
554
+ "backbone.layers.63.mixer.ib_proj.weight": "pytorch_model-00006-of-00006.bin",
555
+ "backbone.layers.63.mixer.in_proj.weight": "pytorch_model-00006-of-00006.bin",
556
+ "backbone.layers.63.mixer.norm.weight": "pytorch_model-00006-of-00006.bin",
557
+ "backbone.layers.63.mixer.out_proj.weight": "pytorch_model-00006-of-00006.bin",
558
+ "backbone.layers.63.norm.weight": "pytorch_model-00006-of-00006.bin",
559
+ "backbone.layers.7.mixer.A_log": "pytorch_model-00001-of-00006.bin",
560
+ "backbone.layers.7.mixer.D": "pytorch_model-00001-of-00006.bin",
561
+ "backbone.layers.7.mixer.conv1d.bias": "pytorch_model-00001-of-00006.bin",
562
+ "backbone.layers.7.mixer.conv1d.weight": "pytorch_model-00001-of-00006.bin",
563
+ "backbone.layers.7.mixer.dt_bias": "pytorch_model-00001-of-00006.bin",
564
+ "backbone.layers.7.mixer.in_proj.weight": "pytorch_model-00001-of-00006.bin",
565
+ "backbone.layers.7.mixer.norm.weight": "pytorch_model-00001-of-00006.bin",
566
+ "backbone.layers.7.mixer.out_proj.weight": "pytorch_model-00001-of-00006.bin",
567
+ "backbone.layers.7.norm.weight": "pytorch_model-00001-of-00006.bin",
568
+ "backbone.layers.8.mixer.A_log": "pytorch_model-00001-of-00006.bin",
569
+ "backbone.layers.8.mixer.D": "pytorch_model-00001-of-00006.bin",
570
+ "backbone.layers.8.mixer.conv1d.bias": "pytorch_model-00001-of-00006.bin",
571
+ "backbone.layers.8.mixer.conv1d.weight": "pytorch_model-00001-of-00006.bin",
572
+ "backbone.layers.8.mixer.dt_bias": "pytorch_model-00001-of-00006.bin",
573
+ "backbone.layers.8.mixer.in_proj.weight": "pytorch_model-00001-of-00006.bin",
574
+ "backbone.layers.8.mixer.norm.weight": "pytorch_model-00001-of-00006.bin",
575
+ "backbone.layers.8.mixer.out_proj.weight": "pytorch_model-00001-of-00006.bin",
576
+ "backbone.layers.8.norm.weight": "pytorch_model-00001-of-00006.bin",
577
+ "backbone.layers.9.mixer.A_log": "pytorch_model-00001-of-00006.bin",
578
+ "backbone.layers.9.mixer.D": "pytorch_model-00001-of-00006.bin",
579
+ "backbone.layers.9.mixer.conv1d.bias": "pytorch_model-00001-of-00006.bin",
580
+ "backbone.layers.9.mixer.conv1d.weight": "pytorch_model-00001-of-00006.bin",
581
+ "backbone.layers.9.mixer.dt_bias": "pytorch_model-00001-of-00006.bin",
582
+ "backbone.layers.9.mixer.in_proj.weight": "pytorch_model-00001-of-00006.bin",
583
+ "backbone.layers.9.mixer.norm.weight": "pytorch_model-00001-of-00006.bin",
584
+ "backbone.layers.9.mixer.out_proj.weight": "pytorch_model-00001-of-00006.bin",
585
+ "backbone.layers.9.norm.weight": "pytorch_model-00001-of-00006.bin",
586
+ "backbone.norm_f.weight": "pytorch_model-00006-of-00006.bin",
587
+ "lm_head.weight": "pytorch_model-00006-of-00006.bin"
588
+ }
589
+ }
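This weight map is the standard sharded-checkpoint index: every parameter name resolves to exactly one of the six `.bin` shards, and a block may straddle a shard boundary (layer 54 keeps its conv/dt parameters in shard 5 but its projection weights in shard 6; layer 63 additionally carries the IBS2-specific `ib_proj.weight`). As a minimal sketch of how the map is consumed — `from_pretrained` does all of this automatically; the paths assume the shards sit in the working directory:

```python
# Minimal sketch: resolve parameters to shards via the weight_map in
# pytorch_model.bin.index.json and assemble a full state dict.
import json
import torch

with open("pytorch_model.bin.index.json") as f:
    index = json.load(f)

weight_map = index["weight_map"]  # parameter name -> shard filename

# Group parameter names by shard so each .bin file is read only once.
shards = {}
for name, shard_file in weight_map.items():
    shards.setdefault(shard_file, []).append(name)

state_dict = {}
for shard_file, names in shards.items():
    shard = torch.load(shard_file, map_location="cpu", weights_only=True)
    for name in names:
        state_dict[name] = shard[name]

print(weight_map["lm_head.weight"])  # -> "pytorch_model-00006-of-00006.bin"
```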
special_tokens_map.json ADDED
@@ -0,0 +1,23 @@
+ {
+ "bos_token": {
+ "content": "<s>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "</s>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "unk_token": {
+ "content": "<unk>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+ }
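As a quick sanity check — a minimal sketch, assuming a local checkout of this repository (a hub repo id works the same way) — these entries are what `AutoTokenizer` surfaces as its special-token attributes:

```python
# Minimal sketch: special_tokens_map.json is picked up by AutoTokenizer.
# "." assumes the files above sit in the current directory.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained(".")
print(tok.bos_token, tok.eos_token, tok.unk_token)  # expected: <s> </s> <unk>
```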
tokenizer.json ADDED
The diff for this file is too large to render; see the raw file in the repository.
tokenizer.model ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:59f95e28944c062244741268596badc900df86c7f5ded05088d2da22a7379e06
+ size 587583
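Note that these three lines are a Git LFS pointer, not the tokenizer model itself; the real file arrives with `git lfs pull` or a hub download. A minimal sketch for verifying the materialized file against the digest and size recorded above:

```python
# Minimal sketch: check tokenizer.model against the oid/size in the LFS
# pointer. Assumes the file has already been fetched (e.g. `git lfs pull`).
import hashlib
from pathlib import Path

data = Path("tokenizer.model").read_bytes()
assert len(data) == 587583, "size mismatch"
digest = hashlib.sha256(data).hexdigest()
assert digest == "59f95e28944c062244741268596badc900df86c7f5ded05088d2da22a7379e06"
print("tokenizer.model matches its LFS pointer")
```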
tokenizer_config.json ADDED
The diff for this file is too large to render; see the raw file in the repository.