zhouxingshi committed
Commit f730608 · Parent: c0fbc4b

Add model definitions

benchmarks/cifar/models/cnncert.py ADDED
@@ -0,0 +1,36 @@
import torch
import torch.nn as nn

def tanh_5layer_5_3():
    layers = [
        nn.Conv2d(3, 5, kernel_size=(3, 3)),
        nn.Tanh(),
        nn.Conv2d(5, 5, kernel_size=(3, 3)),
        nn.Tanh(),
        nn.Conv2d(5, 5, kernel_size=(3, 3)),
        nn.Tanh(),
        nn.Conv2d(5, 5, kernel_size=(3, 3)),
        nn.Tanh(),
        nn.Flatten(start_dim=1, end_dim=-1),
        nn.Linear(in_features=2880, out_features=10, bias=True),
    ]
    return nn.Sequential(*layers)

def tanh_7layer_5_3():
    layers = [
        nn.Conv2d(3, 5, kernel_size=(3, 3)),
        nn.Tanh(),
        nn.Conv2d(5, 5, kernel_size=(3, 3)),
        nn.Tanh(),
        nn.Conv2d(5, 5, kernel_size=(3, 3)),
        nn.Tanh(),
        nn.Conv2d(5, 5, kernel_size=(3, 3)),
        nn.Tanh(),
        nn.Conv2d(5, 5, kernel_size=(3, 3)),
        nn.Tanh(),
        nn.Conv2d(5, 5, kernel_size=(3, 3)),
        nn.Tanh(),
        nn.Flatten(start_dim=1, end_dim=-1),
        nn.Linear(in_features=2000, out_features=10, bias=True),
    ]
    return nn.Sequential(*layers)
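The Linear in_features values follow from the unpadded 3x3 convolutions: each one shrinks a 32x32 CIFAR image by 2 pixels per side, so the 5-layer model flattens 5*24*24 = 2880 features and the 7-layer model 5*20*20 = 2000. A minimal sanity-check sketch (not part of this commit), assuming the repository root is on PYTHONPATH so the file is importable under the path shown above:

import torch
from benchmarks.cifar.models.cnncert import tanh_5layer_5_3, tanh_7layer_5_3  # import path assumed

x = torch.randn(2, 3, 32, 32)                  # dummy CIFAR-10 batch
assert tanh_5layer_5_3()(x).shape == (2, 10)   # 4 convs: 32 -> 24, 5*24*24 = 2880
assert tanh_7layer_5_3()(x).shape == (2, 10)   # 6 convs: 32 -> 20, 5*20*20 = 2000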
benchmarks/cifar/models/gelu.py ADDED
@@ -0,0 +1,43 @@
import torch.nn as nn
import numpy as np
import torch

class GELUOp(torch.autograd.Function):
    @staticmethod
    def symbolic(g, x):
        return g.op('custom::Gelu', x)

    @staticmethod
    def forward(ctx, x):
        ctx.save_for_backward(x)
        return torch.nn.functional.gelu(x)

    @staticmethod
    def backward(ctx, grad_output):
        x, = ctx.saved_tensors
        grad_input = grad_output.clone()
        grad = 0.5 * (1 + torch.erf(x / np.sqrt(2))) + x * torch.exp(-0.5 * x ** 2) / np.sqrt(2 * torch.pi)
        return grad_input * grad

class GELU(nn.Module):
    def forward(self, x):
        return GELUOp.apply(x)

def gelu_fc(in_ch=3, in_dim=32, width=100, depth=4, omega=0.3, num_classes=10):
    layers = [nn.Flatten(), nn.Linear(in_ch*in_dim**2, width), GELU()]
    for _ in range(depth - 1):
        layers.extend([nn.Linear(width, width), GELU()])
    layers.append(nn.Linear(width, num_classes))
    return nn.Sequential(*layers)


def gelu_4fc_100(in_ch=3, in_dim=32):
    return gelu_fc(in_ch, in_dim, width=100, depth=4)


def gelu_4fc_200(in_ch=3, in_dim=32):
    return gelu_fc(in_ch, in_dim, width=200, depth=4)


def gelu_4fc_500(in_ch=3, in_dim=32):
    return gelu_fc(in_ch, in_dim, width=500, depth=4)
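GELUOp wraps torch.nn.functional.gelu in a custom autograd.Function: symbolic() makes the op export under the custom::Gelu name, and backward() implements the exact GELU derivative Φ(x) + x·φ(x) (normal CDF plus x times the normal PDF) by hand. A minimal sketch (not part of this commit) that checks the hand-written gradient against autograd through F.gelu, with the import path assumed from the file location:

import torch
from benchmarks.cifar.models.gelu import GELUOp  # import path assumed

x1 = torch.randn(16, requires_grad=True)
x2 = x1.detach().clone().requires_grad_(True)
GELUOp.apply(x1).sum().backward()               # hand-written backward
torch.nn.functional.gelu(x2).sum().backward()   # autograd reference
assert torch.allclose(x1.grad, x2.grad, atol=1e-5)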
benchmarks/cifar/models/lstm.py ADDED
@@ -0,0 +1,35 @@
import torch
import torch.nn as nn


class LSTM(nn.Module):
    def __init__(self, in_channels=3, width=100, patch_size=8, num_classes=10):
        super().__init__()

        self.patch_size = patch_size
        self.width = width
        self.num_classes = num_classes

        self.projection = nn.Conv2d(
            in_channels, width, kernel_size=patch_size, stride=patch_size)
        self.cell_f = nn.LSTMCell(width, width)
        self.fc = nn.Linear(width, num_classes)

    def forward(self, x):
        embed = self.projection(x)
        embed = torch.flatten(embed, 2).permute(0, 2, 1)
        h_f = torch.zeros(x.shape[0], self.width, device=x.device)
        c_f = h_f.clone()
        for i in range(embed.shape[1]):
            h_f, c_f = self.cell_f(embed[:, i], (h_f, c_f))
        logits = self.fc(h_f)
        return logits


def LSTM_patch_16_32(in_ch=3, in_dim=32):
    assert in_ch == 3 and in_dim == 32
    return LSTM(width=32, patch_size=16)

def LSTM_patch_16_64(in_ch=3, in_dim=32):
    assert in_ch == 3 and in_dim == 32
    return LSTM(width=64, patch_size=16)
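The model embeds the image into non-overlapping patches with a strided Conv2d, runs a single LSTMCell over the resulting patch sequence, and classifies the final hidden state; with patch_size=16 on 32x32 inputs the sequence has (32/16)**2 = 4 steps. A minimal usage sketch (not part of this commit), import path assumed:

import torch
from benchmarks.cifar.models.lstm import LSTM_patch_16_32  # import path assumed

model = LSTM_patch_16_32()
x = torch.randn(2, 3, 32, 32)     # dummy CIFAR-10 batch -> 4 patch embeddings of size 32
assert model(x).shape == (2, 10)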
benchmarks/cifar/models/resnet2b_gelu.py ADDED
@@ -0,0 +1,150 @@
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np


class GELUOp(torch.autograd.Function):
    @staticmethod
    def symbolic(g, x):
        return g.op('custom::Gelu', x)

    @staticmethod
    def forward(ctx, x):
        ctx.save_for_backward(x)
        return torch.nn.functional.gelu(x)

    @staticmethod
    def backward(ctx, grad_output):
        x, = ctx.saved_tensors
        grad_input = grad_output.clone()
        grad = 0.5 * (1 + torch.erf(x / np.sqrt(2))) + x * torch.exp(-0.5 * x ** 2) / np.sqrt(2 * torch.pi)
        return grad_input * grad

def gelu(x):
    return GELUOp.apply(x)


class BasicBlock(nn.Module):
    expansion = 1

    def __init__(self, in_planes, planes, stride=1, bn=True, kernel=3, part_gelu=False):
        super(BasicBlock, self).__init__()
        self.bn = bn
        if part_gelu:
            self.act = F.relu
        else:
            self.act = gelu
        if kernel == 3:
            self.conv1 = nn.Conv2d(
                in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=(not self.bn))
            if self.bn:
                self.bn1 = nn.BatchNorm2d(planes)
            self.conv2 = nn.Conv2d(planes, planes, kernel_size=3,
                                   stride=1, padding=1, bias=(not self.bn))
        elif kernel == 2:
            self.conv1 = nn.Conv2d(
                in_planes, planes, kernel_size=2, stride=stride, padding=1, bias=(not self.bn))
            if self.bn:
                self.bn1 = nn.BatchNorm2d(planes)
            self.conv2 = nn.Conv2d(planes, planes, kernel_size=2,
                                   stride=1, padding=0, bias=(not self.bn))
        elif kernel == 1:
            self.conv1 = nn.Conv2d(
                in_planes, planes, kernel_size=1, stride=stride, padding=0, bias=(not self.bn))
            if self.bn:
                self.bn1 = nn.BatchNorm2d(planes)
            self.conv2 = nn.Conv2d(planes, planes, kernel_size=1,
                                   stride=1, padding=0, bias=(not self.bn))
        else:
            exit("kernel not supported!")

        if self.bn:
            self.bn2 = nn.BatchNorm2d(planes)

        self.shortcut = nn.Sequential()
        if stride != 1 or in_planes != self.expansion*planes:
            if self.bn:
                self.shortcut = nn.Sequential(
                    nn.Conv2d(in_planes, self.expansion*planes,
                              kernel_size=1, stride=stride, bias=(not self.bn)),
                    nn.BatchNorm2d(self.expansion*planes)
                )
            else:
                self.shortcut = nn.Sequential(
                    nn.Conv2d(in_planes, self.expansion*planes,
                              kernel_size=1, stride=stride, bias=(not self.bn)),
                )

    def forward(self, x):
        if self.bn:
            out = self.act(self.bn1(self.conv1(x)))
            out = self.bn2(self.conv2(out))
        else:
            out = self.act(self.conv1(x))
            out = self.conv2(out)
        out += self.shortcut(x)
        out = self.act(out)
        return out



class ResNet5(nn.Module):
    def __init__(self, block, num_blocks=2, num_classes=10, in_planes=64, bn=True, last_layer="avg", part_gelu=False):
        super(ResNet5, self).__init__()
        self.in_planes = in_planes
        self.bn = bn
        self.last_layer = last_layer
        if part_gelu:
            self.act = F.relu
        else:
            self.act = gelu
        self.conv1 = nn.Conv2d(3, in_planes, kernel_size=3,
                               stride=2, padding=1, bias=not self.bn)
        if self.bn: self.bn1 = nn.BatchNorm2d(in_planes)
        self.layer1 = self._make_layer(block, in_planes*2, num_blocks, stride=2, bn=bn, kernel=3, part_gelu=part_gelu)
        if self.last_layer == "avg":
            self.avg2d = nn.AvgPool2d(4)
            self.linear = nn.Linear(in_planes * 8 * block.expansion, num_classes)
        elif self.last_layer == "dense":
            self.linear1 = nn.Linear(in_planes * 8 * block.expansion * 16, 100)
            self.linear2 = nn.Linear(100, num_classes)
        else:
            exit("last_layer type not supported!")

    def _make_layer(self, block, planes, num_blocks, stride, bn, kernel, part_gelu):
        strides = [stride] + [1]*(num_blocks-1)
        layers = []
        for stride in strides:
            layers.append(block(self.in_planes, planes, stride, bn, kernel, part_gelu))
            self.in_planes = planes * block.expansion
        return nn.Sequential(*layers)

    def forward(self, x):
        if self.bn:
            out = self.act(self.bn1(self.conv1(x)))
        else:
            out = self.act(self.conv1(x))
        out = self.layer1(out)
        if self.last_layer == "avg":
            out = self.avg2d(out)
            out = torch.flatten(out, 1)
            out = self.linear(out)
        elif self.last_layer == "dense":
            out = torch.flatten(out, 1)
            out = gelu(self.linear1(out))
            out = self.linear2(out)
        return out


def resnet2b_gelu_4():
    return ResNet5(BasicBlock, num_blocks=2, in_planes=4, bn=False, last_layer="dense")

def resnet2b_gelu_8():
    return ResNet5(BasicBlock, num_blocks=2, in_planes=8, bn=False, last_layer="dense")

def resnet2b_part_gelu_4():
    return ResNet5(BasicBlock, num_blocks=2, in_planes=4, bn=False, last_layer="dense", part_gelu=True)

def resnet2b_part_gelu_8():
    return ResNet5(BasicBlock, num_blocks=2, in_planes=8, bn=False, last_layer="dense", part_gelu=True)
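ResNet5 here is a small two-block residual network: conv1 halves the 32x32 input to 16x16, the single stage of two BasicBlocks halves it again to 8x8 while doubling the channels to 2*in_planes, so the "dense" head flattens 2*in_planes*8*8 = in_planes*8*16 features, matching linear1. The resnet2b_gelu_* factories use GELU throughout, while the resnet2b_part_gelu_* variants use ReLU for the convolutional activations and keep GELU only after linear1. A minimal sanity-check sketch (not part of this commit), import path assumed:

import torch
from benchmarks.cifar.models.resnet2b_gelu import resnet2b_gelu_4  # import path assumed

model = resnet2b_gelu_4()
x = torch.randn(2, 3, 32, 32)
# conv1 (stride 2): 32 -> 16; two-block stage (stride 2): 16 -> 8 with 2*4 = 8 channels,
# so linear1 sees 8 * 8 * 8 = 4 * 8 * 16 = 512 features.
assert model(x).shape == (2, 10)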
benchmarks/cifar/models/sigmoid.py ADDED
@@ -0,0 +1,25 @@
import torch.nn as nn


def sigmoid_fc(in_ch=3, in_dim=32, width=100, depth=4, num_classes=10):
    layers = [nn.Flatten(), nn.Linear(in_ch*in_dim**2, width), nn.Sigmoid()]
    for _ in range(depth - 1):
        layers.extend([nn.Linear(width, width), nn.Sigmoid()])
    layers.append(nn.Linear(width, num_classes))
    return nn.Sequential(*layers)


def sigmoid_4fc_100(in_ch=3, in_dim=32):
    return sigmoid_fc(in_ch, in_dim, width=100, depth=4)


def sigmoid_4fc_500(in_ch=3, in_dim=32):
    return sigmoid_fc(in_ch, in_dim, width=500, depth=4)


def sigmoid_6fc_100(in_ch=3, in_dim=32):
    return sigmoid_fc(in_ch, in_dim, width=100, depth=6)


def sigmoid_6fc_200(in_ch=3, in_dim=32):
    return sigmoid_fc(in_ch, in_dim, width=200, depth=6)
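sigmoid_fc stacks depth Sigmoid-activated Linear layers of the given width on the flattened 3*32*32 = 3072 CIFAR input, followed by a final Linear to num_classes. A minimal usage sketch (not part of this commit), import path assumed:

import torch
from benchmarks.cifar.models.sigmoid import sigmoid_4fc_100  # import path assumed

logits = sigmoid_4fc_100()(torch.randn(2, 3, 32, 32))   # flattened input: 3*32*32 = 3072 features
assert logits.shape == (2, 10)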
benchmarks/cifar/models/sin.py ADDED
@@ -0,0 +1,42 @@
import torch.nn as nn
import torch

class Sin_layer(nn.Module):
    def __init__(self, omega=0.3):
        super().__init__()
        self.omega = omega

    def forward(self, x):
        return torch.sin(self.omega * x)


def sin_fc(in_ch=3, in_dim=32, width=100, depth=4, num_classes=10):
    layers = [nn.Flatten(), nn.Linear(in_ch*in_dim**2, width), Sin_layer()]
    for _ in range(depth - 1):
        layers.extend([nn.Linear(width, width), Sin_layer()])
    layers.append(nn.Linear(width, num_classes))
    return nn.Sequential(*layers)

def sin_fc_sigmoid(in_ch=3, in_dim=32, width=100, depth=4, omega=0.3, num_classes=10):
    layers = [nn.Flatten(), nn.Linear(in_ch*in_dim**2, width), nn.Sigmoid()]
    for _ in range(depth - 2):
        layers.extend([nn.Linear(width, width), Sin_layer(omega=omega)])
    layers.extend([nn.Linear(width, width), nn.Sigmoid()])
    layers.append(nn.Linear(width, num_classes))
    return nn.Sequential(*layers)


def sin_4fc_100(in_ch=3, in_dim=32):
    return sin_fc(in_ch, in_dim, width=100, depth=4)


def sin_4fc_100_sigmoid(in_ch=3, in_dim=32):
    return sin_fc_sigmoid(in_ch, in_dim, width=100, depth=4, omega=1)


def sin_4fc_200(in_ch=3, in_dim=32):
    return sin_fc(in_ch, in_dim, width=200, depth=4)


def sin_4fc_500(in_ch=3, in_dim=32):
    return sin_fc(in_ch, in_dim, width=500, depth=4)
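Sin_layer applies sin(omega * x) as the activation, with omega a fixed (untrained) constant defaulting to 0.3; sin_fc uses it after every hidden Linear, while sin_fc_sigmoid keeps Sigmoid on the first and last hidden layers and uses sin(omega * x) in between. A minimal usage sketch (not part of this commit), import path assumed:

import torch
from benchmarks.cifar.models.sin import sin_4fc_100, sin_4fc_100_sigmoid  # import path assumed

x = torch.randn(2, 3, 32, 32)
assert sin_4fc_100()(x).shape == (2, 10)          # sin(0.3 * x) activations
assert sin_4fc_100_sigmoid()(x).shape == (2, 10)  # Sigmoid first/last, sin(1.0 * x) in the middle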
benchmarks/cifar/models/tanh.py ADDED
@@ -0,0 +1,18 @@
import torch.nn as nn


def tanh_fc(in_ch=3, in_dim=32, width=100, depth=4, num_classes=10):
    layers = [nn.Flatten(), nn.Linear(in_ch*in_dim**2, width), nn.Tanh()]
    for _ in range(depth - 1):
        layers.extend([nn.Linear(width, width), nn.Tanh()])
    layers.append(nn.Linear(width, num_classes))
    return nn.Sequential(*layers)


def tanh_4fc_100(in_ch=3, in_dim=32):
    return tanh_fc(in_ch, in_dim, width=100, depth=4)


def tanh_6fc_100(in_ch=3, in_dim=32):
    return tanh_fc(in_ch, in_dim, width=100, depth=6)
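tanh_fc mirrors sigmoid_fc with Tanh activations: depth Tanh-activated Linear layers of the given width (100 in both factories) on the flattened 3072-dimensional input, plus an output Linear. A minimal usage sketch (not part of this commit), import path assumed:

import torch
from benchmarks.cifar.models.tanh import tanh_4fc_100, tanh_6fc_100  # import path assumed

x = torch.randn(2, 3, 32, 32)
assert tanh_4fc_100()(x).shape == (2, 10)
assert tanh_6fc_100()(x).shape == (2, 10)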
benchmarks/cifar/models/vit.py ADDED
@@ -0,0 +1,200 @@
import torch
import torch.nn.functional as F
from torch import nn
import math
from torch import Tensor


class Reduce(nn.Module):
    def forward(self, x):
        return x.mean(dim=1)


class PatchEmbedding(nn.Module):
    def __init__(self, in_channels: int = 3, patch_size: int = 16, emb_size: int = 768, img_size: int = 224):
        self.patch_size = patch_size
        super().__init__()
        self.projection = nn.Conv2d(in_channels, emb_size, kernel_size=patch_size, stride=patch_size)
        self.cls_token = nn.Parameter(torch.zeros(1, 1, emb_size))
        self.positions = nn.Parameter(torch.zeros((img_size // patch_size) * (img_size // patch_size) + 1, emb_size))


    def forward(self, x: Tensor) -> Tensor:
        b, _, _, _ = x.shape
        x = self.projection(x)
        x = x.flatten(2).transpose(1, 2)
        cls_tokens = self.cls_token.expand(b, -1, -1)
        # prepend the cls token to the input
        x = torch.cat([cls_tokens, x], dim=1)
        # add position embedding
        x += self.positions
        return x


class MultiHeadAttention(nn.Module):
    def __init__(self, d_model=123, num_heads=6, dropout=0.):
        super().__init__()
        assert d_model % num_heads == 0, "d_model must be divisible by num_heads"

        self.d_model = d_model
        self.num_heads = num_heads
        self.head_dim = d_model // num_heads

        self.query = nn.Linear(d_model, d_model)
        self.key = nn.Linear(d_model, d_model)
        self.value = nn.Linear(d_model, d_model)
        self.out = nn.Linear(d_model, d_model)

        self.dropout = nn.Dropout(dropout)

    def split_heads(self, x, batch_size):
        x = x.view(batch_size, -1, self.num_heads, self.head_dim)
        return x.transpose(1, 2)

    def forward(self, x):
        batch_size = x.size(0)

        query = self.query(x)
        key = self.key(x)
        value = self.value(x)

        query = self.split_heads(query, batch_size)
        key = self.split_heads(key, batch_size)
        value = self.split_heads(value, batch_size)

        scores = torch.matmul(query, key.transpose(-2, -1))
        scores = scores / 8

        attn_weights = F.softmax(scores, dim=-1)
        attn_weights = self.dropout(attn_weights)

        attn_output = torch.matmul(attn_weights, value)
        attn_output = attn_output.transpose(1, 2).contiguous().view(batch_size, -1, self.d_model)
        attn_output = self.out(attn_output)

        return attn_output


class ResidualAdd(nn.Module):
    def __init__(self, fn):
        super().__init__()
        self.fn = fn

    def forward(self, x, **kwargs):
        res = x
        x = self.fn(x, **kwargs)
        x += res
        return x


class FeedForwardBlock(nn.Sequential):
    def __init__(self, emb_size: int, expansion: int = 2, drop_p: float = 0.):
        super().__init__(
            nn.Linear(emb_size, expansion * emb_size),
            nn.ReLU(),
            nn.Dropout(drop_p),
            nn.Linear(expansion * emb_size, emb_size),
        )


class TransformerEncoderBlock(nn.Sequential):
    def __init__(self,
                 emb_size: int = 768,
                 drop_p: float = 0.,
                 forward_expansion: int = 2,
                 forward_drop_p: float = 0.,
                 **kwargs):
        super().__init__(
            ResidualAdd(nn.Sequential(
                nn.LayerNorm(emb_size),
                MultiHeadAttention(emb_size, **kwargs),
                nn.Dropout(drop_p)
            )),
            ResidualAdd(nn.Sequential(
                nn.LayerNorm(emb_size),
                FeedForwardBlock(
                    emb_size, expansion=forward_expansion, drop_p=forward_drop_p),
                nn.Dropout(drop_p)
            )
            ))


class TransformerEncoder(nn.Sequential):
    def __init__(self, depth: int = 1, **kwargs):
        super().__init__(*[TransformerEncoderBlock(**kwargs) for _ in range(depth)])


class ClassificationHead(nn.Sequential):
    def __init__(self, emb_size: int = 768, n_classes: int = 1000):
        super().__init__(
            Reduce(),
            nn.LayerNorm(emb_size),
            nn.Linear(emb_size, n_classes))


class ViT_1_3(nn.Sequential):
    def __init__(self,
                 in_channels: int = 3,
                 num_heads: int = 3,
                 patch_size: int = 16,
                 emb_size: int = 48,
                 img_size: int = 32,
                 depth: int = 1,
                 n_classes: int = 10,
                 **kwargs):
        super().__init__(
            PatchEmbedding(in_channels, patch_size, emb_size, img_size),
            TransformerEncoder(depth, emb_size=emb_size, num_heads=num_heads, **kwargs),
            ClassificationHead(emb_size, n_classes)
        )


class ViT_1_6(nn.Sequential):
    def __init__(self,
                 in_channels: int = 3,
                 num_heads: int = 6,
                 patch_size: int = 16,
                 emb_size: int = 96,
                 img_size: int = 32,
                 depth: int = 1,
                 n_classes: int = 10,
                 **kwargs):
        super().__init__(
            PatchEmbedding(in_channels, patch_size, emb_size, img_size),
            TransformerEncoder(depth, emb_size=emb_size, num_heads=num_heads, **kwargs),
            ClassificationHead(emb_size, n_classes)
        )


class ViT_2_3(nn.Sequential):
    def __init__(self,
                 in_channels: int = 3,
                 num_heads: int = 3,
                 patch_size: int = 16,
                 emb_size: int = 48,
                 img_size: int = 32,
                 depth: int = 2,
                 n_classes: int = 10,
                 **kwargs):
        super().__init__(
            PatchEmbedding(in_channels, patch_size, emb_size, img_size),
            TransformerEncoder(depth, emb_size=emb_size, num_heads=num_heads, **kwargs),
            ClassificationHead(emb_size, n_classes)
        )


class ViT_2_6(nn.Sequential):
    def __init__(self,
                 in_channels: int = 3,
                 num_heads: int = 6,
                 patch_size: int = 16,
                 emb_size: int = 48,
                 img_size: int = 32,
                 depth: int = 2,
                 n_classes: int = 10,
                 **kwargs):
        super().__init__(
            PatchEmbedding(in_channels, patch_size, emb_size, img_size),
            TransformerEncoder(depth, emb_size=emb_size, num_heads=num_heads, **kwargs),
            ClassificationHead(emb_size, n_classes)
        )
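The ViT_d_h classes are small Vision Transformers with d encoder blocks and h attention heads. On 32x32 CIFAR images with 16x16 patches, PatchEmbedding yields 4 patch tokens plus one cls token; each encoder block applies pre-LayerNorm attention and a ReLU feed-forward block with residual additions; and ClassificationHead mean-pools over all tokens (Reduce) rather than reading the cls token. Note that MultiHeadAttention divides attention scores by the fixed constant 8 rather than sqrt(head_dim). A minimal sanity-check sketch (not part of this commit), import path assumed:

import torch
from benchmarks.cifar.models.vit import ViT_1_3  # import path assumed

model = ViT_1_3()
x = torch.randn(2, 3, 32, 32)
# (32/16)**2 = 4 patch tokens + 1 cls token = 5 tokens of dimension 48, split into 3 heads of size 16.
assert model(x).shape == (2, 10)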