Spaces (status: Runtime error)

Commit: Update

Files changed:
- handetect/__pycache__/models.cpython-310.pyc +0 -0
- handetect/__pycache__/predict.cpython-310.pyc +0 -0
- handetect/adjust_hyperparameter.py +2 -5
- handetect/evaluation.py +1 -1
- handetect/main.py +10 -12
- handetect/models.py +6 -333
- handetect/predict.py +1 -1
handetect/__pycache__/models.cpython-310.pyc
CHANGED
Binary files a/handetect/__pycache__/models.cpython-310.pyc and b/handetect/__pycache__/models.cpython-310.pyc differ

handetect/__pycache__/predict.cpython-310.pyc
CHANGED
Binary files a/handetect/__pycache__/predict.cpython-310.pyc and b/handetect/__pycache__/predict.cpython-310.pyc differ
handetect/adjust_hyperparameter.py
CHANGED
@@ -24,9 +24,6 @@ AUG_DATA_DIR = r"data/train/augmented/Task " + str(TASK)
 NUM_CLASSES = len(os.listdir(ORIG_DATA_DIR))
 VAL_RESIZE_SIZE = 232
 
-# Load and preprocess the data
-data_dir = r"data/train/Task 1"
-
 def resize_for_validation(image):
     return transforms.Resize((VAL_RESIZE_SIZE, VAL_RESIZE_SIZE))(image)
 
@@ -84,7 +81,7 @@ valid_loader = DataLoader(
 )
 
 # Initialize model, criterion, optimizer, and scheduler
-model =
+model = mobilenet_v2(pretrained=False, num_classes=NUM_CLASSES)
 model = model.to(DEVICE)
 criterion = nn.CrossEntropyLoss()
 # Adam optimizer
@@ -112,7 +109,7 @@ def objective(trial):
     batch_size = trial.suggest_categorical("batch_size", [16, 32, 64])
 
     # Modify the model and optimizer using suggested hyperparameters
-    model =
+    model = mobilenet_v2(pretrained=False, num_classes=NUM_CLASSES).to(DEVICE)
     optimizer = optim.Adam(model.parameters(), lr=learning_rate)
 
     for epoch in range(NUM_EPOCHS):
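
The hunk above completes the Optuna objective: each trial must rebuild the model and optimizer from the suggested hyperparameters, otherwise every trial would keep training the same weights. A minimal runnable sketch of that pattern (NUM_CLASSES and the search space here are illustrative, not the repo's exact values):

    import optuna
    import torch
    import torch.optim as optim
    from torchvision.models import mobilenet_v2

    NUM_CLASSES = 7  # assumed; the repo derives this from the class folders
    DEVICE = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

    def objective(trial):
        learning_rate = trial.suggest_float("lr", 1e-4, 1e-1, log=True)
        batch_size = trial.suggest_categorical("batch_size", [16, 32, 64])
        # Fresh model per trial so weights do not leak between trials
        model = mobilenet_v2(num_classes=NUM_CLASSES).to(DEVICE)
        optimizer = optim.Adam(model.parameters(), lr=learning_rate)
        # ... train briefly with batch_size, then return the validation loss ...
        return 0.0  # placeholder metric for the sketch

    study = optuna.create_study(direction="minimize")
    study.optimize(objective, n_trials=3)
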
handetect/evaluation.py
CHANGED
@@ -23,7 +23,7 @@ print(images)
 true_classs = []
 predicted_labels = []
 
-model =
+model = mobilenet_v2(pretrained=False, num_classes=NUM_CLASSES)
 model.load_state_dict(torch.load(model_checkpoint_path, map_location=DEVICE))
 model.eval()
 model = model.to(DEVICE)
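
The load path restored here is the standard PyTorch pattern: construct the architecture first, then restore the weights. The map_location argument matters on a Space, since a checkpoint saved on a GPU machine would otherwise fail to load on a CPU-only runtime. A small sketch under assumed names (the checkpoint path and class count are hypothetical):

    import torch
    from torchvision.models import mobilenet_v2

    DEVICE = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    model = mobilenet_v2(num_classes=7)  # 7 classes assumed for illustration
    # map_location remaps CUDA-saved tensors onto the current device
    state = torch.load("checkpoints/task1.pth", map_location=DEVICE)
    model.load_state_dict(state)
    model.eval()  # disable dropout and use running BatchNorm statistics
    model = model.to(DEVICE)
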
handetect/main.py
CHANGED
@@ -11,15 +11,17 @@ from scipy.ndimage import gaussian_filter1d
 from torch.utils.tensorboard import SummaryWriter #print to tensorboard
 from torchvision.utils import make_grid
 
-torch.cuda.empty_cache()
+# torch.cuda.empty_cache()
+# os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "max_split_size_mb:1024"
+
+writer = SummaryWriter(log_dir='runs/Task1')
 
-writer = SummaryWriter()
 
 # Constants
 RANDOM_SEED = 123
-BATCH_SIZE =
+BATCH_SIZE = 16
 NUM_EPOCHS = 100
-LEARNING_RATE = 0.
+LEARNING_RATE = 0.030215994618918267
 STEP_SIZE = 10
 GAMMA = 0.5
 DEVICE = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
@@ -54,8 +56,6 @@ print("Classes: ", original_dataset.classes)
 print("Length of original dataset: ", len(original_dataset))
 print("Length of augmented dataset: ", len(augmented_dataset))
 print("Length of total dataset: ", len(dataset))
-print("Classes: ", original_dataset.classes)
-
 
 # Custom dataset class
 class CustomDataset(Dataset):
@@ -84,15 +84,13 @@ valid_loader = DataLoader(
 )
 
 # Initialize model, criterion, optimizer, and scheduler
-model =
+model = mobilenet_v3_small(num_classes=NUM_CLASSES)
 model = model.to(DEVICE)
 criterion = nn.CrossEntropyLoss()
 # Adam optimizer
 optimizer = optim.Adam(model.parameters(), lr=LEARNING_RATE)
-#
-scheduler = optim.lr_scheduler.
-    optimizer, mode="min", factor=0.1, patience=10, verbose=True
-)
+# StepLR scheduler
+scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=STEP_SIZE, gamma=GAMMA)
 
 # Lists to store training and validation loss history
 TRAIN_LOSS_HIST = []
@@ -145,7 +143,7 @@ for epoch in range(NUM_EPOCHS):
     # Learning rate scheduling
     lr_1 = optimizer.param_groups[0]["lr"]
     print("Learning Rate: {:.15f}".format(lr_1))
-    scheduler.step(
+    scheduler.step()
 
     # Validation loop
     model.eval()  # Set model to evaluation mode
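
Note that the scheduler swap also changes the call signature: ReduceLROnPlateau must be stepped with a metric (scheduler.step(val_loss)), which is why the dangling scheduler.step( call was broken, while StepLR's step() takes no argument and simply halves the learning rate every STEP_SIZE epochs with GAMMA=0.5. A self-contained sketch of the resulting decay (stand-in model, not the repo's):

    import torch
    import torch.optim as optim

    model = torch.nn.Linear(8, 2)  # stand-in model for the sketch
    optimizer = optim.Adam(model.parameters(), lr=0.03)
    scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=10, gamma=0.5)

    for epoch in range(30):
        optimizer.step()   # training elided
        scheduler.step()   # no metric argument, unlike ReduceLROnPlateau
    # lr after 10/20/30 epochs: 0.015, 0.0075, 0.00375
    print(optimizer.param_groups[0]["lr"])
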
handetect/models.py
CHANGED
@@ -7,336 +7,9 @@ from torchvision.models import resnet50
 from torchvision.models import resnet18
 from torchvision.models import squeezenet1_0
 from torchvision.models import vgg16
-
-
-
-
-
-
-        super(Bottleneck, self).__init__()
-        # hmm,ex 1x1 convolution to reduce channels (intermediate channels)
-        self.conv1 = torch.nn.Conv2d(
-            in_channels, out_channels, kernel_size=1, stride=1, padding=0
-        )
-        self.batch_norm1 = torch.nn.BatchNorm2d(out_channels)
-        # 3x3 convolution with specified stride
-        self.conv2 = torch.nn.Conv2d(
-            out_channels, out_channels, kernel_size=3, stride=stride, padding=1
-        )
-        self.batch_norm2 = torch.nn.BatchNorm2d(out_channels)
-        # and then leh,1x1 expand back
-        self.conv3 = torch.nn.Conv2d(
-            out_channels,
-            out_channels * self.expansion,
-            kernel_size=1,
-            stride=1,
-            padding=0,
-        )
-        self.batch_norm3 = torch.nn.BatchNorm2d(out_channels * self.expansion)
-
-        self.i_downsample = i_downsample
-        self.stride = stride
-        self.relu = torch.nn.ReLU()
-
-    ##forward the input x through the network,haiyaa
-    def forward(self, x):
-        identity = x.clone()
-        x = self.relu(self.batch_norm1(self.conv1(x)))
-
-        x = self.relu(self.batch_norm2(self.conv2(x)))
-
-        x = self.conv3(x)
-        x = self.batch_norm3(x)
-
-        # downsample if needed
-        if self.i_downsample is not None:
-            identity = self.i_downsample(identity)
-        # add identity
-        x += identity
-        x = self.relu(x)
-
-        return x
-
-
-# we no use this first,but we can just copy this whole class and apply to resnet16 and etc
-class Block(torch.nn.Module):
-    expansion = 1
-
-    def __init__(self, in_channels, out_channels, i_downsample=None, stride=1):
-        super(Block, self).__init__()
-
-        self.conv1 = torch.nn.Conv2d(
-            in_channels,
-            out_channels,
-            kernel_size=3,
-            padding=1,
-            stride=stride,
-            bias=False,
-        )
-        self.batch_norm1 = torch.nn.BatchNorm2d(out_channels)
-        self.conv2 = torch.nn.Conv2d(
-            out_channels,
-            out_channels,
-            kernel_size=3,
-            padding=1,
-            stride=stride,
-            bias=False,
-        )
-        self.batch_norm2 = torch.nn.BatchNorm2d(out_channels)
-
-        self.i_downsample = i_downsample
-        self.stride = stride
-        self.relu = torch.nn.ReLU()
-
-    def forward(self, x):
-        identity = x.clone()
-
-        x = self.relu(self.batch_norm2(self.conv1(x)))
-        x = self.batch_norm2(self.conv2(x))
-
-        if self.i_downsample is not None:
-            identity = self.i_downsample(identity)
-        print(x.shape)
-        print(identity.shape)
-        x += identity
-        x = self.relu(x)
-        return x
-
-
-class ResNet(torch.nn.Module):
-    def __init__(self, ResBlock, layer_list, num_classes, num_channels=3):
-        super(ResNet, self).__init__()
-        self.in_channels = 64
-        # intial conv layaer
-        self.conv1 = torch.nn.Conv2d(
-            num_channels, 64, kernel_size=7, stride=2, padding=3, bias=False
-        )
-        self.batch_norm1 = torch.nn.BatchNorm2d(64)
-        self.relu = torch.nn.ReLU()
-        self.max_pool = torch.nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
-        # residual block(layers),each block got three three layer,total 4 blocks
-        self.layer1 = self._make_layer(ResBlock, layer_list[0], planes=64)
-        self.layer2 = self._make_layer(ResBlock, layer_list[1], planes=128, stride=2)
-        self.layer3 = self._make_layer(ResBlock, layer_list[2], planes=256, stride=2)
-        self.layer4 = self._make_layer(ResBlock, layer_list[3], planes=512, stride=2)
-
-        self.avgpool = torch.nn.AdaptiveAvgPool2d((1, 1))
-        self.fc = torch.nn.Linear(512 * ResBlock.expansion, num_classes)
-
-    def forward(self, x):
-        x = self.relu(self.batch_norm1(self.conv1(x)))
-        x = self.max_pool(x)
-
-        x = self.layer1(x)
-        x = self.layer2(x)
-        x = self.layer3(x)
-        x = self.layer4(x)
-
-        x = self.avgpool(x)
-        x = x.reshape(x.shape[0], -1)
-        x = self.fc(x)
-
-        return x
-
-    def _make_layer(self, ResBlock, blocks, planes, stride=1):
-        # plane is the number of output channel
-        ii_downsample = None
-        layers = []
-
-        if stride != 1 or self.in_channels != planes * ResBlock.expansion:
-            ii_downsample = torch.nn.Sequential(
-                torch.nn.Conv2d(
-                    self.in_channels,
-                    planes * ResBlock.expansion,
-                    kernel_size=1,
-                    stride=stride,
-                ),
-                torch.nn.BatchNorm2d(planes * ResBlock.expansion),
-            )
-
-        layers.append(
-            ResBlock(
-                self.in_channels, planes, i_downsample=ii_downsample, stride=stride
-            )
-        )
-        self.in_channels = planes * ResBlock.expansion
-
-        for i in range(blocks - 1):
-            layers.append(ResBlock(self.in_channels, planes))
-
-        return torch.nn.Sequential(*layers)
-
-
-##list here leh is the number of residual block in each layer
-def ResNet50(num_classes, channels=3):
-    return ResNet(Bottleneck, [3, 4, 6, 3], num_classes, channels)
-
-
-# VGG16 model
-class VGG16(torch.nn.Module):
-    def __init__(self, num_classes):
-        super().__init__()
-
-        self.block_1 = torch.nn.Sequential(
-            torch.nn.Conv2d(
-                in_channels=3,
-                out_channels=64,
-                kernel_size=(3, 3),
-                stride=(1, 1),
-                padding=1,
-            ),
-            torch.nn.ReLU(),
-            torch.nn.Conv2d(
-                in_channels=64,
-                out_channels=64,
-                kernel_size=(3, 3),
-                stride=(1, 1),
-                padding=1,
-            ),
-            torch.nn.ReLU(),
-            torch.nn.MaxPool2d(kernel_size=(2, 2), stride=(2, 2)),
-        )
-
-        self.block_2 = torch.nn.Sequential(
-            torch.nn.Conv2d(
-                in_channels=64,
-                out_channels=128,
-                kernel_size=(3, 3),
-                stride=(1, 1),
-                padding=1,
-            ),
-            torch.nn.ReLU(),
-            torch.nn.Conv2d(
-                in_channels=128,
-                out_channels=128,
-                kernel_size=(3, 3),
-                stride=(1, 1),
-                padding=1,
-            ),
-            torch.nn.ReLU(),
-            torch.nn.MaxPool2d(kernel_size=(2, 2), stride=(2, 2)),
-        )
-
-        self.block_3 = torch.nn.Sequential(
-            torch.nn.Conv2d(
-                in_channels=128,
-                out_channels=256,
-                kernel_size=(3, 3),
-                stride=(1, 1),
-                padding=1,
-            ),
-            torch.nn.ReLU(),
-            torch.nn.Conv2d(
-                in_channels=256,
-                out_channels=256,
-                kernel_size=(3, 3),
-                stride=(1, 1),
-                padding=1,
-            ),
-            torch.nn.ReLU(),
-            torch.nn.Conv2d(
-                in_channels=256,
-                out_channels=256,
-                kernel_size=(3, 3),
-                stride=(1, 1),
-                padding=1,
-            ),
-            torch.nn.ReLU(),
-            torch.nn.MaxPool2d(kernel_size=(2, 2), stride=(2, 2)),
-        )
-
-        self.block_4 = torch.nn.Sequential(
-            torch.nn.Conv2d(
-                in_channels=256,
-                out_channels=512,
-                kernel_size=(3, 3),
-                stride=(1, 1),
-                padding=1,
-            ),
-            torch.nn.ReLU(),
-            torch.nn.Conv2d(
-                in_channels=512,
-                out_channels=512,
-                kernel_size=(3, 3),
-                stride=(1, 1),
-                padding=1,
-            ),
-            torch.nn.ReLU(),
-            torch.nn.Conv2d(
-                in_channels=512,
-                out_channels=512,
-                kernel_size=(3, 3),
-                stride=(1, 1),
-                padding=1,
-            ),
-            torch.nn.ReLU(),
-            torch.nn.MaxPool2d(kernel_size=(2, 2), stride=(2, 2)),
-        )
-
-        self.block_5 = torch.nn.Sequential(
-            torch.nn.Conv2d(
-                in_channels=512,
-                out_channels=512,
-                kernel_size=(3, 3),
-                stride=(1, 1),
-                padding=1,
-            ),
-            torch.nn.ReLU(),
-            torch.nn.Conv2d(
-                in_channels=512,
-                out_channels=512,
-                kernel_size=(3, 3),
-                stride=(1, 1),
-                padding=1,
-            ),
-            torch.nn.ReLU(),
-            torch.nn.Conv2d(
-                in_channels=512,
-                out_channels=512,
-                kernel_size=(3, 3),
-                stride=(1, 1),
-                padding=1,
-            ),
-            torch.nn.ReLU(),
-            torch.nn.MaxPool2d(kernel_size=(2, 2), stride=(2, 2)),
-        )
-
-        height, width = 3, 3
-        self.classifier = torch.nn.Sequential(
-            torch.nn.Linear(512 * height * width, 4096),
-            torch.nn.ReLU(True),
-            torch.nn.Dropout(p=0.5),
-            torch.nn.Linear(4096, 4096),
-            torch.nn.ReLU(True),
-            torch.nn.Dropout(p=0.5),
-            torch.nn.Linear(4096, num_classes),
-        )
-
-        for m in self.modules():
-            if isinstance(m, torch.torch.nn.Conv2d) or isinstance(
-                m, torch.torch.nn.Linear
-            ):
-                torch.nn.init.kaiming_uniform_(
-                    m.weight, mode="fan_in", nonlinearity="relu"
-                )
-                if m.bias is not None:
-                    m.bias.detach().zero_()
-
-        self.avgpool = torch.nn.AdaptiveAvgPool2d((height, width))
-
-    def forward(self, x):
-        x = self.block_1(x)
-        x = self.block_2(x)
-        x = self.block_3(x)
-        x = self.block_4(x)
-        x = self.block_5(x)
-        x = self.avgpool(x)
-        x = x.view(x.size(0), -1)  # flatten
-
-        logits = self.classifier(x)
-        # probas = F.softmax(logits, dim=1)
-
-        return logits
-
-
-# ResNet18 model
+from torchvision.models import alexnet
+from torchvision.models import densenet121
+from torchvision.models import googlenet
+from torchvision.models import inception_v3
+from torchvision.models import mobilenet_v2
+from torchvision.models import mobilenet_v3_small
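
With the hand-written Bottleneck/Block/ResNet/VGG16 classes removed, models.py now only re-exports torchvision factories; passing num_classes sizes the final classifier layer while the rest of the architecture stays stock. A quick sketch of how the other files can instantiate them (the class count is assumed for illustration):

    from torchvision.models import mobilenet_v3_small, squeezenet1_0

    NUM_CLASSES = 7  # assumed for illustration
    # Weights are randomly initialised here; using pretrained weights would
    # instead require keeping the default 1000-class head and replacing it.
    model_small = mobilenet_v3_small(num_classes=NUM_CLASSES)
    model_sqz = squeezenet1_0(num_classes=NUM_CLASSES)
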
handetect/predict.py
CHANGED
@@ -25,7 +25,7 @@ preprocess = transforms.Compose(
 )
 
 # Load your model (change this according to your model definition)
-model =
+model = squeezenet1_0(pretrained=False, num_classes=NUM_CLASSES)
 model.load_state_dict(
     torch.load(model_checkpoint_path, map_location=DEVICE)
 )  # Load the model on the same device
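
End to end, prediction with the loaded model reduces to preprocess, forward pass, softmax. A hypothetical usage sketch (the file name, input size, and class count are assumptions, not taken from predict.py):

    import torch
    from PIL import Image
    from torchvision import transforms
    from torchvision.models import squeezenet1_0

    DEVICE = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    preprocess = transforms.Compose([
        transforms.Resize((224, 224)),  # assumed input size
        transforms.ToTensor(),
    ])

    model = squeezenet1_0(num_classes=7).to(DEVICE).eval()  # class count assumed
    image = Image.open("sample.png").convert("RGB")         # hypothetical input
    batch = preprocess(image).unsqueeze(0).to(DEVICE)       # add batch dimension
    with torch.no_grad():
        probs = torch.softmax(model(batch), dim=1)
    print(probs.argmax(dim=1).item())                       # predicted class index
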