Update
- configs.py +4 -4
- train.py +5 -5
- tuning.py +24 -4
configs.py
CHANGED
@@ -6,10 +6,10 @@ from models import *
 
 # Constants
 RANDOM_SEED = 123
-BATCH_SIZE =
+BATCH_SIZE = 64
 NUM_EPOCHS = 100
-LEARNING_RATE =
+LEARNING_RATE = 1.6317268278715415e-05
-OPTIMIZER_NAME = "
+OPTIMIZER_NAME = "Adam"
 STEP_SIZE = 10
 GAMMA = 0.5
 DEVICE = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
@@ -22,7 +22,7 @@ NUM_CLASSES = 7
 # Define classes as listdir of augmented data
 CLASSES = os.listdir("data/train/augmented/Task 1/")
 MODEL_SAVE_PATH = "output/checkpoints/model.pth"
-MODEL =
+MODEL = googlenet(num_classes=NUM_CLASSES)
 
 print(CLASSES)
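Note: the pinned values (BATCH_SIZE = 64, OPTIMIZER_NAME = "Adam", and the oddly precise LEARNING_RATE) look like the best-trial output of the Optuna search in tuning.py. A minimal sketch of how such values are produced and read back, assuming the objective defined in tuning.py; the trial count is illustrative:

import optuna
from tuning import objective  # objective(trial, model=MODEL) as defined in tuning.py

study = optuna.create_study(direction="maximize", study_name="handetect")
study.optimize(objective, n_trials=100)  # n_trials is an assumption

# The best trial's params are then hard-coded into configs.py, e.g.
# {"optimizer": "Adam", "lr": 1.6317268278715415e-05, "batch_size": 64}
print(study.best_trial.params)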
train.py
CHANGED
@@ -2,15 +2,11 @@ import os
 import torch
 import torch.nn as nn
 import torch.optim as optim
-from torchvision.transforms import transforms
-from torch.utils.data import DataLoader
-from torchvision.utils import make_grid
 import matplotlib.pyplot as plt
 from models import *
 from torch.utils.tensorboard import SummaryWriter
 from configs import *
 import data_loader
-import numpy as np
 
 # Set up TensorBoard writer
 writer = SummaryWriter(log_dir="output/tensorboard/training")
@@ -54,6 +50,7 @@ VAL_ACC_HIST = []
 # Training loop
 for epoch in range(NUM_EPOCHS):
     print(f"[Epoch: {epoch + 1}]")
+    print("Learning rate:", scheduler.get_last_lr()[0])
     MODEL.train()  # Set model to training mode
     running_loss = 0.0
     total_train = 0
@@ -62,7 +59,10 @@ for epoch in range(NUM_EPOCHS):
     for i, (inputs, labels) in enumerate(train_loader, 0):
         inputs, labels = inputs.to(DEVICE), labels.to(DEVICE)
         optimizer.zero_grad()
-        outputs = MODEL(inputs)
+        if MODEL.__class__.__name__ == "GoogLeNet":  # GoogLeNet returns a namedtuple while training
+            outputs = MODEL(inputs).logits
+        else:
+            outputs = MODEL(inputs)
         loss = criterion(outputs, labels)
         loss.backward()
         if OPTIMIZER_NAME == "LBFGS":
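Note: the new branch exists because torchvision's GoogLeNet, in training mode with auxiliary classifiers enabled, returns a GoogLeNetOutputs namedtuple rather than a plain tensor, so the main logits must be unpacked before computing the loss; in eval mode it returns a tensor directly. (The added get_last_lr() call returns a list with one learning rate per parameter group, hence the [0].) A minimal standalone sketch, assuming models re-exports torchvision's googlenet:

import torch
from torchvision.models import googlenet

model = googlenet(num_classes=7, aux_logits=True, init_weights=True)
x = torch.randn(2, 3, 224, 224)

model.train()
out = model(x)          # GoogLeNetOutputs(logits=..., aux_logits2=..., aux_logits1=...)
logits = out.logits     # main classifier output, shape (2, 7)

model.eval()
logits_eval = model(x)  # plain tensor in eval mode, shape (2, 7)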
tuning.py
CHANGED
@@ -7,12 +7,15 @@ import torch.optim as optim
 import torch.utils.data
 from configs import *
 import data_loader
+from torch.utils.tensorboard import SummaryWriter
 
 optuna.logging.set_verbosity(optuna.logging.DEBUG)
 
 DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")
 EPOCHS = 10
 
+# Create a TensorBoard writer
+writer = SummaryWriter(log_dir="output/tensorboard/tuning/")
 
 def create_data_loaders(batch_size):
     # Create or modify data loaders with the specified batch size
@@ -21,7 +24,6 @@ def create_data_loaders(batch_size):
     )
     return train_loader, valid_loader
 
-
 def objective(trial, model=MODEL):
     # Generate the model.
     model = model.to(DEVICE)
@@ -34,7 +36,7 @@ def objective(trial, model=MODEL):
 
     # Generate the optimizer.
     optimizer_name = trial.suggest_categorical("optimizer", ["Adam", "SGD"])
-    lr = trial.suggest_float("lr", 1e-5, 1e-
+    lr = trial.suggest_float("lr", 1e-5, 1e-3, log=True)
     optimizer = getattr(optim, optimizer_name)(model.parameters(), lr=lr)
     criterion = nn.CrossEntropyLoss()
 
@@ -45,7 +47,10 @@ def objective(trial, model=MODEL):
     for batch_idx, (data, target) in enumerate(train_loader, 0):
         data, target = data.to(DEVICE), target.to(DEVICE)
         optimizer.zero_grad()
-        output = model(data)
+        if model.__class__.__name__ == "GoogLeNet":  # GoogLeNet returns a namedtuple while training
+            output = model(data).logits
+        else:
+            output = model(data)
         loss = criterion(output, target)
         loss.backward()
         if optimizer_name == "LBFGS":
@@ -66,6 +71,19 @@ def objective(trial, model=MODEL):
 
     accuracy = correct / len(valid_loader.dataset)
 
+    # Log hyperparameters and accuracy to TensorBoard
+    writer.add_scalar("Accuracy", accuracy, trial.number)
+    writer.add_hparams(
+        {
+            "batch_size": batch_size,
+            "optimizer": optimizer_name,
+            "lr": lr,
+        },
+        {
+            "accuracy": accuracy,
+        },
+    )
+
     # Print hyperparameters and accuracy
     print("Hyperparameters: ", trial.params)
     print("Accuracy: ", accuracy)
@@ -77,7 +95,6 @@ def objective(trial, model=MODEL):
 
     return accuracy
 
-
 if __name__ == "__main__":
     pruner = optuna.pruners.HyperbandPruner()
     study = optuna.create_study(direction="maximize", pruner=pruner, study_name="handetect")
@@ -99,3 +116,6 @@ if __name__ == "__main__":
     print(" Params: ")
     for key, value in trial.params.items():
         print(" {}: {}".format(key, value))
+
+    # Close TensorBoard writer
+    writer.close()
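Note: two of the additions above deserve a word. First, log=True in trial.suggest_float makes Optuna sample the learning rate log-uniformly, so each decade in [1e-5, 1e-3] gets roughly equal coverage; a plain uniform draw would put nearly all samples above 1e-4. A self-contained illustration with a toy objective and an illustrative trial count:

import optuna

def toy_objective(trial):
    lr = trial.suggest_float("lr", 1e-5, 1e-3, log=True)
    return lr  # stand-in for validation accuracy

study = optuna.create_study()
study.optimize(toy_objective, n_trials=50)
# With log=True, sampled lr values spread evenly across 1e-5..1e-3 on a log scale.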
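Second, writer.add_hparams logs each trial's hyperparameters together with its resulting accuracy, so trials can be compared side by side in TensorBoard's HPARAMS tab; note that each add_hparams call writes its own sub-run under the log directory. A standalone sketch with purely illustrative values:

from torch.utils.tensorboard import SummaryWriter

writer = SummaryWriter(log_dir="output/tensorboard/tuning/")
trials = [
    (0, {"batch_size": 32, "optimizer": "SGD", "lr": 3e-4}, 0.71),   # illustrative
    (1, {"batch_size": 64, "optimizer": "Adam", "lr": 1.6e-5}, 0.83),
]
for trial_number, hparams, accuracy in trials:
    writer.add_scalar("Accuracy", accuracy, trial_number)
    writer.add_hparams(hparams, {"accuracy": accuracy})
writer.close()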