import torch
import torch.nn as nn
from PIL import Image
import os

# configs is expected to provide the model classes, DEVICE, NUM_CLASSES,
# CLASSES, RANDOM_SEED and the preprocess transform used below.
from configs import *
from sklearn.metrics import confusion_matrix, ConfusionMatrixDisplay
import matplotlib.pyplot as plt
import random
from itertools import product

random.seed(RANDOM_SEED)
torch.cuda.manual_seed(RANDOM_SEED)
torch.manual_seed(RANDOM_SEED)
print("PyTorch Seed:", torch.initial_seed())
print("Random Seed:", random.getstate()[1][0])
print("PyTorch CUDA Seed:", torch.cuda.initial_seed())

# Load the pre-trained backbone models from their checkpoints
model1 = SqueezeNet1_0WithSE(num_classes=NUM_CLASSES).to(DEVICE)
model1.load_state_dict(
    torch.load("output/checkpoints/SqueezeNet1_0WithSE.pth", map_location=DEVICE)
)
model2 = EfficientNetB2WithDropout(num_classes=NUM_CLASSES).to(DEVICE)
model2.load_state_dict(
    torch.load("output/checkpoints/EfficientNetB2WithDropout.pth", map_location=DEVICE)
)
model3 = MobileNetV2WithDropout(num_classes=NUM_CLASSES).to(DEVICE)
model3.load_state_dict(
    torch.load("output/checkpoints/MobileNetV2WithDropout.pth", map_location=DEVICE)
)

# Define the class labels
class_labels = CLASSES

# Define your test data folder path
test_data_folder = "data/test/Task 1/"


# Put models in evaluation mode
def set_models_eval(models):
    for model in models:
        model.eval()


# Define the ensemble model using a list of models
class WeightedVoteEnsemble(nn.Module):
    def __init__(self, models, weights):
        super(WeightedVoteEnsemble, self).__init__()
        # Register the sub-models in an nn.ModuleList so their parameters are
        # included when the ensemble's state_dict() is saved below.
        self.models = nn.ModuleList(models)
        self.weights = weights

    def forward(self, x):
        # Weighted sum of each model's logits; the prediction is its argmax.
        predictions = [model(x) for model in self.models]
        weighted_predictions = torch.stack(
            [w * pred for w, pred in zip(self.weights, predictions)], dim=0
        )
        return weighted_predictions.sum(dim=0)
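
# Worked example of the weighted vote (numbers are illustrative only): with
# weights [0.37, 0.34, 0.29] and per-model logits l1, l2, l3 for one image,
# the combined output is 0.37 * l1 + 0.34 * l2 + 0.29 * l3, and the predicted
# class is the argmax of that combined logit vector.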


def ensemble_predictions(models, image):
    all_predictions = []

    with torch.no_grad():
        for model in models:
            output = model(image)
            all_predictions.append(output)

    return torch.stack(all_predictions, dim=0).mean(dim=0)
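
# Note: ensemble_predictions() averages the raw logits of whatever models it is
# given. In the search loop below it receives a single WeightedVoteEnsemble, so
# the mean over one model is simply that ensemble's weighted output.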


# Load a single image and make predictions
def evaluate_image(models, image_path, transform=preprocess):
    image = Image.open(image_path).convert("RGB")
    image = transform(image).unsqueeze(0)
    image = image.to(DEVICE)
    outputs = ensemble_predictions(models, image)

    return outputs.argmax(dim=1).item()


# Evaluate and plot a confusion matrix for an ensemble of models
def evaluate_and_plot_confusion_matrix(models, test_data_folder):
    all_predictions = []
    true_labels = []

    with torch.no_grad():
        for class_label in class_labels:
            class_path = os.path.join(test_data_folder, class_label)
            for image_file in os.listdir(class_path):
                image_path = os.path.join(class_path, image_file)
                # print(image_path)
                predicted_label = evaluate_image(models, image_path, preprocess)
                all_predictions.append(predicted_label)
                true_labels.append(class_labels.index(class_label))

    # Print accuracy
    accuracy = (
        (torch.tensor(all_predictions) == torch.tensor(true_labels)).float().mean()
    )
    print("Accuracy:", accuracy)

    # Create the confusion matrix
    cm = confusion_matrix(true_labels, all_predictions)

    # Plot the confusion matrix
    display = ConfusionMatrixDisplay(confusion_matrix=cm, display_labels=class_labels)
    display.plot(cmap=plt.cm.Blues, values_format="d")

    # Show the plot
    plt.show()

    return accuracy

# Set the models to evaluation mode
set_models_eval([model1, model2, model3])

# Define different weight configurations
# [SqueezeNet, EfficientNetB2WithDropout, MobileNetV2WithDropout]
weights_configurations = [
    # One randomly drawn triple (random.randrange(1, 10) / 10 per entry); the
    # values are not normalised, so they do not necessarily sum to 1. This list
    # is not used by the grid search below and is kept only for reference.
    [
        random.randrange(1, 10) / 10,
        random.randrange(1, 10) / 10,
        random.randrange(1, 10) / 10,
    ],
]


## NOTE ON PREVIOUSLY FOUND BEST WEIGHTS
# Best weights: [0.2, 0.3, 0.5] with accuracy: 0.9428571462631226 at iteration: 15 with torch seed: 28434738589300 and random seed: 3188652458777471118 and torch cuda seed: None


best_weights = {
    "weights": 0,
    "accuracy": 0,
    "iteration": 0,
    "torch_seed": 0,
    "random_seed": 0,
    "torch_cuda_seed": 0,
}

i = 0

# Grid-search settings: enumerate weight triples in hundredths between the
# limits below and keep those that sum to 1.0.
target_sum = 1.0
number_of_numbers = 3
lower_limit = 0.20
upper_limit = 0.90
step = 0.01  # resolution of the weight grid

valid_combinations = []

# Generate all ordered weight triples (two decimal places) within the limits.
# Work in integer hundredths so the sum check is exact (no float comparison).
range_values = list(
    range(int(round(lower_limit * 100)), int(round(upper_limit * 100)) + 1)
)
for combo in product(range_values, repeat=number_of_numbers):
    # Keep the triple if the hundredths sum to exactly 100, i.e. the weights sum to 1.0
    if sum(combo) == int(round(target_sum * 100)):
        valid_combinations.append([x / 100.0 for x in combo])

# Calculate the total number of possibilities
total_possibilities = len(valid_combinations)

print("Total number of possibilities:", total_possibilities)

# Override the full grid with a single triple (the configuration chosen for the
# final run); comment this line out to search every valid combination above.
valid_combinations = [[0.37, 0.34, 0.29]]

for weights in valid_combinations:
    print("---------------------------")
    print("Iteration:", i)

    # Re-seed before each configuration so every ensemble is evaluated under
    # identical conditions.
    random.seed(RANDOM_SEED)
    torch.cuda.manual_seed(RANDOM_SEED)
    torch.manual_seed(RANDOM_SEED)

    # Create an ensemble model with weighted voting
    weighted_vote_ensemble_model = WeightedVoteEnsemble(
        [model1, model2, model3],
        weights,
    )
    print("Weights:", weights)
    # Call the evaluate_and_plot_confusion_matrix function with your models and test data folder
    accuracy = evaluate_and_plot_confusion_matrix(
        [weighted_vote_ensemble_model], test_data_folder
    )
    # Convert tensor to float
    accuracy = accuracy.item()
    if accuracy > best_weights["accuracy"]:
        best_weights["weights"] = weights
        best_weights["accuracy"] = accuracy
        best_weights["iteration"] = i
        best_weights["torch_seed"] = torch.initial_seed()
        # Record the seed actually used for this evaluation run
        best_weights["random_seed"] = RANDOM_SEED
        best_weights["torch_cuda_seed"] = torch.cuda.initial_seed()

    print(
        "Best weights:",
        best_weights["weights"],
        "with accuracy:",
        best_weights["accuracy"],
        "at iteration:",
        best_weights["iteration"],
        "with torch seed:",
        best_weights["torch_seed"],
        "and random seed:",
        best_weights["random_seed"],
        "and torch cuda seed:",
        best_weights["torch_cuda_seed"],
    )
    i += 1


torch.save(
    weighted_vote_ensemble_model.state_dict(),
    "output/checkpoints/WeightedVoteEnsemble.pth",
)
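
# Minimal reload sketch (assumes the same configs module, model classes and
# checkpoint path as above; the voting weights are not stored in the state
# dict, so they must be passed again at construction time):
#
#   ensemble = WeightedVoteEnsemble(
#       [
#           SqueezeNet1_0WithSE(num_classes=NUM_CLASSES).to(DEVICE),
#           EfficientNetB2WithDropout(num_classes=NUM_CLASSES).to(DEVICE),
#           MobileNetV2WithDropout(num_classes=NUM_CLASSES).to(DEVICE),
#       ],
#       [0.37, 0.34, 0.29],
#   )
#   ensemble.load_state_dict(
#       torch.load("output/checkpoints/WeightedVoteEnsemble.pth", map_location=DEVICE)
#   )
#   ensemble.eval()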