hank1996 committed on
Commit 9b7f993 · 1 Parent(s): 07cde38

Create new file

Files changed (1)
  1. utils/loss.py +1158 -0
utils/loss.py ADDED
@@ -0,0 +1,1158 @@
import torch
import torch.nn as nn
import torch.nn.functional as F

from utils.general import bbox_iou, bbox_alpha_iou, box_iou, box_giou, box_diou, box_ciou, xywh2xyxy
from utils.torch_utils import is_parallel


def smooth_BCE(eps=0.1):  # https://github.com/ultralytics/yolov3/issues/238#issuecomment-598028441
    # return positive, negative label smoothing BCE targets
    return 1.0 - 0.5 * eps, 0.5 * eps

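
# Minimal usage sketch (illustrative; the helper below is hypothetical): with
# eps=0.1 the smoothed BCE targets become 0.95 for positives and 0.05 for
# negatives instead of hard 1/0 labels.
def _demo_smooth_BCE():
    cp, cn = smooth_BCE(eps=0.1)
    return cp, cn  # (0.95, 0.05)
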
class BCEBlurWithLogitsLoss(nn.Module):
    # BCEWithLogitsLoss() with reduced missing label effects.
    def __init__(self, alpha=0.05):
        super(BCEBlurWithLogitsLoss, self).__init__()
        self.loss_fcn = nn.BCEWithLogitsLoss(reduction='none')  # must be nn.BCEWithLogitsLoss()
        self.alpha = alpha

    def forward(self, pred, true):
        loss = self.loss_fcn(pred, true)
        pred = torch.sigmoid(pred)  # prob from logits
        dx = pred - true  # reduce only missing label effects
        # dx = (pred - true).abs()  # reduce missing label and false label effects
        alpha_factor = 1 - torch.exp((dx - 1) / (self.alpha + 1e-4))
        loss *= alpha_factor
        return loss.mean()

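
# Minimal usage sketch (illustrative, random tensors; hypothetical helper): the
# alpha factor decays the loss where pred >> true, i.e. confident positives on
# empty targets that are likely missing labels.
def _demo_BCEBlur():
    criterion = BCEBlurWithLogitsLoss(alpha=0.05)
    pred = torch.randn(4, 80)                    # raw logits
    true = torch.randint(0, 2, (4, 80)).float()  # hard 0/1 targets
    return criterion(pred, true)                 # scalar mean loss
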
class SigmoidBin(nn.Module):
    stride = None  # strides computed during build
    export = False  # onnx export

    def __init__(self, bin_count=10, min=0.0, max=1.0, reg_scale=2.0, use_loss_regression=True, use_fw_regression=True, BCE_weight=1.0, smooth_eps=0.0):
        super(SigmoidBin, self).__init__()

        self.bin_count = bin_count
        self.length = bin_count + 1
        self.min = min
        self.max = max
        self.scale = float(max - min)
        self.shift = self.scale / 2.0

        self.use_loss_regression = use_loss_regression
        self.use_fw_regression = use_fw_regression
        self.reg_scale = reg_scale
        self.BCE_weight = BCE_weight

        start = min + (self.scale / 2.0) / self.bin_count
        end = max - (self.scale / 2.0) / self.bin_count
        step = self.scale / self.bin_count
        self.step = step
        # print(f" start = {start}, end = {end}, step = {step} ")

        # torch.arange (torch.range is deprecated and removed in recent PyTorch);
        # the +0.0001 margin keeps the last bin center despite the exclusive end
        bins = torch.arange(start, end + 0.0001, step).float()
        self.register_buffer('bins', bins)

        self.cp = 1.0 - 0.5 * smooth_eps
        self.cn = 0.5 * smooth_eps

        self.BCEbins = nn.BCEWithLogitsLoss(pos_weight=torch.Tensor([BCE_weight]))
        self.MSELoss = nn.MSELoss()

    def get_length(self):
        return self.length

    def forward(self, pred):
        assert pred.shape[-1] == self.length, 'pred.shape[-1]=%d is not equal to self.length=%d' % (pred.shape[-1], self.length)

        pred_reg = (pred[..., 0] * self.reg_scale - self.reg_scale / 2.0) * self.step
        pred_bin = pred[..., 1:(1 + self.bin_count)]

        _, bin_idx = torch.max(pred_bin, dim=-1)
        bin_bias = self.bins[bin_idx]

        if self.use_fw_regression:
            result = pred_reg + bin_bias
        else:
            result = bin_bias
        result = result.clamp(min=self.min, max=self.max)

        return result

    def training_loss(self, pred, target):
        assert pred.shape[-1] == self.length, 'pred.shape[-1]=%d is not equal to self.length=%d' % (pred.shape[-1], self.length)
        assert pred.shape[0] == target.shape[0], 'pred.shape=%d is not equal to the target.shape=%d' % (pred.shape[0], target.shape[0])
        device = pred.device

        pred_reg = (pred[..., 0].sigmoid() * self.reg_scale - self.reg_scale / 2.0) * self.step
        pred_bin = pred[..., 1:(1 + self.bin_count)]

        diff_bin_target = torch.abs(target[..., None] - self.bins)
        _, bin_idx = torch.min(diff_bin_target, dim=-1)

        bin_bias = self.bins[bin_idx]
        bin_bias.requires_grad = False
        result = pred_reg + bin_bias

        target_bins = torch.full_like(pred_bin, self.cn, device=device)  # targets
        n = pred.shape[0]
        target_bins[range(n), bin_idx] = self.cp

        loss_bin = self.BCEbins(pred_bin, target_bins)  # BCE

        if self.use_loss_regression:
            loss_regression = self.MSELoss(result, target)  # MSE
            loss = loss_bin + loss_regression
        else:
            loss = loss_bin

        out_result = result.clamp(min=self.min, max=self.max)

        return loss, out_result

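
# Minimal usage sketch (illustrative, random logits; hypothetical helper): each
# prediction carries one regression channel plus bin_count bin logits;
# training_loss returns the combined bin-BCE (+ optional MSE) loss and the
# decoded value clamped to [min, max].
def _demo_SigmoidBin():
    sb = SigmoidBin(bin_count=10, min=0.0, max=4.0)
    pred = torch.randn(16, sb.get_length())  # raw logits, shape (N, bin_count + 1)
    target = torch.rand(16) * 4.0            # ground truth values in [0, 4]
    loss, decoded = sb.training_loss(pred, target)
    return loss, decoded
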
class FocalLoss(nn.Module):
    # Wraps focal loss around existing loss_fcn(), i.e. criteria = FocalLoss(nn.BCEWithLogitsLoss(), gamma=1.5)
    def __init__(self, loss_fcn, gamma=1.5, alpha=0.25):
        super(FocalLoss, self).__init__()
        self.loss_fcn = loss_fcn  # must be nn.BCEWithLogitsLoss()
        self.gamma = gamma
        self.alpha = alpha
        self.reduction = loss_fcn.reduction
        self.loss_fcn.reduction = 'none'  # required to apply FL to each element

    def forward(self, pred, true):
        loss = self.loss_fcn(pred, true)
        # p_t = torch.exp(-loss)
        # loss *= self.alpha * (1.000001 - p_t) ** self.gamma  # non-zero power for gradient stability

        # TF implementation https://github.com/tensorflow/addons/blob/v0.7.1/tensorflow_addons/losses/focal_loss.py
        pred_prob = torch.sigmoid(pred)  # prob from logits
        p_t = true * pred_prob + (1 - true) * (1 - pred_prob)
        alpha_factor = true * self.alpha + (1 - true) * (1 - self.alpha)
        modulating_factor = (1.0 - p_t) ** self.gamma
        loss *= alpha_factor * modulating_factor

        if self.reduction == 'mean':
            return loss.mean()
        elif self.reduction == 'sum':
            return loss.sum()
        else:  # 'none'
            return loss

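
# Minimal usage sketch (illustrative, random tensors; hypothetical helper): wrap
# BCEWithLogitsLoss exactly as the class comment above suggests; the wrapped
# loss's original reduction ('mean' here) is preserved.
def _demo_FocalLoss():
    criterion = FocalLoss(nn.BCEWithLogitsLoss(), gamma=1.5, alpha=0.25)
    pred = torch.randn(8, 80)                    # raw logits
    true = torch.randint(0, 2, (8, 80)).float()  # 0/1 targets
    return criterion(pred, true)                 # scalar (mean reduction)
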
class QFocalLoss(nn.Module):
    # Wraps Quality focal loss around existing loss_fcn(), i.e. criteria = QFocalLoss(nn.BCEWithLogitsLoss(), gamma=1.5)
    def __init__(self, loss_fcn, gamma=1.5, alpha=0.25):
        super(QFocalLoss, self).__init__()
        self.loss_fcn = loss_fcn  # must be nn.BCEWithLogitsLoss()
        self.gamma = gamma
        self.alpha = alpha
        self.reduction = loss_fcn.reduction
        self.loss_fcn.reduction = 'none'  # required to apply FL to each element

    def forward(self, pred, true):
        loss = self.loss_fcn(pred, true)

        pred_prob = torch.sigmoid(pred)  # prob from logits
        alpha_factor = true * self.alpha + (1 - true) * (1 - self.alpha)
        modulating_factor = torch.abs(true - pred_prob) ** self.gamma
        loss *= alpha_factor * modulating_factor

        if self.reduction == 'mean':
            return loss.mean()
        elif self.reduction == 'sum':
            return loss.sum()
        else:  # 'none'
            return loss

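
# Minimal usage sketch (illustrative; hypothetical helper): unlike FocalLoss,
# the quality variant modulates by |true - p|, so it suits soft targets
# (e.g. IoU scores in [0, 1]) rather than hard 0/1 labels.
def _demo_QFocalLoss():
    criterion = QFocalLoss(nn.BCEWithLogitsLoss(), gamma=1.5, alpha=0.25)
    pred = torch.randn(8, 80)  # raw logits
    true = torch.rand(8, 80)   # soft quality targets in [0, 1]
    return criterion(pred, true)
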
class RankSort(torch.autograd.Function):
    @staticmethod
    def forward(ctx, logits, targets, delta_RS=0.50, eps=1e-10):

        classification_grads = torch.zeros(logits.shape).cuda()

        # Filter fg logits
        fg_labels = (targets > 0.)
        fg_logits = logits[fg_labels]
        fg_targets = targets[fg_labels]
        fg_num = len(fg_logits)

        # Do not use bg with scores less than minimum fg logit
        # since changing its score does not have an effect on precision
        threshold_logit = torch.min(fg_logits) - delta_RS
        relevant_bg_labels = ((targets == 0) & (logits >= threshold_logit))

        relevant_bg_logits = logits[relevant_bg_labels]
        relevant_bg_grad = torch.zeros(len(relevant_bg_logits)).cuda()
        sorting_error = torch.zeros(fg_num).cuda()
        ranking_error = torch.zeros(fg_num).cuda()
        fg_grad = torch.zeros(fg_num).cuda()

        # Sort the fg logits
        order = torch.argsort(fg_logits)
        # Loop over each positive following the order
        for ii in order:
            # Difference transforms (x_ij)
            fg_relations = fg_logits - fg_logits[ii]
            bg_relations = relevant_bg_logits - fg_logits[ii]

            if delta_RS > 0:
                fg_relations = torch.clamp(fg_relations / (2 * delta_RS) + 0.5, min=0, max=1)
                bg_relations = torch.clamp(bg_relations / (2 * delta_RS) + 0.5, min=0, max=1)
            else:
                fg_relations = (fg_relations >= 0).float()
                bg_relations = (bg_relations >= 0).float()

            # Rank of ii among pos and false positive number (bg with larger scores)
            rank_pos = torch.sum(fg_relations)
            FP_num = torch.sum(bg_relations)

            # Rank of ii among all examples
            rank = rank_pos + FP_num

            # Ranking error of example ii. target_ranking_error is always 0. (Eq. 7)
            ranking_error[ii] = FP_num / rank

            # Current sorting error of example ii. (Eq. 7)
            current_sorting_error = torch.sum(fg_relations * (1 - fg_targets)) / rank_pos

            # Find examples in the target sorted order for example ii
            iou_relations = (fg_targets >= fg_targets[ii])
            target_sorted_order = iou_relations * fg_relations

            # The rank of ii among positives in sorted order
            rank_pos_target = torch.sum(target_sorted_order)

            # Compute target sorting error. (Eq. 8)
            # Since target ranking error is 0, this is also the total target error
            target_sorting_error = torch.sum(target_sorted_order * (1 - fg_targets)) / rank_pos_target

            # Compute sorting error on example ii
            sorting_error[ii] = current_sorting_error - target_sorting_error

            # Identity update for ranking error
            if FP_num > eps:
                # For ii the update is the ranking error
                fg_grad[ii] -= ranking_error[ii]
                # For negatives, distribute error via ranking pmf (i.e. bg_relations/FP_num)
                relevant_bg_grad += (bg_relations * (ranking_error[ii] / FP_num))

            # Find the positives that are misranked (the cause of the error)
            # These are the ones with smaller IoU but larger logits
            missorted_examples = (~iou_relations) * fg_relations

            # Denominator of sorting pmf
            sorting_pmf_denom = torch.sum(missorted_examples)

            # Identity update for sorting error
            if sorting_pmf_denom > eps:
                # For ii the update is the sorting error
                fg_grad[ii] -= sorting_error[ii]
                # For positives, distribute error via sorting pmf (i.e. missorted_examples/sorting_pmf_denom)
                fg_grad += (missorted_examples * (sorting_error[ii] / sorting_pmf_denom))

        # Normalize gradients by number of positives
        classification_grads[fg_labels] = (fg_grad / fg_num)
        classification_grads[relevant_bg_labels] = (relevant_bg_grad / fg_num)

        ctx.save_for_backward(classification_grads)

        return ranking_error.mean(), sorting_error.mean()

    @staticmethod
    def backward(ctx, out_grad1, out_grad2):
        g1, = ctx.saved_tensors
        return g1 * out_grad1, None, None, None

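
# Minimal usage sketch (illustrative; hypothetical helper; the implementation
# above allocates its gradient buffers with .cuda(), so a GPU is required):
# targets hold 0 for background and a continuous quality score (e.g. IoU) for
# foreground; the saved classification gradients drive backward().
def _demo_RankSort():
    logits = torch.randn(100, device='cuda', requires_grad=True)
    targets = torch.zeros(100, device='cuda')
    targets[:10] = torch.rand(10, device='cuda') * 0.5 + 0.5  # fg IoUs in [0.5, 1.0]
    ranking_loss, sorting_loss = RankSort.apply(logits, targets)
    (ranking_loss + sorting_loss).backward()
    return ranking_loss, sorting_loss
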
class aLRPLoss(torch.autograd.Function):
    @staticmethod
    def forward(ctx, logits, targets, regression_losses, delta=1., eps=1e-5):
        classification_grads = torch.zeros(logits.shape).cuda()

        # Filter fg logits
        fg_labels = (targets == 1)
        fg_logits = logits[fg_labels]
        fg_num = len(fg_logits)

        # Do not use bg with scores less than minimum fg logit
        # since changing its score does not have an effect on precision
        threshold_logit = torch.min(fg_logits) - delta

        # Get valid bg logits
        relevant_bg_labels = ((targets == 0) & (logits >= threshold_logit))
        relevant_bg_logits = logits[relevant_bg_labels]
        relevant_bg_grad = torch.zeros(len(relevant_bg_logits)).cuda()
        rank = torch.zeros(fg_num).cuda()
        prec = torch.zeros(fg_num).cuda()
        fg_grad = torch.zeros(fg_num).cuda()

        max_prec = 0
        # Sort the fg logits
        order = torch.argsort(fg_logits)
        # Loop over each positive following the order
        for ii in order:
            # x_ij s as score differences with fgs
            fg_relations = fg_logits - fg_logits[ii]
            # Apply piecewise linear function and determine relations with fgs
            fg_relations = torch.clamp(fg_relations / (2 * delta) + 0.5, min=0, max=1)
            # Discard i=j in the summation in rank_pos
            fg_relations[ii] = 0

            # x_ij s as score differences with bgs
            bg_relations = relevant_bg_logits - fg_logits[ii]
            # Apply piecewise linear function and determine relations with bgs
            bg_relations = torch.clamp(bg_relations / (2 * delta) + 0.5, min=0, max=1)

            # Compute the rank of the example within fgs and number of bgs with larger scores
            rank_pos = 1 + torch.sum(fg_relations)
            FP_num = torch.sum(bg_relations)
            # Store the total since it is the normalizer also for the aLRP regression error
            rank[ii] = rank_pos + FP_num

            # Compute precision for this example to compute classification loss
            prec[ii] = rank_pos / rank[ii]
            # For stability, set eps to an infinitesimally small value (e.g. 1e-6), then compute grads
            if FP_num > eps:
                fg_grad[ii] = -(torch.sum(fg_relations * regression_losses) + FP_num) / rank[ii]
                relevant_bg_grad += (bg_relations * (-fg_grad[ii] / FP_num))

        # aLRP with grad formulation fg gradient
        classification_grads[fg_labels] = fg_grad
        # aLRP with grad formulation bg gradient
        classification_grads[relevant_bg_labels] = relevant_bg_grad

        classification_grads /= fg_num

        cls_loss = 1 - prec.mean()
        ctx.save_for_backward(classification_grads)

        return cls_loss, rank, order

    @staticmethod
    def backward(ctx, out_grad1, out_grad2, out_grad3):
        g1, = ctx.saved_tensors
        return g1 * out_grad1, None, None, None, None

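
# Minimal usage sketch (illustrative; hypothetical helper; CUDA required as
# above): regression_losses holds one per-positive localisation error that
# aLRP folds into the ranking; only cls_loss carries gradients.
def _demo_aLRPLoss():
    logits = torch.randn(100, device='cuda', requires_grad=True)
    targets = torch.zeros(100, device='cuda')
    targets[:10] = 1.0                                 # fg labels are exactly 1
    regression_losses = torch.rand(10, device='cuda')  # one per foreground
    cls_loss, rank, order = aLRPLoss.apply(logits, targets, regression_losses)
    cls_loss.backward()
    return cls_loss
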
class APLoss(torch.autograd.Function):
    @staticmethod
    def forward(ctx, logits, targets, delta=1.):
        classification_grads = torch.zeros(logits.shape).cuda()

        # Filter fg logits
        fg_labels = (targets == 1)
        fg_logits = logits[fg_labels]
        fg_num = len(fg_logits)

        # Do not use bg with scores less than minimum fg logit
        # since changing its score does not have an effect on precision
        threshold_logit = torch.min(fg_logits) - delta

        # Get valid bg logits
        relevant_bg_labels = ((targets == 0) & (logits >= threshold_logit))
        relevant_bg_logits = logits[relevant_bg_labels]
        relevant_bg_grad = torch.zeros(len(relevant_bg_logits)).cuda()
        rank = torch.zeros(fg_num).cuda()
        prec = torch.zeros(fg_num).cuda()
        fg_grad = torch.zeros(fg_num).cuda()

        max_prec = 0
        # Sort the fg logits
        order = torch.argsort(fg_logits)
        # Loop over each positive following the order
        for ii in order:
            # x_ij s as score differences with fgs
            fg_relations = fg_logits - fg_logits[ii]
            # Apply piecewise linear function and determine relations with fgs
            fg_relations = torch.clamp(fg_relations / (2 * delta) + 0.5, min=0, max=1)
            # Discard i=j in the summation in rank_pos
            fg_relations[ii] = 0

            # x_ij s as score differences with bgs
            bg_relations = relevant_bg_logits - fg_logits[ii]
            # Apply piecewise linear function and determine relations with bgs
            bg_relations = torch.clamp(bg_relations / (2 * delta) + 0.5, min=0, max=1)

            # Compute the rank of the example within fgs and number of bgs with larger scores
            rank_pos = 1 + torch.sum(fg_relations)
            FP_num = torch.sum(bg_relations)
            # Store the total since it is the normalizer also for the regression error
            rank[ii] = rank_pos + FP_num

            # Compute precision for this example
            current_prec = rank_pos / rank[ii]

            # Compute interpolated AP and store gradients for relevant bg examples
            if (max_prec <= current_prec):
                max_prec = current_prec
                relevant_bg_grad += (bg_relations / rank[ii])
            else:
                relevant_bg_grad += (bg_relations / rank[ii]) * (((1 - max_prec) / (1 - current_prec)))

            # Store fg gradients
            fg_grad[ii] = -(1 - max_prec)
            prec[ii] = max_prec

        # AP loss fg gradient
        classification_grads[fg_labels] = fg_grad
        # AP loss bg gradient
        classification_grads[relevant_bg_labels] = relevant_bg_grad

        classification_grads /= fg_num

        cls_loss = 1 - prec.mean()
        ctx.save_for_backward(classification_grads)

        return cls_loss

    @staticmethod
    def backward(ctx, out_grad1):
        g1, = ctx.saved_tensors
        return g1 * out_grad1, None, None

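
# Minimal usage sketch (illustrative; hypothetical helper; CUDA required as
# above): plain interpolated average-precision loss over 0/1 targets, with no
# regression term.
def _demo_APLoss():
    logits = torch.randn(100, device='cuda', requires_grad=True)
    targets = torch.zeros(100, device='cuda')
    targets[:10] = 1.0
    ap_loss = APLoss.apply(logits, targets)
    ap_loss.backward()
    return ap_loss
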
class ComputeLoss:
    # Compute losses
    def __init__(self, model, autobalance=False):
        super(ComputeLoss, self).__init__()
        device = next(model.parameters()).device  # get model device
        h = model.hyp  # hyperparameters

        # Define criteria
        BCEcls = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([h['cls_pw']], device=device))
        BCEobj = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([h['obj_pw']], device=device))

        # Class label smoothing https://arxiv.org/pdf/1902.04103.pdf eqn 3
        self.cp, self.cn = smooth_BCE(eps=h.get('label_smoothing', 0.0))  # positive, negative BCE targets

        # Focal loss
        g = h['fl_gamma']  # focal loss gamma
        if g > 0:
            BCEcls, BCEobj = FocalLoss(BCEcls, g), FocalLoss(BCEobj, g)

        det = model.module.model[-1] if is_parallel(model) else model.model[-1]  # Detect() module
        self.balance = {3: [4.0, 1.0, 0.4]}.get(det.nl, [4.0, 1.0, 0.25, 0.06, .02])  # P3-P7
        # self.balance = {3: [4.0, 1.0, 0.4]}.get(det.nl, [4.0, 1.0, 0.25, 0.1, .05])  # P3-P7
        # self.balance = {3: [4.0, 1.0, 0.4]}.get(det.nl, [4.0, 1.0, 0.5, 0.4, .1])  # P3-P7
        self.ssi = list(det.stride).index(16) if autobalance else 0  # stride 16 index
        self.BCEcls, self.BCEobj, self.gr, self.hyp, self.autobalance = BCEcls, BCEobj, model.gr, h, autobalance
        for k in 'na', 'nc', 'nl', 'anchors':
            setattr(self, k, getattr(det, k))

    def __call__(self, p, targets):  # predictions, targets, model
        device = targets.device
        lcls, lbox, lobj = torch.zeros(1, device=device), torch.zeros(1, device=device), torch.zeros(1, device=device)
        tcls, tbox, indices, anchors = self.build_targets(p, targets)  # targets

        # Losses
        for i, pi in enumerate(p):  # layer index, layer predictions
            b, a, gj, gi = indices[i]  # image, anchor, gridy, gridx
            tobj = torch.zeros_like(pi[..., 0], device=device)  # target obj

            n = b.shape[0]  # number of targets
            if n:
                ps = pi[b, a, gj, gi]  # prediction subset corresponding to targets

                # Regression
                pxy = ps[:, :2].sigmoid() * 2. - 0.5
                pwh = (ps[:, 2:4].sigmoid() * 2) ** 2 * anchors[i]
                pbox = torch.cat((pxy, pwh), 1)  # predicted box
                iou = bbox_iou(pbox.T, tbox[i], x1y1x2y2=False, CIoU=True)  # iou(prediction, target)
                lbox += (1.0 - iou).mean()  # iou loss

                # Objectness
                tobj[b, a, gj, gi] = (1.0 - self.gr) + self.gr * iou.detach().clamp(0).type(tobj.dtype)  # iou ratio

                # Classification
                if self.nc > 1:  # cls loss (only if multiple classes)
                    t = torch.full_like(ps[:, 5:], self.cn, device=device)  # targets
                    t[range(n), tcls[i]] = self.cp
                    # t[t == self.cp] = iou.detach().clamp(0).type(t.dtype)
                    lcls += self.BCEcls(ps[:, 5:], t)  # BCE

                # Append targets to text file
                # with open('targets.txt', 'a') as file:
                #     [file.write('%11.5g ' * 4 % tuple(x) + '\n') for x in torch.cat((txy[i], twh[i]), 1)]

            obji = self.BCEobj(pi[..., 4], tobj)
            lobj += obji * self.balance[i]  # obj loss
            if self.autobalance:
                self.balance[i] = self.balance[i] * 0.9999 + 0.0001 / obji.detach().item()

        if self.autobalance:
            self.balance = [x / self.balance[self.ssi] for x in self.balance]
        lbox *= self.hyp['box']
        lobj *= self.hyp['obj']
        lcls *= self.hyp['cls']
        bs = tobj.shape[0]  # batch size

        loss = lbox + lobj + lcls
        return loss * bs, torch.cat((lbox, lobj, lcls, loss)).detach()

    def build_targets(self, p, targets):
        # Build targets for compute_loss(), input targets(image,class,x,y,w,h)
        na, nt = self.na, targets.shape[0]  # number of anchors, targets
        tcls, tbox, indices, anch = [], [], [], []
        gain = torch.ones(7, device=targets.device)  # normalized to gridspace gain
        ai = torch.arange(na, device=targets.device).float().view(na, 1).repeat(1, nt)  # same as .repeat_interleave(nt)
        targets = torch.cat((targets.repeat(na, 1, 1), ai[:, :, None]), 2)  # append anchor indices

        g = 0.5  # bias
        off = torch.tensor([[0, 0],
                            [1, 0], [0, 1], [-1, 0], [0, -1],  # j,k,l,m
                            # [1, 1], [1, -1], [-1, 1], [-1, -1],  # jk,jm,lk,lm
                            ], device=targets.device).float() * g  # offsets

        for i in range(self.nl):
            anchors = self.anchors[i]
            gain[2:6] = torch.tensor(p[i].shape)[[3, 2, 3, 2]]  # xyxy gain

            # Match targets to anchors
            t = targets * gain
            if nt:
                # Matches
                r = t[:, :, 4:6] / anchors[:, None]  # wh ratio
                j = torch.max(r, 1. / r).max(2)[0] < self.hyp['anchor_t']  # compare
                # j = wh_iou(anchors, t[:, 4:6]) > model.hyp['iou_t']  # iou(3,n)=wh_iou(anchors(3,2), gwh(n,2))
                t = t[j]  # filter

                # Offsets
                gxy = t[:, 2:4]  # grid xy
                gxi = gain[[2, 3]] - gxy  # inverse
                j, k = ((gxy % 1. < g) & (gxy > 1.)).T
                l, m = ((gxi % 1. < g) & (gxi > 1.)).T
                j = torch.stack((torch.ones_like(j), j, k, l, m))
                t = t.repeat((5, 1, 1))[j]
                offsets = (torch.zeros_like(gxy)[None] + off[:, None])[j]
            else:
                t = targets[0]
                offsets = 0

            # Define
            b, c = t[:, :2].long().T  # image, class
            gxy = t[:, 2:4]  # grid xy
            gwh = t[:, 4:6]  # grid wh
            gij = (gxy - offsets).long()
            gi, gj = gij.T  # grid xy indices

            # Append
            a = t[:, 6].long()  # anchor indices
            indices.append((b, a, gj.clamp_(0, gain[3] - 1), gi.clamp_(0, gain[2] - 1)))  # image, anchor, grid indices
            tbox.append(torch.cat((gxy - gij, gwh), 1))  # box
            anch.append(anchors[a])  # anchors
            tcls.append(c)  # class

        return tcls, tbox, indices, anch

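
# Minimal wiring sketch (illustrative; hypothetical helper; assumes a YOLO-style
# `model` that exposes .hyp, .gr and a Detect() head with na/nc/nl/anchors, as
# read in __init__ above, plus `predictions` from a forward pass and `targets`
# of shape (n, 6) laid out as (image, class, x, y, w, h)):
def _demo_ComputeLoss(model, predictions, targets):
    compute_loss = ComputeLoss(model)
    loss, loss_items = compute_loss(predictions, targets)
    return loss, loss_items  # batch-scaled total and detached (lbox, lobj, lcls, total)
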
class ComputeLossOTA:
    # Compute losses
    def __init__(self, model, autobalance=False):
        super(ComputeLossOTA, self).__init__()
        device = next(model.parameters()).device  # get model device
        h = model.hyp  # hyperparameters

        # Define criteria
        BCEcls = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([h['cls_pw']], device=device))
        BCEobj = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([h['obj_pw']], device=device))

        # Class label smoothing https://arxiv.org/pdf/1902.04103.pdf eqn 3
        self.cp, self.cn = smooth_BCE(eps=h.get('label_smoothing', 0.0))  # positive, negative BCE targets

        # Focal loss
        g = h['fl_gamma']  # focal loss gamma
        if g > 0:
            BCEcls, BCEobj = FocalLoss(BCEcls, g), FocalLoss(BCEobj, g)

        det = model.module.model[-1] if is_parallel(model) else model.model[-1]  # Detect() module
        self.balance = {3: [4.0, 1.0, 0.4]}.get(det.nl, [4.0, 1.0, 0.25, 0.06, .02])  # P3-P7
        self.ssi = list(det.stride).index(16) if autobalance else 0  # stride 16 index
        self.BCEcls, self.BCEobj, self.gr, self.hyp, self.autobalance = BCEcls, BCEobj, model.gr, h, autobalance
        for k in 'na', 'nc', 'nl', 'anchors', 'stride':
            setattr(self, k, getattr(det, k))

    def __call__(self, p, targets, imgs):  # predictions, targets, model
        device = targets.device
        lcls, lbox, lobj = torch.zeros(1, device=device), torch.zeros(1, device=device), torch.zeros(1, device=device)
        bs, as_, gjs, gis, targets, anchors = self.build_targets(p, targets, imgs)
        pre_gen_gains = [torch.tensor(pp.shape, device=device)[[3, 2, 3, 2]] for pp in p]

        # Losses
        for i, pi in enumerate(p):  # layer index, layer predictions
            b, a, gj, gi = bs[i], as_[i], gjs[i], gis[i]  # image, anchor, gridy, gridx
            tobj = torch.zeros_like(pi[..., 0], device=device)  # target obj

            n = b.shape[0]  # number of targets
            if n:
                ps = pi[b, a, gj, gi]  # prediction subset corresponding to targets

                # Regression
                grid = torch.stack([gi, gj], dim=1)
                pxy = ps[:, :2].sigmoid() * 2. - 0.5
                # pxy = ps[:, :2].sigmoid() * 3. - 1.
                pwh = (ps[:, 2:4].sigmoid() * 2) ** 2 * anchors[i]
                pbox = torch.cat((pxy, pwh), 1)  # predicted box
                selected_tbox = targets[i][:, 2:6] * pre_gen_gains[i]
                selected_tbox[:, :2] -= grid
                iou = bbox_iou(pbox.T, selected_tbox, x1y1x2y2=False, CIoU=True)  # iou(prediction, target)
                lbox += (1.0 - iou).mean()  # iou loss

                # Objectness
                tobj[b, a, gj, gi] = (1.0 - self.gr) + self.gr * iou.detach().clamp(0).type(tobj.dtype)  # iou ratio

                # Classification
                selected_tcls = targets[i][:, 1].long()
                if self.nc > 1:  # cls loss (only if multiple classes)
                    t = torch.full_like(ps[:, 5:], self.cn, device=device)  # targets
                    t[range(n), selected_tcls] = self.cp
                    lcls += self.BCEcls(ps[:, 5:], t)  # BCE

                # Append targets to text file
                # with open('targets.txt', 'a') as file:
                #     [file.write('%11.5g ' * 4 % tuple(x) + '\n') for x in torch.cat((txy[i], twh[i]), 1)]

            obji = self.BCEobj(pi[..., 4], tobj)
            lobj += obji * self.balance[i]  # obj loss
            if self.autobalance:
                self.balance[i] = self.balance[i] * 0.9999 + 0.0001 / obji.detach().item()

        if self.autobalance:
            self.balance = [x / self.balance[self.ssi] for x in self.balance]
        lbox *= self.hyp['box']
        lobj *= self.hyp['obj']
        lcls *= self.hyp['cls']
        bs = tobj.shape[0]  # batch size

        loss = lbox + lobj + lcls
        return loss * bs, torch.cat((lbox, lobj, lcls, loss)).detach()

    def build_targets(self, p, targets, imgs):

        # indices, anch = self.find_positive(p, targets)
        indices, anch = self.find_3_positive(p, targets)
        # indices, anch = self.find_4_positive(p, targets)
        # indices, anch = self.find_5_positive(p, targets)
        # indices, anch = self.find_9_positive(p, targets)

        matching_bs = [[] for pp in p]
        matching_as = [[] for pp in p]
        matching_gjs = [[] for pp in p]
        matching_gis = [[] for pp in p]
        matching_targets = [[] for pp in p]
        matching_anchs = [[] for pp in p]

        nl = len(p)

        for batch_idx in range(p[0].shape[0]):

            b_idx = targets[:, 0] == batch_idx
            this_target = targets[b_idx]
            if this_target.shape[0] == 0:
                continue

            txywh = this_target[:, 2:6] * imgs[batch_idx].shape[1]
            txyxy = xywh2xyxy(txywh)

            pxyxys = []
            p_cls = []
            p_obj = []
            from_which_layer = []
            all_b = []
            all_a = []
            all_gj = []
            all_gi = []
            all_anch = []

            for i, pi in enumerate(p):

                b, a, gj, gi = indices[i]
                idx = (b == batch_idx)
                b, a, gj, gi = b[idx], a[idx], gj[idx], gi[idx]
                all_b.append(b)
                all_a.append(a)
                all_gj.append(gj)
                all_gi.append(gi)
                all_anch.append(anch[i][idx])
                from_which_layer.append(torch.ones(size=(len(b),)) * i)

                fg_pred = pi[b, a, gj, gi]
                p_obj.append(fg_pred[:, 4:5])
                p_cls.append(fg_pred[:, 5:])

                grid = torch.stack([gi, gj], dim=1)
                pxy = (fg_pred[:, :2].sigmoid() * 2. - 0.5 + grid) * self.stride[i]  # / 8.
                # pxy = (fg_pred[:, :2].sigmoid() * 3. - 1. + grid) * self.stride[i]
                pwh = (fg_pred[:, 2:4].sigmoid() * 2) ** 2 * anch[i][idx] * self.stride[i]  # / 8.
                pxywh = torch.cat([pxy, pwh], dim=-1)
                pxyxy = xywh2xyxy(pxywh)
                pxyxys.append(pxyxy)

            pxyxys = torch.cat(pxyxys, dim=0)
            if pxyxys.shape[0] == 0:
                continue
            p_obj = torch.cat(p_obj, dim=0)
            p_cls = torch.cat(p_cls, dim=0)
            from_which_layer = torch.cat(from_which_layer, dim=0)
            all_b = torch.cat(all_b, dim=0)
            all_a = torch.cat(all_a, dim=0)
            all_gj = torch.cat(all_gj, dim=0)
            all_gi = torch.cat(all_gi, dim=0)
            all_anch = torch.cat(all_anch, dim=0)

            pair_wise_iou = box_iou(txyxy, pxyxys)

            pair_wise_iou_loss = -torch.log(pair_wise_iou + 1e-8)

            top_k, _ = torch.topk(pair_wise_iou, min(10, pair_wise_iou.shape[1]), dim=1)
            dynamic_ks = torch.clamp(top_k.sum(1).int(), min=1)

            gt_cls_per_image = (
                F.one_hot(this_target[:, 1].to(torch.int64), self.nc)
                .float()
                .unsqueeze(1)
                .repeat(1, pxyxys.shape[0], 1)
            )

            num_gt = this_target.shape[0]
            cls_preds_ = (
                p_cls.float().unsqueeze(0).repeat(num_gt, 1, 1).sigmoid_()
                * p_obj.unsqueeze(0).repeat(num_gt, 1, 1).sigmoid_()
            )

            y = cls_preds_.sqrt_()
            pair_wise_cls_loss = F.binary_cross_entropy_with_logits(
                torch.log(y / (1 - y)), gt_cls_per_image, reduction="none"
            ).sum(-1)
            del cls_preds_

            cost = (
                pair_wise_cls_loss
                + 3.0 * pair_wise_iou_loss
            )

            matching_matrix = torch.zeros_like(cost)

            for gt_idx in range(num_gt):
                _, pos_idx = torch.topk(
                    cost[gt_idx], k=dynamic_ks[gt_idx].item(), largest=False
                )
                matching_matrix[gt_idx][pos_idx] = 1.0

            del top_k, dynamic_ks
            anchor_matching_gt = matching_matrix.sum(0)
            if (anchor_matching_gt > 1).sum() > 0:
                _, cost_argmin = torch.min(cost[:, anchor_matching_gt > 1], dim=0)
                matching_matrix[:, anchor_matching_gt > 1] *= 0.0
                matching_matrix[cost_argmin, anchor_matching_gt > 1] = 1.0
            fg_mask_inboxes = matching_matrix.sum(0) > 0.0
            matched_gt_inds = matching_matrix[:, fg_mask_inboxes].argmax(0)

            from_which_layer = from_which_layer[fg_mask_inboxes]
            all_b = all_b[fg_mask_inboxes]
            all_a = all_a[fg_mask_inboxes]
            all_gj = all_gj[fg_mask_inboxes]
            all_gi = all_gi[fg_mask_inboxes]
            all_anch = all_anch[fg_mask_inboxes]

            this_target = this_target[matched_gt_inds]

            for i in range(nl):
                layer_idx = from_which_layer == i
                matching_bs[i].append(all_b[layer_idx])
                matching_as[i].append(all_a[layer_idx])
                matching_gjs[i].append(all_gj[layer_idx])
                matching_gis[i].append(all_gi[layer_idx])
                matching_targets[i].append(this_target[layer_idx])
                matching_anchs[i].append(all_anch[layer_idx])

        for i in range(nl):
            matching_bs[i] = torch.cat(matching_bs[i], dim=0)
            matching_as[i] = torch.cat(matching_as[i], dim=0)
            matching_gjs[i] = torch.cat(matching_gjs[i], dim=0)
            matching_gis[i] = torch.cat(matching_gis[i], dim=0)
            matching_targets[i] = torch.cat(matching_targets[i], dim=0)
            matching_anchs[i] = torch.cat(matching_anchs[i], dim=0)

        return matching_bs, matching_as, matching_gjs, matching_gis, matching_targets, matching_anchs

    def find_3_positive(self, p, targets):
        # Build targets for compute_loss(), input targets(image,class,x,y,w,h)
        na, nt = self.na, targets.shape[0]  # number of anchors, targets
        indices, anch = [], []
        gain = torch.ones(7, device=targets.device)  # normalized to gridspace gain
        ai = torch.arange(na, device=targets.device).float().view(na, 1).repeat(1, nt)  # same as .repeat_interleave(nt)
        targets = torch.cat((targets.repeat(na, 1, 1), ai[:, :, None]), 2)  # append anchor indices

        g = 0.5  # bias
        off = torch.tensor([[0, 0],
                            [1, 0], [0, 1], [-1, 0], [0, -1],  # j,k,l,m
                            # [1, 1], [1, -1], [-1, 1], [-1, -1],  # jk,jm,lk,lm
                            ], device=targets.device).float() * g  # offsets

        for i in range(self.nl):
            anchors = self.anchors[i]
            gain[2:6] = torch.tensor(p[i].shape)[[3, 2, 3, 2]]  # xyxy gain

            # Match targets to anchors
            t = targets * gain
            if nt:
                # Matches
                r = t[:, :, 4:6] / anchors[:, None]  # wh ratio
                j = torch.max(r, 1. / r).max(2)[0] < self.hyp['anchor_t']  # compare
                # j = wh_iou(anchors, t[:, 4:6]) > model.hyp['iou_t']  # iou(3,n)=wh_iou(anchors(3,2), gwh(n,2))
                t = t[j]  # filter

                # Offsets
                gxy = t[:, 2:4]  # grid xy
                gxi = gain[[2, 3]] - gxy  # inverse
                j, k = ((gxy % 1. < g) & (gxy > 1.)).T
                l, m = ((gxi % 1. < g) & (gxi > 1.)).T
                j = torch.stack((torch.ones_like(j), j, k, l, m))
                t = t.repeat((5, 1, 1))[j]
                offsets = (torch.zeros_like(gxy)[None] + off[:, None])[j]
            else:
                t = targets[0]
                offsets = 0

            # Define
            b, c = t[:, :2].long().T  # image, class
            gxy = t[:, 2:4]  # grid xy
            gwh = t[:, 4:6]  # grid wh
            gij = (gxy - offsets).long()
            gi, gj = gij.T  # grid xy indices

            # Append
            a = t[:, 6].long()  # anchor indices
            indices.append((b, a, gj.clamp_(0, gain[3] - 1), gi.clamp_(0, gain[2] - 1)))  # image, anchor, grid indices
            anch.append(anchors[a])  # anchors

        return indices, anch

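
# Minimal sketch of the dynamic-k step used in build_targets above
# (illustrative, random IoUs; hypothetical helper): each ground truth gets
# k = clamp(sum of its top-10 candidate IoUs, min 1) positive assignments.
def _demo_dynamic_k():
    pair_wise_iou = torch.rand(3, 50)  # (num_gt, num_candidate_predictions)
    top_k, _ = torch.topk(pair_wise_iou, min(10, pair_wise_iou.shape[1]), dim=1)
    dynamic_ks = torch.clamp(top_k.sum(1).int(), min=1)
    return dynamic_ks  # one k per ground truth
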
class ComputeLossBinOTA:
    # Compute losses
    def __init__(self, model, autobalance=False):
        super(ComputeLossBinOTA, self).__init__()
        device = next(model.parameters()).device  # get model device
        h = model.hyp  # hyperparameters

        # Define criteria
        BCEcls = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([h['cls_pw']], device=device))
        BCEobj = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([h['obj_pw']], device=device))
        # MSEangle = nn.MSELoss().to(device)

        # Class label smoothing https://arxiv.org/pdf/1902.04103.pdf eqn 3
        self.cp, self.cn = smooth_BCE(eps=h.get('label_smoothing', 0.0))  # positive, negative BCE targets

        # Focal loss
        g = h['fl_gamma']  # focal loss gamma
        if g > 0:
            BCEcls, BCEobj = FocalLoss(BCEcls, g), FocalLoss(BCEobj, g)

        det = model.module.model[-1] if is_parallel(model) else model.model[-1]  # Detect() module
        self.balance = {3: [4.0, 1.0, 0.4]}.get(det.nl, [4.0, 1.0, 0.25, 0.06, .02])  # P3-P7
        self.ssi = list(det.stride).index(16) if autobalance else 0  # stride 16 index
        self.BCEcls, self.BCEobj, self.gr, self.hyp, self.autobalance = BCEcls, BCEobj, model.gr, h, autobalance
        for k in 'na', 'nc', 'nl', 'anchors', 'stride', 'bin_count':
            setattr(self, k, getattr(det, k))

        # xy_bin_sigmoid = SigmoidBin(bin_count=11, min=-0.5, max=1.5, use_loss_regression=False).to(device)
        wh_bin_sigmoid = SigmoidBin(bin_count=self.bin_count, min=0.0, max=4.0, use_loss_regression=False).to(device)
        # angle_bin_sigmoid = SigmoidBin(bin_count=31, min=-1.1, max=1.1, use_loss_regression=False).to(device)
        self.wh_bin_sigmoid = wh_bin_sigmoid

    def __call__(self, p, targets, imgs):  # predictions, targets, model
        device = targets.device
        lcls, lbox, lobj = torch.zeros(1, device=device), torch.zeros(1, device=device), torch.zeros(1, device=device)
        bs, as_, gjs, gis, targets, anchors = self.build_targets(p, targets, imgs)
        pre_gen_gains = [torch.tensor(pp.shape, device=device)[[3, 2, 3, 2]] for pp in p]

        # Losses
        for i, pi in enumerate(p):  # layer index, layer predictions
            b, a, gj, gi = bs[i], as_[i], gjs[i], gis[i]  # image, anchor, gridy, gridx
            tobj = torch.zeros_like(pi[..., 0], device=device)  # target obj

            obj_idx = self.wh_bin_sigmoid.get_length() * 2 + 2  # x,y, w-bce, h-bce # xy_bin_sigmoid.get_length()*2

            n = b.shape[0]  # number of targets
            if n:
                ps = pi[b, a, gj, gi]  # prediction subset corresponding to targets

                # Regression
                grid = torch.stack([gi, gj], dim=1)
                selected_tbox = targets[i][:, 2:6] * pre_gen_gains[i]
                selected_tbox[:, :2] -= grid

                # pxy = ps[:, :2].sigmoid() * 2. - 0.5
                # pxy = ps[:, :2].sigmoid() * 3. - 1.
                # pwh = (ps[:, 2:4].sigmoid() * 2) ** 2 * anchors[i]
                # pbox = torch.cat((pxy, pwh), 1)  # predicted box

                # x_loss, px = xy_bin_sigmoid.training_loss(ps[..., 0:12], tbox[i][..., 0])
                # y_loss, py = xy_bin_sigmoid.training_loss(ps[..., 12:24], tbox[i][..., 1])
                w_loss, pw = self.wh_bin_sigmoid.training_loss(ps[..., 2:(3 + self.bin_count)], selected_tbox[..., 2] / anchors[i][..., 0])
                h_loss, ph = self.wh_bin_sigmoid.training_loss(ps[..., (3 + self.bin_count):obj_idx], selected_tbox[..., 3] / anchors[i][..., 1])

                pw *= anchors[i][..., 0]
                ph *= anchors[i][..., 1]

                px = ps[:, 0].sigmoid() * 2. - 0.5
                py = ps[:, 1].sigmoid() * 2. - 0.5

                lbox += w_loss + h_loss  # + x_loss + y_loss

                # print(f"\n px = {px.shape}, py = {py.shape}, pw = {pw.shape}, ph = {ph.shape} \n")

                pbox = torch.cat((px.unsqueeze(1), py.unsqueeze(1), pw.unsqueeze(1), ph.unsqueeze(1)), 1).to(device)  # predicted box

                iou = bbox_iou(pbox.T, selected_tbox, x1y1x2y2=False, CIoU=True)  # iou(prediction, target)
                lbox += (1.0 - iou).mean()  # iou loss

                # Objectness
                tobj[b, a, gj, gi] = (1.0 - self.gr) + self.gr * iou.detach().clamp(0).type(tobj.dtype)  # iou ratio

                # Classification
                selected_tcls = targets[i][:, 1].long()
                if self.nc > 1:  # cls loss (only if multiple classes)
                    t = torch.full_like(ps[:, (1 + obj_idx):], self.cn, device=device)  # targets
                    t[range(n), selected_tcls] = self.cp
                    lcls += self.BCEcls(ps[:, (1 + obj_idx):], t)  # BCE

                # Append targets to text file
                # with open('targets.txt', 'a') as file:
                #     [file.write('%11.5g ' * 4 % tuple(x) + '\n') for x in torch.cat((txy[i], twh[i]), 1)]

            obji = self.BCEobj(pi[..., obj_idx], tobj)
            lobj += obji * self.balance[i]  # obj loss
            if self.autobalance:
                self.balance[i] = self.balance[i] * 0.9999 + 0.0001 / obji.detach().item()

        if self.autobalance:
            self.balance = [x / self.balance[self.ssi] for x in self.balance]
        lbox *= self.hyp['box']
        lobj *= self.hyp['obj']
        lcls *= self.hyp['cls']
        bs = tobj.shape[0]  # batch size

        loss = lbox + lobj + lcls
        return loss * bs, torch.cat((lbox, lobj, lcls, loss)).detach()

    def build_targets(self, p, targets, imgs):

        # indices, anch = self.find_positive(p, targets)
        indices, anch = self.find_3_positive(p, targets)
        # indices, anch = self.find_4_positive(p, targets)
        # indices, anch = self.find_5_positive(p, targets)
        # indices, anch = self.find_9_positive(p, targets)

        matching_bs = [[] for pp in p]
        matching_as = [[] for pp in p]
        matching_gjs = [[] for pp in p]
        matching_gis = [[] for pp in p]
        matching_targets = [[] for pp in p]
        matching_anchs = [[] for pp in p]

        nl = len(p)

        for batch_idx in range(p[0].shape[0]):

            b_idx = targets[:, 0] == batch_idx
            this_target = targets[b_idx]
            if this_target.shape[0] == 0:
                continue

            txywh = this_target[:, 2:6] * imgs[batch_idx].shape[1]
            txyxy = xywh2xyxy(txywh)

            pxyxys = []
            p_cls = []
            p_obj = []
            from_which_layer = []
            all_b = []
            all_a = []
            all_gj = []
            all_gi = []
            all_anch = []

            for i, pi in enumerate(p):

                obj_idx = self.wh_bin_sigmoid.get_length() * 2 + 2

                b, a, gj, gi = indices[i]
                idx = (b == batch_idx)
                b, a, gj, gi = b[idx], a[idx], gj[idx], gi[idx]
                all_b.append(b)
                all_a.append(a)
                all_gj.append(gj)
                all_gi.append(gi)
                all_anch.append(anch[i][idx])
                from_which_layer.append(torch.ones(size=(len(b),)) * i)

                fg_pred = pi[b, a, gj, gi]
                p_obj.append(fg_pred[:, obj_idx:(obj_idx + 1)])
                p_cls.append(fg_pred[:, (obj_idx + 1):])

                grid = torch.stack([gi, gj], dim=1)
                pxy = (fg_pred[:, :2].sigmoid() * 2. - 0.5 + grid) * self.stride[i]  # / 8.
                # pwh = (fg_pred[:, 2:4].sigmoid() * 2) ** 2 * anch[i][idx] * self.stride[i]  # / 8.
                pw = self.wh_bin_sigmoid.forward(fg_pred[..., 2:(3 + self.bin_count)].sigmoid()) * anch[i][idx][:, 0] * self.stride[i]
                ph = self.wh_bin_sigmoid.forward(fg_pred[..., (3 + self.bin_count):obj_idx].sigmoid()) * anch[i][idx][:, 1] * self.stride[i]

                pxywh = torch.cat([pxy, pw.unsqueeze(1), ph.unsqueeze(1)], dim=-1)
                pxyxy = xywh2xyxy(pxywh)
                pxyxys.append(pxyxy)

            pxyxys = torch.cat(pxyxys, dim=0)
            if pxyxys.shape[0] == 0:
                continue
            p_obj = torch.cat(p_obj, dim=0)
            p_cls = torch.cat(p_cls, dim=0)
            from_which_layer = torch.cat(from_which_layer, dim=0)
            all_b = torch.cat(all_b, dim=0)
            all_a = torch.cat(all_a, dim=0)
            all_gj = torch.cat(all_gj, dim=0)
            all_gi = torch.cat(all_gi, dim=0)
            all_anch = torch.cat(all_anch, dim=0)

            pair_wise_iou = box_iou(txyxy, pxyxys)

            pair_wise_iou_loss = -torch.log(pair_wise_iou + 1e-8)

            top_k, _ = torch.topk(pair_wise_iou, min(10, pair_wise_iou.shape[1]), dim=1)
            dynamic_ks = torch.clamp(top_k.sum(1).int(), min=1)

            gt_cls_per_image = (
                F.one_hot(this_target[:, 1].to(torch.int64), self.nc)
                .float()
                .unsqueeze(1)
                .repeat(1, pxyxys.shape[0], 1)
            )

            num_gt = this_target.shape[0]
            cls_preds_ = (
                p_cls.float().unsqueeze(0).repeat(num_gt, 1, 1).sigmoid_()
                * p_obj.unsqueeze(0).repeat(num_gt, 1, 1).sigmoid_()
            )

            y = cls_preds_.sqrt_()
            pair_wise_cls_loss = F.binary_cross_entropy_with_logits(
                torch.log(y / (1 - y)), gt_cls_per_image, reduction="none"
            ).sum(-1)
            del cls_preds_

            cost = (
                pair_wise_cls_loss
                + 3.0 * pair_wise_iou_loss
            )

            matching_matrix = torch.zeros_like(cost)

            for gt_idx in range(num_gt):
                _, pos_idx = torch.topk(
                    cost[gt_idx], k=dynamic_ks[gt_idx].item(), largest=False
                )
                matching_matrix[gt_idx][pos_idx] = 1.0

            del top_k, dynamic_ks
            anchor_matching_gt = matching_matrix.sum(0)
            if (anchor_matching_gt > 1).sum() > 0:
                _, cost_argmin = torch.min(cost[:, anchor_matching_gt > 1], dim=0)
                matching_matrix[:, anchor_matching_gt > 1] *= 0.0
                matching_matrix[cost_argmin, anchor_matching_gt > 1] = 1.0
            fg_mask_inboxes = matching_matrix.sum(0) > 0.0
            matched_gt_inds = matching_matrix[:, fg_mask_inboxes].argmax(0)

            from_which_layer = from_which_layer[fg_mask_inboxes]
            all_b = all_b[fg_mask_inboxes]
            all_a = all_a[fg_mask_inboxes]
            all_gj = all_gj[fg_mask_inboxes]
            all_gi = all_gi[fg_mask_inboxes]
            all_anch = all_anch[fg_mask_inboxes]

            this_target = this_target[matched_gt_inds]

            for i in range(nl):
                layer_idx = from_which_layer == i
                matching_bs[i].append(all_b[layer_idx])
                matching_as[i].append(all_a[layer_idx])
                matching_gjs[i].append(all_gj[layer_idx])
                matching_gis[i].append(all_gi[layer_idx])
                matching_targets[i].append(this_target[layer_idx])
                matching_anchs[i].append(all_anch[layer_idx])

        for i in range(nl):
            matching_bs[i] = torch.cat(matching_bs[i], dim=0)
            matching_as[i] = torch.cat(matching_as[i], dim=0)
            matching_gjs[i] = torch.cat(matching_gjs[i], dim=0)
            matching_gis[i] = torch.cat(matching_gis[i], dim=0)
            matching_targets[i] = torch.cat(matching_targets[i], dim=0)
            matching_anchs[i] = torch.cat(matching_anchs[i], dim=0)

        return matching_bs, matching_as, matching_gjs, matching_gis, matching_targets, matching_anchs

    def find_3_positive(self, p, targets):
        # Build targets for compute_loss(), input targets(image,class,x,y,w,h)
        na, nt = self.na, targets.shape[0]  # number of anchors, targets
        indices, anch = [], []
        gain = torch.ones(7, device=targets.device)  # normalized to gridspace gain
        ai = torch.arange(na, device=targets.device).float().view(na, 1).repeat(1, nt)  # same as .repeat_interleave(nt)
        targets = torch.cat((targets.repeat(na, 1, 1), ai[:, :, None]), 2)  # append anchor indices

        g = 0.5  # bias
        off = torch.tensor([[0, 0],
                            [1, 0], [0, 1], [-1, 0], [0, -1],  # j,k,l,m
                            # [1, 1], [1, -1], [-1, 1], [-1, -1],  # jk,jm,lk,lm
                            ], device=targets.device).float() * g  # offsets

        for i in range(self.nl):
            anchors = self.anchors[i]
            gain[2:6] = torch.tensor(p[i].shape)[[3, 2, 3, 2]]  # xyxy gain

            # Match targets to anchors
            t = targets * gain
            if nt:
                # Matches
                r = t[:, :, 4:6] / anchors[:, None]  # wh ratio
                j = torch.max(r, 1. / r).max(2)[0] < self.hyp['anchor_t']  # compare
                # j = wh_iou(anchors, t[:, 4:6]) > model.hyp['iou_t']  # iou(3,n)=wh_iou(anchors(3,2), gwh(n,2))
                t = t[j]  # filter

                # Offsets
                gxy = t[:, 2:4]  # grid xy
                gxi = gain[[2, 3]] - gxy  # inverse
                j, k = ((gxy % 1. < g) & (gxy > 1.)).T
                l, m = ((gxi % 1. < g) & (gxi > 1.)).T
                j = torch.stack((torch.ones_like(j), j, k, l, m))
                t = t.repeat((5, 1, 1))[j]
                offsets = (torch.zeros_like(gxy)[None] + off[:, None])[j]
            else:
                t = targets[0]
                offsets = 0

            # Define
            b, c = t[:, :2].long().T  # image, class
            gxy = t[:, 2:4]  # grid xy
            gwh = t[:, 4:6]  # grid wh
            gij = (gxy - offsets).long()
            gi, gj = gij.T  # grid xy indices

            # Append
            a = t[:, 6].long()  # anchor indices
            indices.append((b, a, gj.clamp_(0, gain[3] - 1), gi.clamp_(0, gain[2] - 1)))  # image, anchor, grid indices
            anch.append(anchors[a])  # anchors

        return indices, anch

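
# Minimal sketch of the binned head layout assumed by ComputeLossBinOTA above
# (illustrative; hypothetical helper): channels are [x, y, w-bins, h-bins, obj,
# cls...], so with bin_count bins the objectness index is 2 + 2 * (bin_count + 1).
def _demo_bin_layout(bin_count=10, nc=80):
    wh_len = SigmoidBin(bin_count=bin_count, min=0.0, max=4.0).get_length()
    obj_idx = wh_len * 2 + 2       # matches obj_idx computed in __call__ above
    total_channels = obj_idx + 1 + nc
    return obj_idx, total_channels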