hank1996 committed
Commit b89d5de · 1 Parent(s): 8730aa2

Update app.py

Files changed (1)
app.py +82 -88
app.py CHANGED
@@ -80,6 +80,88 @@ def detect(img,model):
     #stride = int(model.stride.max())  # model stride
     #imgsz = check_img_size(imgsz, s=stride)  # check img_size
     print(weights)
+    if weights == 'yolopv2.pt':
+        stride = 32
+        model = torch.jit.load(weights, map_location=device)
+        model.eval()
+
+        # Set Dataloader
+        vid_path, vid_writer = None, None
+        dataset = LoadImages(source, img_size=imgsz, stride=stride)
+
+        # Run inference
+        if device.type != 'cpu':
+            model(torch.zeros(1, 3, imgsz, imgsz).to(device).type_as(next(model.parameters())))  # run once
+        t0 = time.time()
+        for path, img, im0s, vid_cap in dataset:
+            img = torch.from_numpy(img).to(device)
+            img = img.half() if half else img.float()  # uint8 to fp16/32
+            img /= 255.0  # 0 - 255 to 0.0 - 1.0
+            print(img.shape)
+
+            if img.ndimension() == 3:
+                img = img.unsqueeze(0)
+
+            # Inference
+            t1 = time_synchronized()
+            [pred, anchor_grid], seg, ll = model(img)
+            t2 = time_synchronized()
+
+            # waste time: the incompatibility of torch.jit.trace causes extra time consumption in the demo version,
+            # but this problem will not appear in the official version
+            tw1 = time_synchronized()
+            pred = split_for_trace_model(pred, anchor_grid)
+            tw2 = time_synchronized()
+
+            # Apply NMS
+            t3 = time_synchronized()
+            pred = non_max_suppression(pred, opt.conf_thres, opt.iou_thres, classes=opt.classes, agnostic=opt.agnostic_nms)
+            t4 = time_synchronized()
+
+            da_seg_mask = driving_area_mask(seg)
+            ll_seg_mask = lane_line_mask(ll)
+
+            print(da_seg_mask.shape)
+            # Process detections
+            for i, det in enumerate(pred):  # detections per image
+                p, s, im0, frame = path, '', im0s, getattr(dataset, 'frame', 0)
+
+                p = Path(p)  # to Path
+                #save_path = str(save_dir / p.name)  # img.jpg
+                #txt_path = str(save_dir / 'labels' / p.stem) + ('' if dataset.mode == 'image' else f'_{frame}')  # img.txt
+                s += '%gx%g ' % img.shape[2:]  # print string
+                gn = torch.tensor(im0.shape)[[1, 0, 1, 0]]  # normalization gain whwh
+                if len(det):
+                    # Rescale boxes from img_size to im0 size
+                    det[:, :4] = scale_coords(img.shape[2:], det[:, :4], im0.shape).round()
+
+                    # Print results
+                    #for c in det[:, -1].unique():
+                    #    n = (det[:, -1] == c).sum()  # detections per class
+                    #    s += f"{n} {names[int(c)]}{'s' * (n > 1)}, "  # add to string
+
+                    # Write results
+                    for *xyxy, conf, cls in reversed(det):
+                        if save_txt:  # Write to file
+                            xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist()  # normalized xywh
+                            line = (cls, *xywh, conf) if opt.save_conf else (cls, *xywh)  # label format
+
+                        if save_img:  # Add bbox to image
+                            plot_one_box(xyxy, im0, line_thickness=3)
+
+                # Print time (inference)
+                print(f'{s}Done. ({t2 - t1:.3f}s)')
+                show_seg_result(im0, (da_seg_mask, ll_seg_mask), is_demo=True)
+
+            #inf_time.update(t2-t1, img.size(0))
+            #nms_time.update(t4-t3, img.size(0))
+            #waste_time.update(tw2-tw1, img.size(0))
+            #print('inf : (%.4fs/frame) nms : (%.4fs/frame)' % (inf_time.avg, nms_time.avg))
+            #print(f'Done. ({time.time() - t0:.3f}s)')
+            #print(im0.shape)
     if weights == 'yolop.pt':
         weights = 'End-to-end.pth'
         print(weights)
@@ -183,94 +265,6 @@ def detect(img,model):
 
     print('Done. (%.3fs)' % (time.time() - t0))
     print('inf : (%.4fs/frame) nms : (%.4fs/frame)' % (inf_time.avg, nms_time.avg))
-
-
-
-    if weights == 'yolopv2.pt':
-
-        stride = 32
-        model = torch.jit.load(weights, map_location=device)
-        model.eval()
-
-        # Set Dataloader
-        vid_path, vid_writer = None, None
-        dataset = LoadImages(source, img_size=imgsz, stride=stride)
-
-        # Run inference
-        if device.type != 'cpu':
-            model(torch.zeros(1, 3, imgsz, imgsz).to(device).type_as(next(model.parameters())))  # run once
-        t0 = time.time()
-        for path, img, im0s, vid_cap in dataset:
-            img = torch.from_numpy(img).to(device)
-            img = img.half() if half else img.float()  # uint8 to fp16/32
-            img /= 255.0  # 0 - 255 to 0.0 - 1.0
-            print(img.shape)
-
-            if img.ndimension() == 3:
-                img = img.unsqueeze(0)
-
-            # Inference
-            t1 = time_synchronized()
-            [pred, anchor_grid], seg, ll = model(img)
-            t2 = time_synchronized()
-
-            # waste time: the incompatibility of torch.jit.trace causes extra time consumption in the demo version,
-            # but this problem will not appear in the official version
-            tw1 = time_synchronized()
-            pred = split_for_trace_model(pred, anchor_grid)
-            tw2 = time_synchronized()
-
-            # Apply NMS
-            t3 = time_synchronized()
-            pred = non_max_suppression(pred, opt.conf_thres, opt.iou_thres, classes=opt.classes, agnostic=opt.agnostic_nms)
-            t4 = time_synchronized()
-
-            da_seg_mask = driving_area_mask(seg)
-            ll_seg_mask = lane_line_mask(ll)
-
-            print(da_seg_mask.shape)
-            # Process detections
-            for i, det in enumerate(pred):  # detections per image
-                p, s, im0, frame = path, '', im0s, getattr(dataset, 'frame', 0)
-
-                p = Path(p)  # to Path
-                #save_path = str(save_dir / p.name)  # img.jpg
-                #txt_path = str(save_dir / 'labels' / p.stem) + ('' if dataset.mode == 'image' else f'_{frame}')  # img.txt
-                s += '%gx%g ' % img.shape[2:]  # print string
-                gn = torch.tensor(im0.shape)[[1, 0, 1, 0]]  # normalization gain whwh
-                if len(det):
-                    # Rescale boxes from img_size to im0 size
-                    det[:, :4] = scale_coords(img.shape[2:], det[:, :4], im0.shape).round()
-
-                    # Print results
-                    #for c in det[:, -1].unique():
-                    #    n = (det[:, -1] == c).sum()  # detections per class
-                    #    s += f"{n} {names[int(c)]}{'s' * (n > 1)}, "  # add to string
-
-                    # Write results
-                    for *xyxy, conf, cls in reversed(det):
-                        if save_txt:  # Write to file
-                            xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist()  # normalized xywh
-                            line = (cls, *xywh, conf) if opt.save_conf else (cls, *xywh)  # label format
-
-                        if save_img:  # Add bbox to image
-                            plot_one_box(xyxy, im0, line_thickness=3)
-
-                # Print time (inference)
-                print(f'{s}Done. ({t2 - t1:.3f}s)')
-                show_seg_result(im0, (da_seg_mask, ll_seg_mask), is_demo=True)
-
-            #inf_time.update(t2-t1, img.size(0))
-            #nms_time.update(t4-t3, img.size(0))
-            #waste_time.update(tw2-tw1, img.size(0))
-            #print('inf : (%.4fs/frame) nms : (%.4fs/frame)' % (inf_time.avg, nms_time.avg))
-            #print(f'Done. ({time.time() - t0:.3f}s)')
-            #print(im0.shape)
 
     return Image.fromarray(im0[:,:,::-1])