hank1996 committed
Commit 5eb9018 · 1 Parent(s): 73d7439

Update app.py

Files changed (1)
  1. app.py +104 -103
app.py CHANGED
@@ -82,109 +82,110 @@ def detect(img,model):
     #model = attempt_load(weights, map_location=device)  # load FP32 model
     #stride = int(model.stride.max())  # model stride
     #imgsz = check_img_size(imgsz, s=stride)  # check img_size
-    #print(weights)
-    stride = 32
-    model = torch.jit.load(weights, map_location=device)
-    model.eval()
-
-    # Set Dataloader
-    vid_path, vid_writer = None, None
-    dataset = LoadImages(source, img_size=imgsz, stride=stride)
-
-    # Run inference
-    if device.type != 'cpu':
-        model(torch.zeros(1, 3, imgsz, imgsz).to(device).type_as(next(model.parameters())))  # run once
-    t0 = time.time()
-    for path, img, im0s, vid_cap in dataset:
-        img = torch.from_numpy(img).to(device)
-        img = img.half() if half else img.float()  # uint8 to fp16/32
-        img /= 255.0  # 0 - 255 to 0.0 - 1.0
-        print(img.shape)
-
-        if img.ndimension() == 3:
-            img = img.unsqueeze(0)
-
-        # Inference
-        t1 = time_synchronized()
-        [pred, anchor_grid], seg, ll = model(img)
-        t2 = time_synchronized()
-
-        # waste time: the incompatibility of torch.jit.trace causes extra time consumption in the demo version,
-        # but this problem will not appear in the official version
-        tw1 = time_synchronized()
-        pred = split_for_trace_model(pred, anchor_grid)
-        tw2 = time_synchronized()
-
-        # Apply NMS
-        t3 = time_synchronized()
-        pred = non_max_suppression(pred, opt.conf_thres, opt.iou_thres, classes=opt.classes, agnostic=opt.agnostic_nms)
-        t4 = time_synchronized()
-
-        da_seg_mask = driving_area_mask(seg)
-        ll_seg_mask = lane_line_mask(ll)
-
-        print(da_seg_mask.shape)
-        # Process detections
-        for i, det in enumerate(pred):  # detections per image
-
-            p, s, im0, frame = path, '', im0s, getattr(dataset, 'frame', 0)
-
-            p = Path(p)  # to Path
-            save_path = str(save_dir / p.name)  # img.jpg
-            txt_path = str(save_dir / 'labels' / p.stem) + ('' if dataset.mode == 'image' else f'_{frame}')  # img.txt
-            s += '%gx%g ' % img.shape[2:]  # print string
-            gn = torch.tensor(im0.shape)[[1, 0, 1, 0]]  # normalization gain whwh
-            if len(det):
-                # Rescale boxes from img_size to im0 size
-                det[:, :4] = scale_coords(img.shape[2:], det[:, :4], im0.shape).round()
-
-                # Print results
-                #for c in det[:, -1].unique():
-                    #n = (det[:, -1] == c).sum()  # detections per class
-                    #s += f"{n} {names[int(c)]}{'s' * (n > 1)}, "  # add to string
-
-                # Write results
-                for *xyxy, conf, cls in reversed(det):
-                    if save_txt:  # Write to file
-                        xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist()  # normalized xywh
-                        line = (cls, *xywh, conf) if opt.save_conf else (cls, *xywh)  # label format
-                        with open(txt_path + '.txt', 'a') as f:
-                            f.write(('%g ' * len(line)).rstrip() % line + '\n')
-
-                    if save_img:  # Add bbox to image
-                        plot_one_box(xyxy, im0, line_thickness=3)
-
-            # Print time (inference)
-            print(f'{s}Done. ({t2 - t1:.3f}s)')
-            show_seg_result(im0, (da_seg_mask, ll_seg_mask), is_demo=True)
-
-            # Save results (image with detections)
-            if save_img:
-                if dataset.mode == 'image':
-                    cv2.imwrite(save_path, im0)
-                    print(f" The image with the result is saved in: {save_path}")
-                else:  # 'video' or 'stream'
-                    if vid_path != save_path:  # new video
-                        vid_path = save_path
-                        if isinstance(vid_writer, cv2.VideoWriter):
-                            vid_writer.release()  # release previous video writer
-                        if vid_cap:  # video
-                            fps = vid_cap.get(cv2.CAP_PROP_FPS)
-                            #w = int(vid_cap.get(cv2.CAP_PROP_FRAME_WIDTH))
-                            #h = int(vid_cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
-                            w, h = im0.shape[1], im0.shape[0]
-                        else:  # stream
-                            fps, w, h = 30, im0.shape[1], im0.shape[0]
-                            save_path += '.mp4'
-                        vid_writer = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*'mp4v'), fps, (w, h))
-                    vid_writer.write(im0)
-
-        inf_time.update(t2-t1, img.size(0))
-        nms_time.update(t4-t3, img.size(0))
-        waste_time.update(tw2-tw1, img.size(0))
-    print('inf : (%.4fs/frame)   nms : (%.4fs/frame)' % (inf_time.avg, nms_time.avg))
-    print(f'Done. ({time.time() - t0:.3f}s)')
-    print(im0.shape)
+    print(weights)
+    if weights == 'yolopv2.pt':
+        stride = 32
+        model = torch.jit.load(weights, map_location=device)
+        model.eval()
+
+        # Set Dataloader
+        vid_path, vid_writer = None, None
+        dataset = LoadImages(source, img_size=imgsz, stride=stride)
+
+        # Run inference
+        if device.type != 'cpu':
+            model(torch.zeros(1, 3, imgsz, imgsz).to(device).type_as(next(model.parameters())))  # run once
+        t0 = time.time()
+        for path, img, im0s, vid_cap in dataset:
+            img = torch.from_numpy(img).to(device)
+            img = img.half() if half else img.float()  # uint8 to fp16/32
+            img /= 255.0  # 0 - 255 to 0.0 - 1.0
+            print(img.shape)
+
+            if img.ndimension() == 3:
+                img = img.unsqueeze(0)
+
+            # Inference
+            t1 = time_synchronized()
+            [pred, anchor_grid], seg, ll = model(img)
+            t2 = time_synchronized()
+
+            # waste time: the incompatibility of torch.jit.trace causes extra time consumption in the demo version,
+            # but this problem will not appear in the official version
+            tw1 = time_synchronized()
+            pred = split_for_trace_model(pred, anchor_grid)
+            tw2 = time_synchronized()
+
+            # Apply NMS
+            t3 = time_synchronized()
+            pred = non_max_suppression(pred, opt.conf_thres, opt.iou_thres, classes=opt.classes, agnostic=opt.agnostic_nms)
+            t4 = time_synchronized()
+
+            da_seg_mask = driving_area_mask(seg)
+            ll_seg_mask = lane_line_mask(ll)
+
+            print(da_seg_mask.shape)
+            # Process detections
+            for i, det in enumerate(pred):  # detections per image
+
+                p, s, im0, frame = path, '', im0s, getattr(dataset, 'frame', 0)
+
+                p = Path(p)  # to Path
+                save_path = str(save_dir / p.name)  # img.jpg
+                txt_path = str(save_dir / 'labels' / p.stem) + ('' if dataset.mode == 'image' else f'_{frame}')  # img.txt
+                s += '%gx%g ' % img.shape[2:]  # print string
+                gn = torch.tensor(im0.shape)[[1, 0, 1, 0]]  # normalization gain whwh
+                if len(det):
+                    # Rescale boxes from img_size to im0 size
+                    det[:, :4] = scale_coords(img.shape[2:], det[:, :4], im0.shape).round()
+
+                    # Print results
+                    #for c in det[:, -1].unique():
+                        #n = (det[:, -1] == c).sum()  # detections per class
+                        #s += f"{n} {names[int(c)]}{'s' * (n > 1)}, "  # add to string
+
+                    # Write results
+                    for *xyxy, conf, cls in reversed(det):
+                        if save_txt:  # Write to file
+                            xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist()  # normalized xywh
+                            line = (cls, *xywh, conf) if opt.save_conf else (cls, *xywh)  # label format
+                            with open(txt_path + '.txt', 'a') as f:
+                                f.write(('%g ' * len(line)).rstrip() % line + '\n')
+
+                        if save_img:  # Add bbox to image
+                            plot_one_box(xyxy, im0, line_thickness=3)
+
+                # Print time (inference)
+                print(f'{s}Done. ({t2 - t1:.3f}s)')
+                show_seg_result(im0, (da_seg_mask, ll_seg_mask), is_demo=True)
+
+                # Save results (image with detections)
+                if save_img:
+                    if dataset.mode == 'image':
+                        cv2.imwrite(save_path, im0)
+                        print(f" The image with the result is saved in: {save_path}")
+                    else:  # 'video' or 'stream'
+                        if vid_path != save_path:  # new video
+                            vid_path = save_path
+                            if isinstance(vid_writer, cv2.VideoWriter):
+                                vid_writer.release()  # release previous video writer
+                            if vid_cap:  # video
+                                fps = vid_cap.get(cv2.CAP_PROP_FPS)
+                                #w = int(vid_cap.get(cv2.CAP_PROP_FRAME_WIDTH))
+                                #h = int(vid_cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
+                                w, h = im0.shape[1], im0.shape[0]
+                            else:  # stream
+                                fps, w, h = 30, im0.shape[1], im0.shape[0]
+                                save_path += '.mp4'
+                            vid_writer = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*'mp4v'), fps, (w, h))
+                        vid_writer.write(im0)
+
+            #inf_time.update(t2-t1, img.size(0))
+            #nms_time.update(t4-t3, img.size(0))
+            #waste_time.update(tw2-tw1, img.size(0))
+        #print('inf : (%.4fs/frame)   nms : (%.4fs/frame)' % (inf_time.avg, nms_time.avg))
+        #print(f'Done. ({time.time() - t0:.3f}s)')
+        #print(im0.shape)

     return Image.fromarray(im0[:,:,::-1])
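
In short, the commit enables the print(weights) debug line, wraps the entire TorchScript load-and-inference path in a weights == 'yolopv2.pt' guard (re-indenting the block one level), and comments out the inf_time/nms_time/waste_time bookkeeping. Note that the final return Image.fromarray(im0[:,:,::-1]) stays outside the guard, so a weights value other than 'yolopv2.pt' would reach the return with im0 unbound; presumably later commits add further branches.

The snippet below is a minimal, self-contained sketch of the guarded load-and-warm-up pattern, not code from app.py; ToyModel, toy.pt, and the 640x640 input size are hypothetical stand-ins for the real YOLOPv2 archive and image size.

import torch

class ToyModel(torch.nn.Module):
    # Hypothetical stand-in for the YOLOPv2 network.
    def forward(self, x):
        return x * 2

# Create a stand-in TorchScript archive in place of the real yolopv2.pt.
torch.jit.script(ToyModel()).save('toy.pt')

weights = 'toy.pt'
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

if weights == 'toy.pt':  # app.py keys this guard on 'yolopv2.pt'
    model = torch.jit.load(weights, map_location=device)  # deserialize TorchScript model
    model.eval()  # inference mode
    if device.type != 'cpu':
        # Warm-up pass so one-time CUDA setup cost is not billed to the
        # first real frame (mirrors the "run once" call in app.py).
        model(torch.zeros(1, 3, 640, 640).to(device))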