hank1996 committed
Commit 375525b · 1 Parent(s): ad0bd23

Update app.py

Files changed (1)
  1. app.py +150 -147
app.py CHANGED
@@ -36,153 +36,156 @@ from PIL import Image
 
 
 def detect(img,model):
-    parser = argparse.ArgumentParser()
-    parser.add_argument('--weights', nargs='+', type=str, default=model+".pt", help='model.pt path(s)')
-    parser.add_argument('--source', type=str, default='Inference/', help='source') # file/folder, 0 for webcam
-    parser.add_argument('--img-size', type=int, default=640, help='inference size (pixels)')
-    parser.add_argument('--conf-thres', type=float, default=0.25, help='object confidence threshold')
-    parser.add_argument('--iou-thres', type=float, default=0.45, help='IOU threshold for NMS')
-    parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
-    parser.add_argument('--view-img', action='store_true', help='display results')
-    parser.add_argument('--save-txt', action='store_true', help='save results to *.txt')
-    parser.add_argument('--save-conf', action='store_true', help='save confidences in --save-txt labels')
-    parser.add_argument('--nosave', action='store_true', help='do not save images/videos')
-    parser.add_argument('--classes', nargs='+', type=int, help='filter by class: --class 0, or --class 0 2 3')
-    parser.add_argument('--agnostic-nms', action='store_true', help='class-agnostic NMS')
-    parser.add_argument('--augment', action='store_true', help='augmented inference')
-    parser.add_argument('--update', action='store_true', help='update all models')
-    parser.add_argument('--project', default='runs/detect', help='save results to project/name')
-    parser.add_argument('--name', default='exp', help='save results to project/name')
-    parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment')
-    parser.add_argument('--trace', action='store_true', help='trace model')
-    opt = parser.parse_args()
-    img.save("Inference/test.jpg")
-    source, weights, view_img, save_txt, imgsz, trace = opt.source, opt.weights, opt.view_img, opt.save_txt, opt.img_size, opt.trace
-    save_img = True # save inference images
-    #webcam = source.isnumeric() or source.endswith('.txt') or source.lower().startswith(
-    #('rtsp://', 'rtmp://', 'http://', 'https://'))
-    #print(webcam)
-    # Directories
-    save_dir = Path(increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok)) # increment run
-    (save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True) # make dir
-
-    # Initialize
-    set_logging()
-    device = select_device(opt.device)
-    print(device)
-    half = device.type != 'cpu' # half precision only supported on CUDA
-
-    # Load model
-    inf_time = AverageMeter()
-    waste_time = AverageMeter()
-    nms_time = AverageMeter()
-
-    # Load model
-    #model = attempt_load(weights, map_location=device) # load FP32 model
-    #stride = int(model.stride.max()) # model stride
-    #imgsz = check_img_size(imgsz, s=stride) # check img_size
-    print(weights)
-    stride = 32
-    model = torch.jit.load(weights, map_location=device)
-    model.eval()
-
-    # Set Dataloader
-    vid_path, vid_writer = None, None
-    dataset = LoadImages(source, img_size=imgsz, stride=stride)
-
-    # Run inference
-    if device.type != 'cpu':
-        model(torch.zeros(1, 3, imgsz, imgsz).to(device).type_as(next(model.parameters()))) # run once
-    t0 = time.time()
-    for path, img, im0s, vid_cap in dataset:
-        img = torch.from_numpy(img).to(device)
-        img = img.half() if half else img.float() # uint8 to fp16/32
-        img /= 255.0 # 0 - 255 to 0.0 - 1.0
-
-        if img.ndimension() == 3:
-            img = img.unsqueeze(0)
-
-        # Inference
-        t1 = time_synchronized()
-        [pred,anchor_grid],seg,ll = model(img)
-        t2 = time_synchronized()
-
-        # waste time: the incompatibility of torch.jit.trace causes extra time consumption in demo version
-        # but this problem will not appear in official version
-        tw1 = time_synchronized()
-        pred = split_for_trace_model(pred,anchor_grid)
-        tw2 = time_synchronized()
-
-        # Apply NMS
-        t3 = time_synchronized()
-        pred = non_max_suppression(pred, opt.conf_thres, opt.iou_thres, classes=opt.classes, agnostic=opt.agnostic_nms)
-        t4 = time_synchronized()
-
-        da_seg_mask = driving_area_mask(seg)
-        ll_seg_mask = lane_line_mask(ll)
-
-        # Process detections
-        for i, det in enumerate(pred): # detections per image
-
-            p, s, im0, frame = path, '', im0s, getattr(dataset, 'frame', 0)
-
-            p = Path(p) # to Path
-            save_path = str(save_dir / p.name) # img.jpg
-            txt_path = str(save_dir / 'labels' / p.stem) + ('' if dataset.mode == 'image' else f'_{frame}') # img.txt
-            s += '%gx%g ' % img.shape[2:] # print string
-            gn = torch.tensor(im0.shape)[[1, 0, 1, 0]] # normalization gain whwh
-            if len(det):
-                # Rescale boxes from img_size to im0 size
-                det[:, :4] = scale_coords(img.shape[2:], det[:, :4], im0.shape).round()
-
-                # Print results
-                for c in det[:, -1].unique():
-                    n = (det[:, -1] == c).sum() # detections per class
-                    #s += f"{n} {names[int(c)]}{'s' * (n > 1)}, " # add to string
-
-                # Write results
-                for *xyxy, conf, cls in reversed(det):
-                    if save_txt: # Write to file
-                        xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist() # normalized xywh
-                        line = (cls, *xywh, conf) if opt.save_conf else (cls, *xywh) # label format
-                        with open(txt_path + '.txt', 'a') as f:
-                            f.write(('%g ' * len(line)).rstrip() % line + '\n')
-
-                    if save_img: # Add bbox to image
-                        plot_one_box(xyxy, im0, line_thickness=3)
-
-            # Print time (inference)
-            print(f'{s}Done. ({t2 - t1:.3f}s)')
-            show_seg_result(im0, (da_seg_mask,ll_seg_mask), is_demo=True)
-
-            # Save results (image with detections)
-            if save_img:
-                if dataset.mode == 'image':
-                    cv2.imwrite(save_path, im0)
-                    print(f" The image with the result is saved in: {save_path}")
-                else: # 'video' or 'stream'
-                    if vid_path != save_path: # new video
-                        vid_path = save_path
-                        if isinstance(vid_writer, cv2.VideoWriter):
-                            vid_writer.release() # release previous video writer
-                        if vid_cap: # video
-                            fps = vid_cap.get(cv2.CAP_PROP_FPS)
-                            #w = int(vid_cap.get(cv2.CAP_PROP_FRAME_WIDTH))
-                            #h = int(vid_cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
-                            w,h = im0.shape[1], im0.shape[0]
-                        else: # stream
-                            fps, w, h = 30, im0.shape[1], im0.shape[0]
-                        save_path += '.mp4'
-                        vid_writer = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*'mp4v'), fps, (w, h))
-                    vid_writer.write(im0)
-
-        inf_time.update(t2-t1,img.size(0))
-        nms_time.update(t4-t3,img.size(0))
-        waste_time.update(tw2-tw1,img.size(0))
-    print('inf : (%.4fs/frame) nms : (%.4fs/frame)' % (inf_time.avg,nms_time.avg))
-    print(f'Done. ({time.time() - t0:.3f}s)')
-    print(im0.shape)
-
+    with torch.no_grad():
+        parser = argparse.ArgumentParser()
+        parser.add_argument('--weights', nargs='+', type=str, default=model+".pt", help='model.pt path(s)')
+        parser.add_argument('--source', type=str, default='Inference/', help='source') # file/folder, 0 for webcam
+        parser.add_argument('--img-size', type=int, default=640, help='inference size (pixels)')
+        parser.add_argument('--conf-thres', type=float, default=0.25, help='object confidence threshold')
+        parser.add_argument('--iou-thres', type=float, default=0.45, help='IOU threshold for NMS')
+        parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
+        parser.add_argument('--view-img', action='store_true', help='display results')
+        parser.add_argument('--save-txt', action='store_true', help='save results to *.txt')
+        parser.add_argument('--save-conf', action='store_true', help='save confidences in --save-txt labels')
+        parser.add_argument('--nosave', action='store_true', help='do not save images/videos')
+        parser.add_argument('--classes', nargs='+', type=int, help='filter by class: --class 0, or --class 0 2 3')
+        parser.add_argument('--agnostic-nms', action='store_true', help='class-agnostic NMS')
+        parser.add_argument('--augment', action='store_true', help='augmented inference')
+        parser.add_argument('--update', action='store_true', help='update all models')
+        parser.add_argument('--project', default='runs/detect', help='save results to project/name')
+        parser.add_argument('--name', default='exp', help='save results to project/name')
+        parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment')
+        parser.add_argument('--trace', action='store_true', help='trace model')
+        opt = parser.parse_args()
+        img.save("Inference/test.jpg")
+        source, weights, view_img, save_txt, imgsz, trace = opt.source, opt.weights, opt.view_img, opt.save_txt, opt.img_size, opt.trace
+        save_img = True # save inference images
+        #webcam = source.isnumeric() or source.endswith('.txt') or source.lower().startswith(
+        #('rtsp://', 'rtmp://', 'http://', 'https://'))
+        #print(webcam)
+        # Directories
+        save_dir = Path(increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok)) # increment run
+        (save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True) # make dir
+
+        # Initialize
+        set_logging()
+        device = select_device(opt.device)
+        #print(device)
+        half = device.type != 'cpu' # half precision only supported on CUDA
+
+        # Load model
+        inf_time = AverageMeter()
+        waste_time = AverageMeter()
+        nms_time = AverageMeter()
+
+        # Load model
+        #model = attempt_load(weights, map_location=device) # load FP32 model
+        #stride = int(model.stride.max()) # model stride
+        #imgsz = check_img_size(imgsz, s=stride) # check img_size
+        #print(weights)
+        stride = 32
+        model = torch.jit.load(weights, map_location=device)
+        model.eval()
+
+        # Set Dataloader
+        vid_path, vid_writer = None, None
+        dataset = LoadImages(source, img_size=imgsz, stride=stride)
+
+        # Run inference
+        if device.type != 'cpu':
+            model(torch.zeros(1, 3, imgsz, imgsz).to(device).type_as(next(model.parameters()))) # run once
+        t0 = time.time()
+        for path, img, im0s, vid_cap in dataset:
+            img = torch.from_numpy(img).to(device)
+            img = img.half() if half else img.float() # uint8 to fp16/32
+            img /= 255.0 # 0 - 255 to 0.0 - 1.0
+            print(img.shape)
+
+            if img.ndimension() == 3:
+                img = img.unsqueeze(0)
+
+            # Inference
+            t1 = time_synchronized()
+            [pred,anchor_grid],seg,ll = model(img)
+            t2 = time_synchronized()
+
+            # waste time: the incompatibility of torch.jit.trace causes extra time consumption in demo version
+            # but this problem will not appear in official version
+            tw1 = time_synchronized()
+            pred = split_for_trace_model(pred,anchor_grid)
+            tw2 = time_synchronized()
+
+            # Apply NMS
+            t3 = time_synchronized()
+            pred = non_max_suppression(pred, opt.conf_thres, opt.iou_thres, classes=opt.classes, agnostic=opt.agnostic_nms)
+            t4 = time_synchronized()
+
+            da_seg_mask = driving_area_mask(seg)
+            ll_seg_mask = lane_line_mask(ll)
+
+            print(da_seg_mask.shape)
+            # Process detections
+            for i, det in enumerate(pred): # detections per image
+
+                p, s, im0, frame = path, '', im0s, getattr(dataset, 'frame', 0)
+
+                p = Path(p) # to Path
+                save_path = str(save_dir / p.name) # img.jpg
+                txt_path = str(save_dir / 'labels' / p.stem) + ('' if dataset.mode == 'image' else f'_{frame}') # img.txt
+                s += '%gx%g ' % img.shape[2:] # print string
+                gn = torch.tensor(im0.shape)[[1, 0, 1, 0]] # normalization gain whwh
+                if len(det):
+                    # Rescale boxes from img_size to im0 size
+                    det[:, :4] = scale_coords(img.shape[2:], det[:, :4], im0.shape).round()
+
+                    # Print results
+                    for c in det[:, -1].unique():
+                        n = (det[:, -1] == c).sum() # detections per class
+                        #s += f"{n} {names[int(c)]}{'s' * (n > 1)}, " # add to string
+
+                    # Write results
+                    for *xyxy, conf, cls in reversed(det):
+                        if save_txt: # Write to file
+                            xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist() # normalized xywh
+                            line = (cls, *xywh, conf) if opt.save_conf else (cls, *xywh) # label format
+                            with open(txt_path + '.txt', 'a') as f:
+                                f.write(('%g ' * len(line)).rstrip() % line + '\n')
+
+                        if save_img: # Add bbox to image
+                            plot_one_box(xyxy, im0, line_thickness=3)
+
+                # Print time (inference)
+                print(f'{s}Done. ({t2 - t1:.3f}s)')
+                show_seg_result(im0, (da_seg_mask,ll_seg_mask), is_demo=True)
+
+                # Save results (image with detections)
+                if save_img:
+                    if dataset.mode == 'image':
+                        cv2.imwrite(save_path, im0)
+                        print(f" The image with the result is saved in: {save_path}")
+                    else: # 'video' or 'stream'
+                        if vid_path != save_path: # new video
+                            vid_path = save_path
+                            if isinstance(vid_writer, cv2.VideoWriter):
+                                vid_writer.release() # release previous video writer
+                            if vid_cap: # video
+                                fps = vid_cap.get(cv2.CAP_PROP_FPS)
+                                #w = int(vid_cap.get(cv2.CAP_PROP_FRAME_WIDTH))
+                                #h = int(vid_cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
+                                w,h = im0.shape[1], im0.shape[0]
+                            else: # stream
+                                fps, w, h = 30, im0.shape[1], im0.shape[0]
+                            save_path += '.mp4'
+                            vid_writer = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*'mp4v'), fps, (w, h))
+                        vid_writer.write(im0)
+
+            inf_time.update(t2-t1,img.size(0))
+            nms_time.update(t4-t3,img.size(0))
+            waste_time.update(tw2-tw1,img.size(0))
+        print('inf : (%.4fs/frame) nms : (%.4fs/frame)' % (inf_time.avg,nms_time.avg))
+        print(f'Done. ({time.time() - t0:.3f}s)')
+        print(im0.shape)
+
     return Image.fromarray(im0[:,:,::-1])
 
 
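
Net effect of this commit: the whole body of detect() now runs under with torch.no_grad(): (re-indenting every line, which accounts for the +150/-147), print(device) and print(weights) are commented out, and two debug shape prints (img.shape, da_seg_mask.shape) are added. Below is a minimal, self-contained sketch of why the no_grad() wrapper matters for an inference-only demo like this one; the tiny scripted Conv2d is an illustrative stand-in for the TorchScript weights that app.py loads with torch.jit.load, not the repo's actual model:

    import torch

    # Illustrative stand-in for the YOLOPv2 TorchScript model (assumption,
    # not the repo's weights).
    model = torch.jit.script(torch.nn.Conv2d(3, 8, 3))
    model.eval()

    x = torch.zeros(1, 3, 640, 640)  # NCHW dummy at the --img-size default of 640

    # Without no_grad(), each forward pass records an autograd graph that an
    # inference-only demo never consumes; inside the context, no graph is built.
    with torch.no_grad():
        y = model(x)

    print(y.requires_grad)  # False: the output carries no gradient history

Skipping graph construction keeps per-request memory flat (activations are not retained for a backward pass that never happens) and shaves a little compute, which is why wrapping a serving path in torch.no_grad() is the standard pattern for demos like this Space.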