Mauricio Guerta committed on
Commit
c104dd9
·
1 Parent(s): 1152ac9

Ajuste tensor

Browse files
Files changed (3) hide show
  1. app.py +43 -24
  2. small-vehicles1.jpeg +0 -0
  3. zidane.jpg +0 -0
app.py CHANGED
@@ -1,18 +1,24 @@
1
  import gradio as gr
2
  import torch
 
3
  import yolov7
4
 
5
 
6
  # Images
7
- torch.hub.download_url_to_file('https://github.com/ultralytics/yolov5/raw/master/data/images/zidane.jpg', 'zidane.jpg')
8
- torch.hub.download_url_to_file('https://raw.githubusercontent.com/obss/sahi/main/tests/data/small-vehicles1.jpeg', 'small-vehicles1.jpeg')
9
 
 
 
 
 
 
10
  def yolov7_inference(
11
  image: gr.inputs.Image = None,
12
- model_path: gr.inputs.Dropdown = None,
13
- image_size: gr.inputs.Slider = 640,
14
- conf_threshold: gr.inputs.Slider = 0.25,
15
- iou_threshold: gr.inputs.Slider = 0.45,
16
  ):
17
  """
18
  YOLOv7 inference function
@@ -30,35 +36,48 @@ def yolov7_inference(
30
  model.conf = conf_threshold
31
  model.iou = iou_threshold
32
  results = model([image], size=image_size)
33
- return results.render()[0]
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
34
 
35
 
36
  inputs = [
37
  gr.inputs.Image(type="pil", label="Input Image"),
38
- gr.inputs.Dropdown(
39
- choices=[
40
- "kadirnar/yolov7-tiny-v0.1",
41
- "kadirnar/yolov7-v0.1",
42
- ],
43
- default="kadirnar/yolov7-tiny-v0.1",
44
- label="Model",
45
- ),
46
- gr.inputs.Slider(minimum=320, maximum=1280, default=640, step=32, label="Image Size"),
47
- gr.inputs.Slider(minimum=0.0, maximum=1.0, default=0.25, step=0.05, label="Confidence Threshold"),
48
- gr.inputs.Slider(minimum=0.0, maximum=1.0, default=0.45, step=0.05, label="IOU Threshold"),
49
  ]
50
 
51
- outputs = gr.outputs.Image(type="filepath", label="Output Image")
52
  title = "Yolov7: Trainable bag-of-freebies sets new state-of-the-art for real-time object detectors"
53
 
54
- examples = [['small-vehicles1.jpeg', 'kadirnar/yolov7-tiny-v0.1', 640, 0.25, 0.45], ['zidane.jpg', 'kadirnar/yolov7-v0.1', 640, 0.25, 0.45]]
55
  demo_app = gr.Interface(
56
  fn=yolov7_inference,
57
  inputs=inputs,
58
- outputs=outputs,
59
  title=title,
60
  examples=examples,
61
- cache_examples=True,
62
- theme='huggingface',
63
  )
64
- demo_app.launch(debug=True, enable_queue=True)
 
 
1
  import gradio as gr
2
  import torch
3
+ import json
4
  import yolov7
5
 
6
 
7
  # Images
8
+ #torch.hub.download_url_to_file('https://github.com/ultralytics/yolov5/raw/master/data/images/zidane.jpg', 'zidane.jpg')
9
+ #torch.hub.download_url_to_file('https://raw.githubusercontent.com/obss/sahi/main/tests/data/small-vehicles1.jpeg', 'small-vehicles1.jpeg')
10
 
11
+ model_path = "kadirnar/yolov7-v0.1" #"kadirnar/yolov7-tiny-v0.1"
12
+ image_size = 640
13
+ conf_threshold = 0.25
14
+ iou_threshold = 0.45
15
+
16
  def yolov7_inference(
17
  image: gr.inputs.Image = None,
18
+ #model_path: gr.inputs.Dropdown = None,
19
+ #image_size: gr.inputs.Slider = 640,
20
+ #conf_threshold: gr.inputs.Slider = 0.25,
21
+ #iou_threshold: gr.inputs.Slider = 0.45,
22
  ):
23
  """
24
  YOLOv7 inference function
 
36
  model.conf = conf_threshold
37
  model.iou = iou_threshold
38
  results = model([image], size=image_size)
39
+ tensor = {
40
+ "tensorflow": [
41
+ ]
42
+ }
43
+
44
+ if results.pred is not None:
45
+ for i, element in enumerate(results.pred[0]):
46
+ object = {}
47
+ #print (element[0])
48
+ itemclass = round(element[5].item())
49
+ object["classe"] = itemclass
50
+ object["nome"] = results.names[itemclass]
51
+ object["score"] = element[4].item()
52
+ object["x"] = element[0].item()
53
+ object["y"] = element[1].item()
54
+ object["w"] = element[2].item()
55
+ object["h"] = element[3].item()
56
+ tensor["tensorflow"].append(object)
57
+
58
+
59
+
60
+ text = json.dumps(tensor)
61
+ #print (text)
62
+ return text #results.render()[0]
63
 
64
 
65
  inputs = [
66
  gr.inputs.Image(type="pil", label="Input Image"),
 
 
 
 
 
 
 
 
 
 
 
67
  ]
68
 
69
+ #outputs = gr.outputs.Image(type="filepath", label="Output Image")
70
  title = "Yolov7: Trainable bag-of-freebies sets new state-of-the-art for real-time object detectors"
71
 
72
+ examples = [['small-vehicles1.jpeg'], ['zidane.jpg']]
73
  demo_app = gr.Interface(
74
  fn=yolov7_inference,
75
  inputs=inputs,
76
+ outputs=["text"],
77
  title=title,
78
  examples=examples,
79
+ #cache_examples=True,
80
+ #theme='huggingface',
81
  )
82
+ #demo_app.launch(debug=True, server_name="192.168.0.153", server_port=8080, enable_queue=True)
83
+ demo_app.launch(debug=True, server_port=8083, enable_queue=True)
small-vehicles1.jpeg ADDED
zidane.jpg ADDED