atalaydenknalbant commited on
Commit
79d95c0
·
verified ·
1 Parent(s): 96e5275

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +142 -0
app.py ADDED
@@ -0,0 +1,142 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import spaces
2
+ import supervision as sv
3
+ import PIL.Image as Image
4
+ from ultralytics import YOLO
5
+ import gradio as gr
6
+ import torch
7
+
8
+
9
+
# Ultralytics YOLO11 checkpoint names offered in the model dropdown,
# ordered smallest (nano) to largest (xlarge).
model_filenames = [f"yolo11{size}.pt" for size in ("n", "s", "m", "l", "x")]
17
+
18
+
19
+
# Shared annotator instance used to draw detection boxes on images.
box_annotator = sv.BoxAnnotator()

# Map class indices 0-25 to the uppercase letters 'A'-'Z'.
# NOTE(review): standard yolo11 COCO weights have 80 classes — presumably this
# mapping targets a 26-class (alphabet) model; verify against the weights used.
category_dict = {index: chr(ord("A") + index) for index in range(26)}
24
+
25
+
26
+
@spaces.GPU
def yolo_inference(image, model_id, conf_threshold, iou_threshold, max_detection):
    """Run YOLO object detection on an image and return the annotated result.

    Args:
        image: Input image (PIL.Image) from the Gradio image component.
        model_id: Weights filename, e.g. "yolo11n.pt"; ultralytics downloads
            it on first use if not present locally.
        conf_threshold: Minimum confidence for a detection to be kept.
        iou_threshold: IoU threshold used for non-max suppression.
        max_detection: Upper bound on the number of detections returned.

    Returns:
        The input image with detection boxes and labels drawn on it.
    """
    model = YOLO(model_id)
    results = model(
        source=image,
        imgsz=640,
        iou=iou_threshold,
        conf=conf_threshold,
        verbose=False,
        # Gradio sliders deliver floats; ultralytics expects an int for max_det.
        max_det=int(max_detection),
    )[0]
    detections = sv.Detections.from_ultralytics(results)

    # category_dict only covers class ids 0-25, but the stock yolo11 COCO
    # weights emit ids up to 79 — fall back to the model's own class names
    # (and finally the raw id) instead of raising KeyError.
    labels = [
        f"{category_dict.get(class_id, results.names.get(class_id, str(class_id)))} {confidence:.2f}"
        for class_id, confidence in zip(detections.class_id, detections.confidence)
    ]
    annotated_image = box_annotator.annotate(image, detections=detections, labels=labels)

    return annotated_image
43
+
def app():
    """Build the detection UI: input image, model/threshold controls, output image, and examples."""
    with gr.Blocks():
        with gr.Row():
            with gr.Column():
                image = gr.Image(type="pil", label="Image", interactive=True)

                model_id = gr.Dropdown(
                    label="Model",
                    choices=model_filenames,
                    value=model_filenames[0] if model_filenames else "",
                )
                conf_threshold = gr.Slider(
                    label="Confidence Threshold",
                    minimum=0.1,
                    maximum=1.0,
                    step=0.1,
                    value=0.25,
                )
                iou_threshold = gr.Slider(
                    label="IoU Threshold",
                    minimum=0.1,
                    maximum=1.0,
                    step=0.1,
                    value=0.45,
                )

                max_detection = gr.Slider(
                    label="Max Detection",
                    minimum=1,
                    # Explicit upper bound (matches ultralytics' default max_det);
                    # without it Gradio silently caps the slider at 100.
                    maximum=300,
                    step=1,
                    value=1,
                )
                yolov_infer = gr.Button(value="Detect Objects")

            with gr.Column():
                output_image = gr.Image(type="pil", label="Annotated Image", interactive=False)

        yolov_infer.click(
            fn=yolo_inference,
            inputs=[
                image,
                model_id,
                conf_threshold,
                iou_threshold,
                max_detection,
            ],
            outputs=[output_image],
        )

        # Example model names must match model_filenames exactly: the original
        # "yolov11*.pt" spelling referred to weights that do not exist, so
        # clicking an example failed to load a model.
        gr.Examples(
            examples=[
                [
                    "zidane.jpg",
                    "yolo11x.pt",
                    0.25,
                    0.45,
                    1,
                ],
                [
                    "bus.jpg",
                    "yolo11s.pt",
                    0.25,
                    0.45,
                    1,
                ],
                [
                    "yolo_vision.jpg",
                    "yolo11m.pt",
                    0.25,
                    0.45,
                    1,
                ],
            ],
            fn=yolo_inference,
            inputs=[
                image,
                model_id,
                conf_threshold,
                iou_threshold,
                max_detection,
            ],
            outputs=[output_image],
            cache_examples="lazy",
        )
129
+
# Assemble the top-level page: a centered title followed by the detection UI
# built by app(), then start the Gradio server.
with gr.Blocks() as gradio_app:
    gr.HTML(
        """
    <h1 style='text-align: center'>
    Yolov11
    </h1>
    """)
    with gr.Row():
        with gr.Column():
            app()

gradio_app.launch()