Update tasks/image.py
tasks/image.py   +43 -44   CHANGED
@@ -2,10 +2,11 @@ from fastapi import APIRouter
 from datetime import datetime
 from datasets import load_dataset
 import numpy as np
-from sklearn.metrics import accuracy_score
+from sklearn.metrics import accuracy_score
 import random
 import os

+from ultralytics import YOLO  # Import YOLO
 from .utils.evaluation import ImageEvaluationRequest
 from .utils.emissions import tracker, clean_emissions_data, get_space_info

@@ -14,9 +15,11 @@ load_dotenv()

 router = APIRouter()

-DESCRIPTION = "
+DESCRIPTION = "YOLO Smoke Detection"
 ROUTE = "/image"

+yolo_model = YOLO("best.pt")
+
 def parse_boxes(annotation_string):
     """Parse multiple boxes from a single annotation string.
     Each box has 5 values: class_id, x_center, y_center, width, height"""
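The body of parse_boxes is unchanged by this commit and not shown in the hunk above. Going only by its docstring (five values per box: class_id, x_center, y_center, width, height), a minimal sketch of such a parser could look like the following; the separator handling and return format in the actual file are assumptions here.

def parse_boxes(annotation_string):
    """Parse multiple boxes from a single annotation string.
    Each box has 5 values: class_id, x_center, y_center, width, height"""
    boxes = []
    values = annotation_string.split()
    # Assumption: values come in flat groups of five; class_id is dropped,
    # keeping only the normalized [x_center, y_center, width, height] box.
    for i in range(0, len(values) - 4, 5):
        _, x_center, y_center, width, height = values[i:i + 5]
        boxes.append([float(x_center), float(y_center), float(width), float(height)])
    return boxes

Dropping class_id leaves each true box in the same normalized xywh format that the prediction side stores via results[0].boxes.xywhn.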
@@ -89,7 +92,7 @@ async def evaluate_image(request: ImageEvaluationRequest):

     # Split dataset
     train_test = dataset["train"].train_test_split(test_size=request.test_size, seed=request.test_seed)
-    test_dataset = train_test["test"]
+    test_dataset = dataset["val"]  # train_test["test"]

     # Start tracking emissions
     tracker.start()
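With this change the endpoint evaluates on the dataset's predefined "val" split rather than on the seeded split carved out of "train"; the commented-out expression keeps the original behaviour within reach. A minimal sketch of both options with the datasets API (the dataset id and split sizes below are placeholders, not taken from this repo):

from datasets import load_dataset

dataset = load_dataset("org/smoke-dataset")  # placeholder dataset id, for illustration only

# Option kept in the diff: evaluate on the dataset's predefined validation split
test_dataset = dataset["val"]

# Original behaviour (commented out in the hunk above): carve a seeded test split out of "train"
train_test = dataset["train"].train_test_split(test_size=0.2, seed=42)
# test_dataset = train_test["test"]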
@@ -99,50 +102,51 @@ async def evaluate_image(request: ImageEvaluationRequest):
     # YOUR MODEL INFERENCE CODE HERE
     # Update the code below to replace the random baseline with your model inference
     #--------------------------------------------------------------------------------------------
-    from ultralytics import YOLO
-
-    # Load the trained YOLOv8 model
-    model = YOLO("./../best.pt")
-
     predictions = []
     true_labels = []
-    pred_boxes = []
-    true_boxes_list = []
-
-
-    # Inference loop
+    pred_boxes = []
+    true_boxes_list = []
+
     for example in test_dataset:
-        #
+        # Extract image and annotations
+        image = example["image"]
         annotation = example.get("annotations", "").strip()
-
-        true_labels.append(int(has_smoke))
-
-        # Load the image
-        image_path = example.get("image_path")  # Assuming access to image file path
+

-
-
-
-        # Classification: Check if the model predicted any boxes (smoke presence)
-        pred_has_smoke = len(results[0].boxes) > 0
-        predictions.append(int(pred_has_smoke))
+        has_smoke = len(annotation) > 0
+        true_labels.append(1 if has_smoke else 0)

-
+
         if has_smoke:
-            # Parse all true boxes from the annotation
             image_true_boxes = parse_boxes(annotation)
-
-
-
-
-            for box in results[0].boxes:
-                x_center, y_center, width, height = box.xywh[0].tolist()
-                image_pred_boxes.append([x_center, y_center, width, height])
-
-            pred_boxes.append(image_pred_boxes)
+            if image_true_boxes:
+                true_boxes_list.append(image_true_boxes)
+            else:
+                true_boxes_list.append([])
         else:
-            true_boxes_list.append([])
-
+            true_boxes_list.append([])
+
+        results = yolo_model.predict(image, verbose=False)  # INFERENCE - prediction
+
+        if len(results[0].boxes):
+            pred_box = results[0].boxes.xywhn[0].cpu().numpy().tolist()
+            predictions.append(1)
+            pred_boxes.append(pred_box)
+        else:
+            predictions.append(0)
+            pred_boxes.append([])
+
+    filtered_true_boxes_list = []
+    filtered_pred_boxes = []
+
+    for true_boxes, pred_boxes_entry in zip(true_boxes_list, pred_boxes):  # keep only pairs where both a true annotation and a prediction exist
+        if true_boxes and pred_boxes_entry:
+            filtered_true_boxes_list.append(true_boxes)
+            filtered_pred_boxes.append(pred_boxes_entry)
+
+
+    true_boxes_list = filtered_true_boxes_list
+    pred_boxes = filtered_pred_boxes

     #--------------------------------------------------------------------------------------------
     # YOUR MODEL INFERENCE STOPS HERE
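Both the classification decision and the single stored box come out of the Ultralytics results object: results[0].boxes is empty when nothing is detected, and boxes.xywhn holds normalized [x_center, y_center, width, height] values, matching the annotation format. A small standalone sketch (the weights path comes from the diff; the input image is a placeholder):

from ultralytics import YOLO

model = YOLO("best.pt")
results = model.predict("example.jpg", verbose=False)  # "example.jpg" is a placeholder input

boxes = results[0].boxes
if len(boxes):
    # First detection only, as in the loop above; normalized xywh matches the annotation format
    x_center, y_center, width, height = boxes.xywhn[0].cpu().numpy().tolist()
    print("smoke detected:", [x_center, y_center, width, height])
else:
    print("no smoke detected")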
@@ -151,10 +155,8 @@ async def evaluate_image(request: ImageEvaluationRequest):
     # Stop tracking emissions
     emissions_data = tracker.stop_task()

-    # Calculate classification
+    # Calculate classification accuracy
     classification_accuracy = accuracy_score(true_labels, predictions)
-    classification_precision = precision_score(true_labels, predictions)
-    classification_recall = recall_score(true_labels, predictions)

     # Calculate mean IoU for object detection (only for images with smoke)
     # For each image, we compute the max IoU between the predicted box and all true boxes
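The mean-IoU computation itself lies outside the changed hunks, so it is not part of this diff. Following the comment above (per image, take the max IoU between the predicted box and all true boxes, then average), a self-contained sketch for normalized [x_center, y_center, width, height] boxes might look like this; the actual helper in the repo may differ:

def box_iou_xywh(box_a, box_b):
    # Convert center format to corner format
    ax1, ay1 = box_a[0] - box_a[2] / 2, box_a[1] - box_a[3] / 2
    ax2, ay2 = box_a[0] + box_a[2] / 2, box_a[1] + box_a[3] / 2
    bx1, by1 = box_b[0] - box_b[2] / 2, box_b[1] - box_b[3] / 2
    bx2, by2 = box_b[0] + box_b[2] / 2, box_b[1] + box_b[3] / 2
    # Intersection rectangle and union area
    inter_w = max(0.0, min(ax2, bx2) - max(ax1, bx1))
    inter_h = max(0.0, min(ay2, by2) - max(ay1, by1))
    inter = inter_w * inter_h
    union = box_a[2] * box_a[3] + box_b[2] * box_b[3] - inter
    return inter / union if union > 0 else 0.0

# pred_boxes holds one predicted box per kept image, true_boxes_list a list of true boxes per image
ious = [max(box_iou_xywh(pred, tb) for tb in true_boxes)
        for pred, true_boxes in zip(pred_boxes, true_boxes_list)]
mean_iou = sum(ious) / len(ious) if ious else 0.0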
@@ -172,8 +174,6 @@ async def evaluate_image(request: ImageEvaluationRequest):
         "submission_timestamp": datetime.now().isoformat(),
         "model_description": DESCRIPTION,
         "classification_accuracy": float(classification_accuracy),
-        "classification_precision": float(classification_precision),
-        "classification_recall": float(classification_recall),
         "mean_iou": mean_iou,
         "energy_consumed_wh": emissions_data.energy_consumed * 1000,
         "emissions_gco2eq": emissions_data.emissions * 1000,
@@ -185,5 +185,4 @@ async def evaluate_image(request: ImageEvaluationRequest):
            "test_seed": request.test_seed
        }
    }
-
    return results
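For orientation, the response payload assembled at the end of evaluate_image (only partly visible in the last two hunks) has roughly the following shape; the placeholder values are purely illustrative, and the nested block that ends with "test_seed" sits outside the hunks shown:

from datetime import datetime

# Placeholders standing in for the metrics computed earlier in evaluate_image
classification_accuracy = 0.0   # placeholder, not a real result
mean_iou = 0.0                  # placeholder, not a real result
energy_consumed_kwh = 0.0       # placeholder for emissions_data.energy_consumed
emissions_kg = 0.0              # placeholder for emissions_data.emissions

results = {
    "submission_timestamp": datetime.now().isoformat(),
    "model_description": "YOLO Smoke Detection",            # DESCRIPTION
    "classification_accuracy": float(classification_accuracy),
    "mean_iou": mean_iou,
    "energy_consumed_wh": energy_consumed_kwh * 1000,        # emissions_data.energy_consumed * 1000
    "emissions_gco2eq": emissions_kg * 1000,                 # emissions_data.emissions * 1000
    # ...plus a nested dict (not shown in these hunks) that ends with
    # "test_seed": request.test_seed
}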