Add KDE func
web_app.py  +46 -40  CHANGED
@@ -25,7 +25,7 @@ DETECTION_MODEL_x = os.path.join(DIR_NAME, 'models', 'YOLOv8-X_CNO_Detection.pt'
 # cno_df = pd.DataFrame()
 
 
-def predict_image(name, model, img, conf_threshold, iou_threshold):
+def predict_image(name, img_h, img_w, model, img, conf_threshold, iou_threshold):
     """Predicts and plots labeled objects in an image using YOLOv8 model with adjustable confidence and IOU thresholds."""
     gr.Info("Starting process")
     # gr.Warning("Name is empty")
@@ -59,29 +59,31 @@ def predict_image(name, model, img, conf_threshold, iou_threshold):
     cno_image = []
     kde_image = []
     file_name = []
+    ecti_score = []
 
-    total_layer_area = []
-    total_layer_cno = []
-    total_layer_density = []
-    avg_area_col = []
-    total_area_col = []
+    # total_layer_area = []
+    # total_layer_cno = []
+    # total_layer_density = []
+    # avg_area_col = []
+    # total_area_col = []
 
     for idx, result in enumerate(results):
         cno = len(result.boxes)
 
         file_label = img[idx].split(os.sep)[-1]
-        single_layer_area = []
-        single_layer_cno = []
+        # single_layer_area = []
+        # single_layer_cno = []
         single_layer_density = []
         total_area = 0
         if cno < 5:
-            avg_area_col.append(np.nan)
-            total_area_col.append(np.nan)
-            nan_arr = np.empty([25])
-            nan_arr[:] = np.nan
-
-
-
+            # avg_area_col.append(np.nan)
+            # total_area_col.append(np.nan)
+            # nan_arr = np.empty([25])
+            # nan_arr[:] = np.nan
+            ecti_score.append(np.nan)
+            # total_layer_area.append(nan_arr)
+            # total_layer_cno.append(nan_arr)
+            # total_layer_density.append(nan_arr)
         else:
             cno_coor = np.empty([cno, 2], dtype=int)
 
@@ -125,8 +127,7 @@ def predict_image(name, model, img, conf_threshold, iou_threshold):
                                              (tf - ti)))
             kde.bandwidth = bw
             _ = kde.fit(cno_coor)
-
-            print("deb", result.orig_img.shape[0])
+
             xgrid = np.arange(0, result.orig_img.shape[1], 1)
             ygrid = np.arange(0, result.orig_img.shape[0], 1)
             xv, yv = np.meshgrid(xgrid, ygrid)
@@ -154,15 +155,18 @@ def predict_image(name, model, img, conf_threshold, iou_threshold):
                 if layer_area == 0:
                     density = np.round(0.0, 4)
                 else:
-                    density = np.round((ecno / layer_area) * result.orig_img.shape[0] * result.orig_img.shape[1] /
+                    density = np.round((ecno / layer_area) * result.orig_img.shape[0] * result.orig_img.shape[1] / (img_h * img_w), 4)
                 print("Level {}: Area={}, CNO={}, density={}".format(j, layer_area, ecno, density))
-                single_layer_area.append(layer_area)
-                single_layer_cno.append(ecno)
+                # single_layer_area.append(layer_area)
+                # single_layer_cno.append(ecno)
                 single_layer_density.append(density)
 
-            total_layer_area.append(single_layer_area)
-            total_layer_cno.append(single_layer_cno)
-            total_layer_density.append(single_layer_density)
+            # total_layer_area.append(single_layer_area)
+            # total_layer_cno.append(single_layer_cno)
+            # total_layer_density.append(single_layer_density)
+            # print(sum_range(single_layer_density, 10, 14))
+            # print("deb ", single_layer_density)
+            ecti_score.append(np.round(sum_range(single_layer_density, 10, 14) / 5.0, 2))
 
 
             # Plot CNO Distribution
@@ -174,30 +178,19 @@ def predict_image(name, model, img, conf_threshold, iou_threshold):
             plt.xlim(0, gdim[1] - 1)
             plt.ylim(gdim[0] - 1, 0)
             plt.plot()
-            # plt.show()
 
-            # plt.savefig("test.png", format='png', bbox_inches='tight', pad_inches=0)
-            # plt.figure()
-            # plt.plot([1, 2])
             img_buf = io.BytesIO()
             plt.savefig(img_buf, format='png', bbox_inches='tight', pad_inches=0)
             kde_im = Image.open(img_buf)
-            # kde_im.show()
-
-            # kde_img = Image.frombytes('RGB', fig.canvas.get_width_height(), fig.canvas.tostring_rgb())
-            # kde_image.append([imgplot, file_label])
             kde_image.append([kde_im, file_label])
-            #plt.savefig(os.path.join(kde_dir, '{}_{}_{}_KDE.png'.format(file_list[idx], model_type, conf)),
-            #            bbox_inches='tight', pad_inches=0)
-
-            #img_buf.close()
-            ### ============================
 
     data = {
         "Files": file_name,
         "CNO Count": cno_count,
+        "ECTI Score": ecti_score
     }
 
+
     # load data into a DataFrame object:
     cno_df = pd.DataFrame(data)
 
@@ -225,8 +218,11 @@ def highlight_df(df, data: gr.SelectData):
     # print("selected", data.value["caption"])
     return data.value["caption"], styler
 
+
 def reset():
     name_textbox = ""
+    img_h = 20
+    img_w = 20
     gender_radio = None
     age_slider = 0
     fitzpatrick = 1
@@ -240,16 +236,26 @@ def reset():
     cno_gallery = []
     test_label = ""
 
-    return name_textbox, gender_radio, age_slider, fitzpatrick, history, model_radio, input_files, conf_slider, \
+    return name_textbox, img_h, img_w, gender_radio, age_slider, fitzpatrick, history, model_radio, input_files, conf_slider, \
         iou_slider, analysis_results, afm_gallery, cno_gallery, test_label
 
+def sum_range(l,a,b):
+    s = 0
+    for i in range(a,b+1):
+        s += l[i]
+    return s
+
 
 with gr.Blocks(title="AFM AI Analysis", theme="default") as app:
     with gr.Row():
         with gr.Column():
             # gr.Markdown("User Information")
             with gr.Accordion("User Information", open=True):
-
+                with gr.Row():
+                    name_textbox = gr.Textbox(label="Sample")
+                with gr.Row():
+                    img_h = gr.Number(label="Image Height (μm)", value=20, interactive=True)
+                    img_w = gr.Number(label="Image Width (μm)", value=20, interactive=True)
                 with gr.Row():
                     gender_radio = gr.Radio(["Male", "Female"], label="Gender", interactive=True, scale=1)
                     age_slider = gr.Slider(minimum=0, maximum=100, step=1, value=0, label="Age", interactive=True, scale=2)
@@ -280,11 +286,11 @@ with gr.Blocks(title="AFM AI Analysis", theme="default") as app:
 
     analyze_btn.click(
        fn=predict_image,
-        inputs=[name_textbox, model_radio, input_files, conf_slider, iou_slider],
+        inputs=[name_textbox, img_h, img_w, model_radio, input_files, conf_slider, iou_slider],
        outputs=[analysis_results, afm_gallery, cno_gallery, kde_gallery]
    )
 
-    clear_btn.click(reset, outputs=[name_textbox, gender_radio, age_slider, fitzpatrick, history, model_radio,
+    clear_btn.click(reset, outputs=[name_textbox, img_h, img_w, gender_radio, age_slider, fitzpatrick, history, model_radio,
                                     input_files, conf_slider, iou_slider, analysis_results, afm_gallery, cno_gallery,
                                     test_label])
 
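In short, this commit adds image height/width inputs (gr.Number, in μm) that are passed through to predict_image, rescales each KDE layer density from pixel units to the physical scan area, and reports a per-image "ECTI Score" equal to the mean density over KDE levels 10-14 via the new sum_range helper. A minimal sketch of that calculation in isolation follows; the pixel dimensions, counts, and per-level densities are illustrative placeholders, not values from the app.

# Sketch of the ECTI computation added in this commit (placeholder inputs).
import numpy as np

def sum_range(l, a, b):
    """Inclusive sum of l[a] .. l[b], mirroring the helper added in web_app.py."""
    s = 0
    for i in range(a, b + 1):
        s += l[i]
    return s

# Assumed example inputs: a 20 μm x 20 μm scan rendered as a 512 x 512 pixel
# image, and 25 per-level KDE densities (one value per contour level).
img_h, img_w = 20, 20                # physical image size entered in the UI
pix_h, pix_w = 512, 512              # result.orig_img.shape[:2] in the app
single_layer_density = list(np.linspace(0.0, 2.4, 25).round(4))

# Per-level density: CNO count over layer area, rescaled to the physical
# field of view; dividing by (img_h * img_w) is the new part of this commit.
ecno, layer_area = 12, 80000.0       # illustrative count and pixel area
density = np.round((ecno / layer_area) * pix_h * pix_w / (img_h * img_w), 4)

# ECTI score: mean density over levels 10-14 (inclusive), rounded to 2 d.p.
ecti = np.round(sum_range(single_layer_density, 10, 14) / 5.0, 2)
print(density, ecti)

Since sum_range is inclusive of both endpoints, sum_range(l, 10, 14) is equivalent to sum(l[10:15]), i.e. five levels, which is why the score divides by 5.0.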