# countThings / pillmodel.py
import numpy as np
import cv2
from shapely.geometry import Polygon
from ultralytics import YOLO
import torch
# Drawing colors in OpenCV's BGR order: green for capsules, blue for tablets
COLORS = [(98, 231, 4), (228, 161, 0)]
CLASSES = ['capsules', 'tablets']
def get_prediction(image):
    '''
    Takes an image from the telebot, runs the segmentation model,
    removes overlapping detections, counts the remaining classes,
    draws a dot at the center of each detection, and returns the
    labelled image together with a dict of per-class counts.
    '''
    # Load the custom-trained segmentation model
    model = YOLO('best.pt')
    # image = cv2.imread(image_path)
# Get prediction
prediction = model(image)
# Get predicted classes
predicted_classes = prediction[0].boxes.cls
# Get predicted confidence of each class
prediction_confidences = prediction[0].boxes.conf
# Get polygons
polygons = prediction[0].masks.xy
# Convert polygons to int32
polygons = [polygon.astype(np.int32) for polygon in polygons]
    # Build a 0/1 mask marking which polygons to keep: for each
    # overlapping pair, the polygon with the lower confidence is dropped
    indices_mask = remove_overlapping_polygons(polygons, prediction_confidences)
    # Keep only the classes and polygons the mask marks with 1
    fixed_predicted_classes = predicted_classes[np.array(indices_mask, dtype=bool)]
    fixed_polygons = [polygons[i] for i in range(len(indices_mask)) if indices_mask[i] == 1]
# Get counts of classes
unique, counts = torch.unique(fixed_predicted_classes, return_counts=True)
    # Build a dict mapping class name to count
count_dict = {CLASSES[int(key)]: value for key, value in zip(unique.tolist(), counts.tolist())}
# # Draw polygons
# for polygon, predicted_class in zip(fixed_polygons, fixed_predicted_classes):
# cv2.polylines(image, [polygon], True, COLORS[int(predicted_class)])
# Draw dots
    for polygon, predicted_class in zip(fixed_polygons, fixed_predicted_classes):
        # Use the mean of the polygon vertices as an approximate center
        center_coordinates = (int(np.mean(polygon[:, 0])), int(np.mean(polygon[:, 1])))  # (x, y)
        # Draw a small circle at the center in the class color
        cv2.circle(image, center_coordinates, 5, COLORS[int(predicted_class)], 2, cv2.LINE_AA)
# # Show image with predictions on it
# cv2.imshow("Image", image)
# cv2.waitKey(0)
# cv2.destroyAllWindows()
# from google.colab.patches import cv2_imshow
# cv2_imshow(image)
return image, count_dict
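# A minimal sketch (not called by the bot) of how the overlap filtering below
# behaves on synthetic data: two squares that overlap by more than 50% plus
# one square far away. The coordinates and confidences are made up purely
# for illustration and are not part of the model's output.
def _overlap_mask_demo():
    demo_polygons = [
        np.array([[0, 0], [10, 0], [10, 10], [0, 10]], dtype=np.int32),      # square A
        np.array([[1, 1], [11, 1], [11, 11], [1, 11]], dtype=np.int32),      # square B, covers ~81% of A
        np.array([[50, 50], [60, 50], [60, 60], [50, 60]], dtype=np.int32),  # isolated square, no overlap
    ]
    demo_confidences = torch.tensor([0.9, 0.4, 0.8])
    # A and B overlap, and B has the lower confidence, so the expected mask is [1, 0, 1]
    return remove_overlapping_polygons(demo_polygons, demo_confidences)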
def remove_overlapping_polygons(polygons, prediction_confidences):
    '''
    Takes the predicted polygons, finds pairs whose intersection covers
    more than 50% of the first polygon's area, and returns a 0/1 mask
    in which, for each such pair, the polygon with the lower confidence
    score is marked 0 (remove) and all other polygons are marked 1 (keep).
    '''
# Convert the NumPy arrays to Shapely polygons
shapely_polygons = [Polygon(polygon) for polygon in polygons]
# Create an empty list with overlapping pairs
overlapping_pairs = []
# Check for overlaps between all pairs of polygons
for i in range(len(shapely_polygons)):
for j in range(i+1, len(shapely_polygons)):
if shapely_polygons[i].intersects(shapely_polygons[j]):
# Calculate the percentage of overlap
intersection_area = shapely_polygons[i].intersection(shapely_polygons[j]).area
overlap_percentage = intersection_area / shapely_polygons[i].area
# Add overlapping polygons indexes to list
if overlap_percentage > 0.5:
overlapping_pairs.append((i, j))
    # Mask of remaining indices (1 = keep, 0 = remove)
    indices_mask = [1] * len(shapely_polygons)
    # For every overlapping pair, drop the lower-confidence polygon
    for first_over_polygon_ind, second_over_polygon_ind in overlapping_pairs:
        # Pick the index with the smaller prediction confidence
        first_has_bigger_conf = bool(prediction_confidences[first_over_polygon_ind] >= prediction_confidences[second_over_polygon_ind])
        index_small_conf = [first_over_polygon_ind, second_over_polygon_ind][first_has_bigger_conf]
        # Mark the lower-confidence polygon for removal
        indices_mask[index_small_conf] = 0
return indices_mask
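# A minimal usage sketch, assuming the custom weights 'best.pt' are in the
# working directory and that a local test image named 'pills.jpg' exists
# (the file names here are placeholders, not part of the bot). In the bot
# itself the image arrives from telebot rather than cv2.imread.
if __name__ == "__main__":
    test_image = cv2.imread('pills.jpg')
    labelled_image, counts = get_prediction(test_image)
    print(counts)  # e.g. {'capsules': 3, 'tablets': 5}
    cv2.imwrite('pills_labelled.jpg', labelled_image)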