# Face comparison demo: MTCNN face detection + InceptionResnetV1 (FaceNet)
# embeddings via facenet-pytorch, compared with cosine similarity.
import base64
import io

import torch
import torchvision.transforms as transforms
from facenet_pytorch import MTCNN, InceptionResnetV1
from PIL import Image
# Face detector; keep_all=False so detect() reports only the most likely face.
# device='cpu' keeps the demo runnable without CUDA.
mtcnn = MTCNN(keep_all=False, device='cpu')
# FaceNet embedding model pretrained on VGGFace2; .eval() switches
# dropout/batch-norm to inference mode.
model = InceptionResnetV1(pretrained='vggface2').eval()
# Built once at import time; the original rebuilt this pipeline on every call.
_PREPROCESS = transforms.Compose([
    transforms.Resize((160, 160)),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),
])


def preprocess_face(base64_img):
    """Detect, crop, and preprocess a single face from an image.

    Parameters
    ----------
    base64_img : str | bytes | file-like
        An image source: anything ``PIL.Image.open`` accepts (file path or
        file-like object), or — as the name advertises — a base64-encoded
        image string/bytes, which is decoded as a fallback.

    Returns
    -------
    torch.Tensor | None
        A ``(1, 3, 160, 160)`` float tensor ready for InceptionResnetV1,
        or ``None`` when no face is detected.
    """
    try:
        img = Image.open(base64_img).convert('RGB')
    except (OSError, ValueError, TypeError, AttributeError):
        # The original crashed on actual base64 input despite the parameter
        # name; fall back to decoding base64 image data.
        img = Image.open(io.BytesIO(base64.b64decode(base64_img))).convert('RGB')

    # MTCNN.detect returns (boxes, probs); boxes is None when no face is found.
    boxes, _ = mtcnn.detect(img)
    if boxes is None:
        print("No face detected")
        return None

    # boxes[0] is the top face's (x1, y1, x2, y2); Image.crop expects a tuple.
    img_cropped = img.crop(tuple(boxes[0]))
    return _PREPROCESS(img_cropped).unsqueeze(0)
def compare_faces(base64_img1, base64_img2):
    """Compare the faces found in two images.

    Parameters
    ----------
    base64_img1, base64_img2
        Image sources accepted by ``preprocess_face`` (path, file-like,
        or base64-encoded data).

    Returns
    -------
    float | None
        Cosine similarity between the two face embeddings, in [-1, 1]
        (higher means more similar faces), or ``None`` when face
        detection fails on either image.
    """
    face1 = preprocess_face(base64_img1)
    face2 = preprocess_face(base64_img2)
    if face1 is None or face2 is None:
        print("Face detection failed on one or both images.")
        return None

    # Inference only — no gradients needed.
    with torch.no_grad():
        emb1 = model(face1)
        emb2 = model(face2)

    return torch.nn.functional.cosine_similarity(emb1, emb2).item()