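"""Gradio app for Siamese-network face verification.

The "Setup Identity" tab collects reference (verification) images; the
"Identify" tab captures an input image and compares it against the stored
references using a pre-trained Siamese model (siamesemodelv2.h5).
"""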
import cv2
import os
import random
import numpy as np
from matplotlib import pyplot as plt
from tensorflow.keras.layers import Layer
import tensorflow as tf
import gradio as gr
import uuid

def preprocess(file_path):
    # Read in the image from the file path
    byte_img = tf.io.read_file(file_path)
    # Decode the JPEG bytes into an image tensor
    img = tf.io.decode_jpeg(byte_img)
    # Resize the image to 100x100x3
    img = tf.image.resize(img, (100, 100))
    # Scale pixel values to be between 0 and 1
    img = img / 255.0
    return img
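
# For an RGB JPEG, preprocess returns a float32 tensor of shape (100, 100, 3)
# with pixel values scaled into [0, 1].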

# Siamese L1 distance layer
class L1Dist(Layer):
    # Init method - pass kwargs through so Keras can deserialize the layer
    def __init__(self, **kwargs):
        super().__init__(**kwargs)

    # Similarity calculation: element-wise absolute difference between embeddings
    def call(self, input_embedding, validation_embedding):
        return tf.math.abs(input_embedding - validation_embedding)
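
# Quick sanity check (hypothetical values, not part of the app flow):
#   L1Dist()(tf.constant([0.2, 0.9]), tf.constant([0.5, 0.4]))
# returns approximately [0.3, 0.5] -- the element-wise absolute difference.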

# Reload the trained model, registering the custom layer and loss
siamese_model = tf.keras.models.load_model(
    'siamesemodelv2.h5',
    custom_objects={'L1Dist': L1Dist, 'BinaryCrossentropy': tf.losses.BinaryCrossentropy}
)

def verify(model, detection_threshold, verification_threshold):
    # Build results array
    results = []

    # Collect the stored verification (reference) images
    ver_images = []
    for file in os.listdir(os.path.join('application_data', 'verification_images')):
        if file.endswith('.jpg'):
            ver_images.append(file)
    ver_images_len = len(ver_images)

    # Preprocess the input image once; it is compared against every reference image
    input_img = preprocess(os.path.join('application_data', 'input_image', 'input_image.jpg'))
    for image in ver_images:
        validation_img = preprocess(os.path.join('application_data', 'verification_images', image))
        # Make predictions
        result = model.predict(list(np.expand_dims([input_img, validation_img], axis=1)))
        results.append(result)

    # Detection threshold: metric above which a prediction is considered positive
    detection = np.sum(np.array(results) > detection_threshold)
    print('Detection -- ', detection)

    # Verification threshold: proportion of positive predictions over the total number of verification images
    verification = detection / len(ver_images)
    print('Verification -- ', verification)
    verified = verification > verification_threshold

    return detection, verification, verified, ver_images_len
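
# Worked example (hypothetical numbers): with 50 verification images,
# detection_threshold=0.8 and verification_threshold=0.7, the input is
# verified only if at least 36 of the 50 comparisons score above 0.8
# (verification = 36/50 = 0.72 > 0.7).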

def take_ver_images(image):
    # Gradio supplies images as RGB arrays; convert to BGR before saving with OpenCV
    cv2.imwrite(os.path.join('application_data', 'verification_images', '{}.jpg'.format(uuid.uuid1())),
                cv2.cvtColor(image, cv2.COLOR_RGB2BGR))
    return '1 Image Collected!'

def verify_identity(image):
    # Save the captured frame (converted from RGB to BGR) as the input image
    cv2.imwrite(os.path.join('application_data', 'input_image', 'input_image.jpg'),
                cv2.cvtColor(image, cv2.COLOR_RGB2BGR))
    detection, verification, verified, ver_images_len = verify(siamese_model, 0.8, 0.7)
    return ('Detected {} out of {} samples'.format(detection, ver_images_len),
            float(verification),
            'Verified' if verified else 'Not Verified')
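
# Example return value (hypothetical): ('Detected 42 out of 50 samples', 0.84, 'Verified')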

# Gradio UI (gr.Image / gr.Label replace the gr.inputs / gr.outputs aliases removed
# in newer Gradio releases; preprocess already resizes images to 100x100, so no
# UI-level resizing is needed)
with gr.Blocks() as demo:
    with gr.Tab("Setup Identity"):
        with gr.Row():
            input_img = gr.Image()
            output_text = gr.Label()
        with gr.Row():
            submit_btn = gr.Button("Add Image")
        submit_btn.click(take_ver_images, inputs=input_img, outputs=output_text)

    with gr.Tab("Identify"):
        with gr.Row():
            input_img1 = gr.Image()
            output_text3 = gr.Label(label='Verification Result')
        with gr.Row():
            output_text1 = gr.Label(label='Detections')
            output_text2 = gr.Label(label='Confidence')
        with gr.Row():
            submit_btn1 = gr.Button("Verify")
        submit_btn1.click(verify_identity, inputs=input_img1, outputs=[output_text1, output_text2, output_text3])

demo.launch(debug=True)