Create app.py
app.py
ADDED
@@ -0,0 +1,84 @@
import os
import cv2
import torch
from model import U2NET
from torch.autograd import Variable
import numpy as np
from huggingface_hub import hf_hub_download
import gradio as gr

# Detect a single face and return the largest detection (or None if no face is found)
def detect_single_face(face_cascade, img):
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    faces = face_cascade.detectMultiScale(gray, 1.1, 4)
    if len(faces) == 0:
        print("Warning: No face detected, running on the whole image!")
        return None
    wh, idx = 0, 0
    for i, (x, y, w, h) in enumerate(faces):
        if w * h > wh:
            idx, wh = i, w * h
    return faces[idx]

# Crop the face region with padding and resize it to 512x512
def crop_face(img, face):
    if face is None:
        return img
    (x, y, w, h) = face
    height, width = img.shape[:2]
    lpad, rpad, tpad, bpad = int(w * 0.4), int(w * 0.4), int(h * 0.6), int(h * 0.2)
    left, right = max(0, x - lpad), min(width, x + w + rpad)
    top, bottom = max(0, y - tpad), min(height, y + h + bpad)
    im_face = img[top:bottom, left:right]
    if len(im_face.shape) == 2:
        im_face = np.repeat(im_face[:, :, np.newaxis], 3, axis=2)
    im_face = np.pad(im_face, ((tpad, bpad), (lpad, rpad), (0, 0)), mode='constant', constant_values=255)
    im_face = cv2.resize(im_face, (512, 512), interpolation=cv2.INTER_AREA)
    return im_face

# Normalize the prediction to [0, 1]
def normPRED(d):
    return (d - torch.min(d)) / (torch.max(d) - torch.min(d))

# Run inference with U2NET and return the portrait map
def inference(net, input_img):
    input_img = input_img / np.max(input_img)
    tmpImg = np.zeros((input_img.shape[0], input_img.shape[1], 3))
    tmpImg[:, :, 0] = (input_img[:, :, 2] - 0.406) / 0.225
    tmpImg[:, :, 1] = (input_img[:, :, 1] - 0.456) / 0.224
    tmpImg[:, :, 2] = (input_img[:, :, 0] - 0.485) / 0.229
    tmpImg = torch.from_numpy(tmpImg.transpose((2, 0, 1))[np.newaxis, :, :, :]).type(torch.FloatTensor)
    tmpImg = Variable(tmpImg.cuda() if torch.cuda.is_available() else tmpImg)
    d1, _, _, _, _, _, _ = net(tmpImg)
    pred = normPRED(1.0 - d1[:, 0, :, :])
    return pred.cpu().data.numpy().squeeze()

# Main function: detect the face, crop it, and return the generated portrait
def process_image(img):
    face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + "haarcascade_frontalface_default.xml")
    face = detect_single_face(face_cascade, img)
    cropped_face = crop_face(img, face)
    result = inference(u2net, cropped_face)
    return (result * 255).astype(np.uint8)

# Download the model weights from the Hugging Face Hub and load them
def load_u2net_model():
    model_path = hf_hub_download(repo_id="Arrcttacsrks/U2net", filename="u2net_portrait.pth", use_auth_token=os.getenv("HF_TOKEN"))
    net = U2NET(3, 1)
    net.load_state_dict(torch.load(model_path, map_location="cuda" if torch.cuda.is_available() else "cpu"))
    if torch.cuda.is_available():
        net.cuda()  # move the model to the GPU so it matches the device of the input tensor in inference()
    net.eval()
    return net

# Initialize the U2NET model
u2net = load_u2net_model()

# Build the Gradio interface
iface = gr.Interface(
    fn=process_image,
    inputs=gr.Image(type="numpy", label="Upload your image"),
    outputs=gr.Image(type="numpy", label="Portrait Result"),
    title="Portrait Generation with U2NET",
    description="Upload an image to generate its portrait."
)

iface.launch()
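For reference, a minimal sketch of how the deployed Space could be called from Python once it is running. This is not part of the commit: the Space id below is a placeholder, and it assumes a recent gradio_client release and that the Space is public; "/predict" is the default endpoint name that gr.Interface registers.

# Client-side usage sketch (assumptions: placeholder Space id, recent gradio_client installed)
from gradio_client import Client, handle_file

client = Client("Arrcttacsrks/U2net-portrait")   # hypothetical Space id, not confirmed by this commit
result = client.predict(
    handle_file("face_photo.jpg"),               # local input image to upload
    api_name="/predict",                         # default endpoint for a single gr.Interface
)
print(result)                                    # path to the generated portrait image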