Spaces: Running on Zero

[email protected] committed · Commit 48798aa · 1 Parent(s): e356eed

update
Changed files:
- README.md (+4 -4)
- assets/coords/sample1.npz (+2 -2)
- assets/coords/sample2.npz (+2 -2)
- assets/coords/sample3.npz (+2 -2)
- assets/coords/sample4.npz (+2 -2)
- assets/coords/sample5.npz (+0 -3)
- assets/videos/sample2.mp4 (+0 -0)
- assets/videos/sample3.mp4 (+0 -0)
- assets/videos/sample4.mp4 (+0 -0)
- assets/videos/sample5.mp4 (+0 -0)
- attributtes_utils.py (+1 -1)
- inference_util.py (+0 -2)
- preprocess_videos.py (+2 -3)
README.md CHANGED
@@ -1,8 +1,8 @@
 ---
-title:
-emoji:
-colorFrom:
-colorTo:
+title: Free-View Expressive Talking Head Video Editing
+emoji: 🤖
+colorFrom: red
+colorTo: yellow
 sdk: gradio
 sdk_version: 3.41.0
 app_file: app.py
assets/coords/sample1.npz CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:5b200f395b09505d61f3efb67feaacbbd5bb358e75b476c4da083e4a7cef58af
+size 525
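The .npz coordinate files are tracked with Git LFS, so the diffs show pointer files (version, oid, size) rather than the arrays themselves; the old oid and size values were cut off in this page capture and are left as-is. As a minimal sketch, a pointer file of this standard three-key form can be parsed like so (the path is the one from this repo; a file already materialized by LFS would be binary, not a pointer):

def parse_lfs_pointer(path):
    # Each line of a Git LFS pointer is "<key> <value>".
    fields = {}
    with open(path, "r", encoding="utf-8") as fh:
        for line in fh:
            key, _, value = line.strip().partition(" ")
            fields[key] = value
    return fields

# e.g. {'version': 'https://git-lfs.github.com/spec/v1',
#       'oid': 'sha256:5b200f39...', 'size': '525'}
print(parse_lfs_pointer("assets/coords/sample1.npz"))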
assets/coords/sample2.npz CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:3ac70dd3972f406d9e8195283d11395a7b1e2528bdbdec4a3420eeac919489c9
+size 909
assets/coords/sample3.npz CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:246e4910d5ae9937f2d692beb6d6267dcb2f09bf7b7e0bd75d373a167289cf08
+size 598
assets/coords/sample4.npz CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:497b14d4185a447327fac69602b66997dc791ff333ead12680c36e3e27d20195
+size 656
assets/coords/sample5.npz DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:1ee15a8dd3b47bc036a4502ccd60a0a5c29262a5581593e8f97c11e18b389e67
-size 974
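Once the LFS objects are pulled, each coords file is an ordinary NumPy .npz archive. The key names stored inside are not visible in this diff, so a safe way to inspect one is to enumerate whatever arrays it contains rather than assuming a key:

import numpy as np

# Hypothetical inspection: array names inside the coords archives are not
# shown in the diff, so list them instead of hard-coding one.
data = np.load("assets/coords/sample1.npz")
for key in data.files:
    print(key, data[key].shape, data[key].dtype)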
assets/videos/sample2.mp4 CHANGED
Binary files a/assets/videos/sample2.mp4 and b/assets/videos/sample2.mp4 differ

assets/videos/sample3.mp4 CHANGED
Binary files a/assets/videos/sample3.mp4 and b/assets/videos/sample3.mp4 differ

assets/videos/sample4.mp4 CHANGED
Binary files a/assets/videos/sample4.mp4 and b/assets/videos/sample4.mp4 differ

assets/videos/sample5.mp4 DELETED
Binary file (698 kB)
attributtes_utils.py CHANGED
@@ -40,7 +40,7 @@ def input_emotion(emotion_select="neutral"):
 def input_blink(blink_select="yes"):
     if blink_select == "yes":
         blink = [[1.0], [1.0], [1.0], [1.0], [1.0], [1.0], [1.0], [0.8], [0.6], [0.0], [0.0], [1.0]]
-        blink = blink + blink
+        blink = blink + blink + blink
     else:
         blink = [[1.0] for _ in range(2)]
     return blink
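The 12-value sequence reads like one blink cycle of eyelid-openness keyframes (fully open at 1.0, closing through 0.8 and 0.6 to 0.0, then reopening), though the diff itself doesn't label it. Under that reading, the commit schedules three blink cycles instead of two:

# One (presumed) blink cycle of eyelid-openness keyframes, copied from the diff.
cycle = [[1.0], [1.0], [1.0], [1.0], [1.0], [1.0], [1.0], [0.8], [0.6], [0.0], [0.0], [1.0]]

# Before the commit: cycle + cycle (two blinks); after: three.
# List repetition is an equivalent way to write the concatenation.
blink = cycle * 3
assert blink == cycle + cycle + cycle
print(len(blink))  # 36 keyframes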
inference_util.py CHANGED
@@ -293,7 +293,6 @@ def infenrece(model, face_path, audio_path, pose, emotion, blink, preview=False)
         y1, y2, x1, x2 = int(y1), int(y2), int(x1), int(x2)
         y = round(y2 - y1)
         x = round(x2 - x1)
-        # print(x, y, p.shape)
         p = cv2.resize(p.astype(np.uint8), (x, y))
 
         try:
@@ -301,7 +300,6 @@ def infenrece(model, face_path, audio_path, pose, emotion, blink, preview=False)
         except Exception as e:
             print(e)
             f[y1 : y1 + y, x1 : x1 + x] = p
-            # out.write(f[100:-20])
         f = remove_black(f)
         if preview:
             cv2.imwrite(outfile, f, [int(cv2.IMWRITE_JPEG_QUALITY), 95])
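Both removals just delete commented-out debug lines. The surrounding paste-back logic resizes the generated patch p to the detected crop's size and writes it into the full frame f. A self-contained sketch of that step (array shapes and box values here are stand-ins, not taken from the repo):

import cv2
import numpy as np

# Stand-in frame and generated patch; names mirror the diff (f, p, y1..x2).
f = np.zeros((256, 256, 3), dtype=np.uint8)
p = (np.random.rand(96, 96, 3) * 255).astype(np.float32)

y1, y2, x1, x2 = 40, 200, 50, 210           # crop box from face detection
y = round(y2 - y1)                          # crop height
x = round(x2 - x1)                          # crop width
p = cv2.resize(p.astype(np.uint8), (x, y))  # note: cv2.resize takes (width, height)
f[y1 : y1 + y, x1 : x1 + x] = p             # paste the patch back into the frame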
preprocess_videos.py CHANGED
@@ -66,8 +66,7 @@ def face_detect(images, pads):
         y_gap, x_gap = (y2 - y1)//2, (x2 - x1)//2
         coords_ = [y1 - y_gap, y2 + y_gap, x1 - x_gap, x2 + x_gap]
 
-
-        _, coords = get_squre_coords(coords_, image, None)
+        _, coords = get_squre_coords(coords_, image)
 
         y1, y2, x1, x2 = coords
         y1 = max(0, y1)
@@ -80,7 +79,7 @@ def face_detect(images, pads):
     print("Number of frames cropped: {}".format(len(results)))
     print("First coords: {}".format(results[0]))
     boxes = np.array(results)
-    boxes = get_smoothened_boxes(boxes, T=
+    boxes = get_smoothened_boxes(boxes, T=15)
     # results = [[image[y1:y2, x1:x2], (y1, y2, x1, x2)] for image, (x1, y1, x2, y2) in zip(images, boxes)]
 
     del detector
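The old value of T was cut off in this page capture; the new line sets T=15. get_smoothened_boxes itself is not part of the diff, but in Wav2Lip-style preprocessing, which this face_detect function resembles, it is typically a moving average of the per-frame detection boxes over a window of T frames, which damps detector jitter so the crop doesn't shake from frame to frame. A sketch under that assumption:

import numpy as np

def get_smoothened_boxes(boxes, T):
    # Replace each box with the mean of the next T boxes; clamp the window
    # at the end of the sequence so it always covers T frames.
    for i in range(len(boxes)):
        if i + T > len(boxes):
            window = boxes[len(boxes) - T :]
        else:
            window = boxes[i : i + T]
        boxes[i] = np.mean(window, axis=0)
    return boxes

# Usage: boxes is an (N, 4) float array of per-frame box coordinates.
boxes = np.random.rand(30, 4) * 100
smoothed = get_smoothened_boxes(boxes.copy(), T=15)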