ThunderVVV committed
Commit 7fb4aa3 · 1 Parent(s): 9ef4431
Files changed (3)
  1. app.py +165 -4
  2. lib/vis/run_vis2.py +10 -3
  3. lib/vis/viewer.py +1 -0
app.py CHANGED
@@ -1,7 +1,168 @@
 import gradio as gr
 
-def greet(name):
-    return "Hello " + name + "!!"
 
-demo = gr.Interface(fn=greet, inputs="text", outputs="text")
-demo.launch()
+# import spaces
+import sys
+import os
+import torch
+import numpy as np
+import joblib
+from easydict import EasyDict
+from scripts.scripts_test_video.detect_track_video import detect_track_video
+from scripts.scripts_test_video.hawor_video import hawor_motion_estimation, hawor_infiller
+from scripts.scripts_test_video.hawor_slam import hawor_slam
+from hawor.utils.process import get_mano_faces, run_mano, run_mano_left
+from lib.eval_utils.custom_utils import load_slam_cam
+from lib.vis.run_vis2 import run_vis2_on_video, run_vis2_on_video_cam
+
+
+def render_reconstruction(input_video, img_focal):
+    args = EasyDict()
+    args.video_path = input_video
+    args.input_type = 'file'
+    args.checkpoint = './weights/hawor/checkpoints/hawor.ckpt'
+    args.infiller_weight = './weights/hawor/checkpoints/infiller.pt'
+    args.vis_mode = 'world'
+    args.img_focal = img_focal
+
+    start_idx, end_idx, seq_folder, imgfiles = detect_track_video(args)
+
+    frame_chunks_all, img_focal = hawor_motion_estimation(args, start_idx, end_idx, seq_folder)
+
+    hawor_slam(args, start_idx, end_idx)
+    slam_path = os.path.join(seq_folder, f"SLAM/hawor_slam_w_scale_{start_idx}_{end_idx}.npz")
+    R_w2c_sla_all, t_w2c_sla_all, R_c2w_sla_all, t_c2w_sla_all = load_slam_cam(slam_path)
+
+    pred_trans, pred_rot, pred_hand_pose, pred_betas, pred_valid = hawor_infiller(args, start_idx, end_idx, frame_chunks_all)
+
+    # vis sequence for this video
+    hand2idx = {
+        "right": 1,
+        "left": 0
+    }
+    vis_start = 0
+    vis_end = pred_trans.shape[1] - 1
+
+    # get faces
+    faces = get_mano_faces()
+    faces_new = np.array([[92, 38, 234],
+                          [234, 38, 239],
+                          [38, 122, 239],
+                          [239, 122, 279],
+                          [122, 118, 279],
+                          [279, 118, 215],
+                          [118, 117, 215],
+                          [215, 117, 214],
+                          [117, 119, 214],
+                          [214, 119, 121],
+                          [119, 120, 121],
+                          [121, 120, 78],
+                          [120, 108, 78],
+                          [78, 108, 79]])
+    faces_right = np.concatenate([faces, faces_new], axis=0)
+
+    # get right hand vertices
+    hand = 'right'
+    hand_idx = hand2idx[hand]
+    pred_glob_r = run_mano(pred_trans[hand_idx:hand_idx+1, vis_start:vis_end], pred_rot[hand_idx:hand_idx+1, vis_start:vis_end], pred_hand_pose[hand_idx:hand_idx+1, vis_start:vis_end], betas=pred_betas[hand_idx:hand_idx+1, vis_start:vis_end])
+    right_verts = pred_glob_r['vertices'][0]
+    right_dict = {
+        'vertices': right_verts.unsqueeze(0),
+        'faces': faces_right,
+    }
+
+    # get left hand vertices
+    faces_left = faces_right[:,[0,2,1]]
+    hand = 'left'
+    hand_idx = hand2idx[hand]
+    pred_glob_l = run_mano_left(pred_trans[hand_idx:hand_idx+1, vis_start:vis_end], pred_rot[hand_idx:hand_idx+1, vis_start:vis_end], pred_hand_pose[hand_idx:hand_idx+1, vis_start:vis_end], betas=pred_betas[hand_idx:hand_idx+1, vis_start:vis_end])
+    left_verts = pred_glob_l['vertices'][0]
+    left_dict = {
+        'vertices': left_verts.unsqueeze(0),
+        'faces': faces_left,
+    }
+
+    R_x = torch.tensor([[1, 0, 0],
+                        [0, -1, 0],
+                        [0, 0, -1]]).float()
+    R_c2w_sla_all = torch.einsum('ij,njk->nik', R_x, R_c2w_sla_all)
+    t_c2w_sla_all = torch.einsum('ij,nj->ni', R_x, t_c2w_sla_all)
+    R_w2c_sla_all = R_c2w_sla_all.transpose(-1, -2)
+    t_w2c_sla_all = -torch.einsum("bij,bj->bi", R_w2c_sla_all, t_c2w_sla_all)
+    left_dict['vertices'] = torch.einsum('ij,btnj->btni', R_x, left_dict['vertices'].cpu())
+    right_dict['vertices'] = torch.einsum('ij,btnj->btni', R_x, right_dict['vertices'].cpu())
+
+    # Here we use aitviewer(https://github.com/eth-ait/aitviewer) for simple visualization.
+    if args.vis_mode == 'world':
+        output_pth = os.path.join(seq_folder, f"vis_{vis_start}_{vis_end}")
+        if not os.path.exists(output_pth):
+            os.makedirs(output_pth)
+        image_names = imgfiles[vis_start:vis_end]
+        print(f"vis {vis_start} to {vis_end}")
+        vis_video_path = run_vis2_on_video(left_dict, right_dict, output_pth, img_focal, image_names, R_c2w=R_c2w_sla_all[vis_start:vis_end], t_c2w=t_c2w_sla_all[vis_start:vis_end], interactive=False)
+    elif args.vis_mode == 'cam':
+        # output_pth = os.path.join(seq_folder, f"vis_{vis_start}_{vis_end}")
+        # if not os.path.exists(output_pth):
+        #     os.makedirs(output_pth)
+        # image_names = imgfiles[vis_start:vis_end]
+        # print(f"vis {vis_start} to {vis_end}")
+        # run_vis2_on_video_cam(left_dict, right_dict, output_pth, img_focal, image_names, R_w2c=R_w2c_sla_all[vis_start:vis_end], t_w2c=t_w2c_sla_all[vis_start:vis_end])
+        raise NotImplementedError
+
+    return vis_video_path
+
+# @spaces.GPU()
+def run_wilow_model(image, conf, IoU_threshold=0.5):
+    img_cv2 = image[...,::-1]
+    return img_vis.astype(np.float32)/255.0, len(detections), None
+
+
+
+header = ('''
+<div class="embed_hidden" style="text-align: center;">
+<h1> <b>HaWoR</b>: World-Space Hand Motion Reconstruction from Egocentric Videos</h1>
+<h3>
+<a href="" target="_blank" rel="noopener noreferrer">Jinglei Zhang</a><sup>1</sup>,
+<a href="https://jiankangdeng.github.io/" target="_blank" rel="noopener noreferrer">Jiankang Deng</a><sup>2</sup>,
+<br>
+<a href="https://scholar.google.com/citations?user=syoPhv8AAAAJ&hl=en" target="_blank" rel="noopener noreferrer">Chao Ma</a><sup>1</sup>
+<a href="https://rolpotamias.github.io" target="_blank" rel="noopener noreferrer">Rolandos Alexandros Potamias</a><sup>2</sup>
+</h3>
+<h3>
+<sup>1</sup>Shanghai Jiao Tong University;
+<sup>2</sup>Imperial College London
+</h3>
+</div>
+<div style="display:flex; gap: 0.3rem; justify-content: center; align-items: center;" align="center">
+<a href='https://arxiv.org/abs/xxxx.xxxxx'><img src='https://img.shields.io/badge/Arxiv-xxxx.xxxxx-A42C25?style=flat&logo=arXiv&logoColor=A42C25'></a>
+<a href=''><img src='https://img.shields.io/badge/Paper-PDF-yellow?style=flat&logo=arXiv&logoColor=yellow'></a>
+<a href='https://hawor-project.github.io/'><img src='https://img.shields.io/badge/Project-Page-%23df5b46?style=flat&logo=Google%20chrome&logoColor=%23df5b46'></a>
+<a href='https://github.com/ThunderVVV/HaWoR'><img src='https://img.shields.io/badge/GitHub-Code-black?style=flat&logo=github&logoColor=white'></a>
+''')
+
+
+with gr.Blocks(title="HaWoR: World-Space Hand Motion Reconstruction from Egocentric Videos", css=".gradio-container") as demo:
+
+    gr.Markdown(header)
+
+    with gr.Row():
+        with gr.Column():
+            input_video = gr.Video(label="Input video", sources=["upload"])
+            img_focal = gr.Number(label="Focal Length", value=600)
+            # threshold = gr.Slider(value=0.3, minimum=0.05, maximum=0.95, step=0.05, label='Detection Confidence Threshold')
+            #nms = gr.Slider(value=0.5, minimum=0.05, maximum=0.95, step=0.05, label='IoU NMS Threshold')
+            submit = gr.Button("Submit", variant="primary")
+
+
+        with gr.Column():
+            reconstruction = gr.Video(label="Reconstruction",show_download_button=True)
+            # hands_detected = gr.Textbox(label="Hands Detected")
+
+    submit.click(fn=render_reconstruction, inputs=[input_video, img_focal], outputs=[reconstruction])
+
+    with gr.Row():
+
+        example_images = gr.Examples([
+            ['./example/video_0.mp4']
+        ],
+        inputs=input_video)
+
+demo.launch(debug=True)
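
Aside: the world-frame flip in `render_reconstruction` above rotates every predicted vertex and camera pose by the fixed matrix `R_x` (a 180° rotation about the x-axis) via `torch.einsum`. A minimal, self-contained sketch of that pattern, using toy tensor shapes rather than the real MANO outputs:

```python
import torch

# Same fixed rotation as in app.py: flips the y- and z-axes (180° about x).
R_x = torch.tensor([[1, 0, 0],
                    [0, -1, 0],
                    [0, 0, -1]]).float()

# Toy vertex tensor shaped (batch, time, num_vertices, 3); shapes are illustrative only.
verts = torch.randn(1, 4, 778, 3)

# 'ij,btnj->btni' left-multiplies every 3-vector by R_x while keeping the other dims.
flipped = torch.einsum('ij,btnj->btni', R_x, verts)

# Equivalent formulation with a plain matmul on the last dimension.
assert torch.allclose(flipped, verts @ R_x.T, atol=1e-6)
```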
lib/vis/run_vis2.py CHANGED
@@ -36,7 +36,7 @@ def camera_marker_geometry(radius, height):
     return vertices, faces, face_colors
 
 
-def run_vis2_on_video(res_dict, res_dict2, output_pth, focal_length, image_names, R_c2w=None, t_c2w=None):
+def run_vis2_on_video(res_dict, res_dict2, output_pth, focal_length, image_names, R_c2w=None, t_c2w=None, interactive=True):
 
     img0 = cv2.imread(image_names[0])
     height, width, _ = img0.shape
@@ -132,8 +132,15 @@ def run_vis2_on_video(res_dict, res_dict2, output_pth, focal_length, image_names
     data = viewer_utils.ViewerData(viewer_Rt, K, vis_w, vis_h)
     batch = (meshes, data)
 
-    viewer = viewer_utils.ARCTICViewer(interactive=True, size=(vis_w, vis_h))
-    viewer.render_seq(batch, out_folder=os.path.join(output_pth, 'aitviewer'))
+    if interactive:
+        viewer = viewer_utils.ARCTICViewer(interactive=True, size=(vis_w, vis_h))
+        viewer.render_seq(batch, out_folder=os.path.join(output_pth, 'aitviewer'))
+    else:
+        viewer = viewer_utils.ARCTICViewer(interactive=False, size=(vis_w, vis_h), render_types=['video'])
+        if os.path.exists(os.path.join(output_pth, 'aitviewer', "video_0.mp4")):
+            os.remove(os.path.join(output_pth, 'aitviewer', "video_0.mp4"))
+        viewer.render_seq(batch, out_folder=os.path.join(output_pth, 'aitviewer'))
+    return os.path.join(output_pth, 'aitviewer', "video_0.mp4")
 
 def run_vis2_on_video_cam(res_dict, res_dict2, output_pth, focal_length, image_names, R_w2c=None, t_w2c=None):
 
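Note: `run_vis2_on_video` takes camera-to-world extrinsics (`R_c2w`, `t_c2w`) while `run_vis2_on_video_cam` takes world-to-camera ones (`R_w2c`, `t_w2c`); app.py derives one from the other with a transpose and a sign flip. A hedged sketch of that inversion on placeholder tensors (shapes assumed, not taken from the repo):

```python
import torch

# Placeholder per-frame camera-to-world extrinsics: rotations (n, 3, 3), translations (n, 3).
n = 5
R_c2w = torch.linalg.qr(torch.randn(n, 3, 3)).Q  # random orthonormal matrices standing in for rotations
t_c2w = torch.randn(n, 3)

# Inverting a rigid transform: R_w2c = R_c2w^T and t_w2c = -R_w2c @ t_c2w (as in app.py).
R_w2c = R_c2w.transpose(-1, -2)
t_w2c = -torch.einsum("bij,bj->bi", R_w2c, t_c2w)

# Round trip: a world point mapped into the camera frame and back should be unchanged.
p_world = torch.randn(n, 3)
p_cam = torch.einsum("bij,bj->bi", R_w2c, p_world) + t_w2c
p_back = torch.einsum("bij,bj->bi", R_c2w, p_cam) + t_c2w
assert torch.allclose(p_back, p_world, atol=1e-4)
```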
lib/vis/viewer.py CHANGED
@@ -108,6 +108,7 @@ class ARCTICViewer:
         if "video" in self.render_types:
             vid_p = op.join(out_folder, "video.mp4")
             v.save_video(video_dir=vid_p)
+            return
 
         pbar = tqdm(range(num_iter))
         for fidx in pbar: