init space
- .gitignore +4 -0
- app.py +76 -0
- estimator.py +47 -0
- requirements.txt +2 -0
.gitignore
ADDED
@@ -0,0 +1,4 @@
.idea/
__pycache__/
checkpoints
results
app.py
ADDED
@@ -0,0 +1,76 @@
import gradio as gr
import numpy as np
from PIFuHD.data import EvalWMetaDataset
from PIFuHD.data.ImageBundle import ImageBundle
from PIFuHD.options import BaseOptions
from PIFuHD.recontructor import Reconstructor
from huggingface_hub import hf_hub_download
from human_pose_estimator import PoseEstimator
from estimator import rect

REPO_ID = "cxeep/PIFuHD"

pose_estimator = PoseEstimator("cpu")

checkpoint_path = hf_hub_download(repo_id=REPO_ID, filename="pifuhd.pt")

# CLI-style arguments consumed by PIFuHD's BaseOptions parser
cmd = [
    '--dataroot', './data',
    '--results_path', './results',
    '--loadSize', '1024',
    '--resolution', '256',
    '--load_netMR_checkpoint_path', checkpoint_path,
    '--start_id', '-1',
    '--end_id', '-1'
]

parser = BaseOptions()
opts = parser.parse(cmd)
reconstructor = Reconstructor(opts)


def make_bundle(image, name):
    # Detect person bounding rects with the pose estimator and bundle them with the image
    image, rects = rect(pose_estimator, image)
    return ImageBundle(img=image, name=name, meta=rects)


def predict(img: np.ndarray):
    bundle = make_bundle(img, "Model3D")
    dataset = EvalWMetaDataset(opts, [bundle])
    img, model = reconstructor.evaluate(dataset)
    # The model path feeds both the 3D viewer and the download component
    return img, model, model


footer = r"""
<center>
<b>
Demo for <a href='https://github.com/facebookresearch/pifuhd'>PIFuHD</a>
</b>
</center>
"""

with gr.Blocks(title="PIFuHD") as app:
    gr.HTML("<center><h1>3D Human Digitization</h1></center>")
    gr.HTML("<center><h3>PIFuHD: Multi-Level Pixel-Aligned Implicit Function for High-Resolution 3D Human Digitization (CVPR 2020)</h3></center>")
    with gr.Row(equal_height=False):
        with gr.Column():
            input_img = gr.Image(type="numpy", label="Input image")
            run_btn = gr.Button(variant="primary")
        with gr.Column():
            output_obj = gr.Model3D(label="Output model")
            output_img = gr.Image(type="filepath", label="Output image")
            output_file = gr.File(label="Download 3D Model")
            gr.ClearButton(components=[input_img, output_img, output_obj, output_file], variant="stop")

    run_btn.click(predict, [input_img], [output_img, output_obj, output_file])

    with gr.Row():
        blobs = [[f"examples/{x:02d}.jpg"] for x in range(1, 4)]
        examples = gr.Dataset(components=[input_img], samples=blobs)
        examples.click(lambda x: x[0], [examples], [input_img])

    with gr.Row():
        gr.HTML(footer)

# Enable the request queue before launching so it actually takes effect
app.queue()
app.launch(share=False, debug=True, show_error=True)
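For reference, the same pipeline can be driven without the Gradio UI. The sketch below is an assumption, not part of the Space: it only repeats calls that already appear in app.py and estimator.py, and it assumes Pillow is available and that an input image exists at examples/01.jpg.

# Hypothetical headless run of the reconstruction pipeline (a sketch, not part of the Space)
import numpy as np
from PIL import Image
from huggingface_hub import hf_hub_download
from human_pose_estimator import PoseEstimator
from PIFuHD.data import EvalWMetaDataset
from PIFuHD.data.ImageBundle import ImageBundle
from PIFuHD.options import BaseOptions
from PIFuHD.recontructor import Reconstructor
from estimator import rect

opts = BaseOptions().parse([
    '--dataroot', './data', '--results_path', './results',
    '--loadSize', '1024', '--resolution', '256',
    '--load_netMR_checkpoint_path', hf_hub_download(repo_id="cxeep/PIFuHD", filename="pifuhd.pt"),
    '--start_id', '-1', '--end_id', '-1',
])

# Load an RGB image as a numpy array, the same format gr.Image(type="numpy") produces
img = np.asarray(Image.open("examples/01.jpg").convert("RGB"))
img, rects = rect(PoseEstimator("cpu"), img)

dataset = EvalWMetaDataset(opts, [ImageBundle(img=img, name="Model3D", meta=rects)])
preview, model_path = Reconstructor(opts).evaluate(dataset)
print(preview, model_path)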
estimator.py
ADDED
@@ -0,0 +1,47 @@
import numpy as np
from human_pose_estimator.modules.pose import Pose


def rect(estimator, img: np.ndarray, height_size=512):
    num_keypoints = Pose.num_kpts
    _, pose_entries, all_keypoints = estimator.get_poses(img, height_size)

    rects = []
    for n in range(len(pose_entries)):
        if len(pose_entries[n]) == 0:
            continue
        pose_keypoints = np.ones((num_keypoints, 2), dtype=np.int32) * -1

        valid_keypoints = []
        for kpt_id in range(num_keypoints):
            if pose_entries[n][kpt_id] != -1.0:  # keypoint was found
                pose_keypoints[kpt_id, 0] = int(all_keypoints[int(pose_entries[n][kpt_id]), 0])
                pose_keypoints[kpt_id, 1] = int(all_keypoints[int(pose_entries[n][kpt_id]), 1])
                valid_keypoints.append([pose_keypoints[kpt_id, 0], pose_keypoints[kpt_id, 1]])
        valid_keypoints = np.array(valid_keypoints)

        if pose_entries[n][10] != -1.0 or pose_entries[n][13] != -1.0:
            # legs detected: fit a square around all valid keypoints
            pmin = valid_keypoints.min(0)
            pmax = valid_keypoints.max(0)

            center = (0.5 * (pmax[:2] + pmin[:2])).astype(np.int32)
            radius = int(0.65 * max(pmax[0] - pmin[0], pmax[1] - pmin[1]))
        elif pose_entries[n][10] == -1.0 and pose_entries[n][13] == -1.0 and pose_entries[n][8] != -1.0 and \
                pose_entries[n][11] != -1.0:
            # if leg is missing, use pelvis to get cropping
            center = (0.5 * (pose_keypoints[8] + pose_keypoints[11])).astype(np.int32)
            radius = int(1.45 * np.sqrt(((center[None, :] - valid_keypoints) ** 2).sum(1)).max(0))
            center[1] += int(0.05 * radius)
        else:
            # fall back to a centered square covering the whole image
            center = np.array([img.shape[1] // 2, img.shape[0] // 2])
            radius = max(img.shape[1] // 2, img.shape[0] // 2)

        x1 = center[0] - radius
        y1 = center[1] - radius

        rects.append([x1, y1, 2 * radius, 2 * radius])

    return img, rects
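Each entry in rects is a square crop of side 2*radius with top-left corner (x1, y1). The first branch of rect() reduces to simple box arithmetic; a self-contained sketch with made-up keypoint coordinates (no pose model needed, the numbers are purely illustrative):

# Standalone illustration of the bounding-square math used above, with synthetic keypoints
import numpy as np

valid_keypoints = np.array([[300, 100], [280, 400], [320, 900]])  # e.g. head, hip, ankle
pmin, pmax = valid_keypoints.min(0), valid_keypoints.max(0)       # [280, 100], [320, 900]

center = (0.5 * (pmax + pmin)).astype(np.int32)                   # [300, 500]
radius = int(0.65 * max(pmax[0] - pmin[0], pmax[1] - pmin[1]))    # 0.65 * 800 = 520

x1 = int(center[0]) - radius
y1 = int(center[1]) - radius
print([x1, y1, 2 * radius, 2 * radius])  # [-220, -20, 1040, 1040]; the square may extend past the image borders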
requirements.txt
ADDED
@@ -0,0 +1,2 @@
pifu-hd
human_pose_estimator
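Assuming these two entries resolve to the packages that provide the PIFuHD.* and human_pose_estimator modules imported above, `pip install -r requirements.txt` followed by `python app.py` should be enough to run the demo locally.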