Spaces: Running on Zero
bluestyle97 committed: Update freesplatter/webui/runner.py

freesplatter/webui/runner.py CHANGED (+6 -57)
@@ -4,9 +4,9 @@ import uuid
 import time
 import rembg
 import numpy as np
+import trimesh
 import torch
 import fpsample
-import fast_simplification
 import matplotlib.pyplot as plt
 cmap = plt.get_cmap("hsv")
 from torchvision.transforms import v2
@@ -22,8 +22,7 @@ from transformers import AutoModelForImageSegmentation
 from diffusers import DiffusionPipeline, EulerAncestralDiscreteScheduler
 
 from freesplatter.hunyuan.hunyuan3d_mvd_std_pipeline import HunYuan3D_MVD_Std_Pipeline
-from freesplatter.utils.
-from freesplatter.utils.mesh_renderer import MeshRenderer
+from freesplatter.utils.mesh_optim import optimize_mesh
 from freesplatter.utils.camera_util import *
 from freesplatter.utils.recon_util import *
 from freesplatter.utils.infer_util import *
@@ -78,7 +77,7 @@ class FreeSplatterRunner:
         self.rembg = AutoModelForImageSegmentation.from_pretrained(
             "ZhengPeng7/BiRefNet",
             trust_remote_code=True,
-        )
+        ).to(device)
         self.rembg.eval()
         # self.rembg = rembg.new_session('birefnet-general')
 
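Note on this hunk: from_pretrained loads the BiRefNet weights on the CPU, so chaining .to(device) puts the segmenter on the same device as the tensors it will receive. Below is a minimal, self-contained sketch of how such a BiRefNet segmenter is typically driven, following the usage pattern from the ZhengPeng7/BiRefNet model card; the preprocessing values and the file name are illustrative assumptions, not taken from runner.py.

    import torch
    from PIL import Image
    from torchvision import transforms
    from transformers import AutoModelForImageSegmentation

    device = "cuda" if torch.cuda.is_available() else "cpu"

    # Load the segmenter once and keep it on the target device, as in the hunk above.
    birefnet = AutoModelForImageSegmentation.from_pretrained(
        "ZhengPeng7/BiRefNet",
        trust_remote_code=True,
    ).to(device).eval()

    # BiRefNet expects ImageNet-normalized RGB; 1024x1024 is the size suggested on the model card.
    preprocess = transforms.Compose([
        transforms.Resize((1024, 1024)),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
    ])

    image = Image.open("input.png").convert("RGB")
    with torch.no_grad():
        # The model returns multi-scale predictions; the last one is the final matte.
        pred = birefnet(preprocess(image).unsqueeze(0).to(device))[-1].sigmoid().cpu()
    mask = transforms.functional.to_pil_image(pred[0]).resize(image.size)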
@@ -142,14 +141,6 @@ class FreeSplatterRunner:
         model.load_state_dict(state_dict, strict=True)
         self.freesplatter_scene = model.eval().to(device)
 
-        # mesh optimizer
-        # self.mesh_renderer = MeshRenderer(
-        #     near=0.01,
-        #     far=100,
-        #     ssaa=1,
-        #     texture_filter='linear-mipmap-linear',
-        #     device=device).to(device)
-
     @torch.inference_mode()
     def run_segmentation(
         self,
@@ -379,50 +370,6 @@ class FreeSplatterRunner:
         t4 = time.time()
 
         # optimize texture
-        # cam_pos = c2ws_fusion[:, :3, 3].cpu().numpy()
-        # cam_inds = torch.from_numpy(fpsample.fps_sampling(cam_pos, 16).astype(int)).to(device=device)
-
-        # alphas_bake = alphas_fusion[cam_inds]
-        # images_bake = (images_fusion[cam_inds] - (1 - alphas_bake)) / alphas_bake.clamp(min=1e-6)
-
-        # out_mesh = Mesh.load(str(mesh_path), auto_uv=False, device='cpu')
-        # max_faces = 50000
-        # mesh_reduction = max(1 - max_faces / out_mesh.f.shape[0], mesh_reduction)
-        # mesh_verts_, mesh_faces_ = fast_simplification.simplify(
-        #     out_mesh.v.numpy(), out_mesh.f.numpy(), target_reduction=mesh_reduction)
-        # mesh_verts = out_mesh.v.new_tensor(mesh_verts_, dtype=torch.float32).requires_grad_(False)
-        # mesh_faces = out_mesh.f.new_tensor(mesh_faces_).requires_grad_(False)
-        # out_mesh = Mesh(v=mesh_verts, f=mesh_faces)
-        # out_mesh.auto_normal()
-        # out_mesh.auto_uv()
-        # out_mesh = out_mesh.to(device)
-
-        # intrinsics = fxfycxcy_fusion[0:1].clone()
-        # intrinsics[..., [0, 2]] *= images_bake.shape[-2]
-        # intrinsics[..., [1, 3]] *= images_bake.shape[-3]
-
-        # out_mesh = self.mesh_renderer.bake_multiview(
-        #     [out_mesh],
-        #     images_bake.unsqueeze(0),
-        #     alphas_bake.unsqueeze(0),
-        #     c2ws_fusion[cam_inds].unsqueeze(0),
-        #     intrinsics.unsqueeze(0),
-        # )[0]
-        # mesh_fine_path = os.path.join(self.output_dir, 'mesh.glb')
-        # # align mesh orientation
-        # out_mesh.v = out_mesh.v.clone()
-        # out_mesh.vn = out_mesh.vn.clone()
-        # out_mesh.v[..., 0] = -out_mesh.v[..., 0]
-        # out_mesh.vn[..., 0] = -out_mesh.vn[..., 0]
-        # out_mesh.v[..., [1, 2]] = out_mesh.v[..., [2, 1]]
-        # out_mesh.vn[..., [1, 2]] = out_mesh.vn[..., [2, 1]]
-
-        # out_mesh.write(mesh_fine_path, flip_yz=False)
-        # print(f"Save optimized mesh at {mesh_fine_path}")
-        # t5 = time.time()
-
-        # optimize texture
-        from freesplatter.utils.mesh_optim import optimize_mesh
         cam_pos = c2ws_fusion[:, :3, 3].cpu().numpy()
         cam_inds = torch.from_numpy(fpsample.fps_sampling(cam_pos, 16).astype(int)).to(device=device)
 
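The lines kept active at the end of this hunk pick 16 texture-baking views by farthest-point sampling the fused camera centers. A small standalone sketch of that selection step, using the same fpsample.fps_sampling(points, k) call as the diff; the toy camera poses below are illustrative.

    import numpy as np
    import torch
    import fpsample

    # Toy stand-in for c2ws_fusion: N camera-to-world matrices of shape (N, 4, 4).
    c2ws_fusion = torch.eye(4).repeat(32, 1, 1)
    c2ws_fusion[:, :3, 3] = torch.randn(32, 3)

    # Farthest-point sampling over the camera centers spreads the 16 baking views
    # as evenly as possible over the available poses; the result is an index array.
    cam_pos = c2ws_fusion[:, :3, 3].cpu().numpy()
    cam_inds = torch.from_numpy(fpsample.fps_sampling(cam_pos, 16).astype(int))

    selected_c2ws = c2ws_fusion[cam_inds]  # (16, 4, 4) poses used for baking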
@@ -436,7 +383,7 @@ class FreeSplatterRunner:
         intrinsics[:, 1, 1] = fxfycxcy[:, 1]
         intrinsics[:, 1, 2] = fxfycxcy[:, 3]
 
-        out_mesh =
+        out_mesh = trimesh.load(str(mesh_path), process=False)
         out_mesh = optimize_mesh(
             out_mesh,
             images_bake,
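trimesh.load(..., process=False) returns the geometry exactly as stored in the file; trimesh's default processing merges duplicate vertices and fixes winding, which can reorder data. A minimal sketch of that loading step, assuming mesh_path points at a GLB written earlier in the pipeline; the path and the Scene handling below are illustrative, not part of runner.py.

    import trimesh

    mesh_path = "output/mesh.glb"  # illustrative; the runner builds its own output path

    # process=False skips trimesh's automatic vertex merging and winding fixes,
    # so the loaded geometry matches the exported file exactly.
    loaded = trimesh.load(str(mesh_path), process=False)

    # GLB files often load as a Scene; collapse to a single Trimesh when needed.
    if isinstance(loaded, trimesh.Scene):
        loaded = trimesh.util.concatenate(list(loaded.geometry.values()))

    print(loaded.vertices.shape, loaded.faces.shape)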
@@ -467,6 +414,7 @@ class FreeSplatterRunner:
         focal_length,
         legends=None,
     ):
+        images = v2.functional.resize(images, 128, interpolation=3, antialias=True).clamp(0, 1)
         images = (images.permute(0, 2, 3, 1).detach().cpu().numpy() * 255).astype(np.uint8)
 
         cam2world = create_camera_to_world(torch.tensor([0, -2, 0]), camera_system='opencv').to(c2ws)
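This hunk (and the identical one below) downsizes the visualization images before they are converted to uint8. With torchvision.transforms.v2, an integer size resizes the shorter edge, and the integer interpolation code 3 corresponds to bicubic; the clamp(0, 1) guards against bicubic overshoot before the *255 conversion. A short sketch of just that step, with an illustrative tensor shape:

    import numpy as np
    import torch
    from torchvision.transforms import v2

    images = torch.rand(4, 3, 512, 512)  # illustrative batch of RGB views in [0, 1]

    # Shorter edge -> 128 px, bicubic (interpolation code 3), anti-aliased;
    # clamp because bicubic interpolation can overshoot [0, 1] slightly.
    small = v2.functional.resize(images, 128, interpolation=3, antialias=True).clamp(0, 1)

    # Same uint8 conversion as in the visualization helpers.
    small = (small.permute(0, 2, 3, 1).cpu().numpy() * 255).astype(np.uint8)
    print(small.shape)  # (4, 128, 128, 3)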
@@ -596,6 +544,7 @@ class FreeSplatterRunner:
         focal_length,
         legends=None,
     ):
+        images = v2.functional.resize(images, 128, interpolation=3, antialias=True).clamp(0, 1)
         images = (images.permute(0, 2, 3, 1).detach().cpu().numpy() * 255).astype(np.uint8)
 
         c2ws = c2ws.detach().cpu().numpy()