Chao Xu committed • Commit 04f27a5
Parent(s): 2337f0d

code pruning and change examples
- .gitattributes +1 -0
- .gitignore +4 -1
- app.py +71 -105
- demo_examples/00_zero123_lysol.png +3 -0
- demo_examples/01_backpack.png +0 -0
- demo_examples/01_wild_hydrant.png +3 -0
- demo_examples/02_hydrant.png +0 -0
- demo_examples/02_zero123_spyro.png +3 -0
- demo_examples/03_wild2_pineapple_bottle.png +3 -0
- demo_examples/04_robocat.png +0 -0
- demo_examples/04_unsplash_broccoli.png +3 -0
- demo_examples/05_clock.png +0 -0
- demo_examples/05_unsplash_chocolatecake.png +3 -0
- demo_examples/06_kungfucat.png +0 -0
- demo_examples/06_unsplash_stool2.png +3 -0
- demo_examples/07_goose_chef.png +0 -0
- demo_examples/07_objaverse_backpack.png +3 -0
- demo_examples/08_broccoli.png +0 -0
- demo_examples/08_dalle_icecream.png +3 -0
- demo_examples/09_objaverse_barrel.png +3 -0
- demo_examples/09_pineapple_bottle.png +0 -0
- demo_examples/10_GSO_Crosley_Alarm_Clock_Vintage_Metal.png +3 -0
- demo_examples/10_chocolatecake.png +0 -0
- demo_examples/11_extinguisher.png +0 -0
- demo_examples/11_realfusion_cactus_1.png +3 -0
- demo_examples/12_realfusion_cherry_1.png +3 -0
- demo_examples/13_dalle_cowbear.png +3 -0
- demo_examples/14_dalle3_gramophone1.png +3 -0
- demo_examples/15_dalle3_mushroom2.png +3 -0
- demo_examples/16_dalle3_blueberryicecream2.png +3 -0
- demo_examples/17_dalle3_rockingchair1.png +3 -0
- demo_examples/18_dalle3_stump1.png +3 -0
- demo_examples/19_objaverse_stool.png +3 -0
- demo_examples/20_objaverse_tank.png +3 -0
- demo_examples/21_unsplash_bigmac.png +3 -0
- demo_examples/22_unsplash_boxtoy.png +3 -0
- demo_examples/23_wild2_yellow_duck.png +3 -0
- demo_examples/24_unsplash_mario.png +3 -0
- demo_examples/25_unsplash_ebicycle2.png +3 -0
- demo_examples/26_unsplash_strawberrycake.png +3 -0
- demo_examples/27_objaverse_robocat.png +3 -0
- demo_examples/28_GSO_Great_Dinos_Triceratops_Toy.png +3 -0
- demo_examples/29_wild_goose_chef.png +3 -0
- demo_examples/30_wild_peroxide.png +3 -0
.gitattributes
CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+*.png filter=lfs diff=lfs merge=lfs -text
.gitignore
CHANGED
@@ -1 +1,4 @@
-weights/
+weights/
+data/
+*.ipynb
+demo_examples_*
app.py
CHANGED
@@ -1,8 +1,3 @@
-'''
-conda activate zero123
-cd stable-diffusion
-python gradio_new.py 0
-'''
 import os, sys
 from huggingface_hub import snapshot_download
 
@@ -66,6 +61,71 @@ _DONE = "Done! Mesh is shown on the right. <br> If it is not satisfactory, pleas
 _REGEN_1 = "Selected view(s) are regenerated. You can click **Regenerate nearby views and mesh**. <br> Alternatively, if the regenerated view(s) are still not satisfactory, you can repeat the previous step (select the view and regenerate)."
 _REGEN_2 = "Regeneration done. Mesh is shown on the right."
 
+
+def calc_cam_cone_pts_3d(polar_deg, azimuth_deg, radius_m, fov_deg):
+    '''
+    :param polar_deg (float).
+    :param azimuth_deg (float).
+    :param radius_m (float).
+    :param fov_deg (float).
+    :return (5, 3) array of float with (x, y, z).
+    '''
+    polar_rad = np.deg2rad(polar_deg)
+    azimuth_rad = np.deg2rad(azimuth_deg)
+    fov_rad = np.deg2rad(fov_deg)
+    polar_rad = -polar_rad  # NOTE: Inverse of how used_x relates to x.
+
+    # Camera pose center:
+    cam_x = radius_m * np.cos(azimuth_rad) * np.cos(polar_rad)
+    cam_y = radius_m * np.sin(azimuth_rad) * np.cos(polar_rad)
+    cam_z = radius_m * np.sin(polar_rad)
+
+    # Obtain four corners of camera frustum, assuming it is looking at origin.
+    # First, obtain camera extrinsics (rotation matrix only):
+    camera_R = np.array([[np.cos(azimuth_rad) * np.cos(polar_rad),
+                          -np.sin(azimuth_rad),
+                          -np.cos(azimuth_rad) * np.sin(polar_rad)],
+                         [np.sin(azimuth_rad) * np.cos(polar_rad),
+                          np.cos(azimuth_rad),
+                          -np.sin(azimuth_rad) * np.sin(polar_rad)],
+                         [np.sin(polar_rad),
+                          0.0,
+                          np.cos(polar_rad)]])
+
+    # Multiply by corners in camera space to obtain go to space:
+    corn1 = [-1.0, np.tan(fov_rad / 2.0), np.tan(fov_rad / 2.0)]
+    corn2 = [-1.0, -np.tan(fov_rad / 2.0), np.tan(fov_rad / 2.0)]
+    corn3 = [-1.0, -np.tan(fov_rad / 2.0), -np.tan(fov_rad / 2.0)]
+    corn4 = [-1.0, np.tan(fov_rad / 2.0), -np.tan(fov_rad / 2.0)]
+    corn1 = np.dot(camera_R, corn1)
+    corn2 = np.dot(camera_R, corn2)
+    corn3 = np.dot(camera_R, corn3)
+    corn4 = np.dot(camera_R, corn4)
+
+    # Now attach as offset to actual 3D camera position:
+    corn1 = np.array(corn1) / np.linalg.norm(corn1, ord=2)
+    corn_x1 = cam_x + corn1[0]
+    corn_y1 = cam_y + corn1[1]
+    corn_z1 = cam_z + corn1[2]
+    corn2 = np.array(corn2) / np.linalg.norm(corn2, ord=2)
+    corn_x2 = cam_x + corn2[0]
+    corn_y2 = cam_y + corn2[1]
+    corn_z2 = cam_z + corn2[2]
+    corn3 = np.array(corn3) / np.linalg.norm(corn3, ord=2)
+    corn_x3 = cam_x + corn3[0]
+    corn_y3 = cam_y + corn3[1]
+    corn_z3 = cam_z + corn3[2]
+    corn4 = np.array(corn4) / np.linalg.norm(corn4, ord=2)
+    corn_x4 = cam_x + corn4[0]
+    corn_y4 = cam_y + corn4[1]
+    corn_z4 = cam_z + corn4[2]
+
+    xs = [cam_x, corn_x1, corn_x2, corn_x3, corn_x4]
+    ys = [cam_y, corn_y1, corn_y2, corn_y3, corn_y4]
+    zs = [cam_z, corn_z1, corn_z2, corn_z3, corn_z4]
+
+    return np.array([xs, ys, zs]).T
+
 class CameraVisualizer:
     def __init__(self, gradio_plot):
         self._gradio_plot = gradio_plot
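Side note (not part of the commit): the function moved above returns the camera apex in row 0 and the four frustum corners in rows 1–4. A minimal sketch of how such points could be drawn as a wireframe camera cone with Plotly, which is presumably what CameraVisualizer does with them; the edge list and figure setup here are illustrative assumptions, not code from app.py.

```python
# Illustrative usage sketch; assumes numpy, plotly, and the calc_cam_cone_pts_3d
# defined in the hunk above are in scope.
import numpy as np
import plotly.graph_objects as go

cone = calc_cam_cone_pts_3d(polar_deg=30.0, azimuth_deg=45.0, radius_m=2.0, fov_deg=50.0)
apex, corners = cone[0], cone[1:]  # row 0 is the camera center, rows 1-4 the frustum corners

edges = []
for i in range(4):
    edges.append((apex, corners[i]))                  # apex to each corner
    edges.append((corners[i], corners[(i + 1) % 4]))  # base rectangle

fig = go.Figure()
for a, b in edges:
    fig.add_trace(go.Scatter3d(
        x=[a[0], b[0]], y=[a[1], b[1]], z=[a[2], b[2]],
        mode='lines', line=dict(color='blue', width=3), showlegend=False))
fig.show()
```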
@@ -77,18 +137,6 @@ class CameraVisualizer:
         self._8bit_image = None
         self._image_colorscale = None
 
-    def polar_change(self, value):
-        self._polar = value
-        # return self.update_figure()
-
-    def azimuth_change(self, value):
-        self._azimuth = value
-        # return self.update_figure()
-
-    def radius_change(self, value):
-        self._radius = value
-        # return self.update_figure()
-
     def encode_image(self, raw_image, elev=90):
         '''
         :param raw_image (H, W, 3) array of uint8 in [0, 255].
@@ -261,7 +309,7 @@ def stage1_run(models, device, cam_vis, tmp_dir,
     os.makedirs(stage1_dir, exist_ok=True)
     output_ims = predict_stage1_gradio(model, input_im, save_path=stage1_dir, adjust_set=list(range(4)), device=device, ddim_steps=ddim_steps, scale=scale)
     stage2_steps = 50 # ddim_steps
-    zero123_infer(
+    zero123_infer(model, tmp_dir, indices=[0], device=device, ddim_steps=stage2_steps, scale=scale)
     elev_output = estimate_elev(tmp_dir)
     gen_poses(tmp_dir, elev_output)
     show_in_im1 = np.asarray(input_im, dtype=np.uint8)
@@ -277,8 +325,6 @@ def stage1_run(models, device, cam_vis, tmp_dir,
         return (90-elev_output, new_fig, *output_ims, *output_ims_2)
     else:
         rerun_idx = [i for i in range(len(btn_retrys)) if btn_retrys[i]]
-        # elev_output = estimate_elev(tmp_dir)
-        # if elev_output > 75:
        if 90-int(elev["label"]) > 75:
             rerun_idx_in = [i if i < 4 else i+4 for i in rerun_idx]
         else:
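A quick illustration of the index remapping kept in the hunk above: when the estimated elevation is high (90 − elev > 75), the flagged views map onto two blocks of stage-1 output indices, with the second block offset by 4. The values below are made up purely to show the arithmetic.

```python
# Worked example of the list comprehension above (illustrative values only).
rerun_idx = [0, 2, 5, 7]  # views the user flagged for regeneration
rerun_idx_in = [i if i < 4 else i + 4 for i in rerun_idx]
print(rerun_idx_in)       # -> [0, 2, 9, 11]
```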
@@ -297,7 +343,6 @@ def stage1_run(models, device, cam_vis, tmp_dir,
 
 def stage2_run(models, device, tmp_dir,
                elev, scale, rerun_all=[], stage2_steps=50):
-    # print("elev", elev)
     flag_lower_cam = 90-int(elev["label"]) <= 75
     is_rerun = True if rerun_all else False
     model = models['turncam'].half()
@@ -308,7 +353,7 @@ def stage2_run(models, device, tmp_dir,
         zero123_infer(model, tmp_dir, indices=list(range(1,4))+list(range(8,12)), device=device, ddim_steps=stage2_steps, scale=scale)
     else:
         print("rerun_idx", rerun_all)
-        zero123_infer(
+        zero123_infer(model, tmp_dir, indices=rerun_all, device=device, ddim_steps=stage2_steps, scale=scale)
 
     dataset = tmp_dir
     main_dir_path = os.path.dirname(os.path.abspath(
@@ -321,7 +366,7 @@ def stage2_run(models, device, tmp_dir,
     os.system(bash_script)
     os.chdir(main_dir_path)
 
-    ply_path = os.path.join(tmp_dir, f"meshes_val_bg/lod0/
+    ply_path = os.path.join(tmp_dir, f"meshes_val_bg/lod0/mesh_00215000_gradio_lod0.ply")
     mesh_path = os.path.join(tmp_dir, "mesh.obj")
     # Read the textured mesh from .ply file
     mesh = trimesh.load_mesh(ply_path)
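The lines above load the reconstructed .ply and then write a mesh.obj next to it. A minimal stand-alone sketch of that kind of conversion with trimesh; the paths are placeholders and the real app.py may post-process the mesh before exporting.

```python
# Minimal sketch (not the exact app.py logic): convert a PLY mesh to OBJ with trimesh.
import trimesh

ply_path = "meshes_val_bg/lod0/example.ply"  # placeholder input path
obj_path = "mesh.obj"                        # placeholder output path

mesh = trimesh.load_mesh(ply_path)  # reads geometry (plus vertex colors/UVs when present)
mesh.export(obj_path)               # output format inferred from the .obj extension
```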
@@ -382,77 +427,6 @@ def preprocess_run(predictor, models, raw_im, preprocess, *bbox_sliders):
     torch.cuda.empty_cache()
     return input_256
 
-def calc_cam_cone_pts_3d(polar_deg, azimuth_deg, radius_m, fov_deg):
-    '''
-    :param polar_deg (float).
-    :param azimuth_deg (float).
-    :param radius_m (float).
-    :param fov_deg (float).
-    :return (5, 3) array of float with (x, y, z).
-    '''
-    polar_rad = np.deg2rad(polar_deg)
-    azimuth_rad = np.deg2rad(azimuth_deg)
-    fov_rad = np.deg2rad(fov_deg)
-    polar_rad = -polar_rad  # NOTE: Inverse of how used_x relates to x.
-
-    # Camera pose center:
-    cam_x = radius_m * np.cos(azimuth_rad) * np.cos(polar_rad)
-    cam_y = radius_m * np.sin(azimuth_rad) * np.cos(polar_rad)
-    cam_z = radius_m * np.sin(polar_rad)
-
-    # Obtain four corners of camera frustum, assuming it is looking at origin.
-    # First, obtain camera extrinsics (rotation matrix only):
-    camera_R = np.array([[np.cos(azimuth_rad) * np.cos(polar_rad),
-                          -np.sin(azimuth_rad),
-                          -np.cos(azimuth_rad) * np.sin(polar_rad)],
-                         [np.sin(azimuth_rad) * np.cos(polar_rad),
-                          np.cos(azimuth_rad),
-                          -np.sin(azimuth_rad) * np.sin(polar_rad)],
-                         [np.sin(polar_rad),
-                          0.0,
-                          np.cos(polar_rad)]])
-    # print('camera_R:', lo(camera_R).v)
-
-    # Multiply by corners in camera space to obtain go to space:
-    corn1 = [-1.0, np.tan(fov_rad / 2.0), np.tan(fov_rad / 2.0)]
-    corn2 = [-1.0, -np.tan(fov_rad / 2.0), np.tan(fov_rad / 2.0)]
-    corn3 = [-1.0, -np.tan(fov_rad / 2.0), -np.tan(fov_rad / 2.0)]
-    corn4 = [-1.0, np.tan(fov_rad / 2.0), -np.tan(fov_rad / 2.0)]
-    corn1 = np.dot(camera_R, corn1)
-    corn2 = np.dot(camera_R, corn2)
-    corn3 = np.dot(camera_R, corn3)
-    corn4 = np.dot(camera_R, corn4)
-
-    # Now attach as offset to actual 3D camera position:
-    corn1 = np.array(corn1) / np.linalg.norm(corn1, ord=2)
-    corn_x1 = cam_x + corn1[0]
-    corn_y1 = cam_y + corn1[1]
-    corn_z1 = cam_z + corn1[2]
-    corn2 = np.array(corn2) / np.linalg.norm(corn2, ord=2)
-    corn_x2 = cam_x + corn2[0]
-    corn_y2 = cam_y + corn2[1]
-    corn_z2 = cam_z + corn2[2]
-    corn3 = np.array(corn3) / np.linalg.norm(corn3, ord=2)
-    corn_x3 = cam_x + corn3[0]
-    corn_y3 = cam_y + corn3[1]
-    corn_z3 = cam_z + corn3[2]
-    corn4 = np.array(corn4) / np.linalg.norm(corn4, ord=2)
-    corn_x4 = cam_x + corn4[0]
-    corn_y4 = cam_y + corn4[1]
-    corn_z4 = cam_z + corn4[2]
-
-    xs = [cam_x, corn_x1, corn_x2, corn_x3, corn_x4]
-    ys = [cam_y, corn_y1, corn_y2, corn_y3, corn_y4]
-    zs = [cam_z, corn_z1, corn_z2, corn_z3, corn_z4]
-
-    return np.array([xs, ys, zs]).T
-
-def save_bbox(dir, x_min, y_min, x_max, y_max):
-    box = np.array([x_min, y_min, x_max, y_max])
-    # save the box to a file
-    bbox_path = os.path.join(dir, "bbox.txt")
-    np.savetxt(bbox_path, box)
-
 def on_coords_slider(image, x_min, y_min, x_max, y_max, color=(88, 191, 131, 255)):
     """Draw a bounding box annotation for an image."""
     print("on_coords_slider, drawing bbox...")
@@ -464,12 +438,11 @@ def on_coords_slider(image, x_min, y_min, x_max, y_max, color=(88, 191, 131, 255
     y_min = int(y_min * shrink_ratio)
     x_max = int(x_max * shrink_ratio)
     y_max = int(y_max * shrink_ratio)
-    print("on_coords_slider, image_size:", np.array(image).shape)
     image = cv2.cvtColor(np.array(image), cv2.COLOR_RGBA2BGRA)
     image = cv2.rectangle(image, (x_min, y_min), (x_max, y_max), color, int(max(max(image.shape) / 400*2, 2)))
     return cv2.cvtColor(image, cv2.COLOR_BGRA2RGBA)  # image[:, :, ::-1]
 
-def
+def init_bbox(image):
     image.thumbnail([512, 512], Image.Resampling.LANCZOS)
     width, height = image.size
     image_rem = image.convert('RGBA')
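The new init_bbox starts by thumbnailing the input to 512 px and converting it to RGBA. One plausible way such a helper could derive an initial bounding box from the alpha channel is sketched below; this is an assumption for illustration, not code copied from app.py.

```python
# Hypothetical helper: derive a default bbox from an RGBA image's alpha channel.
import numpy as np
from PIL import Image

def default_bbox_from_alpha(image: Image.Image, threshold: int = 0):
    image = image.convert('RGBA')
    alpha = np.array(image)[:, :, 3]
    ys, xs = np.nonzero(alpha > threshold)  # pixels that are not fully transparent
    if len(xs) == 0:                        # fully transparent: fall back to the whole image
        return 0, 0, image.width, image.height
    return int(xs.min()), int(ys.min()), int(xs.max()), int(ys.max())

# Example:
# img = Image.open("input.png"); print(default_bbox_from_alpha(img))
```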
@@ -500,12 +473,6 @@ def run_demo(
         device_idx=_GPU_INDEX,
         ckpt='zero123-xl.ckpt'):
 
-    print('sys.argv:', sys.argv)
-    if len(sys.argv) > 1:
-        print('old device_idx:', device_idx)
-        device_idx = int(sys.argv[1])
-        print('new device_idx:', device_idx)
-
     device = f"cuda:{device_idx}" if torch.cuda.is_available() else "cpu"
     models = init_model(device, os.path.join(code_dir, ckpt))
     # model = models['turncam']
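The commit drops the ad-hoc sys.argv override of device_idx shown above, leaving the _GPU_INDEX default. If a command-line override were still wanted, a conventional (hypothetical, not in app.py) argparse version would look like this:

```python
# Hypothetical replacement for the removed sys.argv handling; not part of the commit.
import argparse

parser = argparse.ArgumentParser()
parser.add_argument("device_idx", type=int, nargs="?", default=0,
                    help="CUDA device index to run the demo on")
args = parser.parse_args()
device_idx = args.device_idx
```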
@@ -523,9 +490,8 @@ def run_demo(
     example_fns.sort()
     examples_full = [os.path.join(example_folder, x) for x in example_fns if x.endswith('.png')]
 
-
     # Compose demo layout & data flow.
-    css = "#model-3d-out {height: 400px;} #plot-out {height:
+    css = "#model-3d-out {height: 400px;} #plot-out {height: 450px;}"
     with gr.Blocks(title=_TITLE, css=css) as demo:
         gr.Markdown('# ' + _TITLE)
         gr.Markdown(_DESCRIPTION)
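The completed css string above only takes effect on components created with matching elem_id values. A hedged sketch of that pattern follows; the component choices are assumptions, since the actual layout is defined further down in app.py.

```python
# Sketch of how elem_id ties components to the css above (assumed component choice).
import gradio as gr

css = "#model-3d-out {height: 400px;} #plot-out {height: 450px;}"
with gr.Blocks(css=css) as demo:
    mesh_out = gr.Model3D(elem_id="model-3d-out", label="Mesh")
    pose_plot = gr.Plot(elem_id="plot-out", label="Camera poses")
# demo.launch()
```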
@@ -625,7 +591,7 @@ def run_demo(
     ).success(disable_func, inputs=run_btn, outputs=run_btn
     ).success(fn=tmp_func, inputs=[image_block], outputs=[placeholder]
     ).success(fn=partial(update_guide, _BBOX_1), outputs=[guide_text]
-    ).success(fn=
+    ).success(fn=init_bbox,
               inputs=[image_block],
               outputs=[bbox_block, *bbox_sliders]
     ).success(fn=partial(update_guide, _BBOX_3), outputs=[guide_text]
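For readers unfamiliar with the chained syntax in the hunk above: each .success(...) registers a follow-up callback that runs only if the previous step completed without raising, and functools.partial bakes a fixed guide message into update_guide. A toy, self-contained sketch of the same pattern; every name here is a stand-in, not the app.py one.

```python
# Toy illustration of chaining Gradio events with .success and functools.partial.
from functools import partial
import gradio as gr

def set_guide(message):
    return message

def slow_step(text):
    return text.upper()

with gr.Blocks() as demo:
    inp = gr.Textbox(label="Input")
    out = gr.Textbox(label="Output")
    guide = gr.Markdown("Click run.")
    btn = gr.Button("Run")

    btn.click(fn=slow_step, inputs=inp, outputs=out
    ).success(fn=partial(set_guide, "Step finished, running next stage..."), outputs=guide)

# demo.launch()
```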
demo_examples/00_zero123_lysol.png ADDED (Git LFS)
demo_examples/01_backpack.png DELETED (binary, 39.6 kB)
demo_examples/01_wild_hydrant.png ADDED (Git LFS)
demo_examples/02_hydrant.png DELETED (binary, 125 kB)
demo_examples/02_zero123_spyro.png ADDED (Git LFS)
demo_examples/03_wild2_pineapple_bottle.png ADDED (Git LFS)
demo_examples/04_robocat.png DELETED (binary, 35.3 kB)
demo_examples/04_unsplash_broccoli.png ADDED (Git LFS)
demo_examples/05_clock.png DELETED (binary, 74.5 kB)
demo_examples/05_unsplash_chocolatecake.png ADDED (Git LFS)
demo_examples/06_kungfucat.png DELETED (binary, 55.1 kB)
demo_examples/06_unsplash_stool2.png ADDED (Git LFS)
demo_examples/07_goose_chef.png DELETED (binary, 78.6 kB)
demo_examples/07_objaverse_backpack.png ADDED (Git LFS)
demo_examples/08_broccoli.png DELETED (binary, 80.7 kB)
demo_examples/08_dalle_icecream.png ADDED (Git LFS)
demo_examples/09_objaverse_barrel.png ADDED (Git LFS)
demo_examples/09_pineapple_bottle.png DELETED (binary, 117 kB)
demo_examples/10_GSO_Crosley_Alarm_Clock_Vintage_Metal.png ADDED (Git LFS)
demo_examples/10_chocolatecake.png DELETED (binary, 59 kB)
demo_examples/11_extinguisher.png DELETED (binary, 42.6 kB)
demo_examples/11_realfusion_cactus_1.png ADDED (Git LFS)
demo_examples/12_realfusion_cherry_1.png ADDED (Git LFS)
demo_examples/13_dalle_cowbear.png ADDED (Git LFS)
demo_examples/14_dalle3_gramophone1.png ADDED (Git LFS)
demo_examples/15_dalle3_mushroom2.png ADDED (Git LFS)
demo_examples/16_dalle3_blueberryicecream2.png ADDED (Git LFS)
demo_examples/17_dalle3_rockingchair1.png ADDED (Git LFS)
demo_examples/18_dalle3_stump1.png ADDED (Git LFS)
demo_examples/19_objaverse_stool.png ADDED (Git LFS)
demo_examples/20_objaverse_tank.png ADDED (Git LFS)
demo_examples/21_unsplash_bigmac.png ADDED (Git LFS)
demo_examples/22_unsplash_boxtoy.png ADDED (Git LFS)
demo_examples/23_wild2_yellow_duck.png ADDED (Git LFS)
demo_examples/24_unsplash_mario.png ADDED (Git LFS)
demo_examples/25_unsplash_ebicycle2.png ADDED (Git LFS)
demo_examples/26_unsplash_strawberrycake.png ADDED (Git LFS)
demo_examples/27_objaverse_robocat.png ADDED (Git LFS)
demo_examples/28_GSO_Great_Dinos_Triceratops_Toy.png ADDED (Git LFS)
demo_examples/29_wild_goose_chef.png ADDED (Git LFS)
demo_examples/30_wild_peroxide.png ADDED (Git LFS)