Update pipeline.py
pipeline.py  +2 -0
@@ -343,9 +343,11 @@ class Zero123PlusPipeline(diffusers.StableDiffusionPipeline):
         if image is None:
             raise ValueError("Inputting embeddings not supported for this pipeline. Please pass an image.")
         assert not isinstance(image, torch.Tensor)
+        image = to_rgb_image(image)
         image_1 = self.feature_extractor_vae(images=image, return_tensors="pt").pixel_values
         image_2 = self.feature_extractor_clip(images=image, return_tensors="pt").pixel_values
         if depth_image is not None and hasattr(self.unet, "controlnet"):
+            depth_image = to_rgb_image(depth_image)
             depth_image = self.depth_transforms_multi(depth_image).to(
                 device=self.unet.controlnet.device, dtype=self.unet.controlnet.dtype
             )
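The two added calls normalize the conditioning image and the optional depth image to 3-channel RGB before they reach the VAE and CLIP feature extractors, so RGBA or grayscale inputs no longer need to be converted by the caller. The commit does not show to_rgb_image itself; the sketch below is one plausible implementation, assuming the helper flattens an alpha channel onto a solid background and otherwise defers to PIL's mode conversion.

# Sketch only: to_rgb_image is not part of this diff. Assumed behavior: return
# RGB images unchanged, composite RGBA onto a solid background using the alpha
# channel as a mask, and convert any other PIL mode (e.g. "L", "P") to RGB.
from PIL import Image

def to_rgb_image(maybe_rgba: Image.Image) -> Image.Image:
    if maybe_rgba.mode == "RGB":
        return maybe_rgba
    if maybe_rgba.mode == "RGBA":
        # Paste onto a white canvas so transparent regions become background
        # instead of undefined color values.
        background = Image.new("RGB", maybe_rgba.size, (255, 255, 255))
        background.paste(maybe_rgba, mask=maybe_rgba.getchannel("A"))
        return background
    return maybe_rgba.convert("RGB")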