# coding=utf-8
# Copyright 2023 Harutatsu Akiyama, Jinbin Bai, and HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import random
import unittest

import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer

from diffusers import (
    AutoencoderKL,
    ControlNetModel,
    EulerDiscreteScheduler,
    StableDiffusionXLControlNetInpaintPipeline,
    UNet2DConditionModel,
)
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device

from ..pipeline_params import (
    IMAGE_TO_IMAGE_IMAGE_PARAMS,
    TEXT_TO_IMAGE_BATCH_PARAMS,
    TEXT_TO_IMAGE_IMAGE_PARAMS,
    TEXT_TO_IMAGE_PARAMS,
)
from ..test_pipelines_common import (
    PipelineKarrasSchedulerTesterMixin,
    PipelineLatentTesterMixin,
    PipelineTesterMixin,
)


enable_full_determinism()
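

# Fast tests for StableDiffusionXLControlNetInpaintPipeline: every component built
# below is a tiny, randomly initialized stand-in for the real SDXL modules, so the
# whole suite runs quickly on CPU.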
class ControlNetPipelineSDXLFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionXLControlNetInpaintPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = frozenset(IMAGE_TO_IMAGE_IMAGE_PARAMS.union({"mask_image", "control_image"}))
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS

    def get_dummy_components(self):
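        # Scaled-down versions of every module the pipeline needs. The block layout
        # mirrors the SDXL architecture, only the channel counts and layer sizes are tiny.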
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            # SDXL-specific config below
            attention_head_dim=(2, 4),
            use_linear_projection=True,
            addition_embed_type="text_time",
            addition_time_embed_dim=8,
            transformer_layers_per_block=(1, 2),
            projection_class_embeddings_input_dim=80,  # 6 * 8 + 32
            cross_attention_dim=64,
        )
        controlnet = ControlNetModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            in_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            conditioning_embedding_out_channels=(16, 32),
            # SDXL-specific config below
            attention_head_dim=(2, 4),
            use_linear_projection=True,
            addition_embed_type="text_time",
            addition_time_embed_dim=8,
            transformer_layers_per_block=(1, 2),
            projection_class_embeddings_input_dim=80,  # 6 * 8 + 32
            cross_attention_dim=64,
        )
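
        # Noise schedule settings (betas, leading timestep spacing) matching what the
        # SDXL release ships with.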
        scheduler = EulerDiscreteScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            steps_offset=1,
            beta_schedule="scaled_linear",
            timestep_spacing="leading",
        )

        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )

        torch.manual_seed(0)
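        # SDXL conditions on two text encoders; both dummies share this tiny CLIP config.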
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            # SDXL-specific config below
            hidden_act="gelu",
            projection_dim=32,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        text_encoder_2 = CLIPTextModelWithProjection(text_encoder_config)
        tokenizer_2 = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "controlnet": controlnet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "text_encoder_2": text_encoder_2,
            "tokenizer_2": tokenizer_2,
        }
        return components

    def get_dummy_inputs(self, device, seed=0, img_res=64):
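        # Deterministic PIL inputs: a random init image, an all-ones (fully masked)
        # mask, and a random control image, all resized to `img_res`.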
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        # Get random floats in [0, 1] as image
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        mask_image = torch.ones_like(image)

        controlnet_embedder_scale_factor = 2
        control_image = floats_tensor(
            (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor),
            rng=random.Random(seed),
        )
        control_image = control_image.cpu().permute(0, 2, 3, 1)[0]

        # Convert image and mask_image to [0, 255]
        image = 255 * image
        mask_image = 255 * mask_image
        control_image = 255 * control_image

        # Convert to PIL image
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((img_res, img_res))
        mask_image = Image.fromarray(np.uint8(mask_image)).convert("L").resize((img_res, img_res))
        control_image = Image.fromarray(np.uint8(control_image)).convert("RGB").resize((img_res, img_res))
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
            "image": init_image,
            "mask_image": mask_image,
            "control_image": control_image,
        }
        return inputs

    def test_attention_slicing_forward_pass(self):
        return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=2e-3)

    def test_stable_diffusion_xl_offloads(self):
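        # Running the pipeline normally, with model CPU offload, and with sequential
        # CPU offload should all produce (near-)identical images.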
        pipes = []
        components = self.get_dummy_components()
        sd_pipe = self.pipeline_class(**components).to(torch_device)
        pipes.append(sd_pipe)

        components = self.get_dummy_components()
        sd_pipe = self.pipeline_class(**components)
        sd_pipe.enable_model_cpu_offload()
        pipes.append(sd_pipe)

        components = self.get_dummy_components()
        sd_pipe = self.pipeline_class(**components)
        sd_pipe.enable_sequential_cpu_offload()
        pipes.append(sd_pipe)

        image_slices = []
        for pipe in pipes:
            pipe.unet.set_default_attn_processor()

            inputs = self.get_dummy_inputs(torch_device)
            image = pipe(**inputs).images

            image_slices.append(image[0, -3:, -3:, -1].flatten())

        assert np.abs(image_slices[0] - image_slices[1]).max() < 1e-3
        assert np.abs(image_slices[0] - image_slices[2]).max() < 1e-3

    def test_stable_diffusion_xl_multi_prompts(self):
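        # `prompt_2` / `negative_prompt_2` feed SDXL's second text encoder. Duplicating
        # the first prompt must not change the output; a different second prompt must.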
        components = self.get_dummy_components()
        sd_pipe = self.pipeline_class(**components).to(torch_device)

        # forward with single prompt
        inputs = self.get_dummy_inputs(torch_device)
        output = sd_pipe(**inputs)
        image_slice_1 = output.images[0, -3:, -3:, -1]

        # forward with same prompt duplicated
        inputs = self.get_dummy_inputs(torch_device)
        inputs["prompt_2"] = inputs["prompt"]
        output = sd_pipe(**inputs)
        image_slice_2 = output.images[0, -3:, -3:, -1]

        # ensure the results are equal
        assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4

        # forward with different prompt
        inputs = self.get_dummy_inputs(torch_device)
        inputs["prompt_2"] = "different prompt"
        output = sd_pipe(**inputs)
        image_slice_3 = output.images[0, -3:, -3:, -1]

        # ensure the results are not equal
        assert np.abs(image_slice_1.flatten() - image_slice_3.flatten()).max() > 1e-4

        # manually set a negative_prompt
        inputs = self.get_dummy_inputs(torch_device)
        inputs["negative_prompt"] = "negative prompt"
        output = sd_pipe(**inputs)
        image_slice_1 = output.images[0, -3:, -3:, -1]

        # forward with same negative_prompt duplicated
        inputs = self.get_dummy_inputs(torch_device)
        inputs["negative_prompt"] = "negative prompt"
        inputs["negative_prompt_2"] = inputs["negative_prompt"]
        output = sd_pipe(**inputs)
        image_slice_2 = output.images[0, -3:, -3:, -1]

        # ensure the results are equal
        assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4

        # forward with different negative_prompt
        inputs = self.get_dummy_inputs(torch_device)
        inputs["negative_prompt"] = "negative prompt"
        inputs["negative_prompt_2"] = "different negative prompt"
        output = sd_pipe(**inputs)
        image_slice_3 = output.images[0, -3:, -3:, -1]

        # ensure the results are not equal
        assert np.abs(image_slice_1.flatten() - image_slice_3.flatten()).max() > 1e-4

    def test_controlnet_sdxl_guess(self):
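        # Smoke test for `guess_mode=True`; the output is pinned against a frozen
        # reference slice computed on CPU.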
device = "cpu" | |
components = self.get_dummy_components() | |
sd_pipe = self.pipeline_class(**components) | |
sd_pipe = sd_pipe.to(device) | |
sd_pipe.set_progress_bar_config(disable=None) | |
inputs = self.get_dummy_inputs(device) | |
inputs["guess_mode"] = True | |
output = sd_pipe(**inputs) | |
image_slice = output.images[0, -3:, -3:, -1] | |
expected_slice = np.array( | |
[0.5381963, 0.4836803, 0.45821992, 0.5577731, 0.51210403, 0.4794795, 0.59282357, 0.5647199, 0.43100584] | |
) | |
# make sure that it's equal | |
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-4 | |

    # TODO(Patrick, Sayak) - skip for now as this requires more refiner tests
    def test_save_load_optional_components(self):
        pass

    def test_float16_inference(self):
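        # fp16 rounding on these tiny random models is coarse, hence the loose tolerance.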
        super().test_float16_inference(expected_max_diff=5e-1)