# Copyright 2022 The OFA-Sys Team.
# This source code is licensed under the Apache 2.0 license
# found in the LICENSE file in the root directory.
# Copyright 2022 The HuggingFace Inc. team.
# All rights reserved.
# This source code is licensed under the Apache 2.0 license
# found in the LICENSE file in the root directory.
import inspect
import os
from typing import Callable, List, Optional, Union

import numpy as np
import torch
from openvino.runtime import Core
from transformers import CLIPFeatureExtractor, CLIPTokenizer

from diffusers import (DDIMScheduler, DiffusionPipeline, LMSDiscreteScheduler,
                       OnnxRuntimeModel, OnnxStableDiffusionPipeline,
                       PNDMScheduler)
from diffusers.configuration_utils import FrozenDict
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.utils import deprecate, logging
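
# Map ONNX Runtime tensor element-type strings to the corresponding numpy
# dtypes; used below to pick the dtype of the `timestep` input fed to the U-Net.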
ORT_TO_NP_TYPE = {
    "tensor(bool)": np.bool_,
    "tensor(int8)": np.int8,
    "tensor(uint8)": np.uint8,
    "tensor(int16)": np.int16,
    "tensor(uint16)": np.uint16,
    "tensor(int32)": np.int32,
    "tensor(uint32)": np.uint32,
    "tensor(int64)": np.int64,
    "tensor(uint64)": np.uint64,
    "tensor(float16)": np.float16,
    "tensor(float)": np.float32,
    "tensor(double)": np.float64,
}

logger = logging.get_logger(__name__)


class OpenVINOStableDiffusionPipeline(DiffusionPipeline):
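    """Stable Diffusion pipeline that runs the ONNX-exported text encoder,
    U-Net and VAE decoder on CPU through the OpenVINO runtime.

    Mirrors the component layout of `OnnxStableDiffusionPipeline`; the ONNX
    models are compiled with OpenVINO in `convert_to_openvino`.
    """
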
    vae_encoder: OnnxRuntimeModel
    vae_decoder: OnnxRuntimeModel
    text_encoder: OnnxRuntimeModel
    tokenizer: CLIPTokenizer
    unet: OnnxRuntimeModel
    scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler]
    safety_checker: OnnxRuntimeModel
    feature_extractor: CLIPFeatureExtractor

    _optional_components = ["safety_checker", "feature_extractor"]

    def __init__(
        self,
        vae_encoder: OnnxRuntimeModel,
        vae_decoder: OnnxRuntimeModel,
        text_encoder: OnnxRuntimeModel,
        tokenizer: CLIPTokenizer,
        unet: OnnxRuntimeModel,
        scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
        safety_checker: OnnxRuntimeModel,
        feature_extractor: CLIPFeatureExtractor,
        requires_safety_checker: bool = True,
    ):
        super().__init__()

        if hasattr(scheduler.config,
                   "steps_offset") and scheduler.config.steps_offset != 1:
            deprecation_message = (
                f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"
                f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure "
                "to update the config accordingly, as leaving `steps_offset` set might lead to incorrect results"
                " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
                " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
                " file")
            deprecate("steps_offset!=1",
                      "1.0.0",
                      deprecation_message,
                      standard_warn=False)
            new_config = dict(scheduler.config)
            new_config["steps_offset"] = 1
            scheduler._internal_dict = FrozenDict(new_config)

        if hasattr(scheduler.config,
                   "clip_sample") and scheduler.config.clip_sample is True:
            deprecation_message = (
                f"The configuration file of this scheduler: {scheduler} has `clip_sample` set to True."
                " `clip_sample` should be set to False in the configuration file. Please make sure to update the"
                " config accordingly, as leaving `clip_sample` set to True might lead to incorrect results in"
                " future versions. If you have downloaded this checkpoint from the Hugging Face Hub, it would be very"
                " nice if you could open a Pull request for the `scheduler/scheduler_config.json` file"
            )
            deprecate("clip_sample not set",
                      "1.0.0",
                      deprecation_message,
                      standard_warn=False)
            new_config = dict(scheduler.config)
            new_config["clip_sample"] = False
            scheduler._internal_dict = FrozenDict(new_config)

        if safety_checker is None and requires_safety_checker:
            logger.warning(
                f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
                " that you abide by the conditions of the Stable Diffusion license and do not expose unfiltered"
                " results in services or applications open to the public. Both the diffusers team and Hugging Face"
                " strongly recommend keeping the safety filter enabled in all public-facing circumstances, disabling"
                " it only for use cases that involve analyzing network behavior or auditing its results. For more"
                " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
            )

        if safety_checker is not None and feature_extractor is None:
            raise ValueError(
                f"Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety"
                " checker. If you do not want to use the safety checker, you can pass `safety_checker=None` instead."
            )

        self.register_modules(
            vae_encoder=vae_encoder,
            vae_decoder=vae_decoder,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            safety_checker=safety_checker,
            feature_extractor=feature_extractor,
        )
        self.convert_to_openvino()
        self.register_to_config(
            requires_safety_checker=requires_safety_checker)

    @classmethod
    def from_onnx_pipeline(cls, onnx_pipe: OnnxStableDiffusionPipeline):
r""" | |
Create OpenVINOStableDiffusionPipeline from a onnx stable pipeline. | |
Parameters: | |
            onnx_pipe (`OnnxStableDiffusionPipeline`):
                The ONNX pipeline whose components are reused.
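
        Example (a sketch; the checkpoint id and `revision` are assumptions,
        substitute your own ONNX export):

            onnx_pipe = OnnxStableDiffusionPipeline.from_pretrained(
                "runwayml/stable-diffusion-v1-5", revision="onnx",
                provider="CPUExecutionProvider")
            pipe = OpenVINOStableDiffusionPipeline.from_onnx_pipeline(onnx_pipe)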
""" | |
        return cls(onnx_pipe.vae_encoder, onnx_pipe.vae_decoder,
                   onnx_pipe.text_encoder, onnx_pipe.tokenizer, onnx_pipe.unet,
                   onnx_pipe.scheduler, onnx_pipe.safety_checker,
                   onnx_pipe.feature_extractor, requires_safety_checker=True)

    def convert_to_openvino(self):
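        """Compile the pipeline's ONNX text encoder, U-Net and VAE decoder
        with OpenVINO for CPU execution and re-register the compiled models
        in place of the ONNX Runtime ones."""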
        ie = Core()

        # VAE decoder
        vae_decoder_onnx = ie.read_model(
            model=os.path.join(self.vae_decoder.model_save_dir, "model.onnx"))
        vae_decoder = ie.compile_model(model=vae_decoder_onnx,
                                       device_name="CPU")

        # Text encoder
        text_encoder_onnx = ie.read_model(
            model=os.path.join(self.text_encoder.model_save_dir, "model.onnx"))
        text_encoder = ie.compile_model(model=text_encoder_onnx,
                                        device_name="CPU")

        # U-Net
        unet_onnx = ie.read_model(
            model=os.path.join(self.unet.model_save_dir, "model.onnx"))
        unet = ie.compile_model(model=unet_onnx, device_name="CPU")

        self.register_modules(vae_decoder=vae_decoder,
                              text_encoder=text_encoder,
                              unet=unet)

    def _encode_prompt(self, prompt, num_images_per_prompt,
                       do_classifier_free_guidance, negative_prompt):
        r"""
        Encodes the prompt into text encoder hidden states.

        Args:
            prompt (`str` or `List[str]`):
                prompt to be encoded
            num_images_per_prompt (`int`):
                number of images that should be generated per prompt
            do_classifier_free_guidance (`bool`):
                whether to use classifier-free guidance or not
            negative_prompt (`str` or `List[str]`):
                The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
                if `guidance_scale` is less than `1`).
        """
        batch_size = len(prompt) if isinstance(prompt, list) else 1

        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt,
            padding="max_length",
            max_length=self.tokenizer.model_max_length,
            truncation=True,
            return_tensors="np",
        )
        text_input_ids = text_inputs.input_ids
        untruncated_ids = self.tokenizer(prompt,
                                         padding="max_length",
                                         return_tensors="np").input_ids
        if not np.array_equal(text_input_ids, untruncated_ids):
            removed_text = self.tokenizer.batch_decode(
                untruncated_ids[:, self.tokenizer.model_max_length - 1:-1])
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f" {self.tokenizer.model_max_length} tokens: {removed_text}")
        prompt_embeds = self.text_encoder(
            {"input_ids":
             text_input_ids.astype(np.int32)})[self.text_encoder.outputs[0]]
        prompt_embeds = np.repeat(prompt_embeds, num_images_per_prompt, axis=0)

        # get unconditional embeddings for classifier-free guidance
        if do_classifier_free_guidance:
            uncond_tokens: List[str]
            if negative_prompt is None:
                uncond_tokens = [""] * batch_size
            elif type(prompt) is not type(negative_prompt):
                raise TypeError(
                    f"`negative_prompt` should be the same type as `prompt`, but got {type(negative_prompt)} !="
                    f" {type(prompt)}.")
            elif isinstance(negative_prompt, str):
                uncond_tokens = [negative_prompt] * batch_size
            elif batch_size != len(negative_prompt):
                raise ValueError(
                    f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
                    f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
                    " the batch size of `prompt`.")
            else:
                uncond_tokens = negative_prompt

            max_length = text_input_ids.shape[-1]
            uncond_input = self.tokenizer(
                uncond_tokens,
                padding="max_length",
                max_length=max_length,
                truncation=True,
                return_tensors="np",
            )
            negative_prompt_embeds = self.text_encoder({
                "input_ids":
                uncond_input.input_ids.astype(np.int32)
            })[self.text_encoder.outputs[0]]
            negative_prompt_embeds = np.repeat(negative_prompt_embeds,
                                               num_images_per_prompt,
                                               axis=0)

            # For classifier-free guidance we need two forward passes; here we
            # concatenate the unconditional and text embeddings into a single
            # batch to avoid doing two forward passes.
            prompt_embeds = np.concatenate(
                [negative_prompt_embeds, prompt_embeds])

        return prompt_embeds

    def __call__(
        self,
        prompt: Union[str, List[str]],
        height: Optional[int] = 512,
        width: Optional[int] = 512,
        num_inference_steps: Optional[int] = 50,
        guidance_scale: Optional[float] = 7.5,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: Optional[float] = 0.0,
        generator: Optional[np.random.RandomState] = None,
        latents: Optional[np.ndarray] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, np.ndarray], None]] = None,
        callback_steps: Optional[int] = 1,
    ):
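        r"""
        Run the denoising loop and return the generated image(s).

        Args:
            prompt (`str` or `List[str]`): the prompt(s) to guide generation.
            height, width (`int`): output resolution; must be divisible by 8.
            num_inference_steps (`int`): number of denoising steps.
            guidance_scale (`float`): classifier-free guidance weight; values
                <= 1 disable guidance.
            negative_prompt (`str` or `List[str]`, *optional*): prompt(s) not
                to guide the image generation.
            num_images_per_prompt (`int`): images to generate per prompt.
            eta (`float`): DDIM eta parameter; ignored by other schedulers.
            generator (`np.random.RandomState`, *optional*): RNG for the
                initial latents.
            latents (`np.ndarray`, *optional*): pre-generated initial latents.
            output_type (`str`): "pil" for PIL images, otherwise numpy arrays.
            return_dict (`bool`): return a `StableDiffusionPipelineOutput`
                instead of a plain tuple.
            callback, callback_steps: optional per-step callback called as
                `callback(i, t, latents)` and how often to call it.
        """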
        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(
                f"`prompt` has to be of type `str` or `list` but is {type(prompt)}"
            )
        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(
                f"`height` and `width` have to be divisible by 8 but are {height} and {width}."
            )
        if (callback_steps is None or not isinstance(callback_steps, int)
                or callback_steps <= 0):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}.")
        if generator is None:
            generator = np.random

        # Here `guidance_scale` is defined analogously to the guidance weight `w`
        # of equation (2) of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf .
        # `guidance_scale = 1` corresponds to doing no classifier-free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0
        prompt_embeds = self._encode_prompt(prompt, num_images_per_prompt,
                                            do_classifier_free_guidance,
                                            negative_prompt)

        # get the initial random noise unless the user supplied it
        latents_dtype = prompt_embeds.dtype
        latents_shape = (batch_size * num_images_per_prompt, 4, height // 8,
                         width // 8)
        if latents is None:
            latents = generator.randn(*latents_shape).astype(latents_dtype)
        elif latents.shape != latents_shape:
            raise ValueError(
                f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}"
            )

        # set timesteps
        self.scheduler.set_timesteps(num_inference_steps)
        latents = latents * np.float64(self.scheduler.init_noise_sigma)

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler; it will be ignored by other schedulers.
        # eta corresponds to η in the DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be in [0, 1]
        accepts_eta = "eta" in set(
            inspect.signature(self.scheduler.step).parameters.keys())
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs["eta"] = eta
        # With ONNX Runtime the timestep dtype could be read off the model inputs:
        # timestep_dtype = next(
        #     (input.type for input in self.unet.model.get_inputs() if input.name == "timestep"), "tensor(float)"
        # )
        # The OpenVINO compiled model does not expose `get_inputs()`, so the
        # dtype is hardcoded to int64 here.
        timestep_dtype = ORT_TO_NP_TYPE["tensor(int64)"]

        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
            # expand the latents if we are doing classifier-free guidance
            latent_model_input = np.concatenate(
                [latents] * 2) if do_classifier_free_guidance else latents
            latent_model_input = self.scheduler.scale_model_input(
                torch.from_numpy(latent_model_input), t)
            latent_model_input = latent_model_input.cpu().numpy()

            # predict the noise residual
            timestep = np.array([t], dtype=timestep_dtype)
            unet_input = {
                "sample": latent_model_input,
                "timestep": timestep,
                "encoder_hidden_states": prompt_embeds
            }
            noise_pred = self.unet(unet_input)[self.unet.outputs[0]]

            # perform guidance
            if do_classifier_free_guidance:
                noise_pred_uncond, noise_pred_text = np.split(noise_pred, 2)
                noise_pred = noise_pred_uncond + guidance_scale * (
                    noise_pred_text - noise_pred_uncond)

            # compute the previous noisy sample x_t -> x_t-1
            scheduler_output = self.scheduler.step(
                torch.from_numpy(noise_pred), t, torch.from_numpy(latents),
                **extra_step_kwargs)
            latents = scheduler_output.prev_sample.numpy()

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, t, latents)

        # scale and decode the image latents with the VAE decoder
        latents = 1 / 0.18215 * latents
        image = self.vae_decoder({"latent_sample":
                                  latents})[self.vae_decoder.outputs[0]]
        image = np.clip(image / 2 + 0.5, 0, 1)
        image = image.transpose((0, 2, 3, 1))

        if self.safety_checker is not None:
            safety_checker_input = self.feature_extractor(
                self.numpy_to_pil(image),
                return_tensors="np").pixel_values.astype(image.dtype)
            # The safety checker throws an error when called with a batch
            # size > 1, so run it one image at a time.
            images, has_nsfw_concept = [], []
            for i in range(image.shape[0]):
                image_i, has_nsfw_concept_i = self.safety_checker(
                    clip_input=safety_checker_input[i:i + 1],
                    images=image[i:i + 1])
                images.append(image_i)
                has_nsfw_concept.append(has_nsfw_concept_i[0])
            image = np.concatenate(images)
        else:
            has_nsfw_concept = None
if output_type == "pil": | |
image = self.numpy_to_pil(image) | |
if not return_dict: | |
return (image, has_nsfw_concept) | |
return StableDiffusionPipelineOutput( | |
images=image, nsfw_content_detected=has_nsfw_concept) | |
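

# Minimal usage sketch (assumptions: the checkpoint has already been exported
# to ONNX and lives under "./sd-v1-5-onnx"; substitute your own export path):
#
#   onnx_pipe = OnnxStableDiffusionPipeline.from_pretrained("./sd-v1-5-onnx")
#   pipe = OpenVINOStableDiffusionPipeline.from_onnx_pipeline(onnx_pipe)
#   image = pipe("a photo of an astronaut riding a horse").images[0]
#   image.save("astronaut.png")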