# Image-Gen-Pro / app.py
from __future__ import annotations
import math
import random
import spaces
import gradio as gr
import numpy as np
import torch
from PIL import Image
from diffusers import AutoencoderKL, DiffusionPipeline, EDMEulerScheduler, StableDiffusionXLImg2ImgPipeline, StableDiffusionXLInstructPix2PixPipeline
from huggingface_hub import hf_hub_download, InferenceClient
dtype = torch.bfloat16
device = "cuda" if torch.cuda.is_available() else "cpu"
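
# Text-to-image base: FLUX.1-schnell generates latents quickly with guidance disabled;
# they are then decoded and polished by the SDXL refiner defined further down.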
pipe = DiffusionPipeline.from_pretrained("black-forest-labs/FLUX.1-schnell", torch_dtype=dtype, revision="refs/pr/1").to(device)
help_text = """
To optimize image results:
- Adjust the **Image CFG weight** if the image isn't changing enough or is changing too much. Lower it to allow bigger changes, or raise it to preserve original details.
- Modify the **Text CFG weight** to influence how closely the edit follows text instructions. Increase it to adhere more to the text, or decrease it for subtler changes.
- Experiment with different **random seeds** and **CFG values** for varied outcomes.
- **Rephrase your instructions** for potentially better results.
- **Increase the number of steps** for enhanced edits.
"""
def set_timesteps_patched(self, num_inference_steps: int, device=None):
    self.num_inference_steps = num_inference_steps
    ramp = np.linspace(0, 1, self.num_inference_steps)
    sigmas = torch.linspace(math.log(self.config.sigma_min), math.log(self.config.sigma_max), len(ramp)).exp().flip(0)
    sigmas = sigmas.to(dtype=torch.float32, device=device)
    self.timesteps = self.precondition_noise(sigmas)
    self.sigmas = torch.cat([sigmas, torch.zeros(1, device=sigmas.device)])
    self._step_index = None
    self._begin_index = None
    self.sigmas = self.sigmas.to("cpu")
# Image Editor
edit_file = hf_hub_download(repo_id="stabilityai/cosxl", filename="cosxl_edit.safetensors")
EDMEulerScheduler.set_timesteps = set_timesteps_patched
# `vae` was referenced below but never defined; an fp16-safe SDXL VAE is assumed here.
vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16)
pipe_edit = StableDiffusionXLInstructPix2PixPipeline.from_single_file(edit_file, num_in_channels=8, is_cosxl_edit=True, vae=vae, torch_dtype=torch.float16)
pipe_edit.scheduler = EDMEulerScheduler(sigma_min=0.002, sigma_max=120.0, sigma_data=1.0, prediction_type="v_prediction")
pipe_edit.to("cuda")
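
# `refiner` is used by both branches of `king` below but was never defined in the original snippet.
# A minimal, assumed fix: the standard SDXL img2img refiner decodes and polishes the latent outputs.
refiner = StableDiffusionXLImg2ImgPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-refiner-1.0", torch_dtype=torch.float16, variant="fp16", use_safetensors=True
).to("cuda")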
# Prompt enhancer: a small chat LLM rewrites terse user prompts into richer, more detailed ones.
client1 = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
system_instructions1 = "<|system|>\nAct as an Image Prompt Generation expert. Your task is to rewrite the USER's prompt into a better, more detailed prompt for image generation.\nEnsure the prompt is detailed yet descriptive, so that it produces an exceptional image that meets the user's expectations.\nReply with the final optimized prompt only.\n<|user|>\n"
def promptifier(prompt):
    formatted_prompt = f"{system_instructions1}{prompt}\n<|assistant|>\n"
    # text_generation returns the completed string (not a stream) containing the optimized prompt.
    return client1.text_generation(formatted_prompt, max_new_tokens=300)
# Generator
@spaces.GPU(duration=60, queue=False)
def king(type,
         input_image,
         instruction: str,
         negative_prompt: str = "",
         enhance_prompt: bool = True,
         steps: int = 25,
         randomize_seed: bool = True,
         seed: int = 2404,
         width: int = 1024,
         height: int = 1024,
         guidance_scale: float = 6,
         progress=gr.Progress(track_tqdm=True)
):
if type=="Image Editing" :
input_image = Image.open(input_image).convert('RGB')
if randomize_seed:
seed = random.randint(0, 999999)
generator = torch.manual_seed(seed)
output_image = pipe_edit(
instruction, negative_prompt=negative_prompt, image=input_image,
guidance_scale=guidance_scale, image_guidance_scale=1.5,
width = input_image.width, height = input_image.height,
num_inference_steps=steps, generator=generator, output_type="latent",
).images
refine = refiner(
prompt=f"{instruction}, 4k, hd, high quality, masterpiece",
negative_prompt = negative_prompt,
guidance_scale=7.5,
num_inference_steps=steps,
image=output_image,
generator=generator,
).images[0]
return seed, refine
else :
if randomize_seed:
seed = random.randint(0, 999999)
generator = torch.Generator().manual_seed(seed)
if enhance_prompt:
print(f"BEFORE: {instruction} ")
instruction = promptifier(instruction)
print(f"AFTER: {instruction} ")
image = pipe(
prompt = instruction,
negative_prompt = negative_prompt,
width = width,
height = height,
            num_inference_steps = int(steps / 5),  # FLUX.1-schnell needs only a few steps; diffusers expects an int
generator = generator,
guidance_scale=0.0,
output_type="latent"
).images
refine = refiner( prompt=instruction,
negative_prompt = negative_prompt,
guidance_scale = 7.5,
num_inference_steps= steps,
image=image, generator=generator,
).images[0]
return seed, refine
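
# Zero-shot text classifier; no model is pinned, so the Hub's recommended model for the task is used.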
client = InferenceClient()
# Prompt classifier
def response(instruction, input_image=None):
    if input_image is None:
        output = "Image Generation"
    else:
        try:
            labels = ["Image Editing", "Image Generation"]
            classification = client.zero_shot_classification(instruction, labels, multi_label=True)
            # Take the top-ranked label and map it onto one of the two task names.
            output = str(classification[0])
            if "Editing" in output:
                output = "Image Editing"
            else:
                output = "Image Generation"
        except Exception:
            # If the classifier call fails, an uploaded image implies an editing request.
            output = "Image Editing"
    return output
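
# Narrow, centered layout; the Gradio footer is hidden.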
css = '''
.gradio-container{max-width: 700px !important}
h1{text-align:center}
footer {
visibility: hidden
}
'''
examples=[
[
"Image Generation",
None,
"A luxurious supercar with a unique design. The car should have a pearl white finish, and gold accents. 4k, realistic.",
],
[
"Image Editing",
"./supercar.png",
"make it red",
],
[
"Image Editing",
"./red_car.png",
"add some snow",
],
[
"Image Generation",
None,
"An alien grasping a sign board contain word 'ALIEN' with Neon Glow, neon, futuristic, neonpunk, neon lights",
],
[
"Image Generation",
None,
"Beautiful Eiffel Tower at Night",
],
]
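
# UI: a single instruction box drives both tasks; the Task dropdown is auto-set by `response` below.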
with gr.Blocks(css=css) as demo:
gr.Markdown("# Image Generation , Image Editing \n ### Note: First image generation takes time")
with gr.Row():
instruction = gr.Textbox(lines=1, label="Instruction", interactive=True)
generate_button = gr.Button("Run", scale=0)
with gr.Row():
type = gr.Dropdown(["Image Generation","Image Editing"], label="Task", value="Image Generation",interactive=True)
enhance_prompt = gr.Checkbox(label="Enhance prompt", value=False, scale=0)
with gr.Row():
input_image = gr.Image(label="Image", type='filepath', interactive=True)
with gr.Row():
guidance_scale = gr.Number(value=6.0, step=0.1, label="Guidance Scale", interactive=True)
steps = gr.Number(value=25, step=1, label="Steps", interactive=True)
with gr.Accordion("Advanced options", open=False):
with gr.Row():
negative_prompt = gr.Text(
label="Negative prompt",
max_lines=1,
value="(deformed, distorted, disfigured:1.3), poorly drawn, bad anatomy, wrong anatomy, extra limb, missing limb, floating limbs, (mutated hands and fingers:1.4), disconnected limbs, ugly, disgusting, blurry, amputation,(face asymmetry, eyes asymmetry, deformed eyes, open mouth)",
visible=True)
with gr.Row():
width = gr.Slider( label="Width", minimum=256, maximum=2048, step=64, value=1024)
height = gr.Slider( label="Height", minimum=256, maximum=2048, step=64, value=1024)
with gr.Row():
randomize_seed = gr.Checkbox(label="Randomize Seed", value = True, interactive=True )
seed = gr.Number(value=2404, step=1, label="Seed", interactive=True)
gr.Examples(
examples=examples,
inputs=[type,input_image, instruction],
fn=king,
outputs=[input_image],
cache_examples=False,
)
# gr.Markdown(help_text)
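    # Auto-detect the task whenever the instruction text or the uploaded image changes.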
instruction.change(fn=response, inputs=[instruction,input_image], outputs=type, queue=False)
input_image.upload(fn=response, inputs=[instruction,input_image], outputs=type, queue=False)
gr.on(triggers=[
generate_button.click,
instruction.submit
],
fn=king,
inputs=[type,
input_image,
instruction,
negative_prompt,
enhance_prompt,
steps,
randomize_seed,
seed,
width,
height,
guidance_scale,
],
outputs=[seed, input_image],
api_name = "image_gen_pro",
queue=False
)
demo.queue(max_size=500).launch()