import gradio as gr
import spaces
import torch
from gradio_client import Client, handle_file
from colorama import Fore, Style
from diffusers import AutoPipelineForImage2Image
from PIL import Image
# Remote Spaces used for image captioning (JoyCaption) and prompt refinement (Qwen2.5-72B)
joy_client = Client("fancyfeast/joy-caption-alpha-two")
qwen_client = Client("Qwen/Qwen2.5-72B-Instruct")

# FLUX.1-dev image-to-image pipeline; LoRA weights are swapped in on demand
pipeline = AutoPipelineForImage2Image.from_pretrained("black-forest-labs/FLUX.1-dev", torch_dtype=torch.bfloat16)
lora_ids = {
    "Realism": "XLabs-AI/flux-RealismLora",
    "Cartoonism": "aleksa-codes/flux-ghibsky-illustration",
}
def load_lora(lora_name):
    """Swap the active LoRA on the FLUX pipeline."""
    print(f"Loading LoRA model: {lora_name}")
    global pipeline
    pipeline.unload_lora_weights()
    pipeline.load_lora_weights(lora_ids[lora_name])
    pipeline.enable_model_cpu_offload()
    print(f"{Fore.GREEN}LoRA model loaded{Style.RESET_ALL}")
def describe_image(image_path):
    """Caption the input image via the JoyCaption Space; the last element of the streamed result is the full caption."""
    print(f"Describing image: {image_path}")
    image_description = joy_client.predict(
        input_image=handle_file(image_path),
        caption_type="Descriptive",
        caption_length="long",
        extra_options=[],
        name_input="",
        custom_prompt="",
        api_name="/stream_chat"
    )[-1]
    print(f"{Fore.GREEN}{image_description}{Style.RESET_ALL}")
    return image_description
def refine_prompt(image_description):
    """Ask Qwen2.5-72B to turn the raw caption into a short AI art prompt."""
    print(f"Improving prompt: {image_description}")
    qwen_prompt = f"""This is the description of the image: {image_description}
And these are some good AI art prompts:
- a cat on a windowsill gazing out at a starry night sky and distant city lights
- a fisherman casting a line into a peaceful village lake surrounded by quaint cottages
- cozy mountain cabin covered in snow, with smoke curling from the chimney and a warm, inviting light spilling through the windows
- Mykonos
- an orange Lamborghini driving down a hill road at night with a beautiful ocean view in the background, side view, no text
- a small Yorkie on a windowsill during a snowy winter night, with a warm, cozy glow from inside and soft snowflakes drifting outside
- serene Japanese garden with a koi pond and a traditional tea house, nestled under a canopy of cherry blossoms in full bloom
- the most beautiful place in the universe
Based on what I gave you, write a great short AI art prompt for me that is based on the image description above (don't write anything else, just the prompt).
"""
    refined_prompt = qwen_client.predict(
        query=qwen_prompt,
        history=[],
        system="You are Qwen, created by Alibaba Cloud. You are a helpful assistant.",
        api_name="/model_chat"
    )[1][0][-1]
    print(f"{Fore.GREEN}{refined_prompt}{Style.RESET_ALL}")
    return refined_prompt
@spaces.GPU  # ZeroGPU Space ("Running on Zero"): allocate a GPU for the duration of this call
def img2img_infer(image_path, image_description):
    """Run FLUX img2img on the uploaded image with the (refined) prompt."""
    pil_image = Image.open(image_path)
    width, height = pil_image.size
    # "GHIBSKY style" is the trigger phrase for the flux-ghibsky-illustration LoRA
    enhanced_image = pipeline(f'GHIBSKY style, {image_description}', image=pil_image).images[0]
    enhanced_image = enhanced_image.resize((width, height))
    return enhanced_image
with gr.Blocks(title="Magnific") as demo:
    with gr.Row():
        with gr.Column():
            image_path = gr.Image(label="Image", type="filepath")
            lora_dropdown = gr.Dropdown(label="LoRA Model", choices=list(lora_ids.keys()), value=None)
            describe_btn = gr.Button(value="Describe Image", variant="primary")
            with gr.Row(equal_height=True):
                image_description = gr.Textbox(label="Image Description", scale=4)
                refine_prompt_btn = gr.Button(value="Refine", variant="primary", scale=1)
            submit_btn = gr.Button(value="Submit", variant="primary")
        enhanced_image = gr.Image(label="Enhanced Image", type="pil")

    # Wire the UI events to the functions above
    lora_dropdown.change(load_lora, inputs=lora_dropdown)
    refine_prompt_btn.click(refine_prompt, inputs=image_description, outputs=image_description)
    describe_btn.click(describe_image, inputs=image_path, outputs=image_description)
    submit_btn.click(img2img_infer, inputs=[image_path, image_description], outputs=enhanced_image)

demo.queue().launch(share=False)