Spaces: Running on Zero
import gradio as gr
import torch
import spaces
from diffusers import FluxInpaintPipeline
from PIL import Image
# Initialize the pipeline
pipe = FluxInpaintPipeline.from_pretrained(
    "black-forest-labs/FLUX.1-dev",
    torch_dtype=torch.bfloat16
)
pipe.to("cuda")
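
# Load the In-Context LoRA weights for visual identity (logo) design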
pipe.load_lora_weights(
    "ali-vilab/In-Context-LoRA",
    weight_name="visual-identity-design.safetensors"
)
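
# Center-crop the input image to a square and resize it to target_size x target_size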
def square_center_crop(img, target_size=768):
    if img.mode in ('RGBA', 'P'):
        img = img.convert('RGB')

    width, height = img.size
    crop_size = min(width, height)

    left = (width - crop_size) // 2
    top = (height - crop_size) // 2
    right = left + crop_size
    bottom = top + crop_size

    img_cropped = img.crop((left, top, right, bottom))
    return img_cropped.resize((target_size, target_size), Image.Resampling.LANCZOS)
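
# Paste two copies of the square logo side by side to form the two-panel layout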
def duplicate_horizontally(img):
    width, height = img.size
    if width != height:
        raise ValueError(f"Input image must be square, got {width}x{height}")

    new_image = Image.new('RGB', (width * 2, height))
    new_image.paste(img, (0, 0))
    new_image.paste(img, (width, 0))
    return new_image

# Load the inpainting mask (expected to cover the right panel, which gets repainted)
mask = Image.open("mask_square.png")
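
# On a ZeroGPU Space, @spaces.GPU requests a GPU for the duration of each call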
@spaces.GPU
def generate(image, prompt_user):
    prompt_structure = "The two-panel image showcases the logo of a brand, [LEFT] the left panel is showing the logo [RIGHT] the right panel has this logo applied to "
    prompt = prompt_structure + prompt_user

    # Prepare the two-panel input: square-crop the logo and duplicate it side by side
    cropped_image = square_center_crop(image)
    logo_dupli = duplicate_horizontally(cropped_image)

    out = pipe(
        prompt=prompt,
        image=logo_dupli,
        mask_image=mask,
        guidance_scale=6,
        height=768,
        width=1536,
        num_inference_steps=28,
        max_sequence_length=256,
        strength=1
    ).images[0]

    # Return only the right panel, where the logo has been applied to the new context
    width, height = out.size
    half_width = width // 2
    image_2 = out.crop((half_width, 0, width, height))
    return image_2
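
# Validate the inputs, run generation, and return the image plus a status message for the UI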
def process_image(input_image, prompt):
    try:
        if input_image is None:
            return None, "Please upload an image first."
        if not prompt:
            return None, "Please provide a prompt."

        result = generate(input_image, prompt)
        return result, "Generation completed successfully!"
    except Exception as e:
        return None, f"Error during generation: {str(e)}"
with gr.Blocks() as demo:
    gr.Markdown("# Logo in Context")
    gr.Markdown("### In-Context LoRA + Image-to-Image, apply your logo to anything")

    with gr.Row():
        with gr.Column():
            input_image = gr.Image(
                label="Upload Logo Image",
                type="pil",
                height=384
            )
            prompt_input = gr.Textbox(
                label="Where should the logo be applied?",
                placeholder="e.g., a coffee cup on a wooden table",
                lines=2
            )
            generate_btn = gr.Button("Generate Application", variant="primary")

        with gr.Column():
            output_image = gr.Image(label="Generated Application")
            status_text = gr.Textbox(
                label="Status",
                interactive=False
            )

    with gr.Row():
        gr.Markdown("""
        ### Instructions:
        1. Upload a logo image (preferably square)
        2. Describe where you'd like to see the logo applied
        3. Click 'Generate Application' and wait for the result

        Note: The generation process might take a few moments.
        """)

    # Set up the click event (process_image returns both the image and the status text)
    generate_btn.click(
        fn=process_image,
        inputs=[input_image, prompt_input],
        outputs=[output_image, status_text]
    )
# Launch the interface
if __name__ == "__main__":
    demo.launch()