# Hugging Face Space "flux.1-lumiere", running on ZeroGPU
import spaces
import gradio as gr
import torch
from diffusers import FluxPipeline, FluxTransformer2DModel, FlowMatchEulerDiscreteScheduler
from huggingface_hub import hf_hub_download
from PIL import Image
import requests
from translatepy import Translator
import numpy as np
import random
translator = Translator()
# Constants
model = "black-forest-labs/FLUX.1-dev"
CSS = """
footer {
visibility: hidden;
}
"""
MAX_SEED = np.iinfo(np.int32).max
# Initialize the transformer, pipeline, and scheduler up front when CUDA is available
if torch.cuda.is_available():
    # Load the fp8 single-file Lumiere transformer checkpoint in bfloat16
    transformer = FluxTransformer2DModel.from_single_file(
        "https://huggingface.co/aixonlab/flux.1-lumiere-alpha/blob/main/lumiere_flux_alpha-fp8.safetensors",
        torch_dtype=torch.bfloat16
    )
    # Plug the custom transformer into the base FLUX.1-dev pipeline
    pipe = FluxPipeline.from_pretrained(
        model,
        transformer=transformer,
        torch_dtype=torch.bfloat16
    )
    # Flow-match Euler scheduler with beta sigmas
    pipe.scheduler = FlowMatchEulerDiscreteScheduler.from_config(
        pipe.scheduler.config, use_beta_sigmas=True
    )
    pipe.to("cuda")
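
# (Optional sketch, not part of the original Space.) If GPU memory is tight, diffusers'
# built-in offloading could be used instead of the unconditional pipe.to("cuda") above:
#
#     pipe.enable_model_cpu_offload()  # keeps sub-modules on CPU until they are needed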

# Generation function; @spaces.GPU allocates a ZeroGPU device for each call
@spaces.GPU()
def generate_image(
    prompt,
    width=768,
    height=1024,
    scale=3.5,
    steps=24,
    seed=-1,
    nums=1,
    progress=gr.Progress(track_tqdm=True)
):
    # -1 means "pick a random seed"
    if seed == -1:
        seed = random.randint(0, MAX_SEED)
    seed = int(seed)
    generator = torch.Generator().manual_seed(seed)
    # Translate non-English prompts to English before generation
    prompt = str(translator.translate(prompt, 'English'))
    print(f'prompt: {prompt}')
    image = pipe(
        prompt,
        width=width,
        height=height,
        guidance_scale=scale,
        num_inference_steps=steps,
        generator=generator,
        output_type="pil",
        max_sequence_length=512,
        num_images_per_prompt=nums,
    ).images
    return image, seed
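
# A minimal direct-call sketch (hypothetical; the Gradio UI below normally drives this,
# and on ZeroGPU the @spaces.GPU decorator handles GPU allocation per call):
#
#     images, used_seed = generate_image("a portrait in soft studio light", nums=1)
#     images[0].save("output.png")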
examples = [
    "close up portrait, Amidst the interplay of light and shadows in a photography studio, a soft spotlight traces the contours of a face, highlighting a figure clad in a sleek black turtleneck. The garment, hugging the skin with subtle luxury, complements the Caucasian model's understated makeup, embodying minimalist elegance. Behind, a pale gray backdrop extends, its fine texture shimmering subtly in the dim light, artfully balancing the composition and focusing attention on the subject. In a palette of black, gray, and skin tones, simplicity intertwines with profundity, as every detail whispers untold stories.",
    "Caucasian, The image features a young woman of European descent standing in a studio setting, surrounded by silk. (She is wearing a silk dress), paired with a bold. Her brown hair is wet and tousled, falling naturally around her face, giving her a raw and edgy look. The woman has an intense and direct gaze, adding to the dramatic feel of the image. The backdrop is flowing silk, big silk. The overall composition blends elements of fashion and nature, creating a striking and powerful visual",
    "A black and white portrait of a young woman with a captivating gaze. She's bundled up in a cozy black sweater, hands gently cupped near her face. The monochromatic tones highlight her delicate features and the contemplative mood of the image",
    "Fashion photography portrait, close up portrait, (a woman of European descent is surrounded by lava rock and magma from head to neck, red magma hair, wearing a volcanic lava rock magma outfit coat, lava rock magma fashion costume with ruffled layers)"
]
# Gradio Interface
with gr.Blocks(css=CSS, theme="ocean") as demo:
    gr.HTML("<h1><center>flux.1-lumiere</center></h1>")
    gr.HTML("<p><center><a href='https://huggingface.co/aixonlab/flux.1-lumiere-alpha'>aixonlab/flux.1-lumiere-alpha</a></center></p>")
    with gr.Group():
        with gr.Row():
            prompt = gr.Textbox(label='Enter Your Prompt (multilingual)', scale=6)
            submit = gr.Button(scale=1, variant='primary')
    img = gr.Gallery(label="Gallery", columns=1, preview=True, height=600)
    with gr.Accordion("Advanced Options", open=False):
        with gr.Row():
            width = gr.Slider(
                label="Width",
                minimum=512,
                maximum=1280,
                step=8,
                value=768,
            )
            height = gr.Slider(
                label="Height",
                minimum=512,
                maximum=1280,
                step=8,
                value=1024,
            )
        with gr.Row():
            scale = gr.Slider(
                label="Guidance Scale",
                minimum=0,
                maximum=50,
                step=0.1,
                value=3.0,
            )
            steps = gr.Slider(
                label="Steps",
                minimum=1,
                maximum=50,
                step=1,
                value=28,
            )
        with gr.Row():
            seed = gr.Slider(
                label="Seed (-1 for random)",
                minimum=-1,
                maximum=MAX_SEED,
                step=1,
                value=0,
                visible=True
            )
            nums = gr.Slider(
                label="Number of Images",
                minimum=1,
                maximum=4,
                step=1,
                value=1,
                scale=1,
            )
    gr.Examples(
        examples=examples,
        inputs=prompt,
        outputs=[img, seed],
        fn=generate_image,
        cache_examples=True,
        cache_mode='lazy'
    )
    gr.on(
        triggers=[
            prompt.submit,
            submit.click,
        ],
        fn=generate_image,
        inputs=[
            prompt,
            width,
            height,
            scale,
            steps,
            seed,
            nums
        ],
        outputs=[img, seed],
        api_name="run",
    )
demo.queue().launch()
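
# Usage note (a sketch, assuming the Space is deployed and public): because gr.on(...)
# sets api_name="run", the app can also be called programmatically via gradio_client.
# The Space id below is hypothetical; replace it with the actual one.
#
#     from gradio_client import Client
#     client = Client("aixonlab/flux.1-lumiere")  # hypothetical Space id
#     result = client.predict("a misty forest at dawn", 768, 1024, 3.5, 28, -1, 1, api_name="/run")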