import gradio as gr
import numpy as np
import random
from diffusers import StableDiffusionXLPipeline, DPMSolverSinglestepScheduler
import torch
import spaces

device = "cuda"

# Load the SDXL Flash weights and move the pipeline to the GPU.
pipe = StableDiffusionXLPipeline.from_pretrained("sd-community/sdxl-flash")
pipe = pipe.to(device)
# Single-step DPM++ scheduler with "trailing" timestep spacing for few-step sampling.
pipe.scheduler = DPMSolverSinglestepScheduler.from_config(pipe.scheduler.config, timestep_spacing="trailing")

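# Largest 32-bit seed the UI accepts and the maximum width/height the size sliders allow.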
MAX_SEED = np.iinfo(np.int32).max
MAX_IMAGE_SIZE = 1024

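# Text-to-image inference. The @spaces.GPU decorator requests a ZeroGPU worker for each call,
# with up to roughly 20 seconds of GPU time per request.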
@spaces.GPU(duration=20, queue=False)
def infer(prompt, negative_prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps):
    # Draw a fresh seed when the user asked for a randomized one.
    if randomize_seed:
        seed = random.randint(0, MAX_SEED)
    generator = torch.Generator().manual_seed(seed)
    # Run the diffusion pipeline and return the single generated image.
    image = pipe(
        prompt=prompt,
        negative_prompt=negative_prompt,
        guidance_scale=guidance_scale,
        num_inference_steps=num_inference_steps,
        width=width,
        height=height,
        generator=generator,
    ).images[0]
    return image

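# Example prompts surfaced through gr.Examples below; clicking one fills the prompt box.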
examples = [
    "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k",
    "An astronaut riding a green horse",
    "A delicious ceviche cheesecake slice",
    "Alien holding board with word flash on it",
]

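# UI layout: a prompt row with a Run button, the result image, and an "Advanced Settings"
# accordion for negative prompt, seed, resolution, guidance scale, and step count.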
with gr.Blocks() as demo:
    with gr.Column():
        gr.Markdown("""# SDXL Flash
        ### Super fast text-to-image generator.
        ### <span style='color: red;'>If you are not satisfied with the results, increase the steps from 5 to 8 or 10.</span>
        ### The first generation takes longer while the model warms up; subsequent images are produced faster.""")
        with gr.Row():
            
            prompt = gr.Text(
                label="Prompt",
                show_label=False,
                max_lines=1,
                placeholder="Enter your prompt",
                container=False,
            )
            
            run_button = gr.Button("Run", scale=0)
        
        result = gr.Image(label="Result")

        with gr.Accordion("Advanced Settings", open=False):
            
            negative_prompt = gr.Text(
                label="Negative prompt",
                max_lines=1,
                placeholder="Enter a negative prompt",
                value="Ugly, malformed, noise, blur, watermark, distorted",
            )
            
            seed = gr.Slider(
                label="Seed",
                minimum=0,
                maximum=MAX_SEED,
                step=1,
                value=0,
            )
            
            randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
            
            with gr.Row():
                
                width = gr.Slider(
                    label="Width",
                    minimum=256,
                    maximum=MAX_IMAGE_SIZE,
                    step=32,
                    value=512,
                )
                
                height = gr.Slider(
                    label="Height",
                    minimum=256,
                    maximum=MAX_IMAGE_SIZE,
                    step=32,
                    value=512,
                )
            
            with gr.Row():
                
                guidance_scale = gr.Slider(
                    label="Guidance scale",
                    minimum=0.0,
                    maximum=10.0,
                    step=0.1,
                    value=3.0,
                )
                
                num_inference_steps = gr.Slider(
                    label="Number of inference steps",
                    minimum=1,
                    maximum=12,
                    step=1,
                    value=5,
                )
        
        gr.Examples(
            examples=examples,
            inputs=[prompt]
        )

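    # Wire the Run button to infer; the input list matches the function's parameter order.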
    run_button.click(
        fn=infer,
        inputs=[prompt, negative_prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps],
        outputs=[result]
    )

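# Enable the request queue so concurrent users are served one generation at a time, then launch the app.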
demo.queue().launch()