waqashayder committed
Commit 84e1500
1 Parent(s): 9f45aaa

Upload app.py

Files changed (1)
app.py +465 -0
app.py ADDED
@@ -0,0 +1,465 @@
+ #!/usr/bin/env python
+ # patch 0.04
+ # Func(): Dalle Collage moved to the Midjourney Space
+ # Pruned DalleCollage Space
+ import os
+ import random
+ import uuid
+ import json
+
+ import gradio as gr
+ import numpy as np
+ from PIL import Image
+ import spaces
+ import torch
+ from diffusers import DiffusionPipeline
+ from typing import Tuple
+
+ # Base conditions: restricted-word lists and the default negative prompt come from environment variables.
+ bad_words = json.loads(os.getenv('BAD_WORDS', "[]"))
+ bad_words_negative = json.loads(os.getenv('BAD_WORDS_NEGATIVE', "[]"))
+ default_negative = os.getenv("default_negative", "")
+
+ def check_text(prompt, negative=""):
+     """Return True if the prompt or negative prompt contains a restricted word."""
+     for i in bad_words:
+         if i in prompt:
+             return True
+     for i in bad_words_negative:
+         if i in negative:
+             return True
+     return False
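+ # With the default empty BAD_WORDS / BAD_WORDS_NEGATIVE lists, check_text() always returns False;
+ # populate those environment variables (JSON arrays of strings) to enable filtering.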
+
+ style_list = [
+     {
+         "name": "3840 x 2160",
+         "prompt": "hyper-realistic 8K image of {prompt}. ultra-detailed, lifelike, high-resolution, sharp, vibrant colors, photorealistic",
+         "negative_prompt": "cartoonish, low resolution, blurry, simplistic, abstract, deformed, ugly",
+     },
+     {
+         "name": "2560 x 1440",
+         "prompt": "hyper-realistic 4K image of {prompt}. ultra-detailed, lifelike, high-resolution, sharp, vibrant colors, photorealistic",
+         "negative_prompt": "cartoonish, low resolution, blurry, simplistic, abstract, deformed, ugly",
+     },
+     {
+         "name": "HD+",
+         "prompt": "hyper-realistic 2K image of {prompt}. ultra-detailed, lifelike, high-resolution, sharp, vibrant colors, photorealistic",
+         "negative_prompt": "cartoonish, low resolution, blurry, simplistic, abstract, deformed, ugly",
+     },
+     {
+         "name": "Style Zero",
+         "prompt": "{prompt}",
+         "negative_prompt": "",
+     },
+ ]
+
+ collage_style_list = [
+     {
+         "name": "Hi-Res",
+         "prompt": "hyper-realistic 8K image of {prompt}. ultra-detailed, lifelike, high-resolution, sharp, vibrant colors, photorealistic",
+         "negative_prompt": "cartoonish, low resolution, blurry, simplistic, abstract, deformed, ugly",
+     },
+     {
+         "name": "B & W",
+         "prompt": "black and white collage of {prompt}. monochromatic, timeless, classic, dramatic contrast",
+         "negative_prompt": "colorful, vibrant, bright, flashy",
+     },
+     {
+         "name": "Polaroid",
+         "prompt": "collage of polaroid photos featuring {prompt}. vintage style, high contrast, nostalgic, instant film aesthetic",
+         "negative_prompt": "digital, modern, low quality, blurry",
+     },
+     {
+         "name": "Watercolor",
+         "prompt": "watercolor collage of {prompt}. soft edges, translucent colors, painterly effects",
+         "negative_prompt": "digital, sharp lines, solid colors",
+     },
+     {
+         "name": "Cinematic",
+         "prompt": "cinematic collage of {prompt}. film stills, movie posters, dramatic lighting",
+         "negative_prompt": "static, lifeless, mundane",
+     },
+     {
+         "name": "Nostalgic",
+         "prompt": "nostalgic collage of {prompt}. retro imagery, vintage objects, sentimental journey",
+         "negative_prompt": "contemporary, futuristic, forward-looking",
+     },
+     {
+         "name": "Vintage",
+         "prompt": "vintage collage of {prompt}. aged paper, sepia tones, retro imagery, antique vibes",
+         "negative_prompt": "modern, contemporary, futuristic, high-tech",
+     },
+     {
+         "name": "Scrapbook",
+         "prompt": "scrapbook style collage of {prompt}. mixed media, hand-cut elements, textures, paper, stickers, doodles",
+         "negative_prompt": "clean, digital, modern, low quality",
+     },
+     {
+         "name": "NeoNGlow",
+         "prompt": "neon glow collage of {prompt}. vibrant colors, glowing effects, futuristic vibes",
+         "negative_prompt": "dull, muted colors, vintage, retro",
+     },
+     {
+         "name": "Geometric",
+         "prompt": "geometric collage of {prompt}. abstract shapes, colorful, sharp edges, modern design, high quality",
+         "negative_prompt": "blurry, low quality, traditional, dull",
+     },
+     {
+         "name": "Thematic",
+         "prompt": "thematic collage of {prompt}. cohesive theme, well-organized, matching colors, creative layout",
+         "negative_prompt": "random, messy, unorganized, clashing colors",
+     },
+     {
+         "name": "No Style",
+         "prompt": "{prompt}",
+         "negative_prompt": "",
+     },
+ ]
+
+ filters = {
+     "Vivid": {
+         "prompt": "extra vivid {prompt}",
+         "negative_prompt": "washed out, dull"
+     },
+     "Playa": {
+         "prompt": "{prompt} set in a vast playa",
+         "negative_prompt": "forest, mountains"
+     },
+     "Desert": {
+         "prompt": "{prompt} set in a desert landscape",
+         "negative_prompt": "ocean, city"
+     },
+     "West": {
+         "prompt": "{prompt} with a western theme",
+         "negative_prompt": "eastern, modern"
+     },
+     "Blush": {
+         "prompt": "{prompt} with a soft blush color palette",
+         "negative_prompt": "harsh colors, neon"
+     },
+     "Minimalist": {
+         "prompt": "{prompt} with a minimalist design",
+         "negative_prompt": "cluttered, ornate"
+     },
+     "Zero filter": {
+         "prompt": "{prompt}",
+         "negative_prompt": ""
+     },
+ }
+
+ styles = {k["name"]: (k["prompt"], k["negative_prompt"]) for k in style_list}
+ collage_styles = {k["name"]: (k["prompt"], k["negative_prompt"]) for k in collage_style_list}
+ filter_styles = {k: (v["prompt"], v["negative_prompt"]) for k, v in filters.items()}
+ STYLE_NAMES = list(styles.keys())
+ COLLAGE_STYLE_NAMES = list(collage_styles.keys())
+ FILTER_NAMES = list(filters.keys())
+ DEFAULT_STYLE_NAME = "3840 x 2160"
+ DEFAULT_COLLAGE_STYLE_NAME = "Hi-Res"
+ DEFAULT_FILTER_NAME = "Vivid"
+
+ def apply_style(style_name: str, positive: str, negative: str = "") -> Tuple[str, str]:
+     """Expand a quality-style, collage-style, or filter template around the user prompt."""
+     if style_name in styles:
+         p, n = styles[style_name]
+     elif style_name in collage_styles:
+         p, n = collage_styles[style_name]
+     elif style_name in filter_styles:
+         p, n = filter_styles[style_name]
+     else:
+         p, n = styles[DEFAULT_STYLE_NAME]
+
+     if negative:
+         n = f"{n}, {negative}" if n else negative
+     return p.replace("{prompt}", positive), n
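+ # Illustrative example: apply_style("B & W", "a city skyline") returns
+ # ("black and white collage of a city skyline. monochromatic, timeless, classic, dramatic contrast",
+ #  "colorful, vibrant, bright, flashy").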
+
+
+ DESCRIPTION = """## MidJourney
+
+ Drop your best results in the community: [rb.gy/klkbs7](http://rb.gy/klkbs7). Have you tried the Stable Hamster Space? [rb.gy/hfrm2f](http://rb.gy/hfrm2f)
+ """
+
+ if not torch.cuda.is_available():
+     DESCRIPTION += "\n<p>⚠️ Running on CPU. This demo may not work on CPU.</p>"
+
+ MAX_SEED = np.iinfo(np.int32).max
+ CACHE_EXAMPLES = torch.cuda.is_available() and os.getenv("CACHE_EXAMPLES", "0") == "1"
+ MAX_IMAGE_SIZE = int(os.getenv("MAX_IMAGE_SIZE", "2048"))
+ USE_TORCH_COMPILE = os.getenv("USE_TORCH_COMPILE", "0") == "1"
+ ENABLE_CPU_OFFLOAD = os.getenv("ENABLE_CPU_OFFLOAD", "0") == "1"
+
+ device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
+
+ if torch.cuda.is_available():
+     pipe = DiffusionPipeline.from_pretrained(
+         "----your model goes here-----",
+         torch_dtype=torch.float16,
+         use_safetensors=True,
+         add_watermarker=False,
+         variant="fp16"
+     ).to(device)
+
+     if ENABLE_CPU_OFFLOAD:
+         pipe.enable_model_cpu_offload()
+     else:
+         pipe.to(device)
+         print("Loaded on Device!")
+
+     if USE_TORCH_COMPILE:
+         pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True)
+         print("Model Compiled!")
+
+ def save_image(img, path):
+     img.save(path)
+
+ def randomize_seed_fn(seed: int, randomize_seed: bool) -> int:
+     if randomize_seed:
+         seed = random.randint(0, MAX_SEED)
+     return seed
+
+ @spaces.GPU(enable_queue=True)
+ def generate(
+     prompt: str,
+     negative_prompt: str = "",
+     use_negative_prompt: bool = False,
+     style: str = DEFAULT_STYLE_NAME,
+     collage_style: str = DEFAULT_COLLAGE_STYLE_NAME,
+     filter_name: str = DEFAULT_FILTER_NAME,
+     grid_size: str = "2x2",
+     seed: int = 0,
+     width: int = 1024,
+     height: int = 1024,
+     guidance_scale: float = 3,
+     randomize_seed: bool = False,
+     use_resolution_binning: bool = True,
+     progress=gr.Progress(track_tqdm=True),
+ ):
+     if check_text(prompt, negative_prompt):
+         raise ValueError("Prompt contains restricted words.")
+
+     # Precedence: a collage template overrides the filter, which overrides the quality style.
+     if collage_style != "No Style":
+         prompt, negative_prompt = apply_style(collage_style, prompt, negative_prompt)
+     elif filter_name != "Zero filter":
+         prompt, negative_prompt = apply_style(filter_name, prompt, negative_prompt)
+     else:
+         prompt, negative_prompt = apply_style(style, prompt, negative_prompt)
+
+     seed = int(randomize_seed_fn(seed, randomize_seed))
+     generator = torch.Generator().manual_seed(seed)
+
+     if not use_negative_prompt:
+         negative_prompt = ""  # type: ignore
+     negative_prompt += default_negative
+
+     grid_sizes = {
+         "2x1": (2, 1),
+         "1x2": (1, 2),
+         "2x2": (2, 2),
+         "2x3": (2, 3),
+         "3x2": (3, 2),
+         "1x1": (1, 1)
+     }
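+     # Each entry maps a grid name to (columns, rows); e.g. "2x3" lays out 2 images per row over 3 rows, 6 images total.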
+
+     grid_size_x, grid_size_y = grid_sizes.get(grid_size, (2, 2))
+     num_images = grid_size_x * grid_size_y
+
+     options = {
+         "prompt": prompt,
+         "negative_prompt": negative_prompt,
+         "width": width,
+         "height": height,
+         "guidance_scale": guidance_scale,
+         "num_inference_steps": 20,
+         "generator": generator,
+         "num_images_per_prompt": num_images,
+         "use_resolution_binning": use_resolution_binning,
+         "output_type": "pil",
+     }
+
+     torch.cuda.empty_cache()  # Clear GPU memory
+     images = pipe(**options).images
+
+     # Paste the generated images onto one canvas, row by row (left to right, top to bottom).
+     grid_img = Image.new('RGB', (width * grid_size_x, height * grid_size_y))
+
+     for i, img in enumerate(images[:num_images]):
+         grid_img.paste(img, ((i % grid_size_x) * width, (i // grid_size_x) * height))
+
+     unique_name = str(uuid.uuid4()) + ".png"
+     save_image(grid_img, unique_name)
+     return [unique_name], seed
+
+ examples = [
+     "Portrait of a beautiful woman in a hat, summer outfit, with freckles on her face, in a close up shot, with sunlight, outdoors, in soft light, with a beach background, looking at the camera, with high resolution photography, in the style of Hasselblad X2D50c --ar 85:128 --v 6.0 --style raw",
+     "3d image, cute girl, in the style of Pixar --ar 1:2 --stylize 750, 4K resolution highlights, Sharp focus, octane render, ray tracing, Ultra-High-Definition, 8k, UHD, HDR, (Masterpiece:1.5), (best quality:1.5)",
+     "Cold coffee in a cup bokeh --ar 85:128 --v 6.0 --style raw5, 4K, Photo-Realistic",
+     "Closeup of blonde woman depth of field, bokeh, shallow focus, minimalism, fujifilm xh2s with Canon EF lens, cinematic --ar 85:128 --v 6.0 --style raw"
+ ]
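+ # The Midjourney-style flags in these examples (--ar, --v, --style raw) are not parsed;
+ # they reach the text encoder as plain prompt text rather than being interpreted as parameters.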
302
+
303
+ css = '''
304
+ .gradio-container{max-width: 670px !important}
305
+ h1{text-align:center}
306
+ '''
307
+ with gr.Blocks(css=css, theme="bethecloud/storj_theme") as demo:
308
+ gr.Markdown(DESCRIPTION)
309
+ gr.DuplicateButton(
310
+ value="Duplicate Space for private use",
311
+ elem_id="duplicate-button",
312
+ visible=os.getenv("SHOW_DUPLICATE_BUTTON") == "1",
313
+ )
314
+ with gr.Group():
315
+ with gr.Row():
316
+ prompt = gr.Text(
317
+ label="Prompt",
318
+ show_label=False,
319
+ max_lines=1,
320
+ placeholder="Enter your prompt",
321
+ container=False,
322
+ )
323
+ run_button = gr.Button("Run")
324
+ result = gr.Gallery(label="Grid", columns=1, preview=True)
325
+
326
+
327
+ with gr.Row(visible=True):
328
+ filter_selection = gr.Radio(
329
+ show_label=True,
330
+ container=True,
331
+ interactive=True,
332
+ choices=FILTER_NAMES,
333
+ value=DEFAULT_FILTER_NAME,
334
+ label="Filter Type",
335
+ )
336
+
337
+ with gr.Row(visible=True):
338
+ style_selection = gr.Radio(
339
+ show_label=True,
340
+ container=True,
341
+ interactive=True,
342
+ choices=STYLE_NAMES,
343
+ value=DEFAULT_STYLE_NAME,
344
+ label="Quality Style",
345
+ )
346
+
347
+ with gr.Row(visible=True):
348
+ collage_style_selection = gr.Radio(
349
+ show_label=True,
350
+ container=True,
351
+ interactive=True,
352
+ choices=COLLAGE_STYLE_NAMES,
353
+ value=DEFAULT_COLLAGE_STYLE_NAME,
354
+ label="Collage Template",
355
+ )
356
+ with gr.Row(visible=True):
357
+ grid_size_selection = gr.Dropdown(
358
+ choices=["2x1", "1x2", "2x2", "2x3", "3x2", "1x1"],
359
+ value="2x2",
360
+ label="Grid Size"
361
+ )
362
+
363
+ with gr.Accordion("Advanced options", open=False):
364
+ use_negative_prompt = gr.Checkbox(label="Use negative prompt", value=True, visible=True)
365
+ negative_prompt = gr.Text(
366
+ label="Negative prompt",
367
+ max_lines=1,
368
+ placeholder="Enter a negative prompt",
369
+ value="(deformed, distorted, disfigured:1.3), poorly drawn, bad anatomy, wrong anatomy, extra limb, missing limb, floating limbs, (mutated hands and fingers:1.4), disconnected limbs, mutation, mutated, ugly, disgusting, blurry, amputation",
370
+ visible=True,
371
+ )
372
+ with gr.Row():
373
+ num_inference_steps = gr.Slider(
374
+ label="Steps",
375
+ minimum=10,
376
+ maximum=30,
377
+ step=1,
378
+ value=15,
379
+ )
380
+ with gr.Row():
381
+ num_images_per_prompt = gr.Slider(
382
+ label="Images",
383
+ minimum=1,
384
+ maximum=5,
385
+ step=1,
386
+ value=2,
387
+ )
388
+ seed = gr.Slider(
389
+ label="Seed",
390
+ minimum=0,
391
+ maximum=MAX_SEED,
392
+ step=1,
393
+ value=0,
394
+ visible=True
395
+ )
396
+ randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
397
+
398
+ with gr.Row(visible=True):
399
+ width = gr.Slider(
400
+ label="Width",
401
+ minimum=512,
402
+ maximum=2048,
403
+ step=8,
404
+ value=1024,
405
+ )
406
+ height = gr.Slider(
407
+ label="Height",
408
+ minimum=512,
409
+ maximum=2048,
410
+ step=8,
411
+ value=1024,
412
+ )
413
+
414
+ with gr.Row():
415
+ guidance_scale = gr.Slider(
416
+ label="Guidance Scale",
417
+ minimum=0.1,
418
+ maximum=20.0,
419
+ step=0.1,
420
+ value=6,
421
+ )
422
+
423
+ gr.Examples(
424
+ examples=examples,
425
+ inputs=prompt,
426
+ outputs=[result, seed],
427
+ fn=generate,
428
+ #cache_examples=True,
429
+ cache_examples=CACHE_EXAMPLES,
430
+ )
431
+
432
+ use_negative_prompt.change(
433
+ fn=lambda x: gr.update(visible=x),
434
+ inputs=use_negative_prompt,
435
+ outputs=negative_prompt,
436
+ api_name=False,
437
+ )
438
+
439
+ gr.on(
440
+ triggers=[
441
+ prompt.submit,
442
+ negative_prompt.submit,
443
+ run_button.click,
444
+ ],
445
+ fn=generate,
446
+ inputs=[
447
+ prompt,
448
+ negative_prompt,
449
+ use_negative_prompt,
450
+ style_selection,
451
+ collage_style_selection,
452
+ filter_selection,
453
+ grid_size_selection,
454
+ seed,
455
+ width,
456
+ height,
457
+ guidance_scale,
458
+ randomize_seed,
459
+ ],
460
+ outputs=[result, seed],
461
+ api_name="run",
462
+ )
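+     # Pressing Enter in either textbox or clicking Run triggers generate(). The Steps and Images
+     # sliders are not part of the inputs list, so inference always runs with 20 steps and the
+     # image count implied by the selected grid size.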
+
+ if __name__ == "__main__":
+     demo.queue(max_size=20).launch()