thejagstudio committed on
Commit
c12fa30
1 Parent(s): 531e233

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +5 -249
app.py CHANGED
@@ -1,3 +1,4 @@
 
1
  import json
2
  from argparse import ArgumentParser
3
 
@@ -261,253 +262,8 @@ if args.noimagesave:
261
  else:
262
  app_settings.settings.generated_images.save_image = True
263
 
264
- if not args.realtime:
265
- # To minimize realtime mode dependencies
266
- from backend.upscale.upscaler import upscale_image
267
- from frontend.cli_interactive import interactive_mode
268
 
269
- if args.gui:
270
- from frontend.gui.ui import start_gui
271
-
272
- print("Starting desktop GUI mode(Qt)")
273
- start_gui(
274
- [],
275
- app_settings,
276
- )
277
- elif args.webui:
278
- from frontend.webui.ui import start_webui
279
-
280
- print("Starting web UI mode")
281
- start_webui(
282
- args.share,
283
- )
284
- elif args.realtime:
285
- from frontend.webui.realtime_ui import start_realtime_text_to_image
286
-
287
- print("Starting realtime text to image(EXPERIMENTAL)")
288
- start_realtime_text_to_image(args.share)
289
- elif args.api:
290
- from backend.api.web import start_web_server
291
-
292
- start_web_server()
293
-
294
- else:
295
- context = get_context(InterfaceType.CLI)
296
- config = app_settings.settings
297
-
298
- if args.use_openvino:
299
- config.lcm_diffusion_setting.openvino_lcm_model_id = args.openvino_lcm_model_id
300
- else:
301
- config.lcm_diffusion_setting.lcm_model_id = args.lcm_model_id
302
-
303
- config.lcm_diffusion_setting.prompt = args.prompt
304
- config.lcm_diffusion_setting.negative_prompt = args.negative_prompt
305
- config.lcm_diffusion_setting.image_height = args.image_height
306
- config.lcm_diffusion_setting.image_width = args.image_width
307
- config.lcm_diffusion_setting.guidance_scale = args.guidance_scale
308
- config.lcm_diffusion_setting.number_of_images = args.number_of_images
309
- config.lcm_diffusion_setting.inference_steps = args.inference_steps
310
- config.lcm_diffusion_setting.strength = args.strength
311
- config.lcm_diffusion_setting.seed = args.seed
312
- config.lcm_diffusion_setting.use_openvino = args.use_openvino
313
- config.lcm_diffusion_setting.use_tiny_auto_encoder = args.use_tiny_auto_encoder
314
- config.lcm_diffusion_setting.use_lcm_lora = args.use_lcm_lora
315
- config.lcm_diffusion_setting.lcm_lora.base_model_id = args.base_model_id
316
- config.lcm_diffusion_setting.lcm_lora.lcm_lora_id = args.lcm_lora_id
317
- config.lcm_diffusion_setting.diffusion_task = DiffusionTask.text_to_image.value
318
- config.lcm_diffusion_setting.lora.enabled = False
319
- config.lcm_diffusion_setting.lora.path = args.lora
320
- config.lcm_diffusion_setting.lora.weight = args.lora_weight
321
- config.lcm_diffusion_setting.lora.fuse = True
322
- if config.lcm_diffusion_setting.lora.path:
323
- config.lcm_diffusion_setting.lora.enabled = True
324
- if args.usejpeg:
325
- config.generated_images.format = ImageFormat.JPEG.value.upper()
326
- if args.seed > -1:
327
- config.lcm_diffusion_setting.use_seed = True
328
- else:
329
- config.lcm_diffusion_setting.use_seed = False
330
- config.lcm_diffusion_setting.use_offline_model = args.use_offline_model
331
- config.lcm_diffusion_setting.use_safety_checker = args.use_safety_checker
332
-
333
- # Read custom settings from JSON file
334
- custom_settings = {}
335
- if args.custom_settings:
336
- with open(args.custom_settings) as f:
337
- custom_settings = json.load(f)
338
-
339
- # Basic ControlNet settings; if ControlNet is enabled, an image is
340
- # required even in txt2img mode
341
- config.lcm_diffusion_setting.controlnet = None
342
- controlnet_settings_from_dict(
343
- config.lcm_diffusion_setting,
344
- custom_settings,
345
- )
346
-
347
- # Interactive mode
348
- if args.interactive:
349
- # wrapper(interactive_mode, config, context)
350
- config.lcm_diffusion_setting.lora.fuse = False
351
- interactive_mode(config, context)
352
-
353
- # Start of non-interactive CLI image generation
354
- if args.img2img and args.file != "":
355
- config.lcm_diffusion_setting.init_image = Image.open(args.file)
356
- config.lcm_diffusion_setting.diffusion_task = DiffusionTask.image_to_image.value
357
- elif args.img2img and args.file == "":
358
- print("Error : You need to specify a file in img2img mode")
359
- exit()
360
- elif args.upscale and args.file == "" and args.custom_settings == None:
361
- print("Error : You need to specify a file in SD upscale mode")
362
- exit()
363
- elif (
364
- args.prompt == ""
365
- and args.file == ""
366
- and args.custom_settings == None
367
- and not args.benchmark
368
- ):
369
- print("Error : You need to provide a prompt")
370
- exit()
371
-
372
- if args.upscale:
373
- # image = Image.open(args.file)
374
- output_path = FastStableDiffusionPaths.get_upscale_filepath(
375
- args.file,
376
- 2,
377
- config.generated_images.format,
378
- )
379
- result = upscale_image(
380
- context,
381
- args.file,
382
- output_path,
383
- 2,
384
- )
385
- # Perform Tiled SD upscale (EXPERIMENTAL)
386
- elif args.sdupscale:
387
- if args.use_openvino:
388
- config.lcm_diffusion_setting.strength = 0.3
389
- upscale_settings = None
390
- if custom_settings != {}:
391
- upscale_settings = custom_settings
392
- filepath = args.file
393
- output_format = config.generated_images.format
394
- if upscale_settings:
395
- filepath = upscale_settings["source_file"]
396
- output_format = upscale_settings["output_format"].upper()
397
- output_path = FastStableDiffusionPaths.get_upscale_filepath(
398
- filepath,
399
- 2,
400
- output_format,
401
- )
402
-
403
- generate_upscaled_image(
404
- config,
405
- filepath,
406
- config.lcm_diffusion_setting.strength,
407
- upscale_settings=upscale_settings,
408
- context=context,
409
- tile_overlap=32 if config.lcm_diffusion_setting.use_openvino else 16,
410
- output_path=output_path,
411
- image_format=output_format,
412
- )
413
- exit()
414
- # If img2img argument is set and prompt is empty, use image variations mode
415
- elif args.img2img and args.prompt == "":
416
- for i in range(0, args.batch_count):
417
- generate_image_variations(
418
- config.lcm_diffusion_setting.init_image, args.strength
419
- )
420
- else:
421
-
422
- if args.benchmark:
423
- print("Initializing benchmark...")
424
- bench_lcm_setting = config.lcm_diffusion_setting
425
- bench_lcm_setting.prompt = "a cat"
426
- bench_lcm_setting.use_tiny_auto_encoder = False
427
- context.generate_text_to_image(
428
- settings=config,
429
- device=DEVICE,
430
- )
431
- latencies = []
432
-
433
- print("Starting benchmark please wait...")
434
- for _ in range(3):
435
- context.generate_text_to_image(
436
- settings=config,
437
- device=DEVICE,
438
- )
439
- latencies.append(context.latency)
440
-
441
- avg_latency = sum(latencies) / 3
442
-
443
- bench_lcm_setting.use_tiny_auto_encoder = True
444
-
445
- context.generate_text_to_image(
446
- settings=config,
447
- device=DEVICE,
448
- )
449
- latencies = []
450
- for _ in range(3):
451
- context.generate_text_to_image(
452
- settings=config,
453
- device=DEVICE,
454
- )
455
- latencies.append(context.latency)
456
-
457
- avg_latency_taesd = sum(latencies) / 3
458
-
459
- benchmark_name = ""
460
-
461
- if config.lcm_diffusion_setting.use_openvino:
462
- benchmark_name = "OpenVINO"
463
- else:
464
- benchmark_name = "PyTorch"
465
-
466
- bench_model_id = ""
467
- if bench_lcm_setting.use_openvino:
468
- bench_model_id = bench_lcm_setting.openvino_lcm_model_id
469
- elif bench_lcm_setting.use_lcm_lora:
470
- bench_model_id = bench_lcm_setting.lcm_lora.base_model_id
471
- else:
472
- bench_model_id = bench_lcm_setting.lcm_model_id
473
-
474
- benchmark_result = [
475
- ["Device", f"{DEVICE.upper()},{get_device_name()}"],
476
- ["Stable Diffusion Model", bench_model_id],
477
- [
478
- "Image Size ",
479
- f"{bench_lcm_setting.image_width}x{bench_lcm_setting.image_height}",
480
- ],
481
- [
482
- "Inference Steps",
483
- f"{bench_lcm_setting.inference_steps}",
484
- ],
485
- [
486
- "Benchmark Passes",
487
- 3,
488
- ],
489
- [
490
- "Average Latency",
491
- f"{round(avg_latency,3)} sec",
492
- ],
493
- [
494
- "Average Latency(TAESD* enabled)",
495
- f"{round(avg_latency_taesd,3)} sec",
496
- ],
497
- ]
498
- print()
499
- print(
500
- f" FastSD Benchmark - {benchmark_name:8} "
501
- )
502
- print(f"-" * 80)
503
- for benchmark in benchmark_result:
504
- print(f"{benchmark[0]:35} - {benchmark[1]}")
505
- print(f"-" * 80)
506
- print("*TAESD - Tiny AutoEncoder for Stable Diffusion")
507
-
508
- else:
509
- for i in range(0, args.batch_count):
510
- context.generate_text_to_image(
511
- settings=config,
512
- device=DEVICE,
513
- )
 
1
+ from frontend.webui.ui import start_webui
2
  import json
3
  from argparse import ArgumentParser
4
 
 
262
  else:
263
  app_settings.settings.generated_images.save_image = True
264
 
 
 
 
 
265
 
266
+ print("Starting web UI mode")
267
+ start_webui(
268
+ args.share,
269
+ )