amarax committed
Commit 94324d4
1 parent: b4af587

Update the app description, and switch the base diffuser to johnslegers/epic-diffusion-v1.1.

Files changed (2):
  1. app.py (+2 -3)
  2. visual_foundation_models.py (+12 -10)
app.py CHANGED

@@ -155,9 +155,7 @@ if OPENAI_API_KEY:
 with gr.Blocks(css="#chatbot {overflow:auto; height:500px;}") as demo:
     gr.Markdown("<h3><center>Visual ChatGPT</center></h3>")
     gr.Markdown(
-        """This is a demo to the work [Visual ChatGPT: Talking, Drawing and Editing with Visual Foundation Models](https://github.com/microsoft/visual-chatgpt).<br>
-        This space connects ChatGPT and a series of Visual Foundation Models to enable sending and receiving images during chatting.<br>
-        This space currently only supports English (目前只支持英文对话, 中文正在开发中).<br>
+        """This is based on the [demo](https://huggingface.co/spaces/microsoft/visual_chatgpt) to the work [Visual ChatGPT: Talking, Drawing and Editing with Visual Foundation Models](https://github.com/microsoft/visual-chatgpt).<br>
         """
     )
@@ -214,6 +212,7 @@ with gr.Blocks(css="#chatbot {overflow:auto; height:500px;}") as demo:
     run.click(lambda: "", None, txt)
 
     btn.upload(bot.run_image, [btn, state, txt], [chatbot, state, txt])
+
     clear.click(bot.memory.clear)
     clear.click(lambda: [], None, chatbot)
     clear.click(lambda: [], None, state)
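
The app.py hunks change only the description markdown and a blank line, but the surrounding context shows the Gradio event wiring the file relies on. Below is a minimal, self-contained sketch of that wiring pattern; the respond handler is a hypothetical stand-in for the app's bot, and only the component names and handler signatures mirror the diff context:

import gradio as gr

# Hypothetical stand-in for the app's bot: echoes the prompt back.
def respond(history, state, text):
    history = history + [(text, f"echo: {text}")]
    return history, state, ""

with gr.Blocks(css="#chatbot {overflow:auto; height:500px;}") as demo:
    chatbot = gr.Chatbot(elem_id="chatbot")
    state = gr.State([])
    txt = gr.Textbox(placeholder="Type a message")
    run = gr.Button("Run")
    clear = gr.Button("Clear")

    # A button can register several handlers; each fires on the same click.
    run.click(respond, [chatbot, state, txt], [chatbot, state, txt])
    run.click(lambda: "", None, txt)        # also blank the textbox
    clear.click(lambda: [], None, chatbot)  # reset the visible history
    clear.click(lambda: [], None, state)    # reset the conversation state

demo.launch()

Registering multiple handlers on one button, as the diff's clear.click lines do, keeps each reset concern (agent memory, displayed history, state) separate.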
visual_foundation_models.py CHANGED

@@ -16,6 +16,8 @@ from PIL import Image
 import numpy as np
 from pytorch_lightning import seed_everything
 
+base_diffuser = "johnslegers/epic-diffusion-v1.1"
+
 def prompts(name, description):
     def decorator(func):
         func.name = name
@@ -145,10 +147,10 @@ class Text2Image:
         print(f"Initializing Text2Image to {device}")
         self.device = device
         self.torch_dtype = torch.float16 if 'cuda' in device else torch.float32
-        self.pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5",
+        self.pipe = StableDiffusionPipeline.from_pretrained(base_diffuser,
                                                             torch_dtype=self.torch_dtype)
         self.pipe.to(device)
-        self.a_prompt = 'best quality, extremely detailed'
+        self.a_prompt = 'digital art, highly detailed, intricate, sharp focus, Trending on Artstation, deviantart, unreal engine 5, 4K UHD image'
         self.n_prompt = 'longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, ' \
                         'fewer digits, cropped, worst quality, low quality'
 
@@ -217,7 +219,7 @@ class CannyText2Image:
         self.controlnet = ControlNetModel.from_pretrained("fusing/stable-diffusion-v1-5-controlnet-canny",
                                                           torch_dtype=self.torch_dtype)
         self.pipe = StableDiffusionControlNetPipeline.from_pretrained(
-            "runwayml/stable-diffusion-v1-5", controlnet=self.controlnet, safety_checker=None,
+            base_diffuser, controlnet=self.controlnet, safety_checker=None,
             torch_dtype=self.torch_dtype)
         self.pipe.scheduler = UniPCMultistepScheduler.from_config(self.pipe.scheduler.config)
         self.pipe.to(device)
@@ -273,7 +275,7 @@ class LineText2Image:
         self.controlnet = ControlNetModel.from_pretrained("fusing/stable-diffusion-v1-5-controlnet-mlsd",
                                                           torch_dtype=self.torch_dtype)
         self.pipe = StableDiffusionControlNetPipeline.from_pretrained(
-            "runwayml/stable-diffusion-v1-5", controlnet=self.controlnet, safety_checker=None,
+            base_diffuser, controlnet=self.controlnet, safety_checker=None,
             torch_dtype=self.torch_dtype
         )
         self.pipe.scheduler = UniPCMultistepScheduler.from_config(self.pipe.scheduler.config)
@@ -331,7 +333,7 @@ class HedText2Image:
         self.controlnet = ControlNetModel.from_pretrained("fusing/stable-diffusion-v1-5-controlnet-hed",
                                                           torch_dtype=self.torch_dtype)
         self.pipe = StableDiffusionControlNetPipeline.from_pretrained(
-            "runwayml/stable-diffusion-v1-5", controlnet=self.controlnet, safety_checker=None,
+            base_diffuser, controlnet=self.controlnet, safety_checker=None,
             torch_dtype=self.torch_dtype
         )
         self.pipe.scheduler = UniPCMultistepScheduler.from_config(self.pipe.scheduler.config)
@@ -389,7 +391,7 @@ class ScribbleText2Image:
         self.controlnet = ControlNetModel.from_pretrained("fusing/stable-diffusion-v1-5-controlnet-scribble",
                                                           torch_dtype=self.torch_dtype)
         self.pipe = StableDiffusionControlNetPipeline.from_pretrained(
-            "runwayml/stable-diffusion-v1-5", controlnet=self.controlnet, safety_checker=None,
+            base_diffuser, controlnet=self.controlnet, safety_checker=None,
             torch_dtype=self.torch_dtype
         )
         self.pipe.scheduler = UniPCMultistepScheduler.from_config(self.pipe.scheduler.config)
@@ -444,7 +446,7 @@ class PoseText2Image:
         self.controlnet = ControlNetModel.from_pretrained("fusing/stable-diffusion-v1-5-controlnet-openpose",
                                                           torch_dtype=self.torch_dtype)
         self.pipe = StableDiffusionControlNetPipeline.from_pretrained(
-            "runwayml/stable-diffusion-v1-5", controlnet=self.controlnet, safety_checker=None,
+            base_diffuser, controlnet=self.controlnet, safety_checker=None,
             torch_dtype=self.torch_dtype)
         self.pipe.scheduler = UniPCMultistepScheduler.from_config(self.pipe.scheduler.config)
         self.pipe.to(device)
@@ -551,7 +553,7 @@ class SegText2Image:
         self.controlnet = ControlNetModel.from_pretrained("fusing/stable-diffusion-v1-5-controlnet-seg",
                                                           torch_dtype=self.torch_dtype)
         self.pipe = StableDiffusionControlNetPipeline.from_pretrained(
-            "runwayml/stable-diffusion-v1-5", controlnet=self.controlnet, safety_checker=None,
+            base_diffuser, controlnet=self.controlnet, safety_checker=None,
             torch_dtype=self.torch_dtype)
         self.pipe.scheduler = UniPCMultistepScheduler.from_config(self.pipe.scheduler.config)
         self.pipe.to(device)
@@ -610,7 +612,7 @@ class DepthText2Image:
         self.controlnet = ControlNetModel.from_pretrained(
             "fusing/stable-diffusion-v1-5-controlnet-depth", torch_dtype=self.torch_dtype)
         self.pipe = StableDiffusionControlNetPipeline.from_pretrained(
-            "runwayml/stable-diffusion-v1-5", controlnet=self.controlnet, safety_checker=None,
+            base_diffuser, controlnet=self.controlnet, safety_checker=None,
             torch_dtype=self.torch_dtype)
         self.pipe.scheduler = UniPCMultistepScheduler.from_config(self.pipe.scheduler.config)
         self.pipe.to(device)
@@ -681,7 +683,7 @@ class NormalText2Image:
         self.controlnet = ControlNetModel.from_pretrained(
             "fusing/stable-diffusion-v1-5-controlnet-normal", torch_dtype=self.torch_dtype)
         self.pipe = StableDiffusionControlNetPipeline.from_pretrained(
-            "runwayml/stable-diffusion-v1-5", controlnet=self.controlnet, safety_checker=None,
+            base_diffuser, controlnet=self.controlnet, safety_checker=None,
             torch_dtype=self.torch_dtype)
         self.pipe.scheduler = UniPCMultistepScheduler.from_config(self.pipe.scheduler.config)
         self.pipe.to(device)
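
Taken together, these hunks hoist the base checkpoint id into a single module-level constant, base_diffuser, and point it at johnslegers/epic-diffusion-v1.1 instead of runwayml/stable-diffusion-v1-5. A minimal sketch of the resulting load pattern for one of the ControlNet pipelines follows; the model ids are the ones the diff itself uses, while the standalone structure is illustrative rather than the file's actual class layout:

import torch
from diffusers import (ControlNetModel, StableDiffusionControlNetPipeline,
                       UniPCMultistepScheduler)

# Base checkpoint introduced by this commit; shared by every pipeline.
base_diffuser = "johnslegers/epic-diffusion-v1.1"

device = "cuda" if torch.cuda.is_available() else "cpu"
torch_dtype = torch.float16 if "cuda" in device else torch.float32

# Pair the shared base weights with one ControlNet variant (canny here).
controlnet = ControlNetModel.from_pretrained(
    "fusing/stable-diffusion-v1-5-controlnet-canny", torch_dtype=torch_dtype)
pipe = StableDiffusionControlNetPipeline.from_pretrained(
    base_diffuser, controlnet=controlnet, safety_checker=None,
    torch_dtype=torch_dtype)
pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)
pipe.to(device)

The swap works because these ControlNet weights expect a Stable Diffusion v1.x-compatible base, so any v1.5-derived checkpoint can be dropped into base_diffuser the same way.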