souljoy weifeng committed on
Commit
0d68328
0 Parent(s):

Duplicate from IDEA-CCNL/Taiyi-Stable-Diffusion-Chinese


Co-authored-by: weifeng <wf-genius@users.noreply.huggingface.co>

Files changed (4)
  1. .gitattributes +34 -0
  2. README.md +14 -0
  3. app.py +76 -0
  4. requirements.txt +6 -0
.gitattributes ADDED
@@ -0,0 +1,34 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.npy filter=lfs diff=lfs merge=lfs -text
+ *.npz filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pickle filter=lfs diff=lfs merge=lfs -text
+ *.pkl filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.wasm filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zst filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,14 @@
+ ---
+ title: Taiyi Stable Diffusion Chinese
+ emoji: 🤯
+ colorFrom: red
+ colorTo: red
+ sdk: gradio
+ sdk_version: 3.10.0
+ app_file: app.py
+ pinned: false
+ license: creativeml-openrail-m
+ duplicated_from: IDEA-CCNL/Taiyi-Stable-Diffusion-Chinese
+ ---
+
+ Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py ADDED
@@ -0,0 +1,76 @@
+ import gradio as gr
+ from PIL import Image
+ import torch
+
+ from diffusers import (
+     StableDiffusionPipeline,
+     StableDiffusionImg2ImgPipeline,
+     StableDiffusionInpaintPipeline,
+ )
+
+ device="cuda"
+ model_id = "IDEA-CCNL/Taiyi-Stable-Diffusion-1B-Chinese-v0.1"
+
+ pipe_text2img = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to(device)
+ pipe_img2img = StableDiffusionImg2ImgPipeline(**pipe_text2img.components).to(device)
+
+
+ # pipe_inpaint = StableDiffusionInpaintPipeline.from_pretrained(model_id).to(device) # work
+ # pipe_inpaint = StableDiffusionInpaintPipeline(**pipe_text2img.components) # not work
+ # def infer_text2img(prompt, guide, steps, width, height):
+ #     output = pipe_text2img(prompt, width=width, height=height, guidance_scale=guide, num_inference_steps=steps,)
+ #     image = output.images[0]
+ #     return image
+
+ def infer_text2img(prompt, guide, steps, width, height, image_in, strength):
+     if image_in is not None:
+         init_image = image_in.convert("RGB").resize((width, height))
+         output = pipe_img2img(prompt, init_image=init_image, strength=strength, width=width, height=height, guidance_scale=guide, num_inference_steps=steps)
+     else:
+         output = pipe_text2img(prompt, width=width, height=height, guidance_scale=guide, num_inference_steps=steps,)
+     image = output.images[0]
+     return image
+
+ def infer_inpaint(prompt, guide, steps, width, height, image_in):
+     init_image = image_in["image"].convert("RGB").resize((width, height))
+     mask = image_in["mask"].convert("RGB").resize((width, height))
+
+     output = pipe_inpaint(prompt, \
+         init_image=init_image, mask_image=mask, \
+         width=width, height=height, \
+         guidance_scale=7.5, num_inference_steps=20)
+     image = output.images[0]
+     return image
+
+ with gr.Blocks() as demo:
+     examples = [
+         ["飞流直下三千尺, 疑是银河落九天, 瀑布, 插画"],
+         ["东临碣石, 以观沧海, 波涛汹涌, 插画"],
+         ["孤帆远影碧空尽,惟见长江天际流,油画"],
+         ["女孩背影, 日落, 唯美插画"],
+     ]
+     with gr.Row():
+         with gr.Column(scale=1, ):
+             image_out = gr.Image(label = '输出(output)')
+         with gr.Column(scale=1, ):
+             image_in = gr.Image(source='upload', elem_id="image_upload", type="pil", label="参考图(非必须)(ref)")
+             prompt = gr.Textbox(label = '提示词(prompt)')
+             submit_btn = gr.Button("生成图像(Generate)")
+             with gr.Row(scale=0.5 ):
+                 guide = gr.Slider(2, 15, value = 7, step = 0.1, label = '文本引导强度(guidance scale)')
+                 steps = gr.Slider(10, 30, value = 20, step = 1, label = '迭代次数(inference steps)')
+                 width = gr.Slider(384, 640, value = 512, step = 64, label = '宽度(width)')
+                 height = gr.Slider(384, 640, value = 512, step = 64, label = '高度(height)')
+                 strength = gr.Slider(0, 1.0, value = 0.8, step = 0.02, label = '参考图改变程度(strength)')
+             ex = gr.Examples(examples, fn=infer_text2img, inputs=[prompt, guide, steps, width, height], outputs=image_out)
+
+     # with gr.Column(scale=1, ):
+     #     image_in = gr.Image(source='upload', tool='sketch', elem_id="image_upload", type="pil", label="Upload")
+     #     inpaint_prompt = gr.Textbox(label = '提示词(prompt)')
+     #     inpaint_btn = gr.Button("图像编辑(Inpaint)")
+     #     img2img_prompt = gr.Textbox(label = '提示词(prompt)')
+     #     img2img_btn = gr.Button("图像编辑(Inpaint)")
+     submit_btn.click(fn = infer_text2img, inputs = [prompt, guide, steps, width, height, image_in, strength], outputs = image_out)
+     # inpaint_btn.click(fn = infer_inpaint, inputs = [inpaint_prompt, width, height, image_in], outputs = image_out)
+     # img2img_btn.click(fn = infer_img2img, inputs = [img2img_prompt, width, height, image_in], outputs = image_out)
+ demo.queue(concurrency_count=1, max_size=8).launch()
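
Below is a minimal standalone sketch, not part of the commit, showing how the same Taiyi text-to-image pipeline that app.py wraps in Gradio can be driven directly. It assumes a CUDA GPU and the packages from requirements.txt; the output filename is illustrative.

```python
# Standalone sketch: load the Taiyi Chinese Stable Diffusion checkpoint and
# generate one image, mirroring the text2img path in app.py above.
import torch
from diffusers import StableDiffusionPipeline

model_id = "IDEA-CCNL/Taiyi-Stable-Diffusion-1B-Chinese-v0.1"
pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to("cuda")

# One of the example prompts from app.py ("a girl seen from behind, sunset, dreamy illustration").
image = pipe("女孩背影, 日落, 唯美插画",
             width=512, height=512,
             guidance_scale=7.0, num_inference_steps=20).images[0]
image.save("taiyi_sample.png")  # illustrative output path
```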
requirements.txt ADDED
@@ -0,0 +1,6 @@
+ --extra-index-url https://download.pytorch.org/whl/cu113
+ torch
+ torchvision
+ git+https://github.com/huggingface/diffusers.git
+ transformers
+ accelerate
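
As a quick sanity check before running the Space locally, the sketch below (an addition, not part of the commit) confirms that the cu113 torch build sees a GPU and prints the versions of the libraries listed in requirements.txt.

```python
# Environment check for the dependencies above; each module exposes __version__.
import torch, torchvision, diffusers, transformers, accelerate

print("CUDA available:", torch.cuda.is_available())
print("torch:", torch.__version__, "torchvision:", torchvision.__version__)
print("diffusers:", diffusers.__version__, "transformers:", transformers.__version__)
print("accelerate:", accelerate.__version__)
```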