Spaces: Running on Zero

Migrate from yapf to black

Files changed:
- .pre-commit-config.yaml +54 -35
- .style.yapf +0 -5
- .vscode/settings.json +11 -8
- app.py +64 -63
- model.py +41 -50
.pre-commit-config.yaml
CHANGED

@@ -1,36 +1,55 @@
 repos:
-- repo: https://github.com/pre-commit/pre-commit-hooks
-  [... the rest of the old pre-commit-hooks entry is illegible in this capture ...]
-- repo: https://github.com/pre-commit/mirrors-mypy
-  [... the rest of the old mirrors-mypy entry is illegible in this capture ...]
+  - repo: https://github.com/pre-commit/pre-commit-hooks
+    rev: v4.4.0
+    hooks:
+      - id: check-executables-have-shebangs
+      - id: check-json
+      - id: check-merge-conflict
+      - id: check-shebang-scripts-are-executable
+      - id: check-toml
+      - id: check-yaml
+      - id: end-of-file-fixer
+      - id: mixed-line-ending
+        args: ["--fix=lf"]
+      - id: requirements-txt-fixer
+      - id: trailing-whitespace
+  - repo: https://github.com/myint/docformatter
+    rev: v1.7.5
+    hooks:
+      - id: docformatter
+        args: ["--in-place"]
+  - repo: https://github.com/pycqa/isort
+    rev: 5.12.0
+    hooks:
+      - id: isort
+        args: ["--profile", "black"]
+  - repo: https://github.com/pre-commit/mirrors-mypy
+    rev: v1.5.1
+    hooks:
+      - id: mypy
+        args: ["--ignore-missing-imports"]
+        additional_dependencies:
+          ["types-python-slugify", "types-requests", "types-PyYAML"]
+  - repo: https://github.com/psf/black
+    rev: 23.9.1
+    hooks:
+      - id: black
+        language_version: python3.10
+        args: ["--line-length", "119"]
+  - repo: https://github.com/kynan/nbstripout
+    rev: 0.6.1
+    hooks:
+      - id: nbstripout
+        args:
+          [
+            "--extra-keys",
+            "metadata.interpreter metadata.kernelspec cell.metadata.pycharm",
+          ]
+  - repo: https://github.com/nbQA-dev/nbQA
+    rev: 1.7.0
+    hooks:
+      - id: nbqa-black
+      - id: nbqa-pyupgrade
+        args: ["--py37-plus"]
+      - id: nbqa-isort
+        args: ["--float-to-top"]
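The isort hook is pinned to its black profile so that import wrapping and black's own style agree; the VS Code settings below pass the same flag. A minimal sketch of the difference, written as if the import line were too long to fit on one line (illustrative stdlib imports, not code from this repo):

# isort's default grid style aligns continuation names under the parenthesis:
from collections import (OrderedDict,
                         defaultdict)

# With --profile black: vertical hanging indent plus a trailing comma.
from collections import (
    OrderedDict,
    defaultdict,
)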
.style.yapf
DELETED

@@ -1,5 +0,0 @@
-[style]
-based_on_style = pep8
-blank_line_before_nested_class_or_def = false
-spaces_before_comment = 2
-split_before_logical_operator = true
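The deleted yapf options and the new black settings differ mainly in how long calls are wrapped, which is what most of the Python hunks below change. A self-contained sketch of the two styles (stub function and values, not code from this repo):

# Stub inputs and a stub call so the snippet runs standalone.
def pipe(**kwargs):
    return kwargs

num_steps, guidance_scale, generator = 20, 8.0, None

# yapf (based_on_style = pep8, default 79-column limit) wraps the call and
# aligns continuation arguments under the opening parenthesis:
sample = pipe(num_inference_steps=num_steps,
              guidance_scale=guidance_scale,
              generator=generator)

# black at --line-length 119 keeps the same call on one line, since it fits:
sample = pipe(num_inference_steps=num_steps, guidance_scale=guidance_scale, generator=generator)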
.vscode/settings.json
CHANGED

@@ -1,18 +1,21 @@
 {
-    "python.linting.enabled": true,
-    "python.linting.flake8Enabled": true,
-    "python.linting.pylintEnabled": false,
-    "python.linting.lintOnSave": true,
-    "python.formatting.provider": "yapf",
-    "python.formatting.yapfArgs": [
-        "--style={based_on_style: pep8, indent_width: 4, blank_line_before_nested_class_or_def: false, spaces_before_comment: 2, split_before_logical_operator: true}"
-    ],
     "[python]": {
+        "editor.defaultFormatter": "ms-python.black-formatter",
         "editor.formatOnType": true,
         "editor.codeActionsOnSave": {
             "source.organizeImports": true
         }
     },
+    "black-formatter.args": [
+        "--line-length=119"
+    ],
+    "isort.args": ["--profile", "black"],
+    "flake8.args": [
+        "--max-line-length=119"
+    ],
+    "ruff.args": [
+        "--line-length=119"
+    ],
     "editor.formatOnSave": true,
     "files.insertFinalNewline": true
 }
app.py
CHANGED

@@ -11,13 +11,13 @@ import torch

 from model import Model

-DESCRIPTION = '# [UniDiffuser](https://github.com/thu-ml/unidiffuser)'
+DESCRIPTION = "# [UniDiffuser](https://github.com/thu-ml/unidiffuser)"

-SPACE_ID = os.getenv('SPACE_ID')
+SPACE_ID = os.getenv("SPACE_ID")
 if SPACE_ID is not None:
     DESCRIPTION += f'\n<p>For faster inference without waiting in queue, you may duplicate the space and upgrade to GPU in settings. <a href="https://huggingface.co/spaces/{SPACE_ID}?duplicate=true"><img style="display: inline; margin-top: 0em; margin-bottom: 0em" src="https://bit.ly/3gLdBN6" alt="Duplicate Space" /></a></p>'
 if not torch.cuda.is_available():
-    DESCRIPTION += '\n<p>Running on CPU 🥶</p>'
+    DESCRIPTION += "\n<p>Running on CPU 🥶</p>"

 model = Model()

@@ -34,50 +34,49 @@ def create_demo(mode_name: str) -> gr.Blocks:
     with gr.Blocks() as demo:
         with gr.Row():
             with gr.Column():
-                mode = gr.Dropdown(
-                    [...]
-                image = gr.Image(label=
-                    [...]
-                randomize_seed = gr.Checkbox(label=
-                    [...]
+                mode = gr.Dropdown(
+                    label="Mode",
+                    choices=[
+                        "t2i",
+                        "i2t",
+                        "joint",
+                        "i",
+                        "t",
+                        "i2t2i",
+                        "t2i2t",
+                    ],
+                    value=mode_name,
+                    visible=False,
+                )
+                prompt = gr.Text(label="Prompt", max_lines=1, visible=mode_name in ["t2i", "t2i2t"])
+                image = gr.Image(label="Input image", type="pil", visible=mode_name in ["i2t", "i2t2i"])
+                run_button = gr.Button("Run")
+                with gr.Accordion("Advanced options", open=False):
+                    seed = gr.Slider(
+                        label="Seed",
+                        minimum=0,
+                        maximum=MAX_SEED,
+                        step=1,
+                        value=0,
+                    )
+                    randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
+                    num_steps = gr.Slider(
+                        label="Steps",
+                        minimum=1,
+                        maximum=100,
+                        value=20,
+                        step=1,
+                    )
+                    guidance_scale = gr.Slider(
+                        label="Guidance Scale",
+                        minimum=0.1,
+                        maximum=30.0,
+                        value=8.0,
+                        step=0.1,
+                    )
             with gr.Column():
-                result_image = gr.Image(label='Generated image',
-                                        visible=mode_name
-                                        in ['t2i', 'i', 'joint', 'i2t2i'])
-                result_text = gr.Text(label='Generated text',
-                                      visible=mode_name
-                                      in ['i2t', 't', 'joint', 't2i2t'])
+                result_image = gr.Image(label="Generated image", visible=mode_name in ["t2i", "i", "joint", "i2t2i"])
+                result_text = gr.Text(label="Generated text", visible=mode_name in ["i2t", "t", "joint", "t2i2t"])
         inputs = [
             mode,
             prompt,
@@ -110,26 +109,28 @@ def create_demo(mode_name: str) -> gr.Blocks:
             fn=model.run,
             inputs=inputs,
             outputs=outputs,
-            api_name=f'run_{mode_name}',
+            api_name=f"run_{mode_name}",
         )
     return demo


-with gr.Blocks(css='style.css') as demo:
+with gr.Blocks(css="style.css") as demo:
     gr.Markdown(DESCRIPTION)
     with gr.Tabs():
-        with gr.TabItem([...]
-            create_demo([...]
-        [... six more TabItem/create_demo pairs and a final line; their arguments are illegible in this capture ...]
+        with gr.TabItem("text2image"):
+            create_demo("t2i")
+        with gr.TabItem("image2text"):
+            create_demo("i2t")
+        with gr.TabItem("image variation"):
+            create_demo("i2t2i")
+        with gr.TabItem("joint generation"):
+            create_demo("joint")
+        with gr.TabItem("image generation"):
+            create_demo("i")
+        with gr.TabItem("text generation"):
+            create_demo("t")
+        with gr.TabItem("text variation"):
+            create_demo("t2i2t")
+
+if __name__ == "__main__":
+    demo.queue(max_size=15).launch()
model.py
CHANGED

@@ -7,15 +7,12 @@ from diffusers import UniDiffuserPipeline

 class Model:
     def __init__(self):
-        self.device = torch.device(
-            'cuda:0' if torch.cuda.is_available() else 'cpu')
-        if self.device.type == 'cuda':
-            self.pipe = UniDiffuserPipeline.from_pretrained(
-                'thu-ml/unidiffuser-v1', torch_dtype=torch.float16)
+        self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
+        if self.device.type == "cuda":
+            self.pipe = UniDiffuserPipeline.from_pretrained("thu-ml/unidiffuser-v1", torch_dtype=torch.float16)
             self.pipe.to(self.device)
         else:
-            self.pipe = UniDiffuserPipeline.from_pretrained(
-                'thu-ml/unidiffuser-v1')
+            self.pipe = UniDiffuserPipeline.from_pretrained("thu-ml/unidiffuser-v1")

     def run(
         self,
@@ -27,61 +24,55 @@ class Model:
         guidance_scale: float = 8.0,
     ) -> tuple[PIL.Image.Image | None, str]:
         generator = torch.Generator(device=self.device).manual_seed(seed)
-        if mode == 't2i':
+        if mode == "t2i":
             self.pipe.set_text_to_image_mode()
-            sample = self.pipe(
-                [...]
+            sample = self.pipe(
+                prompt=prompt, num_inference_steps=num_steps, guidance_scale=guidance_scale, generator=generator
+            )
+            return sample.images[0], ""
-        elif mode == 'i2t':
+        elif mode == "i2t":
             self.pipe.set_image_to_text_mode()
-            sample = self.pipe(
-                [...]
-                generator=generator)
+            sample = self.pipe(
+                image=image, num_inference_steps=num_steps, guidance_scale=guidance_scale, generator=generator
+            )
             return None, sample.text[0]
-        elif mode == 'joint':
+        elif mode == "joint":
             self.pipe.set_joint_mode()
-            sample = self.pipe(num_inference_steps=num_steps,
-                               guidance_scale=guidance_scale,
-                               generator=generator)
+            sample = self.pipe(num_inference_steps=num_steps, guidance_scale=guidance_scale, generator=generator)
             return sample.images[0], sample.text[0]
-        elif mode == 'i':
+        elif mode == "i":
             self.pipe.set_image_mode()
-            sample = self.pipe(num_inference_steps=num_steps,
-                [...]
-            return sample.images[0], ''
+            sample = self.pipe(num_inference_steps=num_steps, guidance_scale=guidance_scale, generator=generator)
+            return sample.images[0], ""
-        elif mode == 't':
+        elif mode == "t":
             self.pipe.set_text_mode()
-            sample = self.pipe(num_inference_steps=num_steps,
-                               guidance_scale=guidance_scale,
-                               generator=generator)
+            sample = self.pipe(num_inference_steps=num_steps, guidance_scale=guidance_scale, generator=generator)
             return None, sample.text[0]
-        elif mode == 'i2t2i':
+        elif mode == "i2t2i":
             self.pipe.set_image_to_text_mode()
-            sample = self.pipe(
-                [...]
-                generator=generator)
+            sample = self.pipe(
+                image=image, num_inference_steps=num_steps, guidance_scale=guidance_scale, generator=generator
+            )
             self.pipe.set_text_to_image_mode()
-            sample = self.pipe(
-                [...]
+            sample = self.pipe(
+                prompt=sample.text[0],
+                num_inference_steps=num_steps,
+                guidance_scale=guidance_scale,
+                generator=generator,
+            )
+            return sample.images[0], ""
-        elif mode == 't2i2t':
+        elif mode == "t2i2t":
             self.pipe.set_text_to_image_mode()
-            sample = self.pipe(
-                [...]
-                generator=generator)
+            sample = self.pipe(
+                prompt=prompt, num_inference_steps=num_steps, guidance_scale=guidance_scale, generator=generator
+            )
             self.pipe.set_image_to_text_mode()
-            sample = self.pipe(
-                [...]
+            sample = self.pipe(
+                image=sample.images[0],
+                num_inference_steps=num_steps,
+                guidance_scale=guidance_scale,
+                generator=generator,
+            )
             return None, sample.text[0]
         else:
             raise ValueError
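For reference, a minimal smoke test of the reformatted Model class could look like the sketch below. The hunk above elides run()'s parameter list between self and guidance_scale, so the keyword names mode, prompt, image, seed, and num_steps are assumptions inferred from the function body:

# Hypothetical usage sketch; the keyword names for Model.run's elided
# parameters are inferred from its body and are not confirmed by the diff.
from model import Model

model = Model()
result_image, result_text = model.run(
    mode="t2i",  # text-to-image branch
    prompt="an astronaut riding a horse",
    image=None,  # unused in t2i mode
    seed=0,
    num_steps=20,
    guidance_scale=8.0,
)
result_image.save("t2i_sample.png")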