Himanshu-AT committed on
Commit
d589c4e
1 Parent(s): 9b1ec91

add .gitignore to exclude .DS_Store files

Files changed (2)
  1. .gitignore +1 -0
  2. app.py +209 -133
.gitignore ADDED
@@ -0,0 +1 @@
+.DS_Store
app.py CHANGED
@@ -7,6 +7,7 @@ from leffa.inference import LeffaInference
 from utils.garment_agnostic_mask_predictor import AutoMasker
 from utils.densepose_predictor import DensePosePredictor
 from utils.utils import resize_and_center
+import spaces
 
 import gradio as gr
 
@@ -35,7 +36,7 @@ pt_model = LeffaModel(
 )
 pt_inference = LeffaInference(model=pt_model)
 
-
+@spaces.GPU
 def leffa_predict(src_image_path, ref_image_path, control_type):
     assert control_type in [
         "virtual_tryon", "pose_transfer"], "Invalid control type: {}".format(control_type)
@@ -83,145 +84,220 @@ def leffa_predict(src_image_path, ref_image_path, control_type):
     # gen_image.save("gen_image.png")
     return np.array(gen_image)
 
-
+@spaces.GPU
 def leffa_predict_vt(src_image_path, ref_image_path):
     return leffa_predict(src_image_path, ref_image_path, "virtual_tryon")
 
+@spaces.GPU
+def leffa_predict_vt_image_url(src_image_path, ref_image_path):
+    src_image = fetch_image_from_url(src_image_path)
+    print("fetched person image")
+    ref_image = fetch_image_from_url(ref_image_path)
+    print("fetched garment image")
+
+    return leffa_predict(src_image, ref_image, "virtual_tryon")
 
+@spaces.GPU
 def leffa_predict_pt(src_image_path, ref_image_path):
     return leffa_predict(src_image_path, ref_image_path, "pose_transfer")
 
-
-if __name__ == "__main__":
-    # import sys
-
-    # src_image_path = sys.argv[1]
-    # ref_image_path = sys.argv[2]
-    # control_type = sys.argv[3]
-    # leffa_predict(src_image_path, ref_image_path, control_type)
-
-    title = "## Leffa: Learning Flow Fields in Attention for Controllable Person Image Generation"
-    link = "[📚 Paper](https://arxiv.org/abs/2412.08486) - [🔥 Demo](https://huggingface.co/spaces/franciszzj/Leffa) - [🤗 Model](https://huggingface.co/franciszzj/Leffa)"
-    description = "Leffa is a unified framework for controllable person image generation that enables precise manipulation of both appearance (i.e., virtual try-on) and pose (i.e., pose transfer)."
-    note = "Note: The models used in the demo are trained solely on academic datasets. Virtual try-on uses VITON-HD, and pose transfer uses DeepFashion."
-
-    with gr.Blocks(theme=gr.themes.Default(primary_hue=gr.themes.colors.pink, secondary_hue=gr.themes.colors.red)).queue() as demo:
+def fetch_image_from_url(url):
+    try:
+        response = requests.get(url)
+        img = Image.open(BytesIO(response.content))
+        return img
+    except Exception as e:
+        print(e)
+        return None
+
+def handle_image_input(image_input):
+    if image_input.startswith('http'):
+        return fetch_image_from_url(image_input)
+    else:
+        return Image.open(image_input)
+
+# if __name__ == "__main__":
+#     # import sys
+
+#     # src_image_path = sys.argv[1]
+#     # ref_image_path = sys.argv[2]
+#     # control_type = sys.argv[3]
+#     # leffa_predict(src_image_path, ref_image_path, control_type)
+
+#     title = "## Leffa: Learning Flow Fields in Attention for Controllable Person Image Generation"
+#     link = "[📚 Paper](https://arxiv.org/abs/2412.08486) - [🔥 Demo](https://huggingface.co/spaces/franciszzj/Leffa) - [🤗 Model](https://huggingface.co/franciszzj/Leffa)"
+#     description = "Leffa is a unified framework for controllable person image generation that enables precise manipulation of both appearance (i.e., virtual try-on) and pose (i.e., pose transfer)."
+#     note = "Note: The models used in the demo are trained solely on academic datasets. Virtual try-on uses VITON-HD, and pose transfer uses DeepFashion."
+
+#     with gr.Blocks(theme=gr.themes.Default(primary_hue=gr.themes.colors.pink, secondary_hue=gr.themes.colors.red)).queue() as demo:
+#         gr.Markdown(title)
+#         gr.Markdown(link)
+#         gr.Markdown(description)
+
+#         with gr.Tab("Control Appearance (Virtual Try-on)"):
+#             with gr.Row():
+#                 with gr.Column():
+#                     gr.Markdown("#### Person Image")
+#                     vt_src_image = gr.Image(
+#                         sources=["upload", "url"],
+#                         type="filepath",
+#                         label="Person Image",
+#                         width=512,
+#                         height=512,
+#                     )
+
+#                     gr.Examples(
+#                         inputs=vt_src_image,
+#                         examples_per_page=5,
+#                         examples=["./ckpts/examples/person1/01350_00.jpg",
+#                                   "./ckpts/examples/person1/01376_00.jpg",
+#                                   "./ckpts/examples/person1/01416_00.jpg",
+#                                   "./ckpts/examples/person1/05976_00.jpg",
+#                                   "./ckpts/examples/person1/06094_00.jpg",],
+#                     )
+
+#                 with gr.Column():
+#                     gr.Markdown("#### Garment Image")
+#                     vt_ref_image = gr.Image(
+#                         sources=["upload", "url"],
+#                         type="filepath",
+#                         label="Garment Image",
+#                         width=512,
+#                         height=512,
+#                     )
+
+#                     gr.Examples(
+#                         inputs=vt_ref_image,
+#                         examples_per_page=5,
+#                         examples=["./ckpts/examples/garment/01449_00.jpg",
+#                                   "./ckpts/examples/garment/01486_00.jpg",
+#                                   "./ckpts/examples/garment/01853_00.jpg",
+#                                   "./ckpts/examples/garment/02070_00.jpg",
+#                                   "./ckpts/examples/garment/03553_00.jpg",],
+#                     )
+
+#                 with gr.Column():
+#                     gr.Markdown("#### Generated Image")
+#                     vt_gen_image = gr.Image(
+#                         label="Generated Image",
+#                         width=512,
+#                         height=512,
+#                     )
+
+#                     with gr.Row():
+#                         vt_gen_button = gr.Button("Generate")
+
+#                     vt_gen_button.click(fn=leffa_predict_vt, inputs=[
+#                         vt_src_image, vt_ref_image], outputs=[vt_gen_image])
+
+#         with gr.Tab("Control Pose (Pose Transfer)"):
+#             with gr.Row():
+#                 with gr.Column():
+#                     gr.Markdown("#### Person Image")
+#                     pt_ref_image = gr.Image(
+#                         sources=["upload"],
+#                         type="filepath",
+#                         label="Person Image",
+#                         width=512,
+#                         height=512,
+#                     )
+
+#                     gr.Examples(
+#                         inputs=pt_ref_image,
+#                         examples_per_page=5,
+#                         examples=["./ckpts/examples/person1/01350_00.jpg",
+#                                   "./ckpts/examples/person1/01376_00.jpg",
+#                                   "./ckpts/examples/person1/01416_00.jpg",
+#                                   "./ckpts/examples/person1/05976_00.jpg",
+#                                   "./ckpts/examples/person1/06094_00.jpg",],
+#                     )
+
+#                 with gr.Column():
+#                     gr.Markdown("#### Target Pose Person Image")
+#                     pt_src_image = gr.Image(
+#                         sources=["upload"],
+#                         type="filepath",
+#                         label="Target Pose Person Image",
+#                         width=512,
+#                         height=512,
+#                     )
+
+#                     gr.Examples(
+#                         inputs=pt_src_image,
+#                         examples_per_page=5,
+#                         examples=["./ckpts/examples/person2/01850_00.jpg",
+#                                   "./ckpts/examples/person2/01875_00.jpg",
+#                                   "./ckpts/examples/person2/02532_00.jpg",
+#                                   "./ckpts/examples/person2/02902_00.jpg",
+#                                   "./ckpts/examples/person2/05346_00.jpg",],
+#                     )
+
+#                 with gr.Column():
+#                     gr.Markdown("#### Generated Image")
+#                     pt_gen_image = gr.Image(
+#                         label="Generated Image",
+#                         width=512,
+#                         height=512,
+#                     )
+
+#                     with gr.Row():
+#                         pose_transfer_gen_button = gr.Button("Generate")
+
+#                     pose_transfer_gen_button.click(fn=leffa_predict_pt, inputs=[
+#                         pt_src_image, pt_ref_image], outputs=[pt_gen_image])
+
+#         gr.Markdown(note)
+
+#     demo.launch(share=True, server_port=7860)
+
+def create_demo():
+    title = "## Virtual Try-on with URLs"
+    description = "Enter URLs for both the person image and the garment image to generate a virtual try-on result."
+
+    with gr.Blocks(theme=gr.themes.Default(primary_hue=gr.themes.colors.pink)) as demo:
         gr.Markdown(title)
-        gr.Markdown(link)
         gr.Markdown(description)
+
+        with gr.Row():
+            with gr.Column():
+                person_url = gr.Textbox(
+                    label="Person Image URL",
+                    placeholder="Enter URL of the person image..."
+                )
+                garment_url = gr.Textbox(
+                    label="Garment Image URL",
+                    placeholder="Enter URL of the garment image..."
+                )
+
+                # Example URLs
+                gr.Examples(
+                    inputs=[person_url, garment_url],
+                    examples=[
+                        ["https://example.com/person1.jpg", "https://example.com/garment1.jpg"],
+                        ["https://example.com/person2.jpg", "https://example.com/garment2.jpg"],
+                    ],
+                    label="Example URLs"
+                )
+
+                generate_btn = gr.Button("Generate Try-on")
+
+            with gr.Column():
+                output_image = gr.Image(
+                    label="Generated Result",
+                    width=512,
+                    height=512
+                )
+
+        generate_btn.click(
+            fn=virtual_tryon_from_urls,
+            inputs=[person_url, garment_url],
+            outputs=output_image
+        )
+
+        gr.Markdown("Note: This model is trained solely on academic datasets (VITON-HD).")
+
+    return demo
 
-        with gr.Tab("Control Appearance (Virtual Try-on)"):
-            with gr.Row():
-                with gr.Column():
-                    gr.Markdown("#### Person Image")
-                    vt_src_image = gr.Image(
-                        sources=["upload"],
-                        type="filepath",
-                        label="Person Image",
-                        width=512,
-                        height=512,
-                    )
-
-                    gr.Examples(
-                        inputs=vt_src_image,
-                        examples_per_page=5,
-                        examples=["./ckpts/examples/person1/01350_00.jpg",
-                                  "./ckpts/examples/person1/01376_00.jpg",
-                                  "./ckpts/examples/person1/01416_00.jpg",
-                                  "./ckpts/examples/person1/05976_00.jpg",
-                                  "./ckpts/examples/person1/06094_00.jpg",],
-                    )
-
-                with gr.Column():
-                    gr.Markdown("#### Garment Image")
-                    vt_ref_image = gr.Image(
-                        sources=["upload"],
-                        type="filepath",
-                        label="Garment Image",
-                        width=512,
-                        height=512,
-                    )
-
-                    gr.Examples(
-                        inputs=vt_ref_image,
-                        examples_per_page=5,
-                        examples=["./ckpts/examples/garment/01449_00.jpg",
-                                  "./ckpts/examples/garment/01486_00.jpg",
-                                  "./ckpts/examples/garment/01853_00.jpg",
-                                  "./ckpts/examples/garment/02070_00.jpg",
-                                  "./ckpts/examples/garment/03553_00.jpg",],
-                    )
-
-                with gr.Column():
-                    gr.Markdown("#### Generated Image")
-                    vt_gen_image = gr.Image(
-                        label="Generated Image",
-                        width=512,
-                        height=512,
-                    )
-
-                    with gr.Row():
-                        vt_gen_button = gr.Button("Generate")
-
-                    vt_gen_button.click(fn=leffa_predict_vt, inputs=[
-                        vt_src_image, vt_ref_image], outputs=[vt_gen_image])
-
-        with gr.Tab("Control Pose (Pose Transfer)"):
-            with gr.Row():
-                with gr.Column():
-                    gr.Markdown("#### Person Image")
-                    pt_ref_image = gr.Image(
-                        sources=["upload"],
-                        type="filepath",
-                        label="Person Image",
-                        width=512,
-                        height=512,
-                    )
-
-                    gr.Examples(
-                        inputs=pt_ref_image,
-                        examples_per_page=5,
-                        examples=["./ckpts/examples/person1/01350_00.jpg",
-                                  "./ckpts/examples/person1/01376_00.jpg",
-                                  "./ckpts/examples/person1/01416_00.jpg",
-                                  "./ckpts/examples/person1/05976_00.jpg",
-                                  "./ckpts/examples/person1/06094_00.jpg",],
-                    )
-
-                with gr.Column():
-                    gr.Markdown("#### Target Pose Person Image")
-                    pt_src_image = gr.Image(
-                        sources=["upload"],
-                        type="filepath",
-                        label="Target Pose Person Image",
-                        width=512,
-                        height=512,
-                    )
-
-                    gr.Examples(
-                        inputs=pt_src_image,
-                        examples_per_page=5,
-                        examples=["./ckpts/examples/person2/01850_00.jpg",
-                                  "./ckpts/examples/person2/01875_00.jpg",
-                                  "./ckpts/examples/person2/02532_00.jpg",
-                                  "./ckpts/examples/person2/02902_00.jpg",
-                                  "./ckpts/examples/person2/05346_00.jpg",],
-                    )
-
-                with gr.Column():
-                    gr.Markdown("#### Generated Image")
-                    pt_gen_image = gr.Image(
-                        label="Generated Image",
-                        width=512,
-                        height=512,
-                    )
-
-                    with gr.Row():
-                        pose_transfer_gen_button = gr.Button("Generate")
-
-                    pose_transfer_gen_button.click(fn=leffa_predict_pt, inputs=[
-                        pt_src_image, pt_ref_image], outputs=[pt_gen_image])
-
-        gr.Markdown(note)
-
-    demo.launch(share=True, server_port=7860)
+if __name__ == "__main__":
+    demo = create_demo()
+    demo.launch(share=True, server_port=7860)
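The new Generate button is wired to virtual_tryon_from_urls, which does not appear anywhere in this diff, while the added URL helpers hand PIL images straight to leffa_predict, whose unchanged signature is written around file paths. A minimal sketch of the kind of bridge the button would need — hypothetical, not part of this commit — is to download each URL to a temporary file and reuse the existing path-based entry point already defined in app.py:

# Hypothetical glue (not in this commit): fetch both URLs, save them to
# temporary files, and call the existing path-based try-on wrapper.
import tempfile
from io import BytesIO

import requests
from PIL import Image


def virtual_tryon_from_urls(person_url, garment_url):
    paths = []
    for url in (person_url, garment_url):
        resp = requests.get(url, timeout=30)
        resp.raise_for_status()
        img = Image.open(BytesIO(resp.content)).convert("RGB")
        tmp = tempfile.NamedTemporaryFile(suffix=".jpg", delete=False)
        img.save(tmp.name)  # persist so a filepath-based API can read it
        paths.append(tmp.name)
    # leffa_predict_vt(person_path, garment_path) is the existing wrapper
    # around leffa_predict(..., "virtual_tryon") defined earlier in app.py.
    return leffa_predict_vt(paths[0], paths[1])

Whether leffa_predict can also consume PIL images directly, as leffa_predict_vt_image_url assumes, depends on code outside this diff.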