<!DOCTYPE html>
<html>
<head>
  <meta charset="utf-8">
  <meta name="description"
        content="🐈 CatVTON: Concatenation Is All You Need for Virtual Try-On with Diffusion Models">
  <meta name="keywords" content="">
  <meta name="viewport" content="width=device-width, initial-scale=1">

  <title>🐈 CatVTON: Concatenation Is All You Need for Virtual Try-On with Diffusion Models</title>
  <script async src="https://www.googletagmanager.com/gtag/js?id=G-PYVRSFMDRL"></script>
  <script>
    window.dataLayer = window.dataLayer || [];
    function gtag() {
      dataLayer.push(arguments);
    }
    gtag('js', new Date());
    gtag('config', 'G-PYVRSFMDRL');
  </script>


  <link href="https://fonts.googleapis.com/css?family=Google+Sans|Noto+Sans|Castoro"
        rel="stylesheet">
  <link rel="stylesheet" href="resource/css/bulma.min.css">
  <link rel="stylesheet" href="resource/css/bulma-carousel.min.css">
  <link rel="stylesheet" href="resource/css/bulma-slider.min.css">
  <link rel="stylesheet" href="resource/css/fontawesome.all.min.css">
  <link rel="stylesheet"
        href="https://cdn.jsdelivr.net/gh/jpswalsh/academicons@1/css/academicons.min.css">
  <link rel="stylesheet" href="resource/css/index.css">
  <link rel="icon" href="resource/images/favicon.svg">

  <script src="https://ajax.googleapis.com/ajax/libs/jquery/3.5.1/jquery.min.js"></script>
  <script defer src="resource/js/fontawesome.all.min.js"></script>
  <script src="resource/js/bulma-carousel.min.js"></script>
  <script src="resource/js/bulma-slider.min.js"></script>
  <script src="resource/js/index.js"></script>
</head>
<body>


<section class="hero">
  <div class="hero-body">
    <div class="container is-max-desktop">
      <div class="columns is-centered">
        <div class="column has-text-centered">
          <h1 class="title is-1 publication-title">🐈 CatVTON: Concatenation Is All You Need for Virtual Try-On with Diffusion Models</h1>
          <div class="is-size-5 publication-authors">
            <span class="author-block">
              <a href="">Zheng Chong</a><sup>1,3</sup>,</span>
            <span class="author-block">
              <a href="">Xiao Dong</a><sup>1</sup>,</span>
            <span class="author-block">
              <a href="">Haoxiang Li</a><sup>2</sup>,</span>
            <span class="author-block">
              <a href="">Shiyue Zhang</a><sup>1</sup>,
            </span>
            <span class="author-block">
              <a href="">Wenqing Zhang</a><sup>1</sup>,
            </span>
            <span class="author-block">
                <a href="">Xujie Zhang</a><sup>1</sup>,
            </span>
            <span class="author-block">
                <a href="">Hanqing Zhao</a><sup>3,4</sup>,
            </span>
            <span class="author-block">
              <a href="">Xiaodan Liang</a><sup>*1,3</sup>,
            </span>
          </div>
          <div class="is-size-5 publication-authors">
            <span class="author-block"><sup>1</sup>Sun Yat-Sen University,</span>
            <span class="author-block"><sup>2</sup>Pixocial Technology,</span>
            <span class="author-block"><sup>3</sup>Peng Cheng Laboratory,</span>
            <span class="author-block"><sup>4</sup>SIAT</span>

          </div>

          <div class="column has-text-centered">
            <div class="publication-links">
              <!-- PDF Link. -->
              <span class="link-block">
                <a href="https://arxiv.org/pdf/2407.15886"
                    class="external-link button is-normal is-rounded is-dark">
                  <span class="icon">
                      <i class="fas fa-file-pdf"></i>
                  </span>
                  <span>Paper</span>
                </a>
              </span>
              <!--  Arxiv Link. -->
              <span class="link-block">
                <a href="http://arxiv.org/abs/2407.15886"
                   class="external-link button is-normal is-rounded is-dark">
                  <span class="icon">
                      <i class="ai ai-arxiv"></i>
                  </span>
                  <span>arXiv</span>
                </a>
              </span>
              <!--  Demo Link. -->
              <span class="link-block">
                <a href="http://120.76.142.206:8888"
                   class="external-link button is-normal is-rounded is-dark">
                  <span class="icon">
                      <i class="fas fa-gamepad"></i>
                  </span>
                  <span>Demo</span>
                  </a>
              </span>
              <!--  Demo Link. -->
              <span class="link-block">
                <a href="https://huggingface.co/spaces/zhengchong/CatVTON"
                    class="external-link button is-normal is-rounded is-dark">
                  <span class="icon">
                      <i class="fas fa-gamepad"></i>
                  </span>
                  <span>Space</span>
                  </a>
              </span>
              <!-- Models Link. -->
              <span class="link-block">
                <a href="https://huggingface.co/zhengchong/CatVTON"
                   class="external-link button is-normal is-rounded is-dark">
                  <span class="icon">
                    <i class="fas fa-cube"></i>
                  </span>
                  <span>Models</span>
                </a>
              </span>
              <!-- Code Link. -->
              <span class="link-block">
                <a href="https://github.com/Zheng-Chong/CatVTON"
                   class="external-link button is-normal is-rounded is-dark">
                  <span class="icon">
                      <i class="fab fa-github"></i>
                  </span>
                  <span>Code</span>
                  </a>
              </span>
            </div>
          </div>
        </div>
      </div>
    </div>
  </div>
</section>

<section class="hero teaser">
  <div class="container is-max-desktop">
    <div class="hero-body">
      <img src="resource/img/teaser.jpg" alt="teaser">
      <p>
        CatVTON is a simple and efficient virtual try-on diffusion model with 1) Lightweight Network (899.06M parameters in total),
         2) Parameter-Efficient Training (49.57M trainable parameters), and 3) Simplified Inference (&lt; 8 GB VRAM for 1024&times;768
         resolution).
      </p>
    </div>
  </div>
</section>

<!-- Abstract -->
<section class="section">
  <div class="container is-max-desktop">
    <!-- Abstract. -->
    <div class="columns is-centered has-text-centered">
      <div class="column is-four-fifths">
        <h2 class="title is-3">Abstract</h2>
        <div class="content has-text-justified">
          <p>
            Virtual try-on methods based on diffusion models achieve realistic try-on effects, but they replicate the backbone network 
            as a ReferenceNet or rely on additional image encoders to process condition inputs, resulting in high training and 
            inference costs. 
            In this work, we rethink the necessity of ReferenceNets and image encoders and innovate the interaction between garment 
            and person, proposing CatVTON, a simple and efficient virtual try-on diffusion model. It facilitates the seamless 
            transfer of in-shop or worn garments of arbitrary categories to target persons by simply concatenating them along the spatial 
            dimension as inputs. The efficiency of our model is demonstrated in three aspects: 

            (1) Lightweight network. Only the original diffusion modules are used, without additional network modules. The text 
            encoder and the cross-attentions for text injection in the backbone are removed, further reducing the parameters by 167.02M.

            (2) Parameter-efficient training. We identify the try-on-relevant modules through experiments and achieve 
            high-quality try-on effects by training only 49.57M parameters (~5.51% of the backbone network's parameters). 

            (3) Simplified inference. CatVTON eliminates all unnecessary conditions and preprocessing steps, including 
            pose estimation, human parsing, and text input, requiring only a garment reference, a target person image, and a mask for 
            the virtual try-on process. 

            Extensive experiments demonstrate that CatVTON achieves superior qualitative and 
            quantitative results with fewer prerequisites and trainable parameters than baseline methods. Furthermore, 
            CatVTON shows good generalization in in-the-wild scenarios despite being trained on open-source datasets with only 73K samples.
          </p>
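          <p>
            As a rough, hypothetical illustration of aspect (1) (not our released code), the sketch below shows one way to tally the
            parameters that can be dropped from the backbone: the CLIP text encoder plus the cross-attention blocks (named
            <code>attn2</code> in the diffusers UNet), which together account for roughly the 167.02M parameters mentioned above.
          </p>
          <pre><code># Hypothetical sketch: count the parameters CatVTON removes from the backbone.
# Assumes the diffusers library and the Stable Diffusion v1.5 inpainting weights.
from diffusers import StableDiffusionInpaintPipeline

pipe = StableDiffusionInpaintPipeline.from_pretrained("runwayml/stable-diffusion-inpainting")

# CLIP text encoder (removed entirely, since no text prompt is used).
text_encoder_params = sum(p.numel() for p in pipe.text_encoder.parameters())

# Cross-attention modules of the UNet ("attn2" in diffusers), also removed.
cross_attn_params = sum(
    p.numel()
    for name, module in pipe.unet.named_modules()
    if name.endswith("attn2")
    for p in module.parameters()
)

print(f"Removable parameters: {(text_encoder_params + cross_attn_params) / 1e6:.2f}M")</code></pre>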
        </div>
      </div>
    </div>
    <!--/ Abstract. -->
  </div>
</section>


<section class="section">
  <div class="container is-max-desktop">
    <!-- Architecture. -->
    <div class="columns is-centered">
      <div class="column is-full-width">
        <h2 class="title is-3">Architecture</h2>
        <div class="content has-text-justified">
          <img src="resource/img/architecture.jpg">
          <p>
            Our method achieves high-quality try-on by simply concatenating the conditional image (garment or reference person) 
            with the target person image along the spatial dimension, ensuring they remain in the same feature space throughout the 
            diffusion process. Only the self-attention parameters, which provide global interaction, are learnable during training.
            The unnecessary cross-attention for text interaction is omitted, and no additional conditions, such as pose and parsing, 
            are required. These factors result in a lightweight network with minimal trainable parameters and simplified inference.
          </p>
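          <p>
            The hypothetical PyTorch snippet below is a minimal sketch of the two core ideas, spatial concatenation and
            self-attention-only training: person and garment latents are concatenated along the width, and every backbone parameter
            except the self-attention layers is frozen. Names such as <code>attn1</code> follow the diffusers UNet convention and are
            assumptions, not our exact implementation.
          </p>
          <pre><code># Minimal, hypothetical sketch of CatVTON's core ideas (not the released training code).
import torch

def make_unet_input(person_latent, garment_latent, person_mask):
    # person_latent, garment_latent: (B, C, H, W) VAE latents; person_mask: (B, 1, H, W).
    # Concatenate along the width so both images share one feature space in the UNet.
    latents = torch.cat([person_latent, garment_latent], dim=-1)          # (B, C, H, 2W)
    mask = torch.cat([person_mask, torch.zeros_like(person_mask)], dim=-1)
    return latents, mask

def freeze_all_but_self_attention(unet):
    # In diffusers UNets, self-attention modules are named "attn1"; only they stay trainable.
    for name, param in unet.named_parameters():
        param.requires_grad = ".attn1." in name
    trainable = sum(p.numel() for p in unet.parameters() if p.requires_grad)
    print(f"Trainable parameters: {trainable / 1e6:.2f}M")
    return unet</code></pre>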
          
        </div>
      </div>
    </div>
    <!-- Two Columns -->
    <div class="columns is-centered">
      <!-- Visual Effects. -->
      <div class="column">
        <div class="content">
          <h2 class="title is-3">Structure Comparison</h2>
          <p>
            We illustrate a simple structural comparison of different kinds of try-on methods below. Our approach neither relies on warped garments nor 
            requires a heavy ReferenceNet for additional garment encoding; it only needs a simple concatenation of the garment 
            and person images as input to obtain high-quality try-on results.
          </p>
          <img src="resource/img/structure.jpg">
        </div>
      </div>

      <!-- Efficiency Comparison -->
      <div class="column">
        <h2 class="title is-3">Efficiency Comparison</h2>
        <div class="columns is-centered">
          <div class="column content">
            <p>
              We represent each method by two concentric circles, 
              where the outer circle denotes the total parameters and the inner circle denotes the trainable parameters, with the 
              area proportional to the number of parameters. CatVTON achieves a lower FID on the VITON-HD dataset with fewer total 
              parameters, fewer trainable parameters, and lower memory usage.
            </p>
            <img src="resource/img/efficency.jpg">
          </div>

        </div>
      </div>
    </div>

    <!-- Demo -->
    <div class="columns is-centered">
      <div class="column is-full-width">
        <h2 class="title is-3">Online Demo</h2>
        <div class="content has-text-justified">
          <!-- <iframe src="http://120.76.142.206:8888" width="100%" height="700px"  frameborder="1/0"  name="demo"  scrolling="yes/no/auto">   
          </iframe>           -->
          <p>
            Since GitHub Pages does not support embedded web pages, please visit our <a href="http://120.76.142.206:8888">Demo</a> directly.
          </p>
        </div>
      </div>
    </div>

    <!-- Acknowledgement -->
    <div class="columns is-centered">
      <div class="column is-full-width">
        <h2 class="title is-3">Acknowledgement</h2>
        <div class="content has-text-justified">
          <p>
            Our code is modified from <a href="https://github.com/huggingface/diffusers">Diffusers</a>. 
            We adopt <a href="https://huggingface.co/runwayml/stable-diffusion-inpainting">Stable Diffusion v1.5 inpainting</a> as the base model.
            We use <a href="https://github.com/GoGoDuck912/Self-Correction-Human-Parsing/tree/master">SCHP</a> 
            and <a href="https://github.com/facebookresearch/DensePose">DensePose</a> to automatically generate masks in our 
            <a href="https://github.com/gradio-app/gradio">Gradio</a> App. 
            Thanks to all the contributors!
          </p>
        </div>
      </div>
    </div>
    <!-- "BibTeX -->

    <div class="container is-max-desktop content">
      <h2 class="title">BibTeX</h2>
      <pre><code>@misc{chong2024catvtonconcatenationneedvirtual,
  title={CatVTON: Concatenation Is All You Need for Virtual Try-On with Diffusion Models}, 
  author={Zheng Chong and Xiao Dong and Haoxiang Li and Shiyue Zhang and Wenqing Zhang and Xujie Zhang and Hanqing Zhao and Xiaodan Liang},
  year={2024},
  eprint={2407.15886},
  archivePrefix={arXiv},
  primaryClass={cs.CV},
  url={https://arxiv.org/abs/2407.15886}, 
}</code></pre>
    </div>
  </div>
</section>



<footer class="footer">
  <div class="container">
    <div class="content has-text-centered">
      <a class="icon-link" href="http://arxiv.org/abs/2407.15886" class="external-link" disabled>
        <i class="ai ai-arxiv"></i>
      </a>
      <a class="icon-link" href="https://arxiv.org/pdf/2407.15886">
        <i class="fas fa-file-pdf"></i>
      </a>
      <a class="icon-link" href="http://120.76.142.206:8888" class="external-link" disabled>
        <i class="fas fa-gamepad"></i>
      </a>
      <a class="icon-link" href="https://github.com/Zheng-Chong/CatVTON" class="external-link" disabled>
        <i class="fab fa-github"></i>
      </a>

      <a class="icon-link" href="https://huggingface.co/zhengchong/CatVTON" class="external-link" disabled>
        <i class="fas fa-cube"></i>
      </a>
      
    </div>
    <div class="columns is-centered">
      <div class="column is-8">
        <div class="content">
          <p>
            This website is modified from <a href="https://nerfies.github.io/">Nerfies</a>. Thanks for the great work!
            Their source code is available on <a href="https://github.com/nerfies/nerfies.github.io">GitHub</a>.
         </p>
        </div>
      </div>
    </div>
  </div>
</footer>

</body>
</html>