\relax
\providecommand\hyper@newdestlabel[2]{}
\providecommand\HyperFirstAtBeginDocument{\AtBeginDocument}
\HyperFirstAtBeginDocument{\ifx\hyper@anchor\@undefined
\global\let\oldcontentsline\contentsline
\gdef\contentsline#1#2#3#4{\oldcontentsline{#1}{#2}{#3}}
\global\let\oldnewlabel\newlabel
\gdef\newlabel#1#2{\newlabelxx{#1}#2}
\gdef\newlabelxx#1#2#3#4#5#6{\oldnewlabel{#1}{{#2}{#3}}}
\AtEndDocument{\ifx\hyper@anchor\@undefined
\let\contentsline\oldcontentsline
\let\newlabel\oldnewlabel
\fi}
\fi}
\global\let\hyper@last\relax
\gdef\HyperFirstAtBeginDocument#1{#1}
\providecommand\HyField@AuxAddToFields[1]{}
\providecommand\HyField@AuxAddToCoFields[2]{}
\citation{ddpm,yang2023diffusion}
\@writefile{toc}{\contentsline {section}{\numberline {1}Introduction}{1}{section.1}\protected@file@percent }
\newlabel{sec:intro}{{1}{1}{Introduction}{section.1}{}}
\newlabel{sec:intro@cref}{{[section][1][]1}{[1][1][]1}}
\citation{ddpm,yang2023diffusion}
\citation{kotelnikov2022tabddpm}
\citation{edm}
\@writefile{toc}{\contentsline {section}{\numberline {2}Related Work}{2}{section.2}\protected@file@percent }
\newlabel{sec:related}{{2}{2}{Related Work}{section.2}{}}
\newlabel{sec:related@cref}{{[section][2][]2}{[1][2][]2}}
\@writefile{toc}{\contentsline {subsection}{\numberline {2.1}Diffusion Models for Low-Dimensional Data}{2}{subsection.2.1}\protected@file@percent }
\citation{goodfellow2016deep}
\citation{vae}
\citation{gan}
\citation{pmlr-v37-sohl-dickstein15}
\citation{gan}
\citation{ddpm,yang2023diffusion}
\citation{vae}
\citation{gan}
\citation{ddpm}
\citation{pmlr-v37-sohl-dickstein15}
\citation{edm}
\citation{kotelnikov2022tabddpm}
\@writefile{toc}{\contentsline {subsection}{\numberline {2.2}Multi-Expert Approaches in Generative Models}{3}{subsection.2.2}\protected@file@percent }
\@writefile{toc}{\contentsline {subsection}{\numberline {2.3}Techniques for Improving Mode Capture}{3}{subsection.2.3}\protected@file@percent }
\@writefile{toc}{\contentsline {section}{\numberline {3}Background}{3}{section.3}\protected@file@percent }
\newlabel{sec:background}{{3}{3}{Background}{section.3}{}}
\newlabel{sec:background@cref}{{[section][3][]3}{[1][3][]3}}
\@writefile{lof}{\contentsline {figure}{\numberline {1}{\ignorespaces Comparison of KL divergence values across different runs and datasets, demonstrating the improvement achieved by our dual-expert architecture.\relax }}{4}{figure.caption.1}\protected@file@percent }
\providecommand*\caption@xref[2]{\@setref\relax\@undefined{#1}}
\newlabel{fig:kl_divergence_comparison}{{1}{4}{Comparison of KL divergence values across different runs and datasets, demonstrating the improvement achieved by our dual-expert architecture.\relax }{figure.caption.1}{}}
\newlabel{fig:kl_divergence_comparison@cref}{{[figure][1][]1}{[1][4][]4}}
\@writefile{toc}{\contentsline {subsection}{\numberline {3.1}Problem Setting}{4}{subsection.3.1}\protected@file@percent }
\@writefile{lof}{\contentsline {figure}{\numberline {2}{\ignorespaces Generated samples for the `dino' dataset across different runs, showcasing the improved quality and diversity achieved by our dual-expert architecture.\relax }}{5}{figure.caption.2}\protected@file@percent }
\newlabel{fig:dino_generated_samples}{{2}{5}{Generated samples for the `dino' dataset across different runs, showcasing the improved quality and diversity achieved by our dual-expert architecture.\relax }{figure.caption.2}{}}
\newlabel{fig:dino_generated_samples@cref}{{[figure][2][]2}{[1][4][]5}}
\@writefile{toc}{\contentsline {section}{\numberline {4}Method}{5}{section.4}\protected@file@percent }
\newlabel{sec:method}{{4}{5}{Method}{section.4}{}}
\newlabel{sec:method@cref}{{[section][4][]4}{[1][4][]5}}
\@writefile{toc}{\contentsline {section}{\numberline {5}Experimental Setup}{6}{section.5}\protected@file@percent }
\newlabel{sec:experimental}{{5}{6}{Experimental Setup}{section.5}{}}
\newlabel{sec:experimental@cref}{{[section][5][]5}{[1][6][]6}}
\@writefile{toc}{\contentsline {section}{\numberline {6}Results}{7}{section.6}\protected@file@percent }
\newlabel{sec:results}{{6}{7}{Results}{section.6}{}}
\newlabel{sec:results@cref}{{[section][6][]6}{[1][7][]7}}
\@writefile{lot}{\contentsline {table}{\numberline {1}{\ignorespaces Performance comparison between baseline and dual-expert models\relax }}{7}{table.caption.3}\protected@file@percent }
\newlabel{tab:performance_comparison}{{1}{7}{Performance comparison between baseline and dual-expert models\relax }{table.caption.3}{}}
\newlabel{tab:performance_comparison@cref}{{[table][1][]1}{[1][7][]7}}
\@writefile{lof}{\contentsline {figure}{\numberline {3}{\ignorespaces Training loss curves for the `dino' dataset, comparing the baseline model with different configurations of the dual-expert architecture.\relax }}{8}{figure.caption.4}\protected@file@percent }
\newlabel{fig:dino_train_loss}{{3}{8}{Training loss curves for the `dino' dataset, comparing the baseline model with different configurations of the dual-expert architecture.\relax }{figure.caption.4}{}}
\newlabel{fig:dino_train_loss@cref}{{[figure][3][]3}{[1][7][]8}}
\@writefile{lof}{\contentsline {figure}{\numberline {4}{\ignorespaces Generated samples for the `dino' dataset, comparing the baseline model with different configurations of the dual-expert architecture. The color gradient represents the gating weights, illustrating how the model specializes across different regions of the data distribution.\relax }}{8}{figure.caption.5}\protected@file@percent }
\newlabel{fig:dino_generated_samples_comparison}{{4}{8}{Generated samples for the `dino' dataset, comparing the baseline model with different configurations of the dual-expert architecture. The color gradient represents the gating weights, illustrating how the model specializes across different regions of the data distribution.\relax }{figure.caption.5}{}}
\newlabel{fig:dino_generated_samples_comparison@cref}{{[figure][4][]4}{[1][7][]8}}
\@writefile{lof}{\contentsline {figure}{\numberline {5}{\ignorespaces Distribution of gating weights for the `dino' dataset, illustrating the specialization of the two expert networks in the dual-expert architecture.\relax }}{9}{figure.caption.6}\protected@file@percent }
\newlabel{fig:dino_gating_weights}{{5}{9}{Distribution of gating weights for the `dino' dataset, illustrating the specialization of the two expert networks in the dual-expert architecture.\relax }{figure.caption.6}{}}
\newlabel{fig:dino_gating_weights@cref}{{[figure][5][]5}{[1][7][]9}}
\@writefile{lot}{\contentsline {table}{\numberline {2}{\ignorespaces Ablation study results for the `dino' dataset\relax }}{9}{table.caption.7}\protected@file@percent }
\newlabel{tab:ablation_study}{{2}{9}{Ablation study results for the `dino' dataset\relax }{table.caption.7}{}}
\newlabel{tab:ablation_study@cref}{{[table][2][]2}{[1][7][]9}}
\@writefile{toc}{\contentsline {section}{\numberline {7}Conclusion and Future Work}{9}{section.7}\protected@file@percent }
\newlabel{sec:conclusion}{{7}{9}{Conclusion and Future Work}{section.7}{}}
\newlabel{sec:conclusion@cref}{{[section][7][]7}{[1][9][]9}}
\bibstyle{iclr2024_conference}
\bibdata{references}
\bibcite{gan}{{1}{2014}{{Goodfellow et~al.}}{{Goodfellow, Pouget-Abadie, Mirza, Xu, Warde-Farley, Ozair, Courville, and Bengio}}}
\bibcite{goodfellow2016deep}{{2}{2016}{{Goodfellow et~al.}}{{Goodfellow, Bengio, and Courville}}}
\bibcite{ddpm}{{3}{2020}{{Ho et~al.}}{{Ho, Jain, and Abbeel}}}
\bibcite{edm}{{4}{2022}{{Karras et~al.}}{{Karras, Aittala, Aila, and Laine}}}
\bibcite{vae}{{5}{2014}{{Kingma \& Welling}}{{Kingma and Welling}}}
\bibcite{kotelnikov2022tabddpm}{{6}{2022}{{Kotelnikov et~al.}}{{Kotelnikov, Baranchuk, Rubachev, and Babenko}}}
\bibcite{pmlr-v37-sohl-dickstein15}{{7}{2015}{{Sohl-Dickstein et~al.}}{{Sohl-Dickstein, Weiss, Maheswaranathan, and Ganguli}}}
\bibcite{yang2023diffusion}{{8}{2023}{{Yang et~al.}}{{Yang, Zhang, Song, Hong, Xu, Zhao, Zhang, Cui, and Yang}}}
\ttl@finishall
\gdef \@abspage@last{11}