{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,5,9]],"date-time":"2025-05-09T17:26:04Z","timestamp":1746811564944,"version":"3.37.3"},"publisher-location":"New York, NY, USA","reference-count":68,"publisher":"ACM","content-domain":{"domain":["dl.acm.org"],"crossmark-restriction":true},"short-container-title":[],"published-print":{"date-parts":[[2024,12,3]]},"DOI":"10.1145\/3680528.3687625","type":"proceedings-article","created":{"date-parts":[[2024,12,3]],"date-time":"2024-12-03T08:14:37Z","timestamp":1733213677000},"page":"1-11","update-policy":"https:\/\/doi.org\/10.1145\/crossmark-policy","source":"Crossref","is-referenced-by-count":5,"title":["Fast High-Resolution Image Synthesis with Latent Adversarial Diffusion Distillation"],"prefix":"10.1145","author":[{"ORCID":"https:\/\/orcid.org\/0000-0002-3140-5743","authenticated-orcid":false,"given":"Axel","family":"Sauer","sequence":"first","affiliation":[{"name":"Black Forest Labs, Freiburg, Germany"}]},{"ORCID":"https:\/\/orcid.org\/0009-0006-4773-7554","authenticated-orcid":false,"given":"Frederic","family":"Boesel","sequence":"additional","affiliation":[{"name":"Black Forest Labs, Freiburg, Germany"}]},{"ORCID":"https:\/\/orcid.org\/0009-0007-0133-460X","authenticated-orcid":false,"given":"Tim","family":"Dockhorn","sequence":"additional","affiliation":[{"name":"Black Forest Labs, Freiburg, Germany"}]},{"ORCID":"https:\/\/orcid.org\/0009-0008-4584-0245","authenticated-orcid":false,"given":"Andreas","family":"Blattmann","sequence":"additional","affiliation":[{"name":"Black Forest Labs, Freiburg, Germany"}]},{"ORCID":"https:\/\/orcid.org\/0009-0003-5802-7492","authenticated-orcid":false,"given":"Patrick","family":"Esser","sequence":"additional","affiliation":[{"name":"Black Forest Labs, Freiburg, Germany"}]},{"ORCID":"https:\/\/orcid.org\/0009-0001-0105-1526","authenticated-orcid":false,"given":"Robin","family":"Rombach","sequence":"additional","affiliation":[{"name":"Black Forest Labs, Freiburg, Germany"}]}],"member":"320","published-online":{"date-parts":[[2024,12,3]]},"reference":[{"key":"e_1_3_3_3_2_1","unstructured":"Yogesh Balaji Seungjun Nah Xun Huang Arash Vahdat Jiaming Song Qinsheng Zhang Karsten Kreis Miika Aittala Timo Aila Samuli Laine et\u00a0al. 2022. ediff-i: Text-to-image diffusion models with an ensemble of expert denoisers. arXiv preprint arXiv:https:\/\/arXiv.org\/abs\/2211.01324 (2022)."},{"key":"e_1_3_3_3_3_1","doi-asserted-by":"crossref","unstructured":"Omer Bar-Tal Hila Chefer Omer Tov Charles Herrmann Roni Paiss Shiran Zada Ariel Ephrat Junhwa Hur Yuanzhen Li Tomer Michaeli Oliver Wang Deqing Sun Tali Dekel and Inbar Mosseri. 2024. Lumiere: A Space-Time Diffusion Model for Video Generation. arxiv:https:\/\/arXiv.org\/abs\/2401.12945\u00a0[cs.CV]","DOI":"10.1145\/3680528.3687614"},{"key":"e_1_3_3_3_4_1","unstructured":"David Berthelot Arnaud Autef Jierui Lin Dian\u00a0Ang Yap Shuangfei Zhai Siyuan Hu Daniel Zheng Walter Talbott and Eric Gu. 2023. Tract: Denoising diffusion models with transitive closure time-distillation. arXiv preprint arXiv:https:\/\/arXiv.org\/abs\/2303.04248 (2023)."},{"key":"e_1_3_3_3_5_1","unstructured":"Andreas Blattmann Tim Dockhorn Sumith Kulal Daniel Mendelevitch Maciej Kilian Dominik Lorenz Yam Levi Zion English Vikram Voleti Adam Letts et\u00a0al. 2023a. Stable video diffusion: Scaling latent video diffusion models to large datasets. 
arXiv preprint arXiv:https:\/\/arXiv.org\/abs\/2311.15127 (2023)."},{"key":"e_1_3_3_3_6_1","doi-asserted-by":"crossref","unstructured":"Andreas Blattmann Robin Rombach Huan Ling Tim Dockhorn Seung\u00a0Wook Kim Sanja Fidler and Karsten Kreis. 2023b. Align your Latents: High-Resolution Video Synthesis with Latent Diffusion Models. arxiv:https:\/\/arXiv.org\/abs\/2304.08818\u00a0[cs.CV]","DOI":"10.1109\/CVPR52729.2023.02161"},{"key":"e_1_3_3_3_7_1","volume-title":"The Second Tiny Papers Track at ICLR 2024","author":"Boesel Frederic","year":"2024","unstructured":"Frederic Boesel and Robin Rombach. 2024. Improving Image Editing Models with Generative Data Refinement. In The Second Tiny Papers Track at ICLR 2024. https:\/\/openreview.net\/forum?id=q5UrA58oyY"},{"key":"e_1_3_3_3_8_1","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52729.2023.01764"},{"key":"e_1_3_3_3_9_1","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.00951"},{"key":"e_1_3_3_3_10_1","unstructured":"Xiaoliang Dai Ji Hou Chih-Yao Ma Sam Tsai Jialiang Wang Rui Wang Peizhao Zhang Simon Vandenhende Xiaofang Wang Abhimanyu Dubey Matthew Yu Abhishek Kadian Filip Radenovic Dhruv Mahajan Kunpeng Li Yue Zhao Vladan Petrovic Mitesh\u00a0Kumar Singh Simran Motwani Yi Wen Yiwen Song Roshan Sumbaly Vignesh Ramanathan Zijian He Peter Vajda and Devi Parikh. 2023. Emu: Enhancing Image Generation Models Using Photogenic Needles in a Haystack. arxiv:https:\/\/arXiv.org\/abs\/2309.15807\u00a0[cs.CV]"},{"key":"e_1_3_3_3_11_1","unstructured":"Prafulla Dhariwal and Alex Nichol. 2021. Diffusion Models Beat GANs on Image Synthesis. arxiv:https:\/\/arXiv.org\/abs\/2105.05233\u00a0[cs.LG]"},{"key":"e_1_3_3_3_12_1","unstructured":"Tim Dockhorn Arash Vahdat and Karsten Kreis. 2022. GENIE: Higher-Order Denoising Diffusion Solvers. arxiv:https:\/\/arXiv.org\/abs\/2210.05475\u00a0[stat.ML]"},{"key":"e_1_3_3_3_13_1","doi-asserted-by":"crossref","unstructured":"Patrick Esser Johnathan Chiu Parmida Atighehchian Jonathan Granskog and Anastasis Germanidis. 2023. Structure and Content-Guided Video Synthesis with Diffusion Models. arxiv:https:\/\/arXiv.org\/abs\/2302.03011\u00a0[cs.CV]","DOI":"10.1109\/ICCV51070.2023.00675"},{"key":"e_1_3_3_3_14_1","unstructured":"Patrick Esser Sumith Kulal Andreas Blattmann Rahim Entezari Jonas M\u00fcller Harry Saini Yam Levi Dominik Lorenz Axel Sauer Frederic Boesel Dustin Podell Tim Dockhorn Zion English Kyle Lacey Alex Goodwin Yannik Marek and Robin Rombach. 2024. Scaling Rectified Flow Transformers for High-Resolution Image Synthesis. arxiv:https:\/\/arXiv.org\/abs\/2403.03206\u00a0[cs.CV]"},{"key":"e_1_3_3_3_15_1","unstructured":"Robert Geirhos Patricia Rubisch Claudio Michaelis Matthias Bethge Felix\u00a0A Wichmann and Wieland Brendel. 2018. ImageNet-trained CNNs are biased towards texture; increasing shape bias improves accuracy and robustness. ICLR (2018)."},{"key":"e_1_3_3_3_16_1","unstructured":"Jonathan Heek Emiel Hoogeboom and Tim Salimans. 2024. Multistep Consistency Models. arXiv preprint arXiv:https:\/\/arXiv.org\/abs\/2403.06807 (2024)."},{"key":"e_1_3_3_3_17_1","unstructured":"Jonathan Ho William Chan Chitwan Saharia Jay Whang Ruiqi Gao Alexey Gritsenko Diederik\u00a0P. Kingma Ben Poole Mohammad Norouzi David\u00a0J. Fleet and Tim Salimans. 2022. Imagen Video: High Definition Video Generation with Diffusion Models. arxiv:https:\/\/arXiv.org\/abs\/2210.02303\u00a0[cs.CV]"},{"key":"e_1_3_3_3_18_1","unstructured":"Jonathan Ho Ajay Jain and Pieter Abbeel. 2020. Denoising Diffusion Probabilistic Models. 
arxiv:https:\/\/arXiv.org\/abs\/2006.11239\u00a0[cs.LG]"},{"key":"e_1_3_3_3_19_1","unstructured":"Jonathan Ho and Tim Salimans. 2022. Classifier-free diffusion guidance. arXiv preprint arXiv:https:\/\/arXiv.org\/abs\/2207.12598 (2022)."},{"key":"e_1_3_3_3_20_1","unstructured":"Jordan Hoffmann Sebastian Borgeaud Arthur Mensch Elena Buchatskaya Trevor Cai Eliza Rutherford Diego de Las\u00a0Casas Lisa\u00a0Anne Hendricks Johannes Welbl Aidan Clark Tom Hennigan Eric Noland Katie Millican George van\u00a0den Driessche Bogdan Damoc Aurelia Guy Simon Osindero Karen Simonyan Erich Elsen Jack\u00a0W. Rae Oriol Vinyals and Laurent Sifre. 2022. Training Compute-Optimal Large Language Models. arxiv:https:\/\/arXiv.org\/abs\/2203.15556\u00a0[cs.CL]"},{"key":"e_1_3_3_3_21_1","unstructured":"Aapo Hyv\u00e4rinen and Peter Dayan. 2005. Estimation of non-normalized statistical models by score matching. Journal of Machine Learning Research 6 4 (2005)."},{"key":"e_1_3_3_3_22_1","unstructured":"Priyank Jaini Kevin Clark and Robert Geirhos. 2023. Intriguing properties of generative classifiers. ICLR (2023)."},{"key":"e_1_3_3_3_23_1","unstructured":"Xuan Ju Ailing Zeng Yuxuan Bian Shaoteng Liu and Qiang Xu. 2023. Direct inversion: Boosting diffusion-based editing with 3 lines of code. arXiv preprint arXiv:https:\/\/arXiv.org\/abs\/2310.01506 (2023)."},{"key":"e_1_3_3_3_24_1","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52729.2023.00976"},{"key":"e_1_3_3_3_25_1","unstructured":"Jared Kaplan Sam McCandlish Tom Henighan Tom\u00a0B. Brown Benjamin Chess Rewon Child Scott Gray Alec Radford Jeffrey Wu and Dario Amodei. 2020. Scaling Laws for Neural Language Models. arxiv:https:\/\/arXiv.org\/abs\/2001.08361\u00a0[cs.LG]"},{"key":"e_1_3_3_3_26_1","unstructured":"Tero Karras Miika Aittala Timo Aila and Samuli Laine. 2022. Elucidating the design space of diffusion-based generative models. Advances in Neural Information Processing Systems 35 (2022) 26565\u201326577."},{"key":"e_1_3_3_3_27_1","unstructured":"Shanchuan Lin Anran Wang and Xiao Yang. 2024. SDXL-Lightning: Progressive Adversarial Diffusion Distillation. arxiv:https:\/\/arXiv.org\/abs\/2402.13929\u00a0[cs.CV]"},{"key":"e_1_3_3_3_28_1","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-319-10602-1_48"},{"key":"e_1_3_3_3_29_1","volume-title":"The Eleventh International Conference on Learning Representations","author":"Lipman Yaron","year":"2023","unstructured":"Yaron Lipman, Ricky T.\u00a0Q. Chen, Heli Ben-Hamu, Maximilian Nickel, and Matthew Le. 2023. Flow Matching for Generative Modeling. In The Eleventh International Conference on Learning Representations. https:\/\/openreview.net\/forum?id=PqvMRDCJT9t"},{"key":"e_1_3_3_3_30_1","unstructured":"Xingchao Liu Chengyue Gong and Qiang Liu. 2022. Flow Straight and Fast: Learning to Generate and Transfer Data with Rectified Flow. arxiv:https:\/\/arXiv.org\/abs\/2209.03003\u00a0[cs.LG]"},{"key":"e_1_3_3_3_31_1","unstructured":"Grace Luo Lisa Dunlap Dong\u00a0Huk Park Aleksander Holynski and Trevor Darrell. 2024. Diffusion hyperfeatures: Searching through time and space for semantic correspondence. Advances in Neural Information Processing Systems 36 (2024)."},{"key":"e_1_3_3_3_32_1","unstructured":"Simian Luo Yiqin Tan Longbo Huang Jian Li and Hang Zhao. 2023a. Latent consistency models: Synthesizing high-resolution images with few-step inference. 
arXiv preprint arXiv:https:\/\/arXiv.org\/abs\/2310.04378 (2023)."},{"key":"e_1_3_3_3_33_1","unstructured":"Simian Luo Yiqin Tan Suraj Patil Daniel Gu Patrick von Platen Apolin\u00e1rio Passos Longbo Huang Jian Li and Hang Zhao. 2023b. Lcm-lora: A universal stable-diffusion acceleration module. arXiv preprint arXiv:https:\/\/arXiv.org\/abs\/2311.05556 (2023)."},{"key":"e_1_3_3_3_34_1","doi-asserted-by":"crossref","unstructured":"Chenlin Meng Robin Rombach Ruiqi Gao Diederik\u00a0P. Kingma Stefano Ermon Jonathan Ho and Tim Salimans. 2023. On Distillation of Guided Diffusion Models. arxiv:https:\/\/arXiv.org\/abs\/2210.03142\u00a0[cs.CV]","DOI":"10.1109\/CVPR52729.2023.01374"},{"key":"e_1_3_3_3_35_1","unstructured":"Maxime Oquab Timoth\u00e9e Darcet Th\u00e9o Moutakanni Huy Vo Marc Szafraniec Vasil Khalidov Pierre Fernandez Daniel Haziza Francisco Massa Alaaeldin El-Nouby et\u00a0al. 2023. Dinov2: Learning robust visual features without supervision. arXiv preprint arXiv:https:\/\/arXiv.org\/abs\/2304.07193 (2023)."},{"key":"e_1_3_3_3_36_1","unstructured":"William Peebles and Saining Xie. 2023. Scalable Diffusion Models with Transformers. arxiv:https:\/\/arXiv.org\/abs\/2212.09748\u00a0[cs.CV]"},{"key":"e_1_3_3_3_37_1","unstructured":"Dustin Podell Zion English Kyle Lacey Andreas Blattmann Tim Dockhorn Jonas M\u00fcller Joe Penna and Robin Rombach. 2023. Sdxl: Improving latent diffusion models for high-resolution image synthesis. arXiv preprint arXiv:https:\/\/arXiv.org\/abs\/2307.01952 (2023)."},{"key":"e_1_3_3_3_38_1","unstructured":"Alec Radford Jong\u00a0Wook Kim Chris Hallacy Aditya Ramesh Gabriel Goh Sandhini Agarwal Girish Sastry Amanda Askell Pamela Mishkin Jack Clark Gretchen Krueger and Ilya Sutskever. 2021. Learning Transferable Visual Models From Natural Language Supervision. arxiv:https:\/\/arXiv.org\/abs\/2103.00020\u00a0[cs.CV]"},{"key":"e_1_3_3_3_39_1","unstructured":"Rafael Rafailov Archit Sharma Eric Mitchell Stefano Ermon Christopher\u00a0D Manning and Chelsea Finn. 2023. Direct Preference Optimization: Your Language Model is Secretly a Reward Model. arXiv:https:\/\/arXiv.org\/abs\/2305.18290 (2023)."},{"key":"e_1_3_3_3_40_1","unstructured":"Aditya Ramesh Prafulla Dhariwal Alex Nichol Casey Chu and Mark Chen. 2022. Hierarchical Text-Conditional Image Generation with CLIP Latents. arxiv:https:\/\/arXiv.org\/abs\/2204.06125\u00a0[cs.CV]"},{"key":"e_1_3_3_3_41_1","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.01042"},{"key":"e_1_3_3_3_42_1","doi-asserted-by":"crossref","unstructured":"Chitwan Saharia William Chan Saurabh Saxena Lala Li Jay Whang Emily\u00a0L Denton Kamyar Ghasemipour Raphael Gontijo\u00a0Lopes Burcu Karagol\u00a0Ayan Tim Salimans et\u00a0al. 2022. Photorealistic text-to-image diffusion models with deep language understanding. Advances in neural information processing systems (2022).","DOI":"10.1145\/3528233.3530757"},{"key":"e_1_3_3_3_43_1","unstructured":"Tim Salimans and Jonathan Ho. 2022. Progressive Distillation for Fast Sampling of Diffusion Models. arxiv:https:\/\/arXiv.org\/abs\/2202.00512\u00a0[cs.LG]"},{"key":"e_1_3_3_3_44_1","unstructured":"Tim Salimans Thomas Mensink Jonathan Heek and Emiel Hoogeboom. 2024. Multistep Distillation of Diffusion Models via Moment Matching. arXiv preprint arXiv:https:\/\/arXiv.org\/abs\/2406.04103 (2024)."},{"key":"e_1_3_3_3_45_1","unstructured":"Axel Sauer Kashyap Chitta Jens M\u00fcller and Andreas Geiger. 2021. Projected gans converge faster. 
Advances in Neural Information Processing Systems 34 (2021) 17480\u201317492."},{"key":"e_1_3_3_3_46_1","first-page":"30105","volume-title":"International conference on machine learning","author":"Sauer Axel","year":"2023","unstructured":"Axel Sauer, Tero Karras, Samuli Laine, Andreas Geiger, and Timo Aila. 2023a. Stylegan-t: Unlocking the power of gans for fast large-scale text-to-image synthesis. In International conference on machine learning. PMLR, 30105\u201330118."},{"key":"e_1_3_3_3_47_1","unstructured":"Axel Sauer Dominik Lorenz Andreas Blattmann and Robin Rombach. 2023b. Adversarial diffusion distillation. arXiv preprint arXiv:https:\/\/arXiv.org\/abs\/2311.17042 (2023)."},{"key":"e_1_3_3_3_48_1","doi-asserted-by":"publisher","DOI":"10.1145\/3528233.3530738"},{"key":"e_1_3_3_3_49_1","doi-asserted-by":"crossref","unstructured":"Juergen Schmidhuber. 2020. Generative Adversarial Networks are Special Cases of Artificial Curiosity (1990) and also Closely Related to Predictability Minimization (1991). arxiv:https:\/\/arXiv.org\/abs\/1906.04493\u00a0[cs.NE]","DOI":"10.1016\/j.neunet.2020.04.008"},{"key":"e_1_3_3_3_50_1","unstructured":"Neta Shaul Juan Perez Ricky T.\u00a0Q. Chen Ali Thabet Albert Pumarola and Yaron Lipman. 2023. Bespoke Solvers for Generative Flow Models. arxiv:https:\/\/arXiv.org\/abs\/2310.19075\u00a0[cs.LG]"},{"key":"e_1_3_3_3_51_1","doi-asserted-by":"crossref","unstructured":"Shelly Sheynin Adam Polyak Uriel Singer Yuval Kirstain Amit Zohar Oron Ashual Devi Parikh and Yaniv Taigman. 2023. Emu edit: Precise image editing via recognition and generation tasks. arXiv preprint arXiv:https:\/\/arXiv.org\/abs\/2311.10089 (2023).","DOI":"10.1109\/CVPR52733.2024.00847"},{"key":"e_1_3_3_3_52_1","unstructured":"Uriel Singer Adam Polyak Thomas Hayes Xi Yin Jie An Songyang Zhang Qiyuan Hu Harry Yang Oron Ashual Oran Gafni Devi Parikh Sonal Gupta and Yaniv Taigman. 2022. Make-A-Video: Text-to-Video Generation without Text-Video Data. arxiv:https:\/\/arXiv.org\/abs\/2209.14792\u00a0[cs.CV]"},{"key":"e_1_3_3_3_53_1","unstructured":"Jascha\u00a0Narain Sohl-Dickstein Eric\u00a0A. Weiss Niru Maheswaranathan and Surya Ganguli. 2015. Deep Unsupervised Learning using Nonequilibrium Thermodynamics. ArXiv abs\/1503.03585 (2015). https:\/\/api.semanticscholar.org\/CorpusID:14888175"},{"key":"e_1_3_3_3_54_1","unstructured":"Jiaming Song Chenlin Meng and Stefano Ermon. 2022. Denoising Diffusion Implicit Models. arxiv:https:\/\/arXiv.org\/abs\/2010.02502\u00a0[cs.LG]"},{"key":"e_1_3_3_3_55_1","unstructured":"Yang Song and Prafulla Dhariwal. 2023. Improved techniques for training consistency models. arXiv preprint arXiv:https:\/\/arXiv.org\/abs\/2310.14189 (2023)."},{"key":"e_1_3_3_3_56_1","volume-title":"International conference on machine learning","author":"Song Yang","year":"2023","unstructured":"Yang Song, Prafulla Dhariwal, Mark Chen, and Ilya Sutskever. 2023. Consistency models. In International conference on machine learning."},{"key":"e_1_3_3_3_57_1","unstructured":"Yang Song Jascha\u00a0Narain Sohl-Dickstein Diederik\u00a0P. Kingma Abhishek Kumar Stefano Ermon and Ben Poole. 2020. Score-Based Generative Modeling through Stochastic Differential Equations. ArXiv abs\/2011.13456 (2020). https:\/\/api.semanticscholar.org\/CorpusID:227209335"},{"key":"e_1_3_3_3_58_1","doi-asserted-by":"publisher","DOI":"10.1109\/WACV51458.2022.00323"},{"key":"e_1_3_3_3_59_1","doi-asserted-by":"crossref","unstructured":"Pascal Vincent. 2011. A connection between score matching and denoising autoencoders. 
Neural computation 23 7 (2011) 1661\u20131674.","DOI":"10.1162\/NECO_a_00142"},{"key":"e_1_3_3_3_60_1","unstructured":"Bram Wallace Meihua Dang Rafael Rafailov Linqi Zhou Aaron Lou Senthil Purushwalkam Stefano Ermon Caiming Xiong Shafiq Joty and Nikhil Naik. 2023. Diffusion Model Alignment Using Direct Preference Optimization. arXiv:https:\/\/arXiv.org\/abs\/2311.12908 (2023)."},{"key":"e_1_3_3_3_61_1","unstructured":"Sirui Xie Zhisheng Xiao Diederik\u00a0P Kingma Tingbo Hou Ying\u00a0Nian Wu Kevin\u00a0Patrick Murphy Tim Salimans Ben Poole and Ruiqi Gao. 2024. EM Distillation for One-step Diffusion Models. arXiv preprint arXiv:https:\/\/arXiv.org\/abs\/2405.16852 (2024)."},{"key":"e_1_3_3_3_62_1","unstructured":"Yanwu Xu Yang Zhao Zhisheng Xiao and Tingbo Hou. 2023. UFOGen: You Forward Once Large Scale Text-to-Image Generation via Diffusion GANs. arxiv:https:\/\/arXiv.org\/abs\/2311.09257\u00a0[cs.CV]"},{"key":"e_1_3_3_3_63_1","unstructured":"Tianwei Yin Micha\u00ebl Gharbi Richard Zhang Eli Shechtman Fredo Durand William\u00a0T. Freeman and Taesung Park. 2023. One-step Diffusion with Distribution Matching Distillation. arxiv:https:\/\/arXiv.org\/abs\/2311.18828\u00a0[cs.CV]"},{"key":"e_1_3_3_3_64_1","unstructured":"Jiahui Yu Yuanzhong Xu Jing\u00a0Yu Koh Thang Luong Gunjan Baid Zirui Wang Vijay Vasudevan Alexander Ku Yinfei Yang Burcu\u00a0Karagol Ayan et\u00a0al. 2022. Scaling autoregressive models for content-rich text-to-image generation. arXiv preprint arXiv:https:\/\/arXiv.org\/abs\/2206.10789 2 3 (2022) 5."},{"key":"e_1_3_3_3_65_1","unstructured":"Kai Zhang Lingbo Mo Wenhu Chen Huan Sun and Yu Su. 2024b. Magicbrush: A manually annotated dataset for instruction-guided image editing. Advances in Neural Information Processing Systems 36 (2024)."},{"key":"e_1_3_3_3_66_1","unstructured":"Qinsheng Zhang and Yongxin Chen. 2023. Fast Sampling of Diffusion Models with Exponential Integrator. arxiv:https:\/\/arXiv.org\/abs\/2204.13902\u00a0[cs.LG]"},{"key":"e_1_3_3_3_67_1","unstructured":"Shu Zhang Xinyi Yang Yihao Feng Can Qin Chia-Chih Chen Ning Yu Zeyuan Chen Huan Wang Silvio Savarese Stefano Ermon et\u00a0al. 2023. HIVE: Harnessing Human Feedback for Instructional Visual Editing. arXiv preprint arXiv:https:\/\/arXiv.org\/abs\/2303.09618 (2023)."},{"key":"e_1_3_3_3_68_1","unstructured":"Zhixing Zhang Yanyu Li Yushu Wu Yanwu Xu Anil Kag Ivan Skorokhodov Willi Menapace Aliaksandr Siarohin Junli Cao Dimitris Metaxas et\u00a0al. 2024a. SF-V: Single Forward Video Generation Model. arXiv preprint arXiv:https:\/\/arXiv.org\/abs\/2406.04324 (2024)."},{"key":"e_1_3_3_3_69_1","unstructured":"Jianbin Zheng Minghui Hu Zhongyi Fan Chaoyue Wang Changxing Ding Dacheng Tao and Tat-Jen Cham. 2024. Trajectory Consistency Distillation. 
arXiv preprint arXiv:https:\/\/arXiv.org\/abs\/2402.19159 (2024)."}],"event":{"name":"SA '24: SIGGRAPH Asia 2024 Conference Papers","sponsor":["SIGGRAPH ACM Special Interest Group on Computer Graphics and Interactive Techniques"],"location":"Tokyo Japan","acronym":"SA '24"},"container-title":["SIGGRAPH Asia 2024 Conference Papers"],"original-title":[],"link":[{"URL":"https:\/\/dl.acm.org\/doi\/pdf\/10.1145\/3680528.3687625","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,1,14]],"date-time":"2025-01-14T23:12:37Z","timestamp":1736896357000},"score":1,"resource":{"primary":{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3680528.3687625"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,12,3]]},"references-count":68,"alternative-id":["10.1145\/3680528.3687625","10.1145\/3680528"],"URL":"https:\/\/doi.org\/10.1145\/3680528.3687625","relation":{},"subject":[],"published":{"date-parts":[[2024,12,3]]},"assertion":[{"value":"2024-12-03","order":3,"name":"published","label":"Published","group":{"name":"publication_history","label":"Publication History"}}]}}
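The record above is the JSON envelope returned by the Crossref REST API for this DOI (status/message wrapper, work fields such as "title", "author", "published", "references-count", and the deposited reference list). As a minimal sketch of how such a record can be retrieved and summarized — assuming the public api.crossref.org/works endpoint and the third-party `requests` package, neither of which appears in the record itself — the same fields could be read back like this:

    # Minimal sketch: fetch a Crossref "work" record and print a few of the
    # fields visible in the JSON above. Endpoint and `requests` are assumptions;
    # the field names ("status", "message", "title", "author", "published",
    # "DOI", "references-count") mirror the record itself.
    import requests

    DOI = "10.1145/3680528.3687625"

    resp = requests.get(f"https://api.crossref.org/works/{DOI}", timeout=30)
    resp.raise_for_status()
    payload = resp.json()

    assert payload["status"] == "ok"   # same envelope as the record shown
    work = payload["message"]          # the "work" object

    title = work["title"][0]
    authors = ", ".join(f'{a["given"]} {a["family"]}' for a in work.get("author", []))
    published = "-".join(str(p) for p in work["published"]["date-parts"][0])

    print(title)
    print(authors)
    print(f"DOI: {work['DOI']}, published {published}, "
          f"{work['references-count']} references")

For the record shown, this would report the title "Fast High-Resolution Image Synthesis with Latent Adversarial Diffusion Distillation", the six Black Forest Labs authors, a publication date of 2024-12-3, and 68 references.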