{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,3,25]],"date-time":"2025-03-25T14:29:24Z","timestamp":1742912964590,"version":"3.40.3"},"publisher-location":"Cham","reference-count":54,"publisher":"Springer Nature Switzerland","isbn-type":[{"type":"print","value":"9783031728471"},{"type":"electronic","value":"9783031728488"}],"license":[{"start":{"date-parts":[[2024,11,29]],"date-time":"2024-11-29T00:00:00Z","timestamp":1732838400000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2024,11,29]],"date-time":"2024-11-29T00:00:00Z","timestamp":1732838400000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2025]]},"DOI":"10.1007\/978-3-031-72848-8_19","type":"book-chapter","created":{"date-parts":[[2024,11,28]],"date-time":"2024-11-28T13:36:06Z","timestamp":1732800966000},"page":"323-340","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":1,"title":["VQA-Diff: Exploiting VQA and\u00a0Diffusion for\u00a0Zero-Shot Image-to-3D Vehicle Asset Generation in\u00a0Autonomous Driving"],"prefix":"10.1007","author":[{"ORCID":"https:\/\/orcid.org\/0000-0003-1143-3242","authenticated-orcid":false,"given":"Yibo","family":"Liu","sequence":"first","affiliation":[]},{"given":"Zheyuan","family":"Yang","sequence":"additional","affiliation":[]},{"given":"Guile","family":"Wu","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-4901-3596","authenticated-orcid":false,"given":"Yuan","family":"Ren","sequence":"additional","affiliation":[]},{"given":"Kejian","family":"Lin","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-5272-3425","authenticated-orcid":false,"given":"Bingbing","family":"Liu","sequence":"additional","affiliation":[]},{"given":"Yang","family":"Liu","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-4911-6739","authenticated-orcid":false,"given":"Jinjun","family":"Shan","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2024,11,29]]},"reference":[{"key":"19_CR1","doi-asserted-by":"crossref","unstructured":"Bansal, A., Zollhoefer, M.: Neural pixel composition for 3D-4D view synthesis from multi-views. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 290\u2013299 (2023)","DOI":"10.1109\/CVPR52729.2023.00036"},{"key":"19_CR2","doi-asserted-by":"crossref","unstructured":"Barron, J.T., Mildenhall, B., Tancik, M., Hedman, P., Martin-Brualla, R., Srinivasan, P.P.: Mip-NeRF: a multiscale representation for anti-aliasing neural radiance fields. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 5855\u20135864 (2021)","DOI":"10.1109\/ICCV48922.2021.00580"},{"key":"19_CR3","doi-asserted-by":"crossref","unstructured":"Barron, J.T., Mildenhall, B., Verbin, D., Srinivasan, P.P., Hedman, P.: Mip-NeRF 360: unbounded anti-aliased neural radiance fields. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 
5470\u20135479 (2022)","DOI":"10.1109\/CVPR52688.2022.00539"},{"key":"19_CR4","doi-asserted-by":"crossref","unstructured":"Brooks, T., Holynski, A., Efros, A.A.: InstructPix2Pix: learning to follow image editing instructions. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 18392\u201318402 (2023)","DOI":"10.1109\/CVPR52729.2023.01764"},{"key":"19_CR5","doi-asserted-by":"crossref","unstructured":"Cao, A., Johnson, J.: HexPlane: a fast representation for dynamic scenes. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 130\u2013141 (2023)","DOI":"10.1109\/CVPR52729.2023.00021"},{"key":"19_CR6","unstructured":"Chang, A.X., et\u00a0al.: ShapeNet: an information-rich 3D model repository. arXiv preprint arXiv:1512.03012 (2015)"},{"key":"19_CR7","doi-asserted-by":"crossref","unstructured":"Deitke, M., et al.: Objaverse: a universe of annotated 3D objects. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 13142\u201313153 (2023)","DOI":"10.1109\/CVPR52729.2023.01263"},{"key":"19_CR8","doi-asserted-by":"crossref","unstructured":"Duggal, S., et al.: Mending neural implicit modeling for 3D vehicle reconstruction in the wild. In: Proceedings of IEEE\/CVF Winter Conference on Applications of Computer Vision, pp. 1900\u20131909 (2022)","DOI":"10.1109\/WACV51458.2022.00035"},{"key":"19_CR9","doi-asserted-by":"crossref","unstructured":"Fang, Y., et al.: EVA: exploring the limits of masked visual representation learning at scale. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 19358\u201319369 (2023)","DOI":"10.1109\/CVPR52729.2023.01855"},{"key":"19_CR10","unstructured":"Gao, J., et al.: GET3D: a generative model of high quality 3D textured shapes learned from images. In: Advances in Neural Information Processing Systems 35, pp. 31841\u201331854 (2022)"},{"key":"19_CR11","unstructured":"Hao, Y., Chi, Z., Dong, L., Wei, F.: Optimizing prompts for text-to-image generation. In: Advances in Neural Information Processing Systems 36 (2024)"},{"key":"19_CR12","unstructured":"Heusel, M., Ramsauer, H., Unterthiner, T., Nessler, B., Hochreiter, S.: GANs trained by a two time-scale update rule converge to a local Nash equilibrium. In: Advances in Neural Information Processing Systems 30 (2017)"},{"key":"19_CR13","doi-asserted-by":"crossref","unstructured":"Jang, W., Agapito, L.: CodeNeRF: disentangled neural radiance fields for object categories. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 12949\u201312958 (2021)","DOI":"10.1109\/ICCV48922.2021.01271"},{"key":"19_CR14","doi-asserted-by":"crossref","unstructured":"Kerbl, B., Kopanas, G., Leimk\u00fchler, T., Drettakis, G.: 3D Gaussian splatting for real-time radiance field rendering. ACM Trans. Graph. 42(4), 139-1 (2023)","DOI":"10.1145\/3592433"},{"key":"19_CR15","unstructured":"Kingma, D.P., Ba, J.: Adam: a method for stochastic optimization. In: Proceedings of the International Conference on Learning Representations (Poster) (2015)"},{"key":"19_CR16","unstructured":"Kirillov, A., et\u00a0al.: Segment anything. arXiv preprint arXiv:2304.02643 (2023)"},{"key":"19_CR17","doi-asserted-by":"crossref","unstructured":"Lee, B., Lee, H., Sun, X., Ali, U., Park, E.: Deblurring 3D Gaussian splatting. 
arXiv preprint arXiv:2401.00834 (2024)","DOI":"10.1007\/978-3-031-73636-0_8"},{"key":"19_CR18","unstructured":"Li, D., Li, J., Hoi, S.: BLIP-Diffusion: pre-trained subject representation for controllable text-to-image generation and editing. In: Advances in Neural Information Processing Systems 36 (2024)"},{"key":"19_CR19","unstructured":"Li, J., Li, D., Savarese, S., Hoi, S.: BLIP-2: bootstrapping language-image pre-training with frozen image encoders and large language models. In: ICML (2023)"},{"key":"19_CR20","unstructured":"Li, J., Li, D., Xiong, C., Hoi, S.: BLIP: bootstrapping language-image pre-training for unified vision-language understanding and generation. In: International Conference on Machine Learning, pp. 12888\u201312900. PMLR (2022)"},{"key":"19_CR21","doi-asserted-by":"crossref","unstructured":"Lin, Z., et al.: Evaluating text-to-visual generation with image-to-text generation. arXiv preprint arXiv:2404.01291 (2024)","DOI":"10.1007\/978-3-031-72673-6_20"},{"key":"19_CR22","unstructured":"Liu, H., Li, C., Wu, Q., Lee, Y.J.: Visual instruction tuning. In: Advances in Neural Information Processing Systems 36 (2024)"},{"key":"19_CR23","unstructured":"Liu, M., et\u00a0al.: One-2-3-45: any single image to 3D mesh in 45 seconds without per-shape optimization. arXiv preprint arXiv:2306.16928 (2023)"},{"key":"19_CR24","doi-asserted-by":"crossref","unstructured":"Liu, R., Wu, R., Van\u00a0Hoorick, B., Tokmakov, P., Zakharov, S., Vondrick, C.: Zero-1-to-3: zero-shot one image to 3D object. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 9298\u20139309 (2023)","DOI":"10.1109\/ICCV51070.2023.00853"},{"key":"19_CR25","doi-asserted-by":"crossref","unstructured":"Liu, Y., et al.: MV-DeepSDF: implicit modeling with multi-sweep point clouds for 3D vehicle reconstruction in autonomous driving. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 8306\u20138316 (2023)","DOI":"10.1109\/ICCV51070.2023.00763"},{"key":"19_CR26","doi-asserted-by":"crossref","unstructured":"Long, X., et\u00a0al.: Wonder3D: single image to 3D using cross-domain diffusion. arXiv preprint arXiv:2310.15008 (2023)","DOI":"10.1109\/CVPR52733.2024.00951"},{"key":"19_CR27","doi-asserted-by":"publisher","first-page":"210","DOI":"10.1007\/978-3-031-19824-3_13","volume-title":"European Conference on Computer Vision 2022","author":"X Long","year":"2022","unstructured":"Long, X., Lin, C., Wang, P., Komura, T., Wang, W.: SparseNeuS: fast generalizable neural surface reconstruction from sparse views. In: Avidan, S., Brostow, G., Ciss\u00e9, M., Farinella, G.M., Hassner, T. (eds.) ECCV 2022. LNCS, vol. 13692, pp. 210\u2013227. Springer, Cham (2022). https:\/\/doi.org\/10.1007\/978-3-031-19824-3_13"},{"issue":"1","key":"19_CR28","doi-asserted-by":"publisher","first-page":"99","DOI":"10.1145\/3503250","volume":"65","author":"B Mildenhall","year":"2021","unstructured":"Mildenhall, B., Srinivasan, P.P., Tancik, M., Barron, J.T., Ramamoorthi, R., Ng, R.: NeRF: representing scenes as neural radiance fields for view synthesis. Commun. ACM 65(1), 99\u2013106 (2021)","journal-title":"Commun. ACM"},{"issue":"4","key":"19_CR29","doi-asserted-by":"publisher","first-page":"1","DOI":"10.1145\/3528223.3530127","volume":"41","author":"T M\u00fcller","year":"2022","unstructured":"M\u00fcller, T., Evans, A., Schied, C., Keller, A.: Instant neural graphics primitives with a multiresolution hash encoding. ACM Trans. Graph. (TOG) 41(4), 1\u201315 (2022)","journal-title":"ACM Trans. 
Graph. (TOG)"},{"key":"19_CR30","doi-asserted-by":"crossref","unstructured":"Niedermayr, S., Stumpfegger, J., Westermann, R.: Compressed 3D Gaussian splatting for accelerated novel view synthesis. arXiv preprint arXiv:2401.02436 (2023)","DOI":"10.1109\/CVPR52733.2024.00985"},{"key":"19_CR31","doi-asserted-by":"crossref","unstructured":"Pavllo, D., Tan, D.J., Rakotosaona, M.J., Tombari, F.: Shape, pose, and appearance from a single image via bootstrapped radiance field inversion. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 4391\u20134401 (2023)","DOI":"10.1109\/CVPR52729.2023.00427"},{"key":"19_CR32","doi-asserted-by":"crossref","unstructured":"Pumarola, A., Corona, E., Pons-Moll, G., Moreno-Noguer, F.: D-NeRF: neural radiance fields for dynamic scenes. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 10318\u201310327 (2021)","DOI":"10.1109\/CVPR46437.2021.01018"},{"key":"19_CR33","unstructured":"Radford, A., et\u00a0al.: Learning transferable visual models from natural language supervision. In: International Conference on Machine Learning, pp. 8748\u20138763. PMLR (2021)"},{"key":"19_CR34","doi-asserted-by":"crossref","unstructured":"Rebain, D., Matthews, M., Yi, K.M., Lagun, D., Tagliasacchi, A.: LOLNeRF: learn from one look. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 1558\u20131567 (2022)","DOI":"10.1109\/CVPR52688.2022.00161"},{"key":"19_CR35","doi-asserted-by":"crossref","unstructured":"Rombach, R., Blattmann, A., Lorenz, D., Esser, P., Ommer, B.: High-resolution image synthesis with latent diffusion models. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 10684\u201310695 (2022)","DOI":"10.1109\/CVPR52688.2022.01042"},{"key":"19_CR36","doi-asserted-by":"crossref","unstructured":"Ruiz, N., Li, Y., Jampani, V., Pritch, Y., Rubinstein, M., Aberman, K.: DreamBooth: fine tuning text-to-image diffusion models for subject-driven generation. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 22500\u201322510 (2023)","DOI":"10.1109\/CVPR52729.2023.02155"},{"key":"19_CR37","doi-asserted-by":"crossref","unstructured":"Sun, P., et\u00a0al.: Scalability in perception for autonomous driving: waymo open dataset. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 2446\u20132454 (2020)","DOI":"10.1109\/CVPR42600.2020.00252"},{"key":"19_CR38","unstructured":"Tang, J., Ren, J., Zhou, H., Liu, Z., Zeng, G.: DreamGaussian: generative Gaussian splatting for efficient 3D content creation. arXiv preprint arXiv:2309.16653 (2023)"},{"key":"19_CR39","doi-asserted-by":"crossref","unstructured":"Tang, J., et al.: Make-it-3D: high-fidelity 3D creation from a single image with diffusion prior. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision (ICCV), pp. 22819\u201322829 (2023)","DOI":"10.1109\/ICCV51070.2023.02086"},{"key":"19_CR40","unstructured":"Wang, J., et al.: CADSim: robust and scalable in-the-wild 3D reconstruction for controllable sensor simulation. In: 6th Annual Conference on Robot Learning (2022). https:\/\/openreview.net\/forum?id=Mp3Y5jd7rnW"},{"key":"19_CR41","unstructured":"Wei, J., et al.: Finetuned language models are zero-shot learners. 
In: International Conference on Learning Representations (2021)"},{"key":"19_CR42","doi-asserted-by":"crossref","unstructured":"Wu, G., et al.: 4D Gaussian splatting for real-time dynamic scene rendering. arXiv preprint arXiv:2310.08528 (2023)","DOI":"10.1109\/CVPR52733.2024.01920"},{"key":"19_CR43","unstructured":"Wu, R., et\u00a0al.: ReconFusion: 3D reconstruction with diffusion priors. arXiv preprint arXiv:2312.02981 (2023)"},{"key":"19_CR44","doi-asserted-by":"crossref","unstructured":"Xiang, Y., Mottaghi, R., Savarese, S.: Beyond PASCAL: a benchmark for 3D object detection in the wild. In: IEEE Winter Conference on Applications of Computer Vision, pp. 75\u201382. IEEE (2014)","DOI":"10.1109\/WACV.2014.6836101"},{"key":"19_CR45","doi-asserted-by":"crossref","unstructured":"Xie, S., Zhang, Z., Lin, Z., Hinz, T., Zhang, K.: SmartBrush: text and shape guided object inpainting with diffusion model. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 22428\u201322437 (2023)","DOI":"10.1109\/CVPR52729.2023.02148"},{"key":"19_CR46","doi-asserted-by":"crossref","unstructured":"Yang, Z., Manivasagam, S., Chen, Y., Wang, J., Hu, R., Urtasun, R.: Reconstructing objects in-the-wild for realistic sensor simulation. In: ICRA (2023)","DOI":"10.1109\/ICRA48891.2023.10160535"},{"key":"19_CR47","unstructured":"Yang, Z., Yang, H., Pan, Z., Zhu, X., Zhang, L.: Real-time photorealistic dynamic scene representation and rendering with 4D Gaussian splatting. arXiv preprint arXiv:2310.10642 (2023)"},{"key":"19_CR48","unstructured":"Yang, Z., et al.: Learning effective NeRFs and SDFs representations with 3D generative adversarial networks for 3D object generation: technical report for ICCV 2023 OmniObject3D challenge. arXiv preprint arXiv:2309.16110 (2023)"},{"key":"19_CR49","doi-asserted-by":"crossref","unstructured":"Yu, A., Ye, V., Tancik, M., Kanazawa, A.: pixelNeRF: neural radiance fields from one or few images. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 4578\u20134587 (2021)","DOI":"10.1109\/CVPR46437.2021.00455"},{"key":"19_CR50","doi-asserted-by":"crossref","unstructured":"Yu, X., Rao, Y., Wang, Z., Liu, Z., Lu, J., Zhou, J.: PoinTr: diverse point cloud completion with geometry-aware transformers. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 12498\u201312507 (2021)","DOI":"10.1109\/ICCV48922.2021.01227"},{"key":"19_CR51","doi-asserted-by":"crossref","unstructured":"Zhang, L., Rao, A., Agrawala, M.: Adding conditional control to text-to-image diffusion models. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 3836\u20133847 (2023)","DOI":"10.1109\/ICCV51070.2023.00355"},{"key":"19_CR52","unstructured":"Zhang, S., et\u00a0al.: OPT: open pre-trained transformer language models. arXiv preprint arXiv:2205.01068 (2022)"},{"key":"19_CR53","doi-asserted-by":"crossref","unstructured":"Zhou, Z., Tulsiani, S.: SparseFusion: distilling view-conditioned diffusion for 3D reconstruction. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 12588\u201312597 (2023)","DOI":"10.1109\/CVPR52729.2023.01211"},{"key":"19_CR54","doi-asserted-by":"crossref","unstructured":"Zou, Z.X., et al.: Triplane meets Gaussian splatting: fast and generalizable single-view 3D reconstruction with transformers. 
arXiv preprint arXiv:2312.09147 (2023)","DOI":"10.1109\/CVPR52733.2024.00983"}],"container-title":["Lecture Notes in Computer Science","Computer Vision \u2013 ECCV 2024"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-031-72848-8_19","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,11,28]],"date-time":"2024-11-28T14:10:22Z","timestamp":1732803022000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-3-031-72848-8_19"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,11,29]]},"ISBN":["9783031728471","9783031728488"],"references-count":54,"URL":"https:\/\/doi.org\/10.1007\/978-3-031-72848-8_19","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"type":"print","value":"0302-9743"},{"type":"electronic","value":"1611-3349"}],"subject":[],"published":{"date-parts":[[2024,11,29]]},"assertion":[{"value":"29 November 2024","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"ECCV","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"European Conference on Computer Vision","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Milan","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Italy","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2024","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"29 September 2024","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"4 October 2024","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"18","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"eccv2024","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"https:\/\/eccv2024.ecva.net\/","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}}]}}